Commit fd6045ba authored Jan 18, 2009 by Måns Rullgård
Alpha: fix pix_abs16
Originally committed as revision 16675 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 27a9466b
Showing 2 changed files with 55 additions and 62 deletions:
  libavcodec/alpha/dsputil_alpha.c       +3  -9
  libavcodec/alpha/motion_est_mvi_asm.S  +52 -53
libavcodec/alpha/dsputil_alpha.c
@@ -42,7 +42,7 @@ void get_pixels_mvi(DCTELEM *restrict block,
 void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2,
                      int stride);
 int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
-int pix_abs16x16_mvi_asm(uint8_t *pix1, uint8_t *pix2, int line_size);
+int pix_abs16x16_mvi_asm(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
 int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
 int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
 int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
@@ -287,11 +287,6 @@ void put_pixels16_axp_asm(uint8_t *block, const uint8_t *pixels,
     put_pixels_axp_asm(block + 8, pixels + 8, line_size, h);
 }
 
-static int sad16x16_mvi(void *s, uint8_t *a, uint8_t *b, int stride)
-{
-    return pix_abs16x16_mvi_asm(a, b, stride);
-}
-
 void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
 {
     c->put_pixels_tab[0][0] = put_pixels16_axp_asm;
@@ -343,10 +338,9 @@ void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
         c->get_pixels      = get_pixels_mvi;
         c->diff_pixels     = diff_pixels_mvi;
-        c->sad[0]          = sad16x16_mvi;
+        c->sad[0]          = pix_abs16x16_mvi_asm;
         c->sad[1]          = pix_abs8x8_mvi;
-//      c->pix_abs[0][0]   = pix_abs16x16_mvi_asm; //FIXME function arguments for the asm must be fixed
-        c->pix_abs[0][0]   = sad16x16_mvi;
+        c->pix_abs[0][0]   = pix_abs16x16_mvi_asm;
         c->pix_abs[1][0]   = pix_abs8x8_mvi;
         c->pix_abs[0][1]   = pix_abs16x16_x2_mvi;
         c->pix_abs[0][2]   = pix_abs16x16_y2_mvi;
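The C-side change gives pix_abs16x16_mvi_asm the same prototype as the other comparison functions (an opaque context pointer plus an explicit height), so it can be installed directly into the sad[] and pix_abs[][] tables and the sad16x16_mvi wrapper is no longer needed. Below is a minimal sketch of why the prototypes have to match before the assignment is legal; cmp_fn, cmp_table and pix_abs16x16_stub are illustrative stand-ins, not FFmpeg's dsputil types (the real table lives in DSPContext and its callback type is roughly me_cmp_func).

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the comparison-callback shape: every entry in
 * the sad[]/pix_abs[][] tables shares one signature, so a routine stored
 * there directly must use it too. */
typedef int (*cmp_fn)(void *ctx, uint8_t *pix1, uint8_t *pix2,
                      int line_size, int h);

struct cmp_table {              /* hypothetical, stands in for DSPContext */
    cmp_fn sad[2];
    cmp_fn pix_abs[2][4];
};

/* Placeholder with the *new* prototype of pix_abs16x16_mvi_asm. */
static int pix_abs16x16_stub(void *ctx, uint8_t *pix1, uint8_t *pix2,
                             int line_size, int h)
{
    (void)ctx; (void)pix1; (void)pix2; (void)line_size;
    return h;                   /* dummy result; the real code returns the SAD */
}

int main(void)
{
    struct cmp_table c = {0};
    /* With matching prototypes no wrapper such as sad16x16_mvi is needed: */
    c.sad[0]        = pix_abs16x16_stub;
    c.pix_abs[0][0] = pix_abs16x16_stub;

    uint8_t a[16 * 16] = {0}, b[16 * 16] = {0};
    printf("%d\n", c.sad[0](NULL, a, b, 16, 16));
    return 0;
}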
libavcodec/alpha/motion_est_mvi_asm.S
@@ -60,9 +60,8 @@ pix_abs16x16_mvi_asm:
         jsr     AT, (AT), _mcount
 #endif
 
-        and     a1, 7, t0
+        and     a2, 7, t0
         clr     v0
-        lda     a3, 16
         beq     t0, $aligned
         .align 4
 $unaligned:
@@ -86,80 +85,80 @@ $unaligned:
            td: error right */
         /* load line 0 */
-        ldq_u   t0, 0(a1)       # left_u
-        ldq_u   t1, 8(a1)       # mid
-        ldq_u   t2, 16(a1)      # right_u
-        ldq     t3, 0(a0)       # ref left
-        ldq     t4, 8(a0)       # ref right
-        addq    a0, a2, a0      # pix1
-        addq    a1, a2, a1      # pix2
+        ldq_u   t0, 0(a2)       # left_u
+        ldq_u   t1, 8(a2)       # mid
+        ldq_u   t2, 16(a2)      # right_u
+        ldq     t3, 0(a1)       # ref left
+        ldq     t4, 8(a1)       # ref right
+        addq    a1, a3, a1      # pix1
+        addq    a2, a3, a2      # pix2
         /* load line 1 */
-        ldq_u   t5, 0(a1)       # left_u
-        ldq_u   t6, 8(a1)       # mid
-        ldq_u   t7, 16(a1)      # right_u
-        ldq     t8, 0(a0)       # ref left
-        ldq     t9, 8(a0)       # ref right
-        addq    a0, a2, a0      # pix1
-        addq    a1, a2, a1      # pix2
+        ldq_u   t5, 0(a2)       # left_u
+        ldq_u   t6, 8(a2)       # mid
+        ldq_u   t7, 16(a2)      # right_u
+        ldq     t8, 0(a1)       # ref left
+        ldq     t9, 8(a1)       # ref right
+        addq    a1, a3, a1      # pix1
+        addq    a2, a3, a2      # pix2
         /* calc line 0 */
-        extql   t0, a1, t0      # left lo
-        extqh   t1, a1, ta      # left hi
-        extql   t1, a1, tb      # right lo
+        extql   t0, a2, t0      # left lo
+        extqh   t1, a2, ta      # left hi
+        extql   t1, a2, tb      # right lo
         or      t0, ta, t0      # left
-        extqh   t2, a1, t2      # right hi
+        extqh   t2, a2, t2      # right hi
         perr    t3, t0, tc      # error left
         or      t2, tb, t2      # right
         perr    t4, t2, td      # error right
         addq    v0, tc, v0      # add error left
         addq    v0, td, v0      # add error left
         /* calc line 1 */
-        extql   t5, a1, t5      # left lo
-        extqh   t6, a1, ta      # left hi
-        extql   t6, a1, tb      # right lo
+        extql   t5, a2, t5      # left lo
+        extqh   t6, a2, ta      # left hi
+        extql   t6, a2, tb      # right lo
         or      t5, ta, t5      # left
-        extqh   t7, a1, t7      # right hi
+        extqh   t7, a2, t7      # right hi
         perr    t8, t5, tc      # error left
         or      t7, tb, t7      # right
         perr    t9, t7, td      # error right
         addq    v0, tc, v0      # add error left
         addq    v0, td, v0      # add error left
         /* loop */
-        subq    a3, 2, a3       # h -= 2
-        bne     a3, $unaligned
+        subq    a4, 2, a4       # h -= 2
+        bne     a4, $unaligned
         ret
         .align 4
 $aligned:
         /* load line 0 */
-        ldq     t0, 0(a1)       # left
-        ldq     t1, 8(a1)       # right
-        addq    a1, a2, a1      # pix2
-        ldq     t2, 0(a0)       # ref left
-        ldq     t3, 8(a0)       # ref right
-        addq    a0, a2, a0      # pix1
+        ldq     t0, 0(a2)       # left
+        ldq     t1, 8(a2)       # right
+        addq    a2, a3, a2      # pix2
+        ldq     t2, 0(a1)       # ref left
+        ldq     t3, 8(a1)       # ref right
+        addq    a1, a3, a1      # pix1
         /* load line 1 */
-        ldq     t4, 0(a1)       # left
-        ldq     t5, 8(a1)       # right
-        addq    a1, a2, a1      # pix2
-        ldq     t6, 0(a0)       # ref left
-        ldq     t7, 8(a0)       # ref right
-        addq    a0, a2, a0      # pix1
+        ldq     t4, 0(a2)       # left
+        ldq     t5, 8(a2)       # right
+        addq    a2, a3, a2      # pix2
+        ldq     t6, 0(a1)       # ref left
+        ldq     t7, 8(a1)       # ref right
+        addq    a1, a3, a1      # pix1
         /* load line 2 */
-        ldq     t8, 0(a1)       # left
-        ldq     t9, 8(a1)       # right
-        addq    a1, a2, a1      # pix2
-        ldq     ta, 0(a0)       # ref left
-        ldq     tb, 8(a0)       # ref right
-        addq    a0, a2, a0      # pix1
+        ldq     t8, 0(a2)       # left
+        ldq     t9, 8(a2)       # right
+        addq    a2, a3, a2      # pix2
+        ldq     ta, 0(a1)       # ref left
+        ldq     tb, 8(a1)       # ref right
+        addq    a1, a3, a1      # pix1
         /* load line 3 */
-        ldq     tc, 0(a1)       # left
-        ldq     td, 8(a1)       # right
-        addq    a1, a2, a1      # pix2
-        ldq     te, 0(a0)       # ref left
-        ldq     tf, 8(a0)       # ref right
+        ldq     tc, 0(a2)       # left
+        ldq     td, 8(a2)       # right
+        addq    a2, a3, a2      # pix2
+        ldq     te, 0(a1)       # ref left
+        ldq     a0, 8(a1)       # ref right
         /* calc line 0 */
         perr    t0, t2, t0      # error left
-        addq    a0, a2, a0      # pix1
+        addq    a1, a3, a1      # pix1
         perr    t1, t3, t1      # error right
         addq    v0, t0, v0      # add error left
         /* calc line 1 */
@@ -175,11 +174,11 @@ $aligned:
         /* calc line 3 */
         perr    tc, te, t0      # error left
         addq    v0, t1, v0      # add error right
-        perr    td, tf, t1      # error right
+        perr    td, a0, t1      # error right
         addq    v0, t0, v0      # add error left
         addq    v0, t1, v0      # add error right
         /* loop */
-        subq    a3, 4, a3       # h -= 4
-        bne     a3, $aligned
+        subq    a4, 4, a4       # h -= 4
+        bne     a4, $aligned
         ret
         .end pix_abs16x16_mvi_asm
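The assembly change follows directly from the new prototype: with the void * context now occupying a0 (Alpha passes the first integer arguments in a0-a5), pix1, pix2, line_size and h arrive in a1, a2, a3 and a4 instead of a0, a1, a2 and a3; the hard-coded "lda a3, 16" height load disappears because h is now an argument, and the otherwise-unused a0 replaces tf as scratch for the line-3 "ref right" load. As a rough pure-C model of what the routine computes under its new signature (a sketch for illustration only, not the patch's code; pix_abs16x16_c_ref is a hypothetical name):

#include <stdint.h>
#include <stdlib.h>

/* Pure-C reference with the same signature as the fixed asm routine:
 * the sum of absolute differences over h rows of 16 pixels each. */
int pix_abs16x16_c_ref(void *ctx, uint8_t *pix1, uint8_t *pix2,
                       int line_size, int h)
{
    int sum = 0;
    (void)ctx;                     /* unused, kept only for the common prototype */
    for (; h > 0; h--) {
        for (int i = 0; i < 16; i++)
            sum += abs(pix1[i] - pix2[i]);
        pix1 += line_size;         /* advance both pointers by the stride, */
        pix2 += line_size;         /* as the asm does with addq aN, a3, aN  */
    }
    return sum;
}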