ffmpeg.wasm-core / Commits / a1f51648

Commit a1f51648, authored Jan 11, 2014 by Luca Barbato; committed by Diego Biurrun, Jan 12, 2014
vc1dsp: K&R formatting cosmetics
Signed-off-by: Diego Biurrun <diego@biurrun.de>

parent 547f8345
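The commit is purely mechanical reformatting toward K&R style. A minimal sketch of the kind of change applied throughout (hypothetical fragment, not a line from this diff):

    /* before */
    static void example_c(uint8_t* src, int stride){
        int a = src[-2*stride];
    }

    /* after: space after keywords and around binary operators, the pointer
     * star bound to the identifier, the function brace on its own line */
    static void example_c(uint8_t *src, int stride)
    {
        int a = src[-2 * stride];
    }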
Showing 1 changed file with 403 additions and 321 deletions

libavcodec/vc1dsp.c (+403, -321) @ a1f51648
@@ -20,7 +20,7 @@
  */

 /**
  * @file
  * VC-1 and WMV3 decoder
  *
  */
@@ -29,24 +29,22 @@
 #include "h264chroma.h"
 #include "vc1dsp.h"

 /* Apply overlap transform to horizontal edge */
 static void vc1_v_overlap_c(uint8_t *src, int stride)
 {
     int i;
     int a, b, c, d;
     int d1, d2;
     int rnd = 1;
     for (i = 0; i < 8; i++) {
         a  = src[-2 * stride];
         b  = src[-stride];
         c  = src[0];
         d  = src[stride];
         d1 = (a - d + 3 + rnd) >> 3;
         d2 = (a - d + b - c + 4 - rnd) >> 3;

         src[-2 * stride] = a - d1;
         src[-stride]     = av_clip_uint8(b - d2);
         src[0]           = av_clip_uint8(c + d2);
         src[stride]      = d + d1;
@@ -55,15 +53,14 @@ static void vc1_v_overlap_c(uint8_t* src, int stride)
     }
 }

 /* Apply overlap transform to vertical edge */
 static void vc1_h_overlap_c(uint8_t *src, int stride)
 {
     int i;
     int a, b, c, d;
     int d1, d2;
     int rnd = 1;
     for (i = 0; i < 8; i++) {
         a = src[-2];
         b = src[-1];
         c = src[0];
@@ -86,7 +83,7 @@ static void vc1_v_s_overlap_c(int16_t *top, int16_t *bottom)
     int a, b, c, d;
     int d1, d2;
     int rnd1 = 4, rnd2 = 3;
     for (i = 0; i < 8; i++) {
         a = top[48];
         b = top[56];
         c = bottom[0];
@@ -112,7 +109,7 @@ static void vc1_h_s_overlap_c(int16_t *left, int16_t *right)
     int a, b, c, d;
     int d1, d2;
     int rnd1 = 4, rnd2 = 3;
     for (i = 0; i < 8; i++) {
         a = left[6];
         b = left[7];
         c = right[0];
@@ -140,31 +137,38 @@ static void vc1_h_s_overlap_c(int16_t *left, int16_t *right)
  * @return whether other 3 pairs should be filtered or not
  * @see 8.6
  */
 static av_always_inline int vc1_filter_line(uint8_t *src, int stride, int pq)
 {
     int a0 = (2 * (src[-2 * stride] - src[1 * stride]) -
               5 * (src[-1 * stride] - src[0 * stride]) + 4) >> 3;
     int a0_sign = a0 >> 31;        /* Store sign */

     a0 = (a0 ^ a0_sign) - a0_sign; /* a0 = FFABS(a0); */
     if (a0 < pq) {
         int a1 = FFABS((2 * (src[-4 * stride] - src[-1 * stride]) -
                         5 * (src[-3 * stride] - src[-2 * stride]) + 4) >> 3);
         int a2 = FFABS((2 * (src[ 0 * stride] - src[ 3 * stride]) -
                         5 * (src[ 1 * stride] - src[ 2 * stride]) + 4) >> 3);
         if (a1 < a0 || a2 < a0) {
             int clip      = src[-1 * stride] - src[0 * stride];
             int clip_sign = clip >> 31;

             clip = ((clip ^ clip_sign) - clip_sign) >> 1;
             if (clip) {
                 int a3     = FFMIN(a1, a2);
                 int d      = 5 * (a3 - a0);
                 int d_sign = (d >> 31);

                 d       = ((d ^ d_sign) - d_sign) >> 3;
                 d_sign ^= a0_sign;

                 if (d_sign ^ clip_sign)
                     d = 0;
                 else {
                     d = FFMIN(d, clip);
                     d = (d ^ d_sign) - d_sign; /* Restore sign */

                     src[-1 * stride] = av_clip_uint8(src[-1 * stride] - d);
                     src[ 0 * stride] = av_clip_uint8(src[ 0 * stride] + d);
                 }
                 return 1;
             }
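vc1_filter_line leans on a branchless sign idiom: for a 32-bit int a, a >> 31 is 0 when a is non-negative and -1 (all bits set) when it is negative, so (a ^ sign) - sign yields |a|, and the same xor/subtract re-applies a sign later. A standalone sketch of the idiom (illustrative, assumes the arithmetic right shift the decoder also relies on):

    #include <assert.h>

    /* Branchless |a|: for negative a, a ^ -1 is ~a and ~a - (-1) = ~a + 1,
     * i.e. two's-complement negation; for non-negative a it is a no-op. */
    static int branchless_abs(int a)
    {
        int sign = a >> 31; /* 0 or -1 */
        return (a ^ sign) - sign;
    }

    int main(void)
    {
        assert(branchless_abs(-7) == 7);
        assert(branchless_abs(7)  == 7);
        return 0;
    }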
@@ -182,17 +186,18 @@ static av_always_inline int vc1_filter_line(uint8_t* src, int stride, int pq){
  * @param pq block quantizer
  * @see 8.6
  */
 static inline void vc1_loop_filter(uint8_t *src, int step, int stride,
                                    int len, int pq)
 {
     int i;
     int filt3;

     for (i = 0; i < len; i += 4) {
         filt3 = vc1_filter_line(src + 2 * step, stride, pq);
         if (filt3) {
             vc1_filter_line(src + 0 * step, stride, pq);
             vc1_filter_line(src + 1 * step, stride, pq);
             vc1_filter_line(src + 3 * step, stride, pq);
         }
         src += step * 4;
     }
@@ -228,15 +233,16 @@ static void vc1_h_loop_filter16_c(uint8_t *src, int stride, int pq)
     vc1_loop_filter(src, stride, 1, 16, pq);
 }

 /* Do inverse transform on 8x8 block */
 static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
 {
     int i;
     int dc = block[0];

     dc = (3 * dc +  1) >> 1;
     dc = (3 * dc + 16) >> 5;

     for (i = 0; i < 8; i++) {
         dest[0] = av_clip_uint8(dest[0] + dc);
         dest[1] = av_clip_uint8(dest[1] + dc);
         dest[2] = av_clip_uint8(dest[2] + dc);
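The two shifts in the dc path fold both passes of the 8x8 transform into scalar arithmetic on the single DC coefficient: (3 * dc + 1) >> 1 matches the row pass gain of 12/8 and (3 * dc + 16) >> 5 the column pass gain of 12/128. A worked check (illustrative; the full transform's 3- and 7-bit normalization shifts are not visible in this excerpt and are assumed):

    #include <assert.h>

    int main(void)
    {
        int dc   = 100;                                 /* hypothetical DC value  */
        int fast = (3 * ((3 * dc + 1) >> 1) + 16) >> 5; /* dc-only shortcut       */
        int row  = (12 * dc + 4) >> 3;                  /* full row pass, DC only */
        int full = (12 * row + 64) >> 7;                /* full column pass       */
        assert(fast == 14 && full == 14);
        return 0;
    }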
@@ -252,12 +258,12 @@ static void vc1_inv_trans_8x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
 static void vc1_inv_trans_8x8_c(int16_t block[64])
 {
     int i;
     register int t1, t2, t3, t4, t5, t6, t7, t8;
     int16_t *src, *dst, temp[64];

     src = block;
     dst = temp;
     for (i = 0; i < 8; i++) {
         t1 = 12 * (src[ 0] + src[32]) + 4;
         t2 = 12 * (src[ 0] - src[32]) + 4;
         t3 = 16 * src[16] +  6 * src[48];
@@ -288,7 +294,7 @@ static void vc1_inv_trans_8x8_c(int16_t block[64])
     src = temp;
     dst = block;
     for (i = 0; i < 8; i++) {
         t1 = 12 * (src[ 0] + src[32]) + 64;
         t2 = 12 * (src[ 0] - src[32]) + 64;
         t3 = 16 * src[16] +  6 * src[48];
@@ -318,15 +324,16 @@ static void vc1_inv_trans_8x8_c(int16_t block[64])
     }
 }

 /* Do inverse transform on 8x4 part of block */
 static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
 {
     int i;
     int dc = block[0];

     dc = ( 3 * dc +  1) >> 1;
     dc = (17 * dc + 64) >> 7;

     for (i = 0; i < 4; i++) {
         dest[0] = av_clip_uint8(dest[0] + dc);
         dest[1] = av_clip_uint8(dest[1] + dc);
         dest[2] = av_clip_uint8(dest[2] + dc);
@@ -342,12 +349,13 @@ static void vc1_inv_trans_8x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
 static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, int16_t *block)
 {
     int i;
     register int t1, t2, t3, t4, t5, t6, t7, t8;
     int16_t *src, *dst;

     src = block;
     dst = block;
     for (i = 0; i < 4; i++) {
         t1 = 12 * (src[0] + src[4]) + 4;
         t2 = 12 * (src[0] - src[4]) + 4;
         t3 = 16 * src[2] + 6 * src[6];
@@ -377,31 +385,32 @@ static void vc1_inv_trans_8x4_c(uint8_t *dest, int linesize, int16_t *block)
     }

     src = block;
     for (i = 0; i < 8; i++) {
         t1 = 17 * (src[ 0] + src[16]) + 64;
         t2 = 17 * (src[ 0] - src[16]) + 64;
         t3 = 22 * src[ 8] + 10 * src[24];
         t4 = 22 * src[24] - 10 * src[ 8];

         dest[0 * linesize] = av_clip_uint8(dest[0 * linesize] + ((t1 + t3) >> 7));
         dest[1 * linesize] = av_clip_uint8(dest[1 * linesize] + ((t2 - t4) >> 7));
         dest[2 * linesize] = av_clip_uint8(dest[2 * linesize] + ((t2 + t4) >> 7));
         dest[3 * linesize] = av_clip_uint8(dest[3 * linesize] + ((t1 - t3) >> 7));

         src++;
         dest++;
     }
 }

 /* Do inverse transform on 4x8 parts of block */
 static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
 {
     int i;
     int dc = block[0];

     dc = (17 * dc +  4) >> 3;
     dc = (12 * dc + 64) >> 7;

     for (i = 0; i < 8; i++) {
         dest[0] = av_clip_uint8(dest[0] + dc);
         dest[1] = av_clip_uint8(dest[1] + dc);
         dest[2] = av_clip_uint8(dest[2] + dc);
@@ -413,12 +422,13 @@ static void vc1_inv_trans_4x8_dc_c(uint8_t *dest, int linesize, int16_t *block)
 static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, int16_t *block)
 {
     int i;
     register int t1, t2, t3, t4, t5, t6, t7, t8;
     int16_t *src, *dst;

     src = block;
     dst = block;
     for (i = 0; i < 8; i++) {
         t1 = 17 * (src[0] + src[2]) + 4;
         t2 = 17 * (src[0] - src[2]) + 4;
         t3 = 22 * src[1] + 10 * src[3];
@@ -434,7 +444,7 @@ static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, int16_t *block)
     }

     src = block;
     for (i = 0; i < 4; i++) {
         t1 = 12 * (src[ 0] + src[32]) + 64;
         t2 = 12 * (src[ 0] - src[32]) + 64;
         t3 = 16 * src[16] +  6 * src[48];
@@ -450,29 +460,30 @@ static void vc1_inv_trans_4x8_c(uint8_t *dest, int linesize, int16_t *block)
         t3 = 9 * src[ 8] - 16 * src[24] +  4 * src[40] + 15 * src[56];
         t4 = 4 * src[ 8] -  9 * src[24] + 15 * src[40] - 16 * src[56];

         dest[0 * linesize] = av_clip_uint8(dest[0 * linesize] + ((t5 + t1) >> 7));
         dest[1 * linesize] = av_clip_uint8(dest[1 * linesize] + ((t6 + t2) >> 7));
         dest[2 * linesize] = av_clip_uint8(dest[2 * linesize] + ((t7 + t3) >> 7));
         dest[3 * linesize] = av_clip_uint8(dest[3 * linesize] + ((t8 + t4) >> 7));
         dest[4 * linesize] = av_clip_uint8(dest[4 * linesize] + ((t8 - t4 + 1) >> 7));
         dest[5 * linesize] = av_clip_uint8(dest[5 * linesize] + ((t7 - t3 + 1) >> 7));
         dest[6 * linesize] = av_clip_uint8(dest[6 * linesize] + ((t6 - t2 + 1) >> 7));
         dest[7 * linesize] = av_clip_uint8(dest[7 * linesize] + ((t5 - t1 + 1) >> 7));

         src++;
         dest++;
     }
 }

 /* Do inverse transform on 4x4 part of block */
 static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
 {
     int i;
     int dc = block[0];

     dc = (17 * dc +  4) >> 3;
     dc = (17 * dc + 64) >> 7;

     for (i = 0; i < 4; i++) {
         dest[0] = av_clip_uint8(dest[0] + dc);
         dest[1] = av_clip_uint8(dest[1] + dc);
         dest[2] = av_clip_uint8(dest[2] + dc);
@@ -484,12 +495,12 @@ static void vc1_inv_trans_4x4_dc_c(uint8_t *dest, int linesize, int16_t *block)
 static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, int16_t *block)
 {
     int i;
     register int t1, t2, t3, t4;
     int16_t *src, *dst;

     src = block;
     dst = block;
     for (i = 0; i < 4; i++) {
         t1 = 17 * (src[0] + src[2]) + 4;
         t2 = 17 * (src[0] - src[2]) + 4;
         t3 = 22 * src[1] + 10 * src[3];
@@ -505,36 +516,42 @@ static void vc1_inv_trans_4x4_c(uint8_t *dest, int linesize, int16_t *block)
     }

     src = block;
     for (i = 0; i < 4; i++) {
         t1 = 17 * (src[ 0] + src[16]) + 64;
         t2 = 17 * (src[ 0] - src[16]) + 64;
         t3 = 22 * src[ 8] + 10 * src[24];
         t4 = 22 * src[24] - 10 * src[ 8];

         dest[0 * linesize] = av_clip_uint8(dest[0 * linesize] + ((t1 + t3) >> 7));
         dest[1 * linesize] = av_clip_uint8(dest[1 * linesize] + ((t2 - t4) >> 7));
         dest[2 * linesize] = av_clip_uint8(dest[2 * linesize] + ((t2 + t4) >> 7));
         dest[3 * linesize] = av_clip_uint8(dest[3 * linesize] + ((t1 - t3) >> 7));

         src++;
         dest++;
     }
 }

 /* motion compensation functions */

 /* Filter in case of 2 filters */
 #define VC1_MSPEL_FILTER_16B(DIR, TYPE)                                 \
 static av_always_inline int vc1_mspel_ ## DIR ## _filter_16bits(const TYPE *src, \
                                                                 int stride, \
                                                                 int mode) \
 {                                                                       \
     switch (mode) {                                                     \
     case 0: /* no shift - should not occur */                           \
         return 0;                                                       \
     case 1: /* 1/4 shift */                                             \
         return -4 * src[-stride] + 53 * src[0] +                        \
                18 * src[stride] - 3 * src[stride * 2];                  \
     case 2: /* 1/2 shift */                                             \
         return -1 * src[-stride] + 9 * src[0] +                         \
                 9 * src[stride] - 1 * src[stride * 2];                  \
     case 3: /* 3/4 shift */                                             \
         return -3 * src[-stride] + 18 * src[0] +                        \
                53 * src[stride] - 4 * src[stride * 2];                  \
     }                                                                   \
     return 0; /* should not occur */                                    \
 }
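The macro stamps out one filter per direction through token pasting; the two instantiations at the top of the next hunk supply DIR and TYPE. Roughly, VC1_MSPEL_FILTER_16B(ver, uint8_t) expands to (sketch of the preprocessor output, body elided):

    static av_always_inline int vc1_mspel_ver_filter_16bits(const uint8_t *src,
                                                            int stride, int mode)
    { /* switch (mode) body as in the macro above */ }

and VC1_MSPEL_FILTER_16B(hor, int16_t) to the int16_t variant vc1_mspel_hor_filter_16bits, which operates on the 16-bit intermediates produced by the vertical pass.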
@@ -542,80 +559,85 @@ static av_always_inline int vc1_mspel_ ## DIR ## _filter_16bits(const TYPE *src,
 VC1_MSPEL_FILTER_16B(ver, uint8_t)
 VC1_MSPEL_FILTER_16B(hor, int16_t)

 /* Filter used to interpolate fractional pel values */
 static av_always_inline int vc1_mspel_filter(const uint8_t *src, int stride,
                                              int mode, int r)
 {
     switch (mode) {
     case 0: // no shift
         return src[0];
     case 1: // 1/4 shift
         return (-4 * src[-stride] + 53 * src[0] +
                 18 * src[stride] - 3 * src[stride * 2] + 32 - r) >> 6;
     case 2: // 1/2 shift
         return (-1 * src[-stride] + 9 * src[0] +
                  9 * src[stride] - 1 * src[stride * 2] + 8 - r) >> 4;
     case 3: // 3/4 shift
         return (-3 * src[-stride] + 18 * src[0] +
                 53 * src[stride] - 4 * src[stride * 2] + 32 - r) >> 6;
     }
     return 0; // should not occur
 }

 /* Function used to do motion compensation with bicubic interpolation */
 #define VC1_MSPEL_MC(OP, OPNAME)                                        \
 static av_always_inline void OPNAME ## vc1_mspel_mc(uint8_t *dst,       \
                                                     const uint8_t *src, \
                                                     int stride,         \
                                                     int hmode,          \
                                                     int vmode,          \
                                                     int rnd)            \
 {                                                                       \
     int i, j;                                                           \
                                                                         \
     if (vmode) { /* Horizontal filter to apply */                       \
         int r;                                                          \
                                                                         \
         if (hmode) { /* Vertical filter to apply, output to tmp */      \
             static const int shift_value[] = { 0, 5, 1, 5 };            \
             int shift = (shift_value[hmode] + shift_value[vmode]) >> 1; \
             int16_t tmp[11 * 8], *tptr = tmp;                           \
                                                                         \
             r = (1 << (shift - 1)) + rnd - 1;                           \
                                                                         \
             src -= 1;                                                   \
             for (j = 0; j < 8; j++) {                                   \
                 for (i = 0; i < 11; i++)                                \
                     tptr[i] = (vc1_mspel_ver_filter_16bits(src + i, stride, vmode) + r) >> shift; \
                 src  += stride;                                         \
                 tptr += 11;                                             \
             }                                                           \
                                                                         \
             r    = 64 - rnd;                                            \
             tptr = tmp + 1;                                             \
             for (j = 0; j < 8; j++) {                                   \
                 for (i = 0; i < 8; i++)                                 \
                     OP(dst[i], (vc1_mspel_hor_filter_16bits(tptr + i, 1, hmode) + r) >> 7); \
                 dst  += stride;                                         \
                 tptr += 11;                                             \
             }                                                           \
                                                                         \
             return;                                                     \
         } else { /* No horizontal filter, output 8 lines to dst */      \
             r = 1 - rnd;                                                \
                                                                         \
             for (j = 0; j < 8; j++) {                                   \
                 for (i = 0; i < 8; i++)                                 \
                     OP(dst[i], vc1_mspel_filter(src + i, stride, vmode, r)); \
                 src += stride;                                          \
                 dst += stride;                                          \
             }                                                           \
             return;                                                     \
         }                                                               \
     }                                                                   \
                                                                         \
     /* Horizontal mode with no vertical mode */                         \
     for (j = 0; j < 8; j++) {                                           \
         for (i = 0; i < 8; i++)                                         \
             OP(dst[i], vc1_mspel_filter(src + i, 1, hmode, rnd));       \
         dst += stride;                                                  \
         src += stride;                                                  \
     }                                                                   \
 }

 #define op_put(a, b) a = av_clip_uint8(b)
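The tap sets in vc1_mspel_filter are normalized by the final shift: the 1/4- and 3/4-pel weights (-4, 53, 18, -3) sum to 64 (removed by >> 6) and the 1/2-pel weights (-1, 9, 9, -1) sum to 16 (removed by >> 4), so a flat signal passes through unchanged for either rounding mode. A quick check (illustrative values):

    #include <assert.h>

    int main(void)
    {
        int v = 123, r = 1; /* hypothetical flat pixel value, rounding control */
        assert(((-4 * v + 53 * v + 18 * v - 3 * v + 32 - r) >> 6) == v); /* 1/4 */
        assert(((-1 * v +  9 * v +  9 * v - 1 * v +  8 - r) >> 4) == v); /* 1/2 */
        assert(((-3 * v + 18 * v + 53 * v - 4 * v + 32 - r) >> 6) == v); /* 3/4 */
        return 0;
    }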
@@ -626,14 +648,14 @@ VC1_MSPEL_MC(op_avg, avg_)

 /* pixel functions - really are entry points to vc1_mspel_mc */
 #define PUT_VC1_MSPEL(a, b)                                             \
 static void put_vc1_mspel_mc ## a ## b ## _c(uint8_t *dst,              \
                                              const uint8_t *src,        \
                                              ptrdiff_t stride, int rnd) \
 {                                                                       \
     put_vc1_mspel_mc(dst, src, stride, a, b, rnd);                      \
 }                                                                       \
 static void avg_vc1_mspel_mc ## a ## b ## _c(uint8_t *dst,              \
                                              const uint8_t *src,        \
                                              ptrdiff_t stride, int rnd) \
 {                                                                       \
@@ -659,90 +681,131 @@ PUT_VC1_MSPEL(1, 3)
 PUT_VC1_MSPEL(2, 3)
 PUT_VC1_MSPEL(3, 3)

 static void put_no_rnd_vc1_chroma_mc8_c(uint8_t *dst /* align 8 */,
                                         uint8_t *src /* align 1 */,
                                         int stride, int h, int x, int y)
 {
     const int A = (8 - x) * (8 - y);
     const int B =     (x) * (8 - y);
     const int C = (8 - x) *     (y);
     const int D =     (x) *     (y);
     int i;

     assert(x < 8 && y < 8 && x >= 0 && y >= 0);

     for (i = 0; i < h; i++) {
         dst[0] = (A * src[0] + B * src[1] + C * src[stride + 0] + D * src[stride + 1] + 32 - 4) >> 6;
         dst[1] = (A * src[1] + B * src[2] + C * src[stride + 1] + D * src[stride + 2] + 32 - 4) >> 6;
         dst[2] = (A * src[2] + B * src[3] + C * src[stride + 2] + D * src[stride + 3] + 32 - 4) >> 6;
         dst[3] = (A * src[3] + B * src[4] + C * src[stride + 3] + D * src[stride + 4] + 32 - 4) >> 6;
         dst[4] = (A * src[4] + B * src[5] + C * src[stride + 4] + D * src[stride + 5] + 32 - 4) >> 6;
         dst[5] = (A * src[5] + B * src[6] + C * src[stride + 5] + D * src[stride + 6] + 32 - 4) >> 6;
         dst[6] = (A * src[6] + B * src[7] + C * src[stride + 6] + D * src[stride + 7] + 32 - 4) >> 6;
         dst[7] = (A * src[7] + B * src[8] + C * src[stride + 7] + D * src[stride + 8] + 32 - 4) >> 6;
         dst += stride;
         src += stride;
     }
 }

 static void put_no_rnd_vc1_chroma_mc4_c(uint8_t *dst, uint8_t *src,
                                         int stride, int h, int x, int y)
 {
     const int A = (8 - x) * (8 - y);
     const int B =     (x) * (8 - y);
     const int C = (8 - x) *     (y);
     const int D =     (x) *     (y);
     int i;

     assert(x < 8 && y < 8 && x >= 0 && y >= 0);

     for (i = 0; i < h; i++) {
         dst[0] = (A * src[0] + B * src[1] + C * src[stride + 0] + D * src[stride + 1] + 32 - 4) >> 6;
         dst[1] = (A * src[1] + B * src[2] + C * src[stride + 1] + D * src[stride + 2] + 32 - 4) >> 6;
         dst[2] = (A * src[2] + B * src[3] + C * src[stride + 2] + D * src[stride + 3] + 32 - 4) >> 6;
         dst[3] = (A * src[3] + B * src[4] + C * src[stride + 3] + D * src[stride + 4] + 32 - 4) >> 6;
         dst += stride;
         src += stride;
     }
 }

 #define avg2(a, b) (((a) + (b) + 1) >> 1)
 static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst /* align 8 */,
                                         uint8_t *src /* align 1 */,
                                         int stride, int h, int x, int y)
 {
     const int A = (8 - x) * (8 - y);
     const int B =     (x) * (8 - y);
     const int C = (8 - x) *     (y);
     const int D =     (x) *     (y);
     int i;

     assert(x < 8 && y < 8 && x >= 0 && y >= 0);

     for (i = 0; i < h; i++) {
         dst[0] = avg2(dst[0], ((A * src[0] + B * src[1] + C * src[stride + 0] + D * src[stride + 1] + 32 - 4) >> 6));
         dst[1] = avg2(dst[1], ((A * src[1] + B * src[2] + C * src[stride + 1] + D * src[stride + 2] + 32 - 4) >> 6));
         dst[2] = avg2(dst[2], ((A * src[2] + B * src[3] + C * src[stride + 2] + D * src[stride + 3] + 32 - 4) >> 6));
         dst[3] = avg2(dst[3], ((A * src[3] + B * src[4] + C * src[stride + 3] + D * src[stride + 4] + 32 - 4) >> 6));
         dst[4] = avg2(dst[4], ((A * src[4] + B * src[5] + C * src[stride + 4] + D * src[stride + 5] + 32 - 4) >> 6));
         dst[5] = avg2(dst[5], ((A * src[5] + B * src[6] + C * src[stride + 5] + D * src[stride + 6] + 32 - 4) >> 6));
         dst[6] = avg2(dst[6], ((A * src[6] + B * src[7] + C * src[stride + 6] + D * src[stride + 7] + 32 - 4) >> 6));
         dst[7] = avg2(dst[7], ((A * src[7] + B * src[8] + C * src[stride + 7] + D * src[stride + 8] + 32 - 4) >> 6));
         dst += stride;
         src += stride;
     }
 }
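The chroma functions are plain bilinear interpolation at 1/8-pel resolution: A, B, C and D weight the four neighbouring samples and always sum to 64, so the >> 6 renormalizes, and the + 32 - 4 bias is the deliberately skewed "no rounding" variant (an unbiased version would add 32). A quick sanity check of the weights (illustrative values):

    #include <assert.h>

    int main(void)
    {
        int x = 3, y = 5; /* hypothetical 1/8-pel fractional offsets */
        int A = (8 - x) * (8 - y);
        int B =     (x) * (8 - y);
        int C = (8 - x) *     (y);
        int D =     (x) *     (y);
        assert(A + B + C + D == 64); /* holds for any 0 <= x, y < 8 */
        return 0;
    }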
 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
 static void sprite_h_c(uint8_t *dst, const uint8_t *src,
                        int offset, int advance, int count)
 {
     while (count--) {
         int a = src[(offset >> 16)];
         int b = src[(offset >> 16) + 1];
         *dst++ = a + ((b - a) * (offset & 0xFFFF) >> 16);
         offset += advance;
     }
 }
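sprite_h_c walks the source with a 16.16 fixed-point position: offset >> 16 selects the left sample and offset & 0xFFFF linearly blends toward its right neighbour. A minimal standalone sketch of the same idiom (hypothetical values):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        const uint8_t src[] = { 10, 20, 30 };
        int offset = 1 << 15;            /* 0.5 in 16.16 fixed point */
        int a = src[offset >> 16];       /* integer part: src[0]     */
        int b = src[(offset >> 16) + 1]; /* right neighbour: src[1]  */
        assert(a + ((b - a) * (offset & 0xFFFF) >> 16) == 15); /* halfway */
        return 0;
    }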
 static av_always_inline void sprite_v_template(uint8_t *dst,
                                                const uint8_t *src1a,
                                                const uint8_t *src1b,
                                                int offset1, int two_sprites,
                                                const uint8_t *src2a,
                                                const uint8_t *src2b,
                                                int offset2, int alpha,
                                                int scaled, int width)
 {
     int a1, b1, a2, b2;
     while (width--) {
@@ -763,32 +826,49 @@ static av_always_inline void sprite_v_template(uint8_t *dst, const uint8_t *src1
     }
 }

 static void sprite_v_single_c(uint8_t *dst, const uint8_t *src1a,
                               const uint8_t *src1b,
                               int offset, int width)
 {
     sprite_v_template(dst, src1a, src1b, offset, 0, NULL, NULL, 0, 0, 1, width);
 }

 static void sprite_v_double_noscale_c(uint8_t *dst, const uint8_t *src1a,
                                       const uint8_t *src2a,
                                       int alpha, int width)
 {
     sprite_v_template(dst, src1a, NULL, 0, 1, src2a, NULL, 0, alpha, 0, width);
 }

 static void sprite_v_double_onescale_c(uint8_t *dst,
                                        const uint8_t *src1a,
                                        const uint8_t *src1b,
                                        int offset1,
                                        const uint8_t *src2a,
                                        int alpha, int width)
 {
     sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, NULL, 0, alpha, 1,
                       width);
 }

 static void sprite_v_double_twoscale_c(uint8_t *dst,
                                        const uint8_t *src1a,
                                        const uint8_t *src1b,
                                        int offset1,
                                        const uint8_t *src2a,
                                        const uint8_t *src2b,
                                        int offset2,
                                        int alpha,
                                        int width)
 {
     sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, src2b, offset2,
                       alpha, 2, width);
 }

 #endif /* CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER */

 av_cold void ff_vc1dsp_init(VC1DSPContext *dsp)
 {
     dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_c;
     dsp->vc1_inv_trans_4x8 = vc1_inv_trans_4x8_c;
     dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_c;
@@ -797,10 +877,12 @@ av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
     dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_c;
     dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_c;
     dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_c;

     dsp->vc1_h_overlap        = vc1_h_overlap_c;
     dsp->vc1_v_overlap        = vc1_v_overlap_c;
     dsp->vc1_h_s_overlap      = vc1_h_s_overlap_c;
     dsp->vc1_v_s_overlap      = vc1_v_s_overlap_c;

     dsp->vc1_v_loop_filter4   = vc1_v_loop_filter4_c;
     dsp->vc1_h_loop_filter4   = vc1_h_loop_filter4_c;
     dsp->vc1_v_loop_filter8   = vc1_v_loop_filter8_c;
@@ -808,16 +890,16 @@ av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
     dsp->vc1_v_loop_filter16  = vc1_v_loop_filter16_c;
     dsp->vc1_h_loop_filter16  = vc1_h_loop_filter16_c;

     dsp->put_vc1_mspel_pixels_tab[0]  = ff_put_pixels8x8_c;
     dsp->put_vc1_mspel_pixels_tab[1]  = put_vc1_mspel_mc10_c;
     dsp->put_vc1_mspel_pixels_tab[2]  = put_vc1_mspel_mc20_c;
     dsp->put_vc1_mspel_pixels_tab[3]  = put_vc1_mspel_mc30_c;
     dsp->put_vc1_mspel_pixels_tab[4]  = put_vc1_mspel_mc01_c;
     dsp->put_vc1_mspel_pixels_tab[5]  = put_vc1_mspel_mc11_c;
     dsp->put_vc1_mspel_pixels_tab[6]  = put_vc1_mspel_mc21_c;
     dsp->put_vc1_mspel_pixels_tab[7]  = put_vc1_mspel_mc31_c;
     dsp->put_vc1_mspel_pixels_tab[8]  = put_vc1_mspel_mc02_c;
     dsp->put_vc1_mspel_pixels_tab[9]  = put_vc1_mspel_mc12_c;
     dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_c;
     dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_c;
     dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_c;
@@ -825,16 +907,16 @@ av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
     dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_c;
     dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_c;

     dsp->avg_vc1_mspel_pixels_tab[0]  = ff_avg_pixels8x8_c;
     dsp->avg_vc1_mspel_pixels_tab[1]  = avg_vc1_mspel_mc10_c;
     dsp->avg_vc1_mspel_pixels_tab[2]  = avg_vc1_mspel_mc20_c;
     dsp->avg_vc1_mspel_pixels_tab[3]  = avg_vc1_mspel_mc30_c;
     dsp->avg_vc1_mspel_pixels_tab[4]  = avg_vc1_mspel_mc01_c;
     dsp->avg_vc1_mspel_pixels_tab[5]  = avg_vc1_mspel_mc11_c;
     dsp->avg_vc1_mspel_pixels_tab[6]  = avg_vc1_mspel_mc21_c;
     dsp->avg_vc1_mspel_pixels_tab[7]  = avg_vc1_mspel_mc31_c;
     dsp->avg_vc1_mspel_pixels_tab[8]  = avg_vc1_mspel_mc02_c;
     dsp->avg_vc1_mspel_pixels_tab[9]  = avg_vc1_mspel_mc12_c;
     dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_c;
     dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_c;
     dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_c;
@@ -842,8 +924,8 @@ av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
     dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_c;
     dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_c;

     dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_c;
     dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_c;
     dsp->put_no_rnd_vc1_chroma_pixels_tab[1] = put_no_rnd_vc1_chroma_mc4_c;

 #if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
@@ -852,7 +934,7 @@ av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
     dsp->sprite_v_double_noscale  = sprite_v_double_noscale_c;
     dsp->sprite_v_double_onescale = sprite_v_double_onescale_c;
     dsp->sprite_v_double_twoscale = sprite_v_double_twoscale_c;
 #endif /* CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER */

     if (ARCH_ARM)
         ff_vc1dsp_init_arm(dsp);
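ff_vc1dsp_init fills the VC1DSPContext with the C reference implementations and then lets per-architecture initializers such as ff_vc1dsp_init_arm overwrite individual entries; decoder code dispatches through the context instead of calling the _c functions directly. A hedged usage sketch (field and function names as they appear in this file; a real decoder initializes the context once, not per call):

    #include "vc1dsp.h"

    static void inverse_transform_block(int16_t block[64])
    {
        VC1DSPContext dsp;

        ff_vc1dsp_init(&dsp);         /* C defaults, arch overrides follow */
        dsp.vc1_inv_trans_8x8(block); /* dispatch via the function pointer */
    }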