Commit d18e243f authored by Michael Niedermayer

h264: merge _internal & template files.

seems git missed them and we temporarily lost our improvements in them.
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
parent 3b6bbfa0
/*
* DSP utils
* Copyright (c) 2000, 2001 Fabrice Bellard
* Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
*
* gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* DSP utils
*/
#include "h264_high_depth.h"
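/*
 * Bit-depth template: this file is compiled once per supported BIT_DEPTH.
 * The included header is expected to provide the 'pixel', 'pixel2', 'pixel4'
 * and 'dctcoef' types, the AV_RNxP/AV_WNxP load/store macros and the
 * FUNC()/FUNCC() name-mangling macros, so the same code yields 8-, 9- and
 * 10-bit variants.
 */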
static inline void FUNC(copy_block2)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
int i;
for(i=0; i<h; i++)
{
AV_WN2P(dst , AV_RN2P(src ));
dst+=dstStride;
src+=srcStride;
}
}
static inline void FUNC(copy_block4)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
int i;
for(i=0; i<h; i++)
{
AV_WN4P(dst , AV_RN4P(src ));
dst+=dstStride;
src+=srcStride;
}
}
static inline void FUNC(copy_block8)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
int i;
for(i=0; i<h; i++)
{
AV_WN4P(dst , AV_RN4P(src ));
AV_WN4P(dst+4*sizeof(pixel), AV_RN4P(src+4*sizeof(pixel)));
dst+=dstStride;
src+=srcStride;
}
}
static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
{
int i;
for(i=0; i<h; i++)
{
AV_WN4P(dst , AV_RN4P(src ));
AV_WN4P(dst+ 4*sizeof(pixel), AV_RN4P(src+ 4*sizeof(pixel)));
AV_WN4P(dst+ 8*sizeof(pixel), AV_RN4P(src+ 8*sizeof(pixel)));
AV_WN4P(dst+12*sizeof(pixel), AV_RN4P(src+12*sizeof(pixel)));
dst+=dstStride;
src+=srcStride;
}
}
/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
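// Left and right borders are always padded by replicating the outermost
// column; the top and bottom borders (including the corners) are only
// padded when the corresponding EDGE_TOP/EDGE_BOTTOM bit is set in 'sides'.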
static void FUNCC(draw_edges)(uint8_t *p_buf, int p_wrap, int width, int height, int w, int sides)
{
pixel *buf = (pixel*)p_buf;
int wrap = p_wrap / sizeof(pixel);
pixel *ptr, *last_line;
int i;
/* left and right */
ptr = buf;
for(i=0;i<height;i++) {
#if BIT_DEPTH > 8
int j;
for (j = 0; j < w; j++) {
ptr[j-w] = ptr[0];
ptr[j+width] = ptr[width-1];
}
#else
memset(ptr - w, ptr[0], w);
memset(ptr + width, ptr[width-1], w);
#endif
ptr += wrap;
}
/* top and bottom + corners */
buf -= w;
last_line = buf + (height - 1) * wrap;
if (sides & EDGE_TOP)
for(i = 0; i < w; i++)
memcpy(buf - (i + 1) * wrap, buf, (width + w + w) * sizeof(pixel)); // top
if (sides & EDGE_BOTTOM)
for (i = 0; i < w; i++)
memcpy(last_line + (i + 1) * wrap, last_line, (width + w + w) * sizeof(pixel)); // bottom
}
/**
* Copy a rectangular area of samples to a temporary buffer and replicate the border samples.
* @param buf destination buffer
* @param src source buffer
* @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers
* @param block_w width of block
* @param block_h height of block
* @param src_x x coordinate of the top left sample of the block in the source buffer
* @param src_y y coordinate of the top left sample of the block in the source buffer
* @param w width of the source buffer
* @param h height of the source buffer
*/
void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, int block_w, int block_h,
int src_x, int src_y, int w, int h){
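/* Outline: clamp (src_x, src_y) so the block overlaps the picture by at
 * least one row and one column, copy the overlapping samples, then
 * replicate the first/last copied row upwards/downwards and the
 * first/last copied column sideways to fill the remaining area. */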
int x, y;
int start_y, start_x, end_y, end_x;
if(src_y>= h){
src+= (h-1-src_y)*linesize;
src_y=h-1;
}else if(src_y<=-block_h){
src+= (1-block_h-src_y)*linesize;
src_y=1-block_h;
}
if(src_x>= w){
src+= (w-1-src_x)*sizeof(pixel);
src_x=w-1;
}else if(src_x<=-block_w){
src+= (1-block_w-src_x)*sizeof(pixel);
src_x=1-block_w;
}
start_y= FFMAX(0, -src_y);
start_x= FFMAX(0, -src_x);
end_y= FFMIN(block_h, h-src_y);
end_x= FFMIN(block_w, w-src_x);
assert(start_y < end_y && block_h);
assert(start_x < end_x && block_w);
w = end_x - start_x;
src += start_y*linesize + start_x*sizeof(pixel);
buf += start_x*sizeof(pixel);
//top
for(y=0; y<start_y; y++){
memcpy(buf, src, w*sizeof(pixel));
buf += linesize;
}
// copy existing part
for(; y<end_y; y++){
memcpy(buf, src, w*sizeof(pixel));
src += linesize;
buf += linesize;
}
//bottom
src -= linesize;
for(; y<block_h; y++){
memcpy(buf, src, w*sizeof(pixel));
buf += linesize;
}
buf -= block_h * linesize + start_x*sizeof(pixel);
while (block_h--){
pixel *bufp = (pixel*)buf;
//left
for(x=0; x<start_x; x++){
bufp[x] = bufp[start_x];
}
//right
for(x=end_x; x<block_w; x++){
bufp[x] = bufp[end_x - 1];
}
buf += linesize;
}
}
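/* Add the residual coefficients in *block to the samples in *pixels in
 * place; line_size is converted from bytes to pixels and no clamping is
 * applied to the result. */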
static void FUNCC(add_pixels8)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size)
{
int i;
pixel *restrict pixels = (pixel *restrict)p_pixels;
dctcoef *block = (dctcoef*)p_block;
line_size >>= sizeof(pixel)-1;
for(i=0;i<8;i++) {
pixels[0] += block[0];
pixels[1] += block[1];
pixels[2] += block[2];
pixels[3] += block[3];
pixels[4] += block[4];
pixels[5] += block[5];
pixels[6] += block[6];
pixels[7] += block[7];
pixels += line_size;
block += 8;
}
}
static void FUNCC(add_pixels4)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size)
{
int i;
pixel *restrict pixels = (pixel *restrict)p_pixels;
dctcoef *block = (dctcoef*)p_block;
line_size >>= sizeof(pixel)-1;
for(i=0;i<4;i++) {
pixels[0] += block[0];
pixels[1] += block[1];
pixels[2] += block[2];
pixels[3] += block[3];
pixels += line_size;
block += 4;
}
}
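/* The block below is a disabled variant that works on whole 64-bit words
 * and assumes 8-bit pixels; the bit-depth-aware version in the #else branch
 * is the one actually compiled. */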
#if 0
#define PIXOP2(OPNAME, OP) \
static void OPNAME ## _pixels(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
for(i=0; i<h; i++){\
OP(*((uint64_t*)block), AV_RN64(pixels));\
pixels+=line_size;\
block +=line_size;\
}\
}\
\
static void OPNAME ## _no_rnd_pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
for(i=0; i<h; i++){\
const uint64_t a= AV_RN64(pixels );\
const uint64_t b= AV_RN64(pixels+1);\
OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
pixels+=line_size;\
block +=line_size;\
}\
}\
\
static void OPNAME ## _pixels_x2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
for(i=0; i<h; i++){\
const uint64_t a= AV_RN64(pixels );\
const uint64_t b= AV_RN64(pixels+1);\
OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
pixels+=line_size;\
block +=line_size;\
}\
}\
\
static void OPNAME ## _no_rnd_pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
for(i=0; i<h; i++){\
const uint64_t a= AV_RN64(pixels );\
const uint64_t b= AV_RN64(pixels+line_size);\
OP(*((uint64_t*)block), (a&b) + (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
pixels+=line_size;\
block +=line_size;\
}\
}\
\
static void OPNAME ## _pixels_y2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
for(i=0; i<h; i++){\
const uint64_t a= AV_RN64(pixels );\
const uint64_t b= AV_RN64(pixels+line_size);\
OP(*((uint64_t*)block), (a|b) - (((a^b)&0xFEFEFEFEFEFEFEFEULL)>>1));\
pixels+=line_size;\
block +=line_size;\
}\
}\
\
static void OPNAME ## _pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
const uint64_t a= AV_RN64(pixels );\
const uint64_t b= AV_RN64(pixels+1);\
uint64_t l0= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL)\
+ 0x0202020202020202ULL;\
uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
+ ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
uint64_t l1,h1;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
uint64_t a= AV_RN64(pixels );\
uint64_t b= AV_RN64(pixels+1);\
l1= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL);\
h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
+ ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
pixels+=line_size;\
block +=line_size;\
a= AV_RN64(pixels );\
b= AV_RN64(pixels+1);\
l0= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL)\
+ 0x0202020202020202ULL;\
h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
+ ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
pixels+=line_size;\
block +=line_size;\
}\
}\
\
static void OPNAME ## _no_rnd_pixels_xy2_c(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
int i;\
const uint64_t a= AV_RN64(pixels );\
const uint64_t b= AV_RN64(pixels+1);\
uint64_t l0= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL)\
+ 0x0101010101010101ULL;\
uint64_t h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
+ ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
uint64_t l1,h1;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
uint64_t a= AV_RN64(pixels );\
uint64_t b= AV_RN64(pixels+1);\
l1= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL);\
h1= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
+ ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
pixels+=line_size;\
block +=line_size;\
a= AV_RN64(pixels );\
b= AV_RN64(pixels+1);\
l0= (a&0x0303030303030303ULL)\
+ (b&0x0303030303030303ULL)\
+ 0x0101010101010101ULL;\
h0= ((a&0xFCFCFCFCFCFCFCFCULL)>>2)\
+ ((b&0xFCFCFCFCFCFCFCFCULL)>>2);\
OP(*((uint64_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0F0F0F0F0FULL));\
pixels+=line_size;\
block +=line_size;\
}\
}\
\
CALL_2X_PIXELS(OPNAME ## _pixels16_c , OPNAME ## _pixels_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels_x2_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels_y2_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels_xy2_c, 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_x2_c , OPNAME ## _no_rnd_pixels_x2_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_y2_c , OPNAME ## _no_rnd_pixels_y2_c , 8*sizeof(pixel))\
CALL_2X_PIXELS(OPNAME ## _no_rnd_pixels16_xy2_c, OPNAME ## _no_rnd_pixels_xy2_c, 8*sizeof(pixel))
#define op_avg(a, b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEFEFEFEFEULL)>>1) )
#else // 64 bit variant
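/*
 * PIXOP2 generates the put_pixels and avg_pixels primitives at the template
 * bit depth.  OP is plain assignment for "put" and a rounding average with
 * the existing destination for "avg"; the *_l2 helpers average two sources,
 * and the *_x2, *_y2 and *_xy2 variants implement half-pel interpolation by
 * averaging horizontal, vertical or diagonal neighbours.
 */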
#define PIXOP2(OPNAME, OP) \
static void FUNCC(OPNAME ## _pixels2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
int i;\
for(i=0; i<h; i++){\
OP(*((pixel2*)(block )), AV_RN2P(pixels ));\
pixels+=line_size;\
block +=line_size;\
}\
}\
static void FUNCC(OPNAME ## _pixels4)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
int i;\
for(i=0; i<h; i++){\
OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
pixels+=line_size;\
block +=line_size;\
}\
}\
static void FUNCC(OPNAME ## _pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
int i;\
for(i=0; i<h; i++){\
OP(*((pixel4*)(block )), AV_RN4P(pixels ));\
OP(*((pixel4*)(block+4*sizeof(pixel))), AV_RN4P(pixels+4*sizeof(pixel)));\
pixels+=line_size;\
block +=line_size;\
}\
}\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
FUNCC(OPNAME ## _pixels8)(block, pixels, line_size, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
int src_stride1, int src_stride2, int h){\
int i;\
for(i=0; i<h; i++){\
pixel4 a,b;\
a= AV_RN4P(&src1[i*src_stride1 ]);\
b= AV_RN4P(&src2[i*src_stride2 ]);\
OP(*((pixel4*)&dst[i*dst_stride ]), no_rnd_avg_pixel4(a, b));\
a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), no_rnd_avg_pixel4(a, b));\
}\
}\
\
static inline void FUNC(OPNAME ## _pixels8_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
int src_stride1, int src_stride2, int h){\
int i;\
for(i=0; i<h; i++){\
pixel4 a,b;\
a= AV_RN4P(&src1[i*src_stride1 ]);\
b= AV_RN4P(&src2[i*src_stride2 ]);\
OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
a= AV_RN4P(&src1[i*src_stride1+4*sizeof(pixel)]);\
b= AV_RN4P(&src2[i*src_stride2+4*sizeof(pixel)]);\
OP(*((pixel4*)&dst[i*dst_stride+4*sizeof(pixel)]), rnd_avg_pixel4(a, b));\
}\
}\
\
static inline void FUNC(OPNAME ## _pixels4_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
int src_stride1, int src_stride2, int h){\
int i;\
for(i=0; i<h; i++){\
pixel4 a,b;\
a= AV_RN4P(&src1[i*src_stride1 ]);\
b= AV_RN4P(&src2[i*src_stride2 ]);\
OP(*((pixel4*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
}\
}\
\
static inline void FUNC(OPNAME ## _pixels2_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
int src_stride1, int src_stride2, int h){\
int i;\
for(i=0; i<h; i++){\
pixel4 a,b;\
a= AV_RN2P(&src1[i*src_stride1 ]);\
b= AV_RN2P(&src2[i*src_stride2 ]);\
OP(*((pixel2*)&dst[i*dst_stride ]), rnd_avg_pixel4(a, b));\
}\
}\
\
static inline void FUNC(OPNAME ## _pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
int src_stride1, int src_stride2, int h){\
FUNC(OPNAME ## _pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
FUNC(OPNAME ## _pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int dst_stride, \
int src_stride1, int src_stride2, int h){\
FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst , src1 , src2 , dst_stride, src_stride1, src_stride2, h);\
FUNC(OPNAME ## _no_rnd_pixels8_l2)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, h);\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
FUNC(OPNAME ## _no_rnd_pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
FUNC(OPNAME ## _pixels8_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNC(OPNAME ## _pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
/* FIXME HIGH BIT DEPTH */\
int i;\
for(i=0; i<h; i++){\
uint32_t a, b, c, d, l0, l1, h0, h1;\
a= AV_RN32(&src1[i*src_stride1]);\
b= AV_RN32(&src2[i*src_stride2]);\
c= AV_RN32(&src3[i*src_stride3]);\
d= AV_RN32(&src4[i*src_stride4]);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
l1= (c&0x03030303UL)\
+ (d&0x03030303UL);\
h1= ((c&0xFCFCFCFCUL)>>2)\
+ ((d&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
a= AV_RN32(&src1[i*src_stride1+4]);\
b= AV_RN32(&src2[i*src_stride2+4]);\
c= AV_RN32(&src3[i*src_stride3+4]);\
d= AV_RN32(&src4[i*src_stride4+4]);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
l1= (c&0x03030303UL)\
+ (d&0x03030303UL);\
h1= ((c&0xFCFCFCFCUL)>>2)\
+ ((d&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
}\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
FUNC(OPNAME ## _pixels4_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_x2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+sizeof(pixel), line_size, line_size, line_size, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_y2)(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
FUNC(OPNAME ## _pixels2_l2)(block, pixels, pixels+line_size, line_size, line_size, line_size, h);\
}\
\
static inline void FUNC(OPNAME ## _no_rnd_pixels8_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
/* FIXME HIGH BIT DEPTH*/\
int i;\
for(i=0; i<h; i++){\
uint32_t a, b, c, d, l0, l1, h0, h1;\
a= AV_RN32(&src1[i*src_stride1]);\
b= AV_RN32(&src2[i*src_stride2]);\
c= AV_RN32(&src3[i*src_stride3]);\
d= AV_RN32(&src4[i*src_stride4]);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x01010101UL;\
h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
l1= (c&0x03030303UL)\
+ (d&0x03030303UL);\
h1= ((c&0xFCFCFCFCUL)>>2)\
+ ((d&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)&dst[i*dst_stride]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
a= AV_RN32(&src1[i*src_stride1+4]);\
b= AV_RN32(&src2[i*src_stride2+4]);\
c= AV_RN32(&src3[i*src_stride3+4]);\
d= AV_RN32(&src4[i*src_stride4+4]);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x01010101UL;\
h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
l1= (c&0x03030303UL)\
+ (d&0x03030303UL);\
h1= ((c&0xFCFCFCFCUL)>>2)\
+ ((d&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)&dst[i*dst_stride+4]), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
}\
}\
static inline void FUNC(OPNAME ## _pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
FUNC(OPNAME ## _pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
FUNC(OPNAME ## _pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, const uint8_t *src3, const uint8_t *src4,\
int dst_stride, int src_stride1, int src_stride2,int src_stride3,int src_stride4, int h){\
FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst , src1 , src2 , src3 , src4 , dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *p_block, const uint8_t *p_pixels, int line_size, int h)\
{\
int i, a0, b0, a1, b1;\
pixel *block = (pixel*)p_block;\
const pixel *pixels = (const pixel*)p_pixels;\
line_size >>= sizeof(pixel)-1;\
a0= pixels[0];\
b0= pixels[1] + 2;\
a0 += b0;\
b0 += pixels[2];\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
a1= pixels[0];\
b1= pixels[1];\
a1 += b1;\
b1 += pixels[2];\
\
block[0]= (a1+a0)>>2; /* FIXME non put */\
block[1]= (b1+b0)>>2;\
\
pixels+=line_size;\
block +=line_size;\
\
a0= pixels[0];\
b0= pixels[1] + 2;\
a0 += b0;\
b0 += pixels[2];\
\
block[0]= (a1+a0)>>2;\
block[1]= (b1+b0)>>2;\
pixels+=line_size;\
block +=line_size;\
}\
}\
\
static inline void FUNCC(OPNAME ## _pixels4_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
/* FIXME HIGH BIT DEPTH */\
int i;\
const uint32_t a= AV_RN32(pixels );\
const uint32_t b= AV_RN32(pixels+1);\
uint32_t l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
uint32_t l1,h1;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
uint32_t a= AV_RN32(pixels );\
uint32_t b= AV_RN32(pixels+1);\
l1= (a&0x03030303UL)\
+ (b&0x03030303UL);\
h1= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=line_size;\
block +=line_size;\
a= AV_RN32(pixels );\
b= AV_RN32(pixels+1);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=line_size;\
block +=line_size;\
}\
}\
\
static inline void FUNCC(OPNAME ## _pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
/* FIXME HIGH BIT DEPTH */\
int j;\
for(j=0; j<2; j++){\
int i;\
const uint32_t a= AV_RN32(pixels );\
const uint32_t b= AV_RN32(pixels+1);\
uint32_t l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
uint32_t l1,h1;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
uint32_t a= AV_RN32(pixels );\
uint32_t b= AV_RN32(pixels+1);\
l1= (a&0x03030303UL)\
+ (b&0x03030303UL);\
h1= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=line_size;\
block +=line_size;\
a= AV_RN32(pixels );\
b= AV_RN32(pixels+1);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x02020202UL;\
h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=line_size;\
block +=line_size;\
}\
pixels+=4-line_size*(h+1);\
block +=4-line_size*h;\
}\
}\
\
static inline void FUNCC(OPNAME ## _no_rnd_pixels8_xy2)(uint8_t *block, const uint8_t *pixels, int line_size, int h)\
{\
/* FIXME HIGH BIT DEPTH */\
int j;\
for(j=0; j<2; j++){\
int i;\
const uint32_t a= AV_RN32(pixels );\
const uint32_t b= AV_RN32(pixels+1);\
uint32_t l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x01010101UL;\
uint32_t h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
uint32_t l1,h1;\
\
pixels+=line_size;\
for(i=0; i<h; i+=2){\
uint32_t a= AV_RN32(pixels );\
uint32_t b= AV_RN32(pixels+1);\
l1= (a&0x03030303UL)\
+ (b&0x03030303UL);\
h1= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=line_size;\
block +=line_size;\
a= AV_RN32(pixels );\
b= AV_RN32(pixels+1);\
l0= (a&0x03030303UL)\
+ (b&0x03030303UL)\
+ 0x01010101UL;\
h0= ((a&0xFCFCFCFCUL)>>2)\
+ ((b&0xFCFCFCFCUL)>>2);\
OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\
pixels+=line_size;\
block +=line_size;\
}\
pixels+=4-line_size*(h+1);\
block +=4-line_size*h;\
}\
}\
\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_x2) , FUNCC(OPNAME ## _pixels8_x2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_y2) , FUNCC(OPNAME ## _pixels8_y2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _pixels16_xy2), FUNCC(OPNAME ## _pixels8_xy2), 8*sizeof(pixel))\
av_unused CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16) , FUNCC(OPNAME ## _pixels8) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_x2) , FUNCC(OPNAME ## _no_rnd_pixels8_x2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_y2) , FUNCC(OPNAME ## _no_rnd_pixels8_y2) , 8*sizeof(pixel))\
CALL_2X_PIXELS(FUNCC(OPNAME ## _no_rnd_pixels16_xy2), FUNCC(OPNAME ## _no_rnd_pixels8_xy2), 8*sizeof(pixel))\
#define op_avg(a, b) a = rnd_avg_pixel4(a, b)
#endif
#define op_put(a, b) a = b
PIXOP2(avg, op_avg)
PIXOP2(put, op_put)
#undef op_avg
#undef op_put
#define put_no_rnd_pixels8_c put_pixels8_c
#define put_no_rnd_pixels16_c put_pixels16_c
static void FUNCC(put_no_rnd_pixels16_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
FUNC(put_no_rnd_pixels16_l2)(dst, a, b, stride, stride, stride, h);
}
static void FUNCC(put_no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *a, const uint8_t *b, int stride, int h){
FUNC(put_no_rnd_pixels8_l2)(dst, a, b, stride, stride, stride, h);
}
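/*
 * Chroma MC: x and y are the fractional positions in 1/8-pel units (0..7).
 * A, B, C and D are the bilinear weights (8-x)*(8-y), x*(8-y), (8-x)*y and
 * x*y, which sum to 64; OP then divides by 64 with rounding (put) and, for
 * avg, additionally averages with the previous destination value.
 */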
#define H264_CHROMA_MC(OPNAME, OP)\
static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride >>= sizeof(pixel)-1;\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
if(D){\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
dst+= stride;\
src+= stride;\
}\
}else{\
const int E= B+C;\
const int step= C ? stride : 1;\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + E*src[step+0]));\
OP(dst[1], (A*src[1] + E*src[step+1]));\
dst+= stride;\
src+= stride;\
}\
}\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride >>= sizeof(pixel)-1;\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
if(D){\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
dst+= stride;\
src+= stride;\
}\
}else{\
const int E= B+C;\
const int step= C ? stride : 1;\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + E*src[step+0]));\
OP(dst[1], (A*src[1] + E*src[step+1]));\
OP(dst[2], (A*src[2] + E*src[step+2]));\
OP(dst[3], (A*src[3] + E*src[step+3]));\
dst+= stride;\
src+= stride;\
}\
}\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride >>= sizeof(pixel)-1;\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
if(D){\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + B*src[1] + C*src[stride+0] + D*src[stride+1]));\
OP(dst[1], (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2]));\
OP(dst[2], (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3]));\
OP(dst[3], (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4]));\
OP(dst[4], (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5]));\
OP(dst[5], (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6]));\
OP(dst[6], (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7]));\
OP(dst[7], (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8]));\
dst+= stride;\
src+= stride;\
}\
}else{\
const int E= B+C;\
const int step= C ? stride : 1;\
for(i=0; i<h; i++){\
OP(dst[0], (A*src[0] + E*src[step+0]));\
OP(dst[1], (A*src[1] + E*src[step+1]));\
OP(dst[2], (A*src[2] + E*src[step+2]));\
OP(dst[3], (A*src[3] + E*src[step+3]));\
OP(dst[4], (A*src[4] + E*src[step+4]));\
OP(dst[5], (A*src[5] + E*src[step+5]));\
OP(dst[6], (A*src[6] + E*src[step+6]));\
OP(dst[7], (A*src[7] + E*src[step+7]));\
dst+= stride;\
src+= stride;\
}\
}\
}
#define op_avg(a, b) a = (((a)+(((b) + 32)>>6)+1)>>1)
#define op_put(a, b) a = (((b) + 32)>>6)
H264_CHROMA_MC(put_ , op_put)
H264_CHROMA_MC(avg_ , op_avg)
#undef op_avg
#undef op_put
#if 1
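/*
 * Luma half-pel interpolation: the 6-tap H.264 filter (1, -5, 20, 20, -5, 1),
 * whose coefficients sum to 32.  OP rounds a single filter pass with
 * (v + 16) >> 5; OP2 rounds the combined horizontal+vertical pass, scaled by
 * 32*32, with (v + 512) >> 10.  For BIT_DEPTH > 9 the hv variants add a
 * negative 'pad' offset to the int16_t intermediates (and subtract it before
 * the second pass), presumably to keep them within int16_t range.
 */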
#define H264_LOWPASS(OPNAME, OP, OP2) \
static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int h=2;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int w=2;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
const int srcA= src[-1*srcStride];\
const int src0= src[0 *srcStride];\
const int src1= src[1 *srcStride];\
const int src2= src[2 *srcStride];\
const int src3= src[3 *srcStride];\
const int src4= src[4 *srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
dst++;\
src++;\
}\
}\
\
static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
const int h=2;\
const int w=2;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
{\
tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
tmp+=tmpStride;\
src+=srcStride;\
}\
tmp -= tmpStride*(h+5-2);\
for(i=0; i<w; i++)\
{\
const int tmpB= tmp[-2*tmpStride] - pad;\
const int tmpA= tmp[-1*tmpStride] - pad;\
const int tmp0= tmp[0 *tmpStride] - pad;\
const int tmp1= tmp[1 *tmpStride] - pad;\
const int tmp2= tmp[2 *tmpStride] - pad;\
const int tmp3= tmp[3 *tmpStride] - pad;\
const int tmp4= tmp[4 *tmpStride] - pad;\
OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
dst++;\
tmp++;\
}\
}\
static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int h=4;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]));\
OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]));\
OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int w=4;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
const int srcA= src[-1*srcStride];\
const int src0= src[0 *srcStride];\
const int src1= src[1 *srcStride];\
const int src2= src[2 *srcStride];\
const int src3= src[3 *srcStride];\
const int src4= src[4 *srcStride];\
const int src5= src[5 *srcStride];\
const int src6= src[6 *srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
dst++;\
src++;\
}\
}\
\
static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
const int h=4;\
const int w=4;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
{\
tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]) + pad;\
tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4]) + pad;\
tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5]) + pad;\
tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6]) + pad;\
tmp+=tmpStride;\
src+=srcStride;\
}\
tmp -= tmpStride*(h+5-2);\
for(i=0; i<w; i++)\
{\
const int tmpB= tmp[-2*tmpStride] - pad;\
const int tmpA= tmp[-1*tmpStride] - pad;\
const int tmp0= tmp[0 *tmpStride] - pad;\
const int tmp1= tmp[1 *tmpStride] - pad;\
const int tmp2= tmp[2 *tmpStride] - pad;\
const int tmp3= tmp[3 *tmpStride] - pad;\
const int tmp4= tmp[4 *tmpStride] - pad;\
const int tmp5= tmp[5 *tmpStride] - pad;\
const int tmp6= tmp[6 *tmpStride] - pad;\
OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
dst++;\
tmp++;\
}\
}\
\
static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int h=8;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
OP(dst[1], (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]));\
OP(dst[2], (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]));\
OP(dst[3], (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]));\
OP(dst[4], (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]));\
OP(dst[5], (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]));\
OP(dst[6], (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]));\
OP(dst[7], (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]));\
dst+=dstStride;\
src+=srcStride;\
}\
}\
\
static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int w=8;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
const int srcA= src[-1*srcStride];\
const int src0= src[0 *srcStride];\
const int src1= src[1 *srcStride];\
const int src2= src[2 *srcStride];\
const int src3= src[3 *srcStride];\
const int src4= src[4 *srcStride];\
const int src5= src[5 *srcStride];\
const int src6= src[6 *srcStride];\
const int src7= src[7 *srcStride];\
const int src8= src[8 *srcStride];\
const int src9= src[9 *srcStride];\
const int src10=src[10*srcStride];\
OP(dst[0*dstStride], (src0+src1)*20 - (srcA+src2)*5 + (srcB+src3));\
OP(dst[1*dstStride], (src1+src2)*20 - (src0+src3)*5 + (srcA+src4));\
OP(dst[2*dstStride], (src2+src3)*20 - (src1+src4)*5 + (src0+src5));\
OP(dst[3*dstStride], (src3+src4)*20 - (src2+src5)*5 + (src1+src6));\
OP(dst[4*dstStride], (src4+src5)*20 - (src3+src6)*5 + (src2+src7));\
OP(dst[5*dstStride], (src5+src6)*20 - (src4+src7)*5 + (src3+src8));\
OP(dst[6*dstStride], (src6+src7)*20 - (src5+src8)*5 + (src4+src9));\
OP(dst[7*dstStride], (src7+src8)*20 - (src6+src9)*5 + (src5+src10));\
dst++;\
src++;\
}\
}\
\
static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
const int h=8;\
const int w=8;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
{\
tmp[0]= (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]) + pad;\
tmp[1]= (src[1]+src[2])*20 - (src[0 ]+src[3])*5 + (src[-1]+src[4 ]) + pad;\
tmp[2]= (src[2]+src[3])*20 - (src[1 ]+src[4])*5 + (src[0 ]+src[5 ]) + pad;\
tmp[3]= (src[3]+src[4])*20 - (src[2 ]+src[5])*5 + (src[1 ]+src[6 ]) + pad;\
tmp[4]= (src[4]+src[5])*20 - (src[3 ]+src[6])*5 + (src[2 ]+src[7 ]) + pad;\
tmp[5]= (src[5]+src[6])*20 - (src[4 ]+src[7])*5 + (src[3 ]+src[8 ]) + pad;\
tmp[6]= (src[6]+src[7])*20 - (src[5 ]+src[8])*5 + (src[4 ]+src[9 ]) + pad;\
tmp[7]= (src[7]+src[8])*20 - (src[6 ]+src[9])*5 + (src[5 ]+src[10]) + pad;\
tmp+=tmpStride;\
src+=srcStride;\
}\
tmp -= tmpStride*(h+5-2);\
for(i=0; i<w; i++)\
{\
const int tmpB= tmp[-2*tmpStride] - pad;\
const int tmpA= tmp[-1*tmpStride] - pad;\
const int tmp0= tmp[0 *tmpStride] - pad;\
const int tmp1= tmp[1 *tmpStride] - pad;\
const int tmp2= tmp[2 *tmpStride] - pad;\
const int tmp3= tmp[3 *tmpStride] - pad;\
const int tmp4= tmp[4 *tmpStride] - pad;\
const int tmp5= tmp[5 *tmpStride] - pad;\
const int tmp6= tmp[6 *tmpStride] - pad;\
const int tmp7= tmp[7 *tmpStride] - pad;\
const int tmp8= tmp[8 *tmpStride] - pad;\
const int tmp9= tmp[9 *tmpStride] - pad;\
const int tmp10=tmp[10*tmpStride] - pad;\
OP2(dst[0*dstStride], (tmp0+tmp1)*20 - (tmpA+tmp2)*5 + (tmpB+tmp3));\
OP2(dst[1*dstStride], (tmp1+tmp2)*20 - (tmp0+tmp3)*5 + (tmpA+tmp4));\
OP2(dst[2*dstStride], (tmp2+tmp3)*20 - (tmp1+tmp4)*5 + (tmp0+tmp5));\
OP2(dst[3*dstStride], (tmp3+tmp4)*20 - (tmp2+tmp5)*5 + (tmp1+tmp6));\
OP2(dst[4*dstStride], (tmp4+tmp5)*20 - (tmp3+tmp6)*5 + (tmp2+tmp7));\
OP2(dst[5*dstStride], (tmp5+tmp6)*20 - (tmp4+tmp7)*5 + (tmp3+tmp8));\
OP2(dst[6*dstStride], (tmp6+tmp7)*20 - (tmp5+tmp8)*5 + (tmp4+tmp9));\
OP2(dst[7*dstStride], (tmp7+tmp8)*20 - (tmp6+tmp9)*5 + (tmp5+tmp10));\
dst++;\
tmp++;\
}\
}\
\
static void FUNC(OPNAME ## h264_qpel16_v_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_v_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
\
static void FUNC(OPNAME ## h264_qpel16_h_lowpass)(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst , src , dstStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_h_lowpass)(dst+8*sizeof(pixel), src+8*sizeof(pixel), dstStride, srcStride);\
}\
\
static void FUNC(OPNAME ## h264_qpel16_hv_lowpass)(uint8_t *dst, int16_t *tmp, uint8_t *src, int dstStride, int tmpStride, int srcStride){\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
src += 8*srcStride;\
dst += 8*dstStride;\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst , tmp , src , dstStride, tmpStride, srcStride);\
FUNC(OPNAME ## h264_qpel8_hv_lowpass)(dst+8*sizeof(pixel), tmp+8, src+8*sizeof(pixel), dstStride, tmpStride, srcStride);\
}\
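
/*
 * H264_MC builds the 16 quarter-pel motion compensation functions
 * OPNAME##h264_qpel##SIZE##_mcXY, where X and Y are the quarter-pel offsets
 * (0..3) in the horizontal and vertical direction.  Full-pel and half-pel
 * positions use the copy and lowpass helpers directly; all other positions
 * average two intermediate results with the *_l2 helpers.
 */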
#define H264_MC(OPNAME, SIZE) \
static av_unused void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc00)(uint8_t *dst, uint8_t *src, int stride){\
FUNCC(OPNAME ## pixels ## SIZE)(dst, src, stride, SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc10)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src, half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc20)(uint8_t *dst, uint8_t *src, int stride){\
FUNC(OPNAME ## h264_qpel ## SIZE ## _h_lowpass)(dst, src, stride, stride);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc30)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(half, src, SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, src+sizeof(pixel), half, stride, stride, SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc01)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid, half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc02)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(OPNAME ## h264_qpel ## SIZE ## _v_lowpass)(dst, full_mid, stride, SIZE*sizeof(pixel));\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc03)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t half[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(half, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, full_mid+SIZE*sizeof(pixel), half, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc11)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc31)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc13)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc33)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc22)(uint8_t *dst, uint8_t *src, int stride){\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
FUNC(OPNAME ## h264_qpel ## SIZE ## _hv_lowpass)(dst, tmp, src, stride, SIZE*sizeof(pixel), stride);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc21)(uint8_t *dst, uint8_t *src, int stride){\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src, SIZE*sizeof(pixel), stride);\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc23)(uint8_t *dst, uint8_t *src, int stride){\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfH[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(put_h264_qpel ## SIZE ## _h_lowpass)(halfH, src + stride, SIZE*sizeof(pixel), stride);\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfH, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc12)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2, SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
\
static void FUNCC(OPNAME ## h264_qpel ## SIZE ## _mc32)(uint8_t *dst, uint8_t *src, int stride){\
uint8_t full[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t * const full_mid= full + SIZE*2*sizeof(pixel);\
int16_t tmp[SIZE*(SIZE+5)*sizeof(pixel)];\
uint8_t halfV[SIZE*SIZE*sizeof(pixel)];\
uint8_t halfHV[SIZE*SIZE*sizeof(pixel)];\
FUNC(copy_block ## SIZE )(full, src - stride*2 + sizeof(pixel), SIZE*sizeof(pixel), stride, SIZE + 5);\
FUNC(put_h264_qpel ## SIZE ## _v_lowpass)(halfV, full_mid, SIZE*sizeof(pixel), SIZE*sizeof(pixel));\
FUNC(put_h264_qpel ## SIZE ## _hv_lowpass)(halfHV, tmp, src, SIZE*sizeof(pixel), SIZE*sizeof(pixel), stride);\
FUNC(OPNAME ## pixels ## SIZE ## _l2)(dst, halfV, halfHV, stride, SIZE*sizeof(pixel), SIZE*sizeof(pixel), SIZE);\
}\
#define op_avg(a, b) a = (((a)+CLIP(((b) + 16)>>5)+1)>>1)
//#define op_avg2(a, b) a = (((a)*w1+cm[((b) + 16)>>5]*w2 + o + 64)>>7)
#define op_put(a, b) a = CLIP(((b) + 16)>>5)
#define op2_avg(a, b) a = (((a)+CLIP(((b) + 512)>>10)+1)>>1)
#define op2_put(a, b) a = CLIP(((b) + 512)>>10)
H264_LOWPASS(put_ , op_put, op2_put)
H264_LOWPASS(avg_ , op_avg, op2_avg)
H264_MC(put_, 2)
H264_MC(put_, 4)
H264_MC(put_, 8)
H264_MC(put_, 16)
H264_MC(avg_, 4)
H264_MC(avg_, 8)
H264_MC(avg_, 16)
#undef op_avg
#undef op_put
#undef op2_avg
#undef op2_put
#endif
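/* The _mc00 (full-pel) cases are plain block copies, so they map directly to
 * the generic pixel-copy functions for the current bit depth. */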
#if BIT_DEPTH == 8
# define put_h264_qpel8_mc00_8_c ff_put_pixels8x8_8_c
# define avg_h264_qpel8_mc00_8_c ff_avg_pixels8x8_8_c
# define put_h264_qpel16_mc00_8_c ff_put_pixels16x16_8_c
# define avg_h264_qpel16_mc00_8_c ff_avg_pixels16x16_8_c
#elif BIT_DEPTH == 9
# define put_h264_qpel8_mc00_9_c ff_put_pixels8x8_9_c
# define avg_h264_qpel8_mc00_9_c ff_avg_pixels8x8_9_c
# define put_h264_qpel16_mc00_9_c ff_put_pixels16x16_9_c
# define avg_h264_qpel16_mc00_9_c ff_avg_pixels16x16_9_c
#elif BIT_DEPTH == 10
# define put_h264_qpel8_mc00_10_c ff_put_pixels8x8_10_c
# define avg_h264_qpel8_mc00_10_c ff_avg_pixels8x8_10_c
# define put_h264_qpel16_mc00_10_c ff_put_pixels16x16_10_c
# define avg_h264_qpel16_mc00_10_c ff_avg_pixels16x16_10_c
#endif
void FUNCC(ff_put_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(put_pixels8)(dst, src, stride, 8);
}
void FUNCC(ff_avg_pixels8x8)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(avg_pixels8)(dst, src, stride, 8);
}
void FUNCC(ff_put_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(put_pixels16)(dst, src, stride, 16);
}
void FUNCC(ff_avg_pixels16x16)(uint8_t *dst, uint8_t *src, int stride) {
FUNCC(avg_pixels16)(dst, src, stride, 16);
}
static void FUNCC(clear_block)(DCTELEM *block)
{
memset(block, 0, sizeof(dctcoef)*64);
}
/**
* memset(blocks, 0, sizeof(DCTELEM)*6*64)
*/
static void FUNCC(clear_blocks)(DCTELEM *blocks)
{
memset(blocks, 0, sizeof(dctcoef)*6*64);
}
......@@ -5,20 +5,20 @@
*
* gmc & q-pel & 32/64 bit based MC by Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of Libav.
* This file is part of FFmpeg.
*
* Libav is free software; you can redistribute it and/or
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
......@@ -79,10 +79,10 @@ static inline void FUNC(copy_block16)(uint8_t *dst, const uint8_t *src, int dstS
/* draw the edges of width 'w' of an image of size width, height */
//FIXME check that this is ok for mpeg4 interlaced
static void FUNCC(draw_edges)(uint8_t *_buf, int _wrap, int width, int height, int w, int sides)
static void FUNCC(draw_edges)(uint8_t *p_buf, int p_wrap, int width, int height, int w, int sides)
{
pixel *buf = (pixel*)_buf;
int wrap = _wrap / sizeof(pixel);
pixel *buf = (pixel*)p_buf;
int wrap = p_wrap / sizeof(pixel);
pixel *ptr, *last_line;
int i;
......@@ -192,12 +192,12 @@ void FUNC(ff_emulated_edge_mc)(uint8_t *buf, const uint8_t *src, int linesize, i
}
}
static void FUNCC(add_pixels8)(uint8_t *restrict _pixels, DCTELEM *_block, int line_size)
static void FUNCC(add_pixels8)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size)
{
int i;
pixel *restrict pixels = (pixel *restrict)_pixels;
dctcoef *block = (dctcoef*)_block;
line_size /= sizeof(pixel);
pixel *restrict pixels = (pixel *restrict)p_pixels;
dctcoef *block = (dctcoef*)p_block;
line_size >>= sizeof(pixel)-1;
for(i=0;i<8;i++) {
pixels[0] += block[0];
......@@ -213,12 +213,12 @@ static void FUNCC(add_pixels8)(uint8_t *restrict _pixels, DCTELEM *_block, int l
}
}
static void FUNCC(add_pixels4)(uint8_t *restrict _pixels, DCTELEM *_block, int line_size)
static void FUNCC(add_pixels4)(uint8_t *restrict p_pixels, DCTELEM *p_block, int line_size)
{
int i;
pixel *restrict pixels = (pixel *restrict)_pixels;
dctcoef *block = (dctcoef*)_block;
line_size /= sizeof(pixel);
pixel *restrict pixels = (pixel *restrict)p_pixels;
dctcoef *block = (dctcoef*)p_block;
line_size >>= sizeof(pixel)-1;
for(i=0;i<4;i++) {
pixels[0] += block[0];
......@@ -582,12 +582,12 @@ static inline void FUNC(OPNAME ## _no_rnd_pixels16_l4)(uint8_t *dst, const uint8
FUNC(OPNAME ## _no_rnd_pixels8_l4)(dst+8*sizeof(pixel), src1+8*sizeof(pixel), src2+8*sizeof(pixel), src3+8*sizeof(pixel), src4+8*sizeof(pixel), dst_stride, src_stride1, src_stride2, src_stride3, src_stride4, h);\
}\
\
static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *_block, const uint8_t *_pixels, int line_size, int h)\
static inline void FUNCC(OPNAME ## _pixels2_xy2)(uint8_t *p_block, const uint8_t *p_pixels, int line_size, int h)\
{\
int i, a0, b0, a1, b1;\
pixel *block = (pixel*)_block;\
const pixel *pixels = (const pixel*)_pixels;\
line_size /= sizeof(pixel);\
pixel *block = (pixel*)p_block;\
const pixel *pixels = (const pixel*)p_pixels;\
line_size >>= sizeof(pixel)-1;\
a0= pixels[0];\
b0= pixels[1] + 2;\
a0 += b0;\
......@@ -769,15 +769,15 @@ static void FUNCC(put_no_rnd_pixels8_l2)(uint8_t *dst, const uint8_t *a, const u
}
#define H264_CHROMA_MC(OPNAME, OP)\
static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride /= sizeof(pixel);\
stride >>= sizeof(pixel)-1;\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
......@@ -800,15 +800,15 @@ static void FUNCC(OPNAME ## h264_chroma_mc2)(uint8_t *_dst/*align 8*/, uint8_t *
}\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride /= sizeof(pixel);\
stride >>= sizeof(pixel)-1;\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
......@@ -835,15 +835,15 @@ static void FUNCC(OPNAME ## h264_chroma_mc4)(uint8_t *_dst/*align 8*/, uint8_t *
}\
}\
\
static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *_dst/*align 8*/, uint8_t *_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
static void FUNCC(OPNAME ## h264_chroma_mc8)(uint8_t *p_dst/*align 8*/, uint8_t *p_src/*align 1*/, int stride, int h, int x, int y){\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
const int A=(8-x)*(8-y);\
const int B=( x)*(8-y);\
const int C=(8-x)*( y);\
const int D=( x)*( y);\
int i;\
stride /= sizeof(pixel);\
stride >>= sizeof(pixel)-1;\
\
assert(x<8 && y<8 && x>=0 && y>=0);\
\
......@@ -887,14 +887,14 @@ H264_CHROMA_MC(avg_ , op_avg)
#undef op_put
#define H264_LOWPASS(OPNAME, OP, OP2) \
static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int h=2;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
......@@ -904,14 +904,14 @@ static av_unused void FUNC(OPNAME ## h264_qpel2_h_lowpass)(uint8_t *_dst, uint8_
}\
}\
\
static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int w=2;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
......@@ -928,16 +928,16 @@ static av_unused void FUNC(OPNAME ## h264_qpel2_v_lowpass)(uint8_t *_dst, uint8_
}\
}\
\
static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
const int h=2;\
const int w=2;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
{\
@@ -962,14 +962,14 @@ static av_unused void FUNC(OPNAME ## h264_qpel2_hv_lowpass)(uint8_t *_dst, int16
tmp++;\
}\
}\
static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int h=4;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3]));\
@@ -981,14 +981,14 @@ static void FUNC(OPNAME ## h264_qpel4_h_lowpass)(uint8_t *_dst, uint8_t *_src, i
}\
}\
\
static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int w=4;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
@@ -1009,16 +1009,16 @@ static void FUNC(OPNAME ## h264_qpel4_v_lowpass)(uint8_t *_dst, uint8_t *_src, i
}\
}\
\
static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
const int h=4;\
const int w=4;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
{\
@@ -1050,14 +1050,14 @@ static void FUNC(OPNAME ## h264_qpel4_hv_lowpass)(uint8_t *_dst, int16_t *tmp, u
}\
}\
\
static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int h=8;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<h; i++)\
{\
OP(dst[0], (src[0]+src[1])*20 - (src[-1]+src[2])*5 + (src[-2]+src[3 ]));\
@@ -1073,14 +1073,14 @@ static void FUNC(OPNAME ## h264_qpel8_h_lowpass)(uint8_t *_dst, uint8_t *_src, i
}\
}\
\
static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *_dst, uint8_t *_src, int dstStride, int srcStride){\
static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *p_dst, uint8_t *p_src, int dstStride, int srcStride){\
const int w=8;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
for(i=0; i<w; i++)\
{\
const int srcB= src[-2*srcStride];\
@@ -1109,16 +1109,16 @@ static void FUNC(OPNAME ## h264_qpel8_v_lowpass)(uint8_t *_dst, uint8_t *_src, i
}\
}\
\
static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *_dst, int16_t *tmp, uint8_t *_src, int dstStride, int tmpStride, int srcStride){\
static void FUNC(OPNAME ## h264_qpel8_hv_lowpass)(uint8_t *p_dst, int16_t *tmp, uint8_t *p_src, int dstStride, int tmpStride, int srcStride){\
const int h=8;\
const int w=8;\
const int pad = (BIT_DEPTH > 9) ? (-10 * ((1<<BIT_DEPTH)-1)) : 0;\
INIT_CLIP\
int i;\
pixel *dst = (pixel*)_dst;\
pixel *src = (pixel*)_src;\
dstStride /= sizeof(pixel);\
srcStride /= sizeof(pixel);\
pixel *dst = (pixel*)p_dst;\
pixel *src = (pixel*)p_src;\
dstStride >>= sizeof(pixel)-1;\
srcStride >>= sizeof(pixel)-1;\
src -= 2*srcStride;\
for(i=0; i<h+5; i++)\
{\
@@ -1389,3 +1389,4 @@ static void FUNCC(clear_blocks)(DCTELEM *blocks)
{
memset(blocks, 0, sizeof(dctcoef)*6*64);
}
#include "dsputil.h"
#ifndef BIT_DEPTH
#define BIT_DEPTH 8
#endif
#ifdef AVCODEC_H264_HIGH_DEPTH_H
# undef pixel
# undef pixel2
# undef pixel4
# undef dctcoef
# undef INIT_CLIP
# undef no_rnd_avg_pixel4
# undef rnd_avg_pixel4
# undef AV_RN2P
# undef AV_RN4P
# undef AV_WN2P
# undef AV_WN4P
# undef AV_WN4PA
# undef CLIP
# undef FUNC
# undef FUNCC
# undef av_clip_pixel
# undef PIXEL_SPLAT_X4
#else
# define AVCODEC_H264_HIGH_DEPTH_H
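/* CLIP_PIXEL(depth) generates av_clip_pixel_<depth>(), which clamps p to
 * [0, (1<<depth)-1] without a second branch: for out-of-range p, (-p)>>31 is
 * 0 when p is negative (clamp to 0) and all ones when p exceeds pixel_max
 * (clamp to pixel_max), relying on an arithmetic right shift. */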
# define CLIP_PIXEL(depth)\
static inline uint16_t av_clip_pixel_ ## depth (int p)\
{\
const int pixel_max = (1 << depth)-1;\
return (p & ~pixel_max) ? (-p)>>31 & pixel_max : p;\
}
CLIP_PIXEL( 9)
CLIP_PIXEL(10)
#endif
#if BIT_DEPTH > 8
# define pixel uint16_t
# define pixel2 uint32_t
# define pixel4 uint64_t
# define dctcoef int32_t
# define INIT_CLIP
# define no_rnd_avg_pixel4 no_rnd_avg64
# define rnd_avg_pixel4 rnd_avg64
# define AV_RN2P AV_RN32
# define AV_RN4P AV_RN64
# define AV_WN2P AV_WN32
# define AV_WN4P AV_WN64
# define AV_WN4PA AV_WN64A
# define PIXEL_SPLAT_X4(x) ((x)*0x0001000100010001ULL)
#else
# define pixel uint8_t
# define pixel2 uint16_t
# define pixel4 uint32_t
# define dctcoef int16_t
# define INIT_CLIP uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;
# define no_rnd_avg_pixel4 no_rnd_avg32
# define rnd_avg_pixel4 rnd_avg32
# define AV_RN2P AV_RN16
# define AV_RN4P AV_RN32
# define AV_WN2P AV_WN16
# define AV_WN4P AV_WN32
# define AV_WN4PA AV_WN32A
# define PIXEL_SPLAT_X4(x) ((x)*0x01010101U)
#endif
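/* FUNC() appends the bit-depth suffix and FUNCC() additionally appends _c for
 * the plain C versions, so each template included once per BIT_DEPTH yields a
 * separate set of 8-, 9- and 10-bit functions. */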
#if BIT_DEPTH == 8
# define av_clip_pixel(a) av_clip_uint8(a)
# define CLIP(a) cm[a]
# define FUNC(a) a ## _8
# define FUNCC(a) a ## _8_c
#elif BIT_DEPTH == 9
# define av_clip_pixel(a) av_clip_pixel_9(a)
# define CLIP(a) av_clip_pixel_9(a)
# define FUNC(a) a ## _9
# define FUNCC(a) a ## _9_c
#elif BIT_DEPTH == 10
# define av_clip_pixel(a) av_clip_pixel_10(a)
# define CLIP(a) av_clip_pixel_10(a)
# define FUNC(a) a ## _10
# define FUNCC(a) a ## _10_c
#endif
/*
* H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
* Copyright (c) 2003-2011 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* H.264 / AVC / MPEG4 part10 DSP functions.
* @author Michael Niedermayer <michaelni@gmx.at>
*/
#include "h264_high_depth.h"
#define op_scale1(x) block[x] = av_clip_pixel( (block[x]*weight + offset) >> log2_denom )
#define op_scale2(x) dst[x] = av_clip_pixel( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
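/* op_scale1() is explicit (uni-directional) weighted prediction,
 * block[x] = clip((block[x]*weight + offset) >> log2_denom); op_scale2() is the
 * bi-directional form, combining src and dst with their own weights and a
 * shared rounding offset shifted by log2_denom+1. */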
#define H264_WEIGHT(W,H) \
static void FUNCC(weight_h264_pixels ## W ## x ## H)(uint8_t *p_block, int stride, int log2_denom, int weight, int offset){ \
int y; \
pixel *block = (pixel*)p_block; \
stride >>= sizeof(pixel)-1; \
offset <<= (log2_denom + (BIT_DEPTH-8)); \
if(log2_denom) offset += 1<<(log2_denom-1); \
for(y=0; y<H; y++, block += stride){ \
op_scale1(0); \
op_scale1(1); \
if(W==2) continue; \
op_scale1(2); \
op_scale1(3); \
if(W==4) continue; \
op_scale1(4); \
op_scale1(5); \
op_scale1(6); \
op_scale1(7); \
if(W==8) continue; \
op_scale1(8); \
op_scale1(9); \
op_scale1(10); \
op_scale1(11); \
op_scale1(12); \
op_scale1(13); \
op_scale1(14); \
op_scale1(15); \
} \
} \
static void FUNCC(biweight_h264_pixels ## W ## x ## H)(uint8_t *p_dst, uint8_t *p_src, int stride, int log2_denom, int weightd, int weights, int offset){ \
int y; \
pixel *dst = (pixel*)p_dst; \
pixel *src = (pixel*)p_src; \
stride >>= sizeof(pixel)-1; \
offset = ((offset + 1) | 1) << log2_denom; \
for(y=0; y<H; y++, dst += stride, src += stride){ \
op_scale2(0); \
op_scale2(1); \
if(W==2) continue; \
op_scale2(2); \
op_scale2(3); \
if(W==4) continue; \
op_scale2(4); \
op_scale2(5); \
op_scale2(6); \
op_scale2(7); \
if(W==8) continue; \
op_scale2(8); \
op_scale2(9); \
op_scale2(10); \
op_scale2(11); \
op_scale2(12); \
op_scale2(13); \
op_scale2(14); \
op_scale2(15); \
} \
}
H264_WEIGHT(16,16)
H264_WEIGHT(16,8)
H264_WEIGHT(8,16)
H264_WEIGHT(8,8)
H264_WEIGHT(8,4)
H264_WEIGHT(4,8)
H264_WEIGHT(4,4)
H264_WEIGHT(4,2)
H264_WEIGHT(2,4)
H264_WEIGHT(2,2)
#undef op_scale1
#undef op_scale2
#undef H264_WEIGHT
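/* In-loop deblocking: the luma edge filter below walks 4 groups of 4 samples
 * along one edge; tc0[i] < 0 disables filtering for a group, otherwise p0/q0
 * (and conditionally p1/q1) are adjusted by a delta clipped to +-tc, as in the
 * normal filter of the H.264 specification. */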
static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma)(uint8_t *p_pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0)
{
pixel *pix = (pixel*)p_pix;
int i, d;
xstride >>= sizeof(pixel)-1;
ystride >>= sizeof(pixel)-1;
for( i = 0; i < 4; i++ ) {
if( tc0[i] < 0 ) {
pix += 4*ystride;
continue;
}
for( d = 0; d < 4; d++ ) {
const int p0 = pix[-1*xstride];
const int p1 = pix[-2*xstride];
const int p2 = pix[-3*xstride];
const int q0 = pix[0];
const int q1 = pix[1*xstride];
const int q2 = pix[2*xstride];
if( FFABS( p0 - q0 ) < alpha &&
FFABS( p1 - p0 ) < beta &&
FFABS( q1 - q0 ) < beta ) {
int tc = tc0[i];
int i_delta;
if( FFABS( p2 - p0 ) < beta ) {
if(tc0[i])
pix[-2*xstride] = p1 + av_clip( (( p2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - p1, -tc0[i], tc0[i] );
tc++;
}
if( FFABS( q2 - q0 ) < beta ) {
if(tc0[i])
pix[ xstride] = q1 + av_clip( (( q2 + ( ( p0 + q0 + 1 ) >> 1 ) ) >> 1) - q1, -tc0[i], tc0[i] );
tc++;
}
i_delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
pix[-xstride] = av_clip_pixel( p0 + i_delta ); /* p0' */
pix[0] = av_clip_pixel( q0 - i_delta ); /* q0' */
}
pix += ystride;
}
}
}
static void FUNCC(h264_v_loop_filter_luma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
FUNCC(h264_loop_filter_luma)(pix, stride, sizeof(pixel), alpha, beta, tc0);
}
static void FUNCC(h264_h_loop_filter_luma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
FUNCC(h264_loop_filter_luma)(pix, sizeof(pixel), stride, alpha, beta, tc0);
}
static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma_intra)(uint8_t *p_pix, int xstride, int ystride, int alpha, int beta)
{
pixel *pix = (pixel*)p_pix;
int d;
xstride >>= sizeof(pixel)-1;
ystride >>= sizeof(pixel)-1;
for( d = 0; d < 16; d++ ) {
const int p2 = pix[-3*xstride];
const int p1 = pix[-2*xstride];
const int p0 = pix[-1*xstride];
const int q0 = pix[ 0*xstride];
const int q1 = pix[ 1*xstride];
const int q2 = pix[ 2*xstride];
if( FFABS( p0 - q0 ) < alpha &&
FFABS( p1 - p0 ) < beta &&
FFABS( q1 - q0 ) < beta ) {
if(FFABS( p0 - q0 ) < (( alpha >> 2 ) + 2 )){
if( FFABS( p2 - p0 ) < beta)
{
const int p3 = pix[-4*xstride];
/* p0', p1', p2' */
pix[-1*xstride] = ( p2 + 2*p1 + 2*p0 + 2*q0 + q1 + 4 ) >> 3;
pix[-2*xstride] = ( p2 + p1 + p0 + q0 + 2 ) >> 2;
pix[-3*xstride] = ( 2*p3 + 3*p2 + p1 + p0 + q0 + 4 ) >> 3;
} else {
/* p0' */
pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
}
if( FFABS( q2 - q0 ) < beta)
{
const int q3 = pix[3*xstride];
/* q0', q1', q2' */
pix[0*xstride] = ( p1 + 2*p0 + 2*q0 + 2*q1 + q2 + 4 ) >> 3;
pix[1*xstride] = ( p0 + q0 + q1 + q2 + 2 ) >> 2;
pix[2*xstride] = ( 2*q3 + 3*q2 + q1 + q0 + p0 + 4 ) >> 3;
} else {
/* q0' */
pix[0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
}
}else{
/* p0', q0' */
pix[-1*xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2;
pix[ 0*xstride] = ( 2*q1 + q0 + p1 + 2 ) >> 2;
}
}
pix += ystride;
}
}
static void FUNCC(h264_v_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta)
{
FUNCC(h264_loop_filter_luma_intra)(pix, stride, sizeof(pixel), alpha, beta);
}
static void FUNCC(h264_h_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta)
{
FUNCC(h264_loop_filter_luma_intra)(pix, sizeof(pixel), stride, alpha, beta);
}
static av_always_inline av_flatten void FUNCC(h264_loop_filter_chroma)(uint8_t *p_pix, int xstride, int ystride, int alpha, int beta, int8_t *tc0)
{
pixel *pix = (pixel*)p_pix;
int i, d;
xstride >>= sizeof(pixel)-1;
ystride >>= sizeof(pixel)-1;
for( i = 0; i < 4; i++ ) {
const int tc = tc0[i];
if( tc <= 0 ) {
pix += 2*ystride;
continue;
}
for( d = 0; d < 2; d++ ) {
const int p0 = pix[-1*xstride];
const int p1 = pix[-2*xstride];
const int q0 = pix[0];
const int q1 = pix[1*xstride];
if( FFABS( p0 - q0 ) < alpha &&
FFABS( p1 - p0 ) < beta &&
FFABS( q1 - q0 ) < beta ) {
int delta = av_clip( (((q0 - p0 ) << 2) + (p1 - q1) + 4) >> 3, -tc, tc );
pix[-xstride] = av_clip_pixel( p0 + delta ); /* p0' */
pix[0] = av_clip_pixel( q0 - delta ); /* q0' */
}
pix += ystride;
}
}
}
static void FUNCC(h264_v_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
FUNCC(h264_loop_filter_chroma)(pix, stride, sizeof(pixel), alpha, beta, tc0);
}
static void FUNCC(h264_h_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
FUNCC(h264_loop_filter_chroma)(pix, sizeof(pixel), stride, alpha, beta, tc0);
}
static av_always_inline av_flatten void FUNCC(h264_loop_filter_chroma_intra)(uint8_t *p_pix, int xstride, int ystride, int alpha, int beta)
{
pixel *pix = (pixel*)p_pix;
int d;
xstride >>= sizeof(pixel)-1;
ystride >>= sizeof(pixel)-1;
for( d = 0; d < 8; d++ ) {
const int p0 = pix[-1*xstride];
const int p1 = pix[-2*xstride];
const int q0 = pix[0];
const int q1 = pix[1*xstride];
if( FFABS( p0 - q0 ) < alpha &&
FFABS( p1 - p0 ) < beta &&
FFABS( q1 - q0 ) < beta ) {
pix[-xstride] = ( 2*p1 + p0 + q1 + 2 ) >> 2; /* p0' */
pix[0] = ( 2*q1 + q0 + p1 + 2 ) >> 2; /* q0' */
}
pix += ystride;
}
}
static void FUNCC(h264_v_loop_filter_chroma_intra)(uint8_t *pix, int stride, int alpha, int beta)
{
FUNCC(h264_loop_filter_chroma_intra)(pix, stride, sizeof(pixel), alpha, beta);
}
static void FUNCC(h264_h_loop_filter_chroma_intra)(uint8_t *pix, int stride, int alpha, int beta)
{
FUNCC(h264_loop_filter_chroma_intra)(pix, sizeof(pixel), stride, alpha, beta);
}
/*
* H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
* Copyright (c) 2003-2010 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2003-2011 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of Libav.
* This file is part of FFmpeg.
*
* Libav is free software; you can redistribute it and/or
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -30,10 +30,10 @@
#define op_scale1(x) block[x] = av_clip_pixel( (block[x]*weight + offset) >> log2_denom )
#define op_scale2(x) dst[x] = av_clip_pixel( (src[x]*weights + dst[x]*weightd + offset) >> (log2_denom+1))
#define H264_WEIGHT(W,H) \
static void FUNCC(weight_h264_pixels ## W ## x ## H)(uint8_t *_block, int stride, int log2_denom, int weight, int offset){ \
static void FUNCC(weight_h264_pixels ## W ## x ## H)(uint8_t *p_block, int stride, int log2_denom, int weight, int offset){ \
int y; \
pixel *block = (pixel*)_block; \
stride /= sizeof(pixel); \
pixel *block = (pixel*)p_block; \
stride >>= sizeof(pixel)-1; \
offset <<= (log2_denom + (BIT_DEPTH-8)); \
if(log2_denom) offset += 1<<(log2_denom-1); \
for(y=0; y<H; y++, block += stride){ \
@@ -58,11 +58,11 @@ static void FUNCC(weight_h264_pixels ## W ## x ## H)(uint8_t *_block, int stride
op_scale1(15); \
} \
} \
static void FUNCC(biweight_h264_pixels ## W ## x ## H)(uint8_t *_dst, uint8_t *_src, int stride, int log2_denom, int weightd, int weights, int offset){ \
static void FUNCC(biweight_h264_pixels ## W ## x ## H)(uint8_t *p_dst, uint8_t *p_src, int stride, int log2_denom, int weightd, int weights, int offset){ \
int y; \
pixel *dst = (pixel*)_dst; \
pixel *src = (pixel*)_src; \
stride /= sizeof(pixel); \
pixel *dst = (pixel*)p_dst; \
pixel *src = (pixel*)p_src; \
stride >>= sizeof(pixel)-1; \
offset = ((offset + 1) | 1) << log2_denom; \
for(y=0; y<H; y++, dst += stride, src += stride){ \
op_scale2(0); \
@@ -102,12 +102,12 @@ H264_WEIGHT(2,2)
#undef op_scale2
#undef H264_WEIGHT
static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma)(uint8_t *_pix, int xstride, int ystride, int inner_iters, int alpha, int beta, int8_t *tc0)
static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma)(uint8_t *p_pix, int xstride, int ystride, int inner_iters, int alpha, int beta, int8_t *tc0)
{
pixel *pix = (pixel*)_pix;
pixel *pix = (pixel*)p_pix;
int i, d;
xstride /= sizeof(pixel);
ystride /= sizeof(pixel);
xstride >>= sizeof(pixel)-1;
ystride >>= sizeof(pixel)-1;
alpha <<= BIT_DEPTH - 8;
beta <<= BIT_DEPTH - 8;
for( i = 0; i < 4; i++ ) {
@@ -163,12 +163,12 @@ static void FUNCC(h264_h_loop_filter_luma_mbaff)(uint8_t *pix, int stride, int a
FUNCC(h264_loop_filter_luma)(pix, sizeof(pixel), stride, 2, alpha, beta, tc0);
}
static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma_intra)(uint8_t *_pix, int xstride, int ystride, int inner_iters, int alpha, int beta)
static av_always_inline av_flatten void FUNCC(h264_loop_filter_luma_intra)(uint8_t *p_pix, int xstride, int ystride, int inner_iters, int alpha, int beta)
{
pixel *pix = (pixel*)_pix;
pixel *pix = (pixel*)p_pix;
int d;
xstride /= sizeof(pixel);
ystride /= sizeof(pixel);
xstride >>= sizeof(pixel)-1;
ystride >>= sizeof(pixel)-1;
alpha <<= BIT_DEPTH - 8;
beta <<= BIT_DEPTH - 8;
for( d = 0; d < 4 * inner_iters; d++ ) {
@@ -229,14 +229,14 @@ static void FUNCC(h264_h_loop_filter_luma_mbaff_intra)(uint8_t *pix, int stride,
FUNCC(h264_loop_filter_luma_intra)(pix, sizeof(pixel), stride, 2, alpha, beta);
}
static av_always_inline av_flatten void FUNCC(h264_loop_filter_chroma)(uint8_t *_pix, int xstride, int ystride, int inner_iters, int alpha, int beta, int8_t *tc0)
static av_always_inline av_flatten void FUNCC(h264_loop_filter_chroma)(uint8_t *p_pix, int xstride, int ystride, int inner_iters, int alpha, int beta, int8_t *tc0)
{
pixel *pix = (pixel*)_pix;
pixel *pix = (pixel*)p_pix;
int i, d;
xstride /= sizeof(pixel);
ystride /= sizeof(pixel);
alpha <<= BIT_DEPTH - 8;
beta <<= BIT_DEPTH - 8;
xstride >>= sizeof(pixel)-1;
ystride >>= sizeof(pixel)-1;
for( i = 0; i < 4; i++ ) {
const int tc = ((tc0[i] - 1) << (BIT_DEPTH - 8)) + 1;
if( tc <= 0 ) {
@@ -275,12 +275,12 @@ static void FUNCC(h264_h_loop_filter_chroma_mbaff)(uint8_t *pix, int stride, int
FUNCC(h264_loop_filter_chroma)(pix, sizeof(pixel), stride, 1, alpha, beta, tc0);
}
static av_always_inline av_flatten void FUNCC(h264_loop_filter_chroma_intra)(uint8_t *_pix, int xstride, int ystride, int inner_iters, int alpha, int beta)
static av_always_inline av_flatten void FUNCC(h264_loop_filter_chroma_intra)(uint8_t *p_pix, int xstride, int ystride, int inner_iters, int alpha, int beta)
{
pixel *pix = (pixel*)_pix;
pixel *pix = (pixel*)p_pix;
int d;
xstride /= sizeof(pixel);
ystride /= sizeof(pixel);
xstride >>= sizeof(pixel)-1;
ystride >>= sizeof(pixel)-1;
alpha <<= BIT_DEPTH - 8;
beta <<= BIT_DEPTH - 8;
for( d = 0; d < 4 * inner_iters; d++ ) {
/*
* H.264 IDCT
* Copyright (c) 2004-2011 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* H.264 IDCT.
* @author Michael Niedermayer <michaelni@gmx.at>
*/
#include "h264_high_depth.h"
#ifndef AVCODEC_H264IDCT_INTERNAL_H
#define AVCODEC_H264IDCT_INTERNAL_H
//FIXME this table is a duplicate from h264data.h and will be removed once the tables from h264 have been split
static const uint8_t scan8[16 + 2*4]={
4+1*8, 5+1*8, 4+2*8, 5+2*8,
6+1*8, 7+1*8, 6+2*8, 7+2*8,
4+3*8, 5+3*8, 4+4*8, 5+4*8,
6+3*8, 7+3*8, 6+4*8, 7+4*8,
1+1*8, 2+1*8,
1+2*8, 2+2*8,
1+4*8, 2+4*8,
1+5*8, 2+5*8,
};
#endif
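/* 4x4 inverse integer transform: a column butterfly pass over 'block' followed
 * by a row pass, with the result scaled down by 'shift' and either written
 * (add=0) or accumulated into the existing dst samples (add=1). */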
static av_always_inline void FUNCC(idct_internal)(uint8_t *p_dst, DCTELEM *p_block, int stride, int block_stride, int shift, int add){
int i;
INIT_CLIP
pixel *dst = (pixel*)p_dst;
dctcoef *block = (dctcoef*)p_block;
stride >>= sizeof(pixel)-1;
block[0] += 1<<(shift-1);
for(i=0; i<4; i++){
const int z0= block[i + block_stride*0] + block[i + block_stride*2];
const int z1= block[i + block_stride*0] - block[i + block_stride*2];
const int z2= (block[i + block_stride*1]>>1) - block[i + block_stride*3];
const int z3= block[i + block_stride*1] + (block[i + block_stride*3]>>1);
block[i + block_stride*0]= z0 + z3;
block[i + block_stride*1]= z1 + z2;
block[i + block_stride*2]= z1 - z2;
block[i + block_stride*3]= z0 - z3;
}
for(i=0; i<4; i++){
const int z0= block[0 + block_stride*i] + block[2 + block_stride*i];
const int z1= block[0 + block_stride*i] - block[2 + block_stride*i];
const int z2= (block[1 + block_stride*i]>>1) - block[3 + block_stride*i];
const int z3= block[1 + block_stride*i] + (block[3 + block_stride*i]>>1);
dst[i + 0*stride]= CLIP(add*dst[i + 0*stride] + ((z0 + z3) >> shift));
dst[i + 1*stride]= CLIP(add*dst[i + 1*stride] + ((z1 + z2) >> shift));
dst[i + 2*stride]= CLIP(add*dst[i + 2*stride] + ((z1 - z2) >> shift));
dst[i + 3*stride]= CLIP(add*dst[i + 3*stride] + ((z0 - z3) >> shift));
}
}
void FUNCC(ff_h264_idct_add)(uint8_t *dst, DCTELEM *block, int stride){
FUNCC(idct_internal)(dst, block, stride, 4, 6, 1);
}
void FUNCC(ff_h264_lowres_idct_add)(uint8_t *dst, int stride, DCTELEM *block){
FUNCC(idct_internal)(dst, block, stride, 8, 3, 1);
}
void FUNCC(ff_h264_lowres_idct_put)(uint8_t *dst, int stride, DCTELEM *block){
FUNCC(idct_internal)(dst, block, stride, 8, 3, 0);
}
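/* 8x8 inverse transform: the same column/row butterfly structure extended to
 * 8-point basis functions, with a fixed DC rounding of 32 and a final >> 6. */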
void FUNCC(ff_h264_idct8_add)(uint8_t *p_dst, DCTELEM *p_block, int stride){
int i;
INIT_CLIP
pixel *dst = (pixel*)p_dst;
dctcoef *block = (dctcoef*)p_block;
stride >>= sizeof(pixel)-1;
block[0] += 32;
for( i = 0; i < 8; i++ )
{
const int a0 = block[i+0*8] + block[i+4*8];
const int a2 = block[i+0*8] - block[i+4*8];
const int a4 = (block[i+2*8]>>1) - block[i+6*8];
const int a6 = (block[i+6*8]>>1) + block[i+2*8];
const int b0 = a0 + a6;
const int b2 = a2 + a4;
const int b4 = a2 - a4;
const int b6 = a0 - a6;
const int a1 = -block[i+3*8] + block[i+5*8] - block[i+7*8] - (block[i+7*8]>>1);
const int a3 = block[i+1*8] + block[i+7*8] - block[i+3*8] - (block[i+3*8]>>1);
const int a5 = -block[i+1*8] + block[i+7*8] + block[i+5*8] + (block[i+5*8]>>1);
const int a7 = block[i+3*8] + block[i+5*8] + block[i+1*8] + (block[i+1*8]>>1);
const int b1 = (a7>>2) + a1;
const int b3 = a3 + (a5>>2);
const int b5 = (a3>>2) - a5;
const int b7 = a7 - (a1>>2);
block[i+0*8] = b0 + b7;
block[i+7*8] = b0 - b7;
block[i+1*8] = b2 + b5;
block[i+6*8] = b2 - b5;
block[i+2*8] = b4 + b3;
block[i+5*8] = b4 - b3;
block[i+3*8] = b6 + b1;
block[i+4*8] = b6 - b1;
}
for( i = 0; i < 8; i++ )
{
const int a0 = block[0+i*8] + block[4+i*8];
const int a2 = block[0+i*8] - block[4+i*8];
const int a4 = (block[2+i*8]>>1) - block[6+i*8];
const int a6 = (block[6+i*8]>>1) + block[2+i*8];
const int b0 = a0 + a6;
const int b2 = a2 + a4;
const int b4 = a2 - a4;
const int b6 = a0 - a6;
const int a1 = -block[3+i*8] + block[5+i*8] - block[7+i*8] - (block[7+i*8]>>1);
const int a3 = block[1+i*8] + block[7+i*8] - block[3+i*8] - (block[3+i*8]>>1);
const int a5 = -block[1+i*8] + block[7+i*8] + block[5+i*8] + (block[5+i*8]>>1);
const int a7 = block[3+i*8] + block[5+i*8] + block[1+i*8] + (block[1+i*8]>>1);
const int b1 = (a7>>2) + a1;
const int b3 = a3 + (a5>>2);
const int b5 = (a3>>2) - a5;
const int b7 = a7 - (a1>>2);
dst[i + 0*stride] = CLIP( dst[i + 0*stride] + ((b0 + b7) >> 6) );
dst[i + 1*stride] = CLIP( dst[i + 1*stride] + ((b2 + b5) >> 6) );
dst[i + 2*stride] = CLIP( dst[i + 2*stride] + ((b4 + b3) >> 6) );
dst[i + 3*stride] = CLIP( dst[i + 3*stride] + ((b6 + b1) >> 6) );
dst[i + 4*stride] = CLIP( dst[i + 4*stride] + ((b6 - b1) >> 6) );
dst[i + 5*stride] = CLIP( dst[i + 5*stride] + ((b4 - b3) >> 6) );
dst[i + 6*stride] = CLIP( dst[i + 6*stride] + ((b2 - b5) >> 6) );
dst[i + 7*stride] = CLIP( dst[i + 7*stride] + ((b0 - b7) >> 6) );
}
}
// assumes all AC coefs are 0
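// (the transform then reduces to adding the rounded DC term, (block[0] + 32) >> 6, to every sample of the block)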
void FUNCC(ff_h264_idct_dc_add)(uint8_t *p_dst, DCTELEM *block, int stride){
int i, j;
int dc = (((dctcoef*)block)[0] + 32) >> 6;
INIT_CLIP
pixel *dst = (pixel*)p_dst;
stride >>= sizeof(pixel)-1;
for( j = 0; j < 4; j++ )
{
for( i = 0; i < 4; i++ )
dst[i] = CLIP( dst[i] + dc );
dst += stride;
}
}
void FUNCC(ff_h264_idct8_dc_add)(uint8_t *p_dst, DCTELEM *block, int stride){
int i, j;
int dc = (((dctcoef*)block)[0] + 32) >> 6;
INIT_CLIP
pixel *dst = (pixel*)p_dst;
stride >>= sizeof(pixel)-1;
for( j = 0; j < 8; j++ )
{
for( i = 0; i < 8; i++ )
dst[i] = CLIP( dst[i] + dc );
dst += stride;
}
}
void FUNCC(ff_h264_idct_add16)(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
int i;
for(i=0; i<16; i++){
int nnz = nnzc[ scan8[i] ];
if(nnz){
if(nnz==1 && ((dctcoef*)block)[i*16]) FUNCC(ff_h264_idct_dc_add)(dst + block_offset[i], block + i*16*sizeof(pixel), stride);
else FUNCC(idct_internal )(dst + block_offset[i], block + i*16*sizeof(pixel), stride, 4, 6, 1);
}
}
}
void FUNCC(ff_h264_idct_add16intra)(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
int i;
for(i=0; i<16; i++){
if(nnzc[ scan8[i] ]) FUNCC(idct_internal )(dst + block_offset[i], block + i*16*sizeof(pixel), stride, 4, 6, 1);
else if(((dctcoef*)block)[i*16]) FUNCC(ff_h264_idct_dc_add)(dst + block_offset[i], block + i*16*sizeof(pixel), stride);
}
}
void FUNCC(ff_h264_idct8_add4)(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
int i;
for(i=0; i<16; i+=4){
int nnz = nnzc[ scan8[i] ];
if(nnz){
if(nnz==1 && ((dctcoef*)block)[i*16]) FUNCC(ff_h264_idct8_dc_add)(dst + block_offset[i], block + i*16*sizeof(pixel), stride);
else FUNCC(ff_h264_idct8_add )(dst + block_offset[i], block + i*16*sizeof(pixel), stride);
}
}
}
void FUNCC(ff_h264_idct_add8)(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]){
int i;
for(i=16; i<16+8; i++){
if(nnzc[ scan8[i] ])
FUNCC(ff_h264_idct_add )(dest[(i&4)>>2] + block_offset[i], block + i*16*sizeof(pixel), stride);
else if(((dctcoef*)block)[i*16])
FUNCC(ff_h264_idct_dc_add)(dest[(i&4)>>2] + block_offset[i], block + i*16*sizeof(pixel), stride);
}
}
/**
* IDCT transforms the 16 dc values and dequantizes them.
* @param qmul dequantization multiplier derived from the quantization parameter
*/
void FUNCC(ff_h264_luma_dc_dequant_idct)(DCTELEM *p_output, DCTELEM *p_input, int qmul){
#define stride 16
int i;
int temp[16];
static const uint8_t x_offset[4]={0, 2*stride, 8*stride, 10*stride};
dctcoef *input = (dctcoef*)p_input;
dctcoef *output = (dctcoef*)p_output;
for(i=0; i<4; i++){
const int z0= input[4*i+0] + input[4*i+1];
const int z1= input[4*i+0] - input[4*i+1];
const int z2= input[4*i+2] - input[4*i+3];
const int z3= input[4*i+2] + input[4*i+3];
temp[4*i+0]= z0+z3;
temp[4*i+1]= z0-z3;
temp[4*i+2]= z1-z2;
temp[4*i+3]= z1+z2;
}
for(i=0; i<4; i++){
const int offset= x_offset[i];
const int z0= temp[4*0+i] + temp[4*2+i];
const int z1= temp[4*0+i] - temp[4*2+i];
const int z2= temp[4*1+i] - temp[4*3+i];
const int z3= temp[4*1+i] + temp[4*3+i];
output[stride* 0+offset]= ((((z0 + z3)*qmul + 128 ) >> 8));
output[stride* 1+offset]= ((((z1 + z2)*qmul + 128 ) >> 8));
output[stride* 4+offset]= ((((z1 - z2)*qmul + 128 ) >> 8));
output[stride* 5+offset]= ((((z0 - z3)*qmul + 128 ) >> 8));
}
#undef stride
}
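/* The chroma DC coefficients form a 2x2 block (stored with a row stride of
 * 16*2 and an xStride of 16); a 2x2 Hadamard butterfly is applied in place and
 * each result is scaled by qmul with a final >> 7. */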
void FUNCC(ff_h264_chroma_dc_dequant_idct)(DCTELEM *p_block, int qmul){
const int stride= 16*2;
const int xStride= 16;
int a,b,c,d,e;
dctcoef *block = (dctcoef*)p_block;
a= block[stride*0 + xStride*0];
b= block[stride*0 + xStride*1];
c= block[stride*1 + xStride*0];
d= block[stride*1 + xStride*1];
e= a-b;
a= a+b;
b= c-d;
c= c+d;
block[stride*0 + xStride*0]= ((a+c)*qmul) >> 7;
block[stride*0 + xStride*1]= ((e+b)*qmul) >> 7;
block[stride*1 + xStride*0]= ((a-c)*qmul) >> 7;
block[stride*1 + xStride*1]= ((e-b)*qmul) >> 7;
}
@@ -2,20 +2,20 @@
* H.264 IDCT
* Copyright (c) 2004-2011 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of Libav.
* This file is part of FFmpeg.
*
* Libav is free software; you can redistribute it and/or
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -25,7 +25,7 @@
* @author Michael Niedermayer <michaelni@gmx.at>
*/
#include "high_bit_depth.h"
#include "h264_high_depth.h"
#ifndef AVCODEC_H264IDCT_INTERNAL_H
#define AVCODEC_H264IDCT_INTERNAL_H
@@ -42,12 +42,12 @@ static const uint8_t scan8[16 + 2*4]={
};
#endif
static av_always_inline void FUNCC(idct_internal)(uint8_t *_dst, DCTELEM *_block, int stride, int block_stride, int shift, int add){
static av_always_inline void FUNCC(idct_internal)(uint8_t *p_dst, DCTELEM *p_block, int stride, int block_stride, int shift, int add){
int i;
INIT_CLIP
pixel *dst = (pixel*)_dst;
dctcoef *block = (dctcoef*)_block;
stride /= sizeof(pixel);
pixel *dst = (pixel*)p_dst;
dctcoef *block = (dctcoef*)p_block;
stride >>= sizeof(pixel)-1;
block[0] += 1<<(shift-1);
@@ -88,12 +88,12 @@ void FUNCC(ff_h264_lowres_idct_put)(uint8_t *dst, int stride, DCTELEM *block){
FUNCC(idct_internal)(dst, block, stride, 8, 3, 0);
}
void FUNCC(ff_h264_idct8_add)(uint8_t *_dst, DCTELEM *_block, int stride){
void FUNCC(ff_h264_idct8_add)(uint8_t *p_dst, DCTELEM *p_block, int stride){
int i;
INIT_CLIP
pixel *dst = (pixel*)_dst;
dctcoef *block = (dctcoef*)_block;
stride /= sizeof(pixel);
pixel *dst = (pixel*)p_dst;
dctcoef *block = (dctcoef*)p_block;
stride >>= sizeof(pixel)-1;
block[0] += 32;
@@ -162,12 +162,12 @@ void FUNCC(ff_h264_idct8_add)(uint8_t *_dst, DCTELEM *_block, int stride){
}
// assumes all AC coefs are 0
void FUNCC(ff_h264_idct_dc_add)(uint8_t *_dst, DCTELEM *block, int stride){
void FUNCC(ff_h264_idct_dc_add)(uint8_t *p_dst, DCTELEM *block, int stride){
int i, j;
int dc = (((dctcoef*)block)[0] + 32) >> 6;
INIT_CLIP
pixel *dst = (pixel*)_dst;
stride /= sizeof(pixel);
pixel *dst = (pixel*)p_dst;
stride >>= sizeof(pixel)-1;
for( j = 0; j < 4; j++ )
{
for( i = 0; i < 4; i++ )
@@ -176,12 +176,12 @@ void FUNCC(ff_h264_idct_dc_add)(uint8_t *_dst, DCTELEM *block, int stride){
}
}
void FUNCC(ff_h264_idct8_dc_add)(uint8_t *_dst, DCTELEM *block, int stride){
void FUNCC(ff_h264_idct8_dc_add)(uint8_t *p_dst, DCTELEM *block, int stride){
int i, j;
int dc = (((dctcoef*)block)[0] + 32) >> 6;
INIT_CLIP
pixel *dst = (pixel*)_dst;
stride /= sizeof(pixel);
pixel *dst = (pixel*)p_dst;
stride >>= sizeof(pixel)-1;
for( j = 0; j < 8; j++ )
{
for( i = 0; i < 8; i++ )
@@ -233,13 +233,13 @@ void FUNCC(ff_h264_idct_add8)(uint8_t **dest, const int *block_offset, DCTELEM *
* IDCT transforms the 16 dc values and dequantizes them.
* @param qp quantization parameter
*/
void FUNCC(ff_h264_luma_dc_dequant_idct)(DCTELEM *_output, DCTELEM *_input, int qmul){
void FUNCC(ff_h264_luma_dc_dequant_idct)(DCTELEM *p_output, DCTELEM *p_input, int qmul){
#define stride 16
int i;
int temp[16];
static const uint8_t x_offset[4]={0, 2*stride, 8*stride, 10*stride};
dctcoef *input = (dctcoef*)_input;
dctcoef *output = (dctcoef*)_output;
dctcoef *input = (dctcoef*)p_input;
dctcoef *output = (dctcoef*)p_output;
for(i=0; i<4; i++){
const int z0= input[4*i+0] + input[4*i+1];
@@ -268,11 +268,11 @@ void FUNCC(ff_h264_luma_dc_dequant_idct)(DCTELEM *_output, DCTELEM *_input, int
#undef stride
}
void FUNCC(ff_h264_chroma_dc_dequant_idct)(DCTELEM *_block, int qmul){
void FUNCC(ff_h264_chroma_dc_dequant_idct)(DCTELEM *p_block, int qmul){
const int stride= 16*2;
const int xStride= 16;
int a,b,c,d,e;
dctcoef *block = (dctcoef*)_block;
dctcoef *block = (dctcoef*)p_block;
a= block[stride*0 + xStride*0];
b= block[stride*0 + xStride*1];
/*
* H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
* Copyright (c) 2003-2011 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* H.264 / AVC / MPEG4 part10 prediction functions.
* @author Michael Niedermayer <michaelni@gmx.at>
*/
#include "mathops.h"
#include "h264_high_depth.h"
static void FUNCC(pred4x4_vertical)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const pixel4 a= ((pixel4*)(src-stride))[0];
((pixel4*)(src+0*stride))[0]= a;
((pixel4*)(src+1*stride))[0]= a;
((pixel4*)(src+2*stride))[0]= a;
((pixel4*)(src+3*stride))[0]= a;
}
static void FUNCC(pred4x4_horizontal)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
((pixel4*)(src+0*stride))[0]= PIXEL_SPLAT_X4(src[-1+0*stride]);
((pixel4*)(src+1*stride))[0]= PIXEL_SPLAT_X4(src[-1+1*stride]);
((pixel4*)(src+2*stride))[0]= PIXEL_SPLAT_X4(src[-1+2*stride]);
((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4(src[-1+3*stride]);
}
static void FUNCC(pred4x4_dc)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride]
+ src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 4) >>3;
((pixel4*)(src+0*stride))[0]=
((pixel4*)(src+1*stride))[0]=
((pixel4*)(src+2*stride))[0]=
((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4(dc);
}
static void FUNCC(pred4x4_left_dc)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const int dc= ( src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 2) >>2;
((pixel4*)(src+0*stride))[0]=
((pixel4*)(src+1*stride))[0]=
((pixel4*)(src+2*stride))[0]=
((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4(dc);
}
static void FUNCC(pred4x4_top_dc)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + 2) >>2;
((pixel4*)(src+0*stride))[0]=
((pixel4*)(src+1*stride))[0]=
((pixel4*)(src+2*stride))[0]=
((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4(dc);
}
static void FUNCC(pred4x4_128_dc)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
((pixel4*)(src+0*stride))[0]=
((pixel4*)(src+1*stride))[0]=
((pixel4*)(src+2*stride))[0]=
((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4(1<<(BIT_DEPTH-1));
}
static void FUNCC(pred4x4_127_dc)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
((pixel4*)(src+0*stride))[0]=
((pixel4*)(src+1*stride))[0]=
((pixel4*)(src+2*stride))[0]=
((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))-1);
}
static void FUNCC(pred4x4_129_dc)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
((pixel4*)(src+0*stride))[0]=
((pixel4*)(src+1*stride))[0]=
((pixel4*)(src+2*stride))[0]=
((pixel4*)(src+3*stride))[0]= PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))+1);
}
#define LOAD_TOP_RIGHT_EDGE\
const int av_unused t4= topright[0];\
const int av_unused t5= topright[1];\
const int av_unused t6= topright[2];\
const int av_unused t7= topright[3];
#define LOAD_DOWN_LEFT_EDGE\
const int av_unused l4= src[-1+4*stride];\
const int av_unused l5= src[-1+5*stride];\
const int av_unused l6= src[-1+6*stride];\
const int av_unused l7= src[-1+7*stride];
#define LOAD_LEFT_EDGE\
const int av_unused l0= src[-1+0*stride];\
const int av_unused l1= src[-1+1*stride];\
const int av_unused l2= src[-1+2*stride];\
const int av_unused l3= src[-1+3*stride];
#define LOAD_TOP_EDGE\
const int av_unused t0= src[ 0-1*stride];\
const int av_unused t1= src[ 1-1*stride];\
const int av_unused t2= src[ 2-1*stride];\
const int av_unused t3= src[ 3-1*stride];
static void FUNCC(pred4x4_vertical_vp8)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
pixel *src = (pixel*)p_src;
const pixel *topright = (const pixel*)p_topright;
int stride = p_stride>>(sizeof(pixel)-1);
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
pixel4 v = PACK_4U8((lt + 2*t0 + t1 + 2) >> 2,
(t0 + 2*t1 + t2 + 2) >> 2,
(t1 + 2*t2 + t3 + 2) >> 2,
(t2 + 2*t3 + t4 + 2) >> 2);
AV_WN4PA(src+0*stride, v);
AV_WN4PA(src+1*stride, v);
AV_WN4PA(src+2*stride, v);
AV_WN4PA(src+3*stride, v);
}
static void FUNCC(pred4x4_horizontal_vp8)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const int lt= src[-1-1*stride];
LOAD_LEFT_EDGE
AV_WN4PA(src+0*stride, PIXEL_SPLAT_X4((lt + 2*l0 + l1 + 2) >> 2));
AV_WN4PA(src+1*stride, PIXEL_SPLAT_X4((l0 + 2*l1 + l2 + 2) >> 2));
AV_WN4PA(src+2*stride, PIXEL_SPLAT_X4((l1 + 2*l2 + l3 + 2) >> 2));
AV_WN4PA(src+3*stride, PIXEL_SPLAT_X4((l2 + 2*l3 + l3 + 2) >> 2));
}
static void FUNCC(pred4x4_down_right)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
src[0+3*stride]=(l3 + 2*l2 + l1 + 2)>>2;
src[0+2*stride]=
src[1+3*stride]=(l2 + 2*l1 + l0 + 2)>>2;
src[0+1*stride]=
src[1+2*stride]=
src[2+3*stride]=(l1 + 2*l0 + lt + 2)>>2;
src[0+0*stride]=
src[1+1*stride]=
src[2+2*stride]=
src[3+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
src[1+0*stride]=
src[2+1*stride]=
src[3+2*stride]=(lt + 2*t0 + t1 + 2)>>2;
src[2+0*stride]=
src[3+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
src[3+0*stride]=(t1 + 2*t2 + t3 + 2)>>2;
}
static void FUNCC(pred4x4_down_left)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
pixel *src = (pixel*)p_src;
const pixel *topright = (const pixel*)p_topright;
int stride = p_stride>>(sizeof(pixel)-1);
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
// LOAD_LEFT_EDGE
src[0+0*stride]=(t0 + t2 + 2*t1 + 2)>>2;
src[1+0*stride]=
src[0+1*stride]=(t1 + t3 + 2*t2 + 2)>>2;
src[2+0*stride]=
src[1+1*stride]=
src[0+2*stride]=(t2 + t4 + 2*t3 + 2)>>2;
src[3+0*stride]=
src[2+1*stride]=
src[1+2*stride]=
src[0+3*stride]=(t3 + t5 + 2*t4 + 2)>>2;
src[3+1*stride]=
src[2+2*stride]=
src[1+3*stride]=(t4 + t6 + 2*t5 + 2)>>2;
src[3+2*stride]=
src[2+3*stride]=(t5 + t7 + 2*t6 + 2)>>2;
src[3+3*stride]=(t6 + 3*t7 + 2)>>2;
}
static void FUNCC(pred4x4_down_left_svq3)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
const av_unused int unu0= t0;
const av_unused int unu1= l0;
src[0+0*stride]=(l1 + t1)>>1;
src[1+0*stride]=
src[0+1*stride]=(l2 + t2)>>1;
src[2+0*stride]=
src[1+1*stride]=
src[0+2*stride]=
src[3+0*stride]=
src[2+1*stride]=
src[1+2*stride]=
src[0+3*stride]=
src[3+1*stride]=
src[2+2*stride]=
src[1+3*stride]=
src[3+2*stride]=
src[2+3*stride]=
src[3+3*stride]=(l3 + t3)>>1;
}
static void FUNCC(pred4x4_down_left_rv40)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
pixel *src = (pixel*)p_src;
const pixel *topright = (const pixel*)p_topright;
int stride = p_stride>>(sizeof(pixel)-1);
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
LOAD_LEFT_EDGE
LOAD_DOWN_LEFT_EDGE
src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
src[1+0*stride]=
src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
src[2+0*stride]=
src[1+1*stride]=
src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + l4 + 2*l3 + 2)>>3;
src[3+0*stride]=
src[2+1*stride]=
src[1+2*stride]=
src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3 + l5 + 2*l4 + 2)>>3;
src[3+1*stride]=
src[2+2*stride]=
src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l4 + l6 + 2*l5 + 2)>>3;
src[3+2*stride]=
src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l5 + l7 + 2*l6 + 2)>>3;
src[3+3*stride]=(t6 + t7 + 1 + l6 + l7 + 1)>>2;
}
static void FUNCC(pred4x4_down_left_rv40_nodown)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
pixel *src = (pixel*)p_src;
const pixel *topright = (const pixel*)p_topright;
int stride = p_stride>>(sizeof(pixel)-1);
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
LOAD_LEFT_EDGE
src[0+0*stride]=(t0 + t2 + 2*t1 + 2 + l0 + l2 + 2*l1 + 2)>>3;
src[1+0*stride]=
src[0+1*stride]=(t1 + t3 + 2*t2 + 2 + l1 + l3 + 2*l2 + 2)>>3;
src[2+0*stride]=
src[1+1*stride]=
src[0+2*stride]=(t2 + t4 + 2*t3 + 2 + l2 + 3*l3 + 2)>>3;
src[3+0*stride]=
src[2+1*stride]=
src[1+2*stride]=
src[0+3*stride]=(t3 + t5 + 2*t4 + 2 + l3*4 + 2)>>3;
src[3+1*stride]=
src[2+2*stride]=
src[1+3*stride]=(t4 + t6 + 2*t5 + 2 + l3*4 + 2)>>3;
src[3+2*stride]=
src[2+3*stride]=(t5 + t7 + 2*t6 + 2 + l3*4 + 2)>>3;
src[3+3*stride]=(t6 + t7 + 1 + 2*l3 + 1)>>2;
}
static void FUNCC(pred4x4_vertical_right)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
src[0+0*stride]=
src[1+2*stride]=(lt + t0 + 1)>>1;
src[1+0*stride]=
src[2+2*stride]=(t0 + t1 + 1)>>1;
src[2+0*stride]=
src[3+2*stride]=(t1 + t2 + 1)>>1;
src[3+0*stride]=(t2 + t3 + 1)>>1;
src[0+1*stride]=
src[1+3*stride]=(l0 + 2*lt + t0 + 2)>>2;
src[1+1*stride]=
src[2+3*stride]=(lt + 2*t0 + t1 + 2)>>2;
src[2+1*stride]=
src[3+3*stride]=(t0 + 2*t1 + t2 + 2)>>2;
src[3+1*stride]=(t1 + 2*t2 + t3 + 2)>>2;
src[0+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
src[0+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
}
static void FUNCC(pred4x4_vertical_left)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
pixel *src = (pixel*)p_src;
const pixel *topright = (const pixel*)p_topright;
int stride = p_stride>>(sizeof(pixel)-1);
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
src[0+0*stride]=(t0 + t1 + 1)>>1;
src[1+0*stride]=
src[0+2*stride]=(t1 + t2 + 1)>>1;
src[2+0*stride]=
src[1+2*stride]=(t2 + t3 + 1)>>1;
src[3+0*stride]=
src[2+2*stride]=(t3 + t4+ 1)>>1;
src[3+2*stride]=(t4 + t5+ 1)>>1;
src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
src[1+1*stride]=
src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
src[2+1*stride]=
src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
src[3+1*stride]=
src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
}
static void FUNCC(pred4x4_vertical_left_rv40_internal)(uint8_t *p_src, const uint8_t *p_topright, int p_stride,
const int l0, const int l1, const int l2, const int l3, const int l4){
pixel *src = (pixel*)p_src;
const pixel *topright = (const pixel*)p_topright;
int stride = p_stride>>(sizeof(pixel)-1);
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
src[0+0*stride]=(2*t0 + 2*t1 + l1 + 2*l2 + l3 + 4)>>3;
src[1+0*stride]=
src[0+2*stride]=(t1 + t2 + 1)>>1;
src[2+0*stride]=
src[1+2*stride]=(t2 + t3 + 1)>>1;
src[3+0*stride]=
src[2+2*stride]=(t3 + t4+ 1)>>1;
src[3+2*stride]=(t4 + t5+ 1)>>1;
src[0+1*stride]=(t0 + 2*t1 + t2 + l2 + 2*l3 + l4 + 4)>>3;
src[1+1*stride]=
src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
src[2+1*stride]=
src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
src[3+1*stride]=
src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
src[3+3*stride]=(t4 + 2*t5 + t6 + 2)>>2;
}
static void FUNCC(pred4x4_vertical_left_rv40)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
LOAD_LEFT_EDGE
LOAD_DOWN_LEFT_EDGE
FUNCC(pred4x4_vertical_left_rv40_internal)(p_src, topright, p_stride, l0, l1, l2, l3, l4);
}
static void FUNCC(pred4x4_vertical_left_rv40_nodown)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
LOAD_LEFT_EDGE
FUNCC(pred4x4_vertical_left_rv40_internal)(p_src, topright, p_stride, l0, l1, l2, l3, l3);
}
static void FUNCC(pred4x4_vertical_left_vp8)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
pixel *src = (pixel*)p_src;
const pixel *topright = (const pixel*)p_topright;
int stride = p_stride>>(sizeof(pixel)-1);
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
src[0+0*stride]=(t0 + t1 + 1)>>1;
src[1+0*stride]=
src[0+2*stride]=(t1 + t2 + 1)>>1;
src[2+0*stride]=
src[1+2*stride]=(t2 + t3 + 1)>>1;
src[3+0*stride]=
src[2+2*stride]=(t3 + t4 + 1)>>1;
src[0+1*stride]=(t0 + 2*t1 + t2 + 2)>>2;
src[1+1*stride]=
src[0+3*stride]=(t1 + 2*t2 + t3 + 2)>>2;
src[2+1*stride]=
src[1+3*stride]=(t2 + 2*t3 + t4 + 2)>>2;
src[3+1*stride]=
src[2+3*stride]=(t3 + 2*t4 + t5 + 2)>>2;
src[3+2*stride]=(t4 + 2*t5 + t6 + 2)>>2;
src[3+3*stride]=(t5 + 2*t6 + t7 + 2)>>2;
}
static void FUNCC(pred4x4_horizontal_up)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
LOAD_LEFT_EDGE
src[0+0*stride]=(l0 + l1 + 1)>>1;
src[1+0*stride]=(l0 + 2*l1 + l2 + 2)>>2;
src[2+0*stride]=
src[0+1*stride]=(l1 + l2 + 1)>>1;
src[3+0*stride]=
src[1+1*stride]=(l1 + 2*l2 + l3 + 2)>>2;
src[2+1*stride]=
src[0+2*stride]=(l2 + l3 + 1)>>1;
src[3+1*stride]=
src[1+2*stride]=(l2 + 2*l3 + l3 + 2)>>2;
src[3+2*stride]=
src[1+3*stride]=
src[0+3*stride]=
src[2+2*stride]=
src[2+3*stride]=
src[3+3*stride]=l3;
}
static void FUNCC(pred4x4_horizontal_up_rv40)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
pixel *src = (pixel*)p_src;
const pixel *topright = (const pixel*)p_topright;
int stride = p_stride>>(sizeof(pixel)-1);
LOAD_LEFT_EDGE
LOAD_DOWN_LEFT_EDGE
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
src[2+0*stride]=
src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
src[3+0*stride]=
src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
src[2+1*stride]=
src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
src[3+1*stride]=
src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
src[3+2*stride]=
src[1+3*stride]=(l3 + 2*l4 + l5 + 2)>>2;
src[0+3*stride]=
src[2+2*stride]=(t6 + t7 + l3 + l4 + 2)>>2;
src[2+3*stride]=(l4 + l5 + 1)>>1;
src[3+3*stride]=(l4 + 2*l5 + l6 + 2)>>2;
}
static void FUNCC(pred4x4_horizontal_up_rv40_nodown)(uint8_t *p_src, const uint8_t *p_topright, int p_stride){
pixel *src = (pixel*)p_src;
const pixel *topright = (const pixel*)p_topright;
int stride = p_stride>>(sizeof(pixel)-1);
LOAD_LEFT_EDGE
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
src[0+0*stride]=(t1 + 2*t2 + t3 + 2*l0 + 2*l1 + 4)>>3;
src[1+0*stride]=(t2 + 2*t3 + t4 + l0 + 2*l1 + l2 + 4)>>3;
src[2+0*stride]=
src[0+1*stride]=(t3 + 2*t4 + t5 + 2*l1 + 2*l2 + 4)>>3;
src[3+0*stride]=
src[1+1*stride]=(t4 + 2*t5 + t6 + l1 + 2*l2 + l3 + 4)>>3;
src[2+1*stride]=
src[0+2*stride]=(t5 + 2*t6 + t7 + 2*l2 + 2*l3 + 4)>>3;
src[3+1*stride]=
src[1+2*stride]=(t6 + 3*t7 + l2 + 3*l3 + 4)>>3;
src[3+2*stride]=
src[1+3*stride]=l3;
src[0+3*stride]=
src[2+2*stride]=(t6 + t7 + 2*l3 + 2)>>2;
src[2+3*stride]=
src[3+3*stride]=l3;
}
static void FUNCC(pred4x4_horizontal_down)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
src[0+0*stride]=
src[2+1*stride]=(lt + l0 + 1)>>1;
src[1+0*stride]=
src[3+1*stride]=(l0 + 2*lt + t0 + 2)>>2;
src[2+0*stride]=(lt + 2*t0 + t1 + 2)>>2;
src[3+0*stride]=(t0 + 2*t1 + t2 + 2)>>2;
src[0+1*stride]=
src[2+2*stride]=(l0 + l1 + 1)>>1;
src[1+1*stride]=
src[3+2*stride]=(lt + 2*l0 + l1 + 2)>>2;
src[0+2*stride]=
src[2+3*stride]=(l1 + l2+ 1)>>1;
src[1+2*stride]=
src[3+3*stride]=(l0 + 2*l1 + l2 + 2)>>2;
src[0+3*stride]=(l2 + l3 + 1)>>1;
src[1+3*stride]=(l1 + 2*l2 + l3 + 2)>>2;
}
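/* VP8 TrueMotion mode: each sample is predicted as clip(left + top - topleft);
 * the ff_cropTbl offset trick below folds the clip and the "- topleft" term
 * into a single table lookup per sample. */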
static void FUNCC(pred4x4_tm_vp8)(uint8_t *p_src, const uint8_t *topright, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
pixel *top = src-stride;
int y;
for (y = 0; y < 4; y++) {
uint8_t *cm_in = cm + src[-1];
src[0] = cm_in[top[0]];
src[1] = cm_in[top[1]];
src[2] = cm_in[top[2]];
src[3] = cm_in[top[3]];
src += stride;
}
}
static void FUNCC(pred16x16_vertical)(uint8_t *p_src, int p_stride){
int i;
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const pixel4 a = ((pixel4*)(src-stride))[0];
const pixel4 b = ((pixel4*)(src-stride))[1];
const pixel4 c = ((pixel4*)(src-stride))[2];
const pixel4 d = ((pixel4*)(src-stride))[3];
for(i=0; i<16; i++){
((pixel4*)(src+i*stride))[0] = a;
((pixel4*)(src+i*stride))[1] = b;
((pixel4*)(src+i*stride))[2] = c;
((pixel4*)(src+i*stride))[3] = d;
}
}
static void FUNCC(pred16x16_horizontal)(uint8_t *p_src, int stride){
int i;
pixel *src = (pixel*)p_src;
stride >>= sizeof(pixel)-1;
for(i=0; i<16; i++){
((pixel4*)(src+i*stride))[0] =
((pixel4*)(src+i*stride))[1] =
((pixel4*)(src+i*stride))[2] =
((pixel4*)(src+i*stride))[3] = PIXEL_SPLAT_X4(src[-1+i*stride]);
}
}
#define PREDICT_16x16_DC(v)\
for(i=0; i<16; i++){\
AV_WN4P(src+ 0, v);\
AV_WN4P(src+ 4, v);\
AV_WN4P(src+ 8, v);\
AV_WN4P(src+12, v);\
src += stride;\
}
static void FUNCC(pred16x16_dc)(uint8_t *p_src, int stride){
int i, dc=0;
pixel *src = (pixel*)p_src;
pixel4 dcsplat;
stride >>= sizeof(pixel)-1;
for(i=0;i<16; i++){
dc+= src[-1+i*stride];
}
for(i=0;i<16; i++){
dc+= src[i-stride];
}
dcsplat = PIXEL_SPLAT_X4((dc+16)>>5);
PREDICT_16x16_DC(dcsplat);
}
static void FUNCC(pred16x16_left_dc)(uint8_t *p_src, int stride){
int i, dc=0;
pixel *src = (pixel*)p_src;
pixel4 dcsplat;
stride >>= sizeof(pixel)-1;
for(i=0;i<16; i++){
dc+= src[-1+i*stride];
}
dcsplat = PIXEL_SPLAT_X4((dc+8)>>4);
PREDICT_16x16_DC(dcsplat);
}
static void FUNCC(pred16x16_top_dc)(uint8_t *p_src, int stride){
int i, dc=0;
pixel *src = (pixel*)p_src;
pixel4 dcsplat;
stride >>= sizeof(pixel)-1;
for(i=0;i<16; i++){
dc+= src[i-stride];
}
dcsplat = PIXEL_SPLAT_X4((dc+8)>>4);
PREDICT_16x16_DC(dcsplat);
}
#define PRED16x16_X(n, v) \
static void FUNCC(pred16x16_##n##_dc)(uint8_t *p_src, int stride){\
int i;\
pixel *src = (pixel*)p_src;\
stride >>= sizeof(pixel)-1;\
PREDICT_16x16_DC(PIXEL_SPLAT_X4(v));\
}
PRED16x16_X(127, (1<<(BIT_DEPTH-1))-1);
PRED16x16_X(128, (1<<(BIT_DEPTH-1))+0);
PRED16x16_X(129, (1<<(BIT_DEPTH-1))+1);
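/* Plane prediction: H and V are weighted sums of differences of border samples
 * (approximating the horizontal and vertical gradients); a linear ramp
 * a + x*H + y*V is then evaluated, shifted down by 5 and clipped for every
 * sample. The svq3/rv40 flags only change how H and V are scaled. */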
static inline void FUNCC(pred16x16_plane_compat)(uint8_t *p_src, int p_stride, const int svq3, const int rv40){
int i, j, k;
int a;
INIT_CLIP
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const pixel * const src0 = src +7-stride;
const pixel * src1 = src +8*stride-1;
const pixel * src2 = src1-2*stride; // == src+6*stride-1;
int H = src0[1] - src0[-1];
int V = src1[0] - src2[ 0];
for(k=2; k<=8; ++k) {
src1 += stride; src2 -= stride;
H += k*(src0[k] - src0[-k]);
V += k*(src1[0] - src2[ 0]);
}
if(svq3){
H = ( 5*(H/4) ) / 16;
V = ( 5*(V/4) ) / 16;
/* required for 100% accuracy */
i = H; H = V; V = i;
}else if(rv40){
H = ( H + (H>>2) ) >> 4;
V = ( V + (V>>2) ) >> 4;
}else{
H = ( 5*H+32 ) >> 6;
V = ( 5*V+32 ) >> 6;
}
a = 16*(src1[0] + src2[16] + 1) - 7*(V+H);
for(j=16; j>0; --j) {
int b = a;
a += V;
for(i=-16; i<0; i+=4) {
src[16+i] = CLIP((b ) >> 5);
src[17+i] = CLIP((b+ H) >> 5);
src[18+i] = CLIP((b+2*H) >> 5);
src[19+i] = CLIP((b+3*H) >> 5);
b += 4*H;
}
src += stride;
}
}
static void FUNCC(pred16x16_plane)(uint8_t *src, int stride){
FUNCC(pred16x16_plane_compat)(src, stride, 0, 0);
}
static void FUNCC(pred16x16_plane_svq3)(uint8_t *src, int stride){
FUNCC(pred16x16_plane_compat)(src, stride, 1, 0);
}
static void FUNCC(pred16x16_plane_rv40)(uint8_t *src, int stride){
FUNCC(pred16x16_plane_compat)(src, stride, 0, 1);
}
static void FUNCC(pred16x16_tm_vp8)(uint8_t *src, int stride){
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
uint8_t *top = src-stride;
int y;
for (y = 0; y < 16; y++) {
uint8_t *cm_in = cm + src[-1];
src[0] = cm_in[top[0]];
src[1] = cm_in[top[1]];
src[2] = cm_in[top[2]];
src[3] = cm_in[top[3]];
src[4] = cm_in[top[4]];
src[5] = cm_in[top[5]];
src[6] = cm_in[top[6]];
src[7] = cm_in[top[7]];
src[8] = cm_in[top[8]];
src[9] = cm_in[top[9]];
src[10] = cm_in[top[10]];
src[11] = cm_in[top[11]];
src[12] = cm_in[top[12]];
src[13] = cm_in[top[13]];
src[14] = cm_in[top[14]];
src[15] = cm_in[top[15]];
src += stride;
}
}
static void FUNCC(pred8x8_vertical)(uint8_t *p_src, int p_stride){
int i;
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const pixel4 a= ((pixel4*)(src-stride))[0];
const pixel4 b= ((pixel4*)(src-stride))[1];
for(i=0; i<8; i++){
((pixel4*)(src+i*stride))[0]= a;
((pixel4*)(src+i*stride))[1]= b;
}
}
static void FUNCC(pred8x8_horizontal)(uint8_t *p_src, int stride){
int i;
pixel *src = (pixel*)p_src;
stride >>= sizeof(pixel)-1;
for(i=0; i<8; i++){
((pixel4*)(src+i*stride))[0]=
((pixel4*)(src+i*stride))[1]= PIXEL_SPLAT_X4(src[-1+i*stride]);
}
}
#define PRED8x8_X(n, v)\
static void FUNCC(pred8x8_##n##_dc)(uint8_t *p_src, int stride){\
int i;\
pixel *src = (pixel*)p_src;\
stride >>= sizeof(pixel)-1;\
for(i=0; i<8; i++){\
((pixel4*)(src+i*stride))[0]=\
((pixel4*)(src+i*stride))[1]= PIXEL_SPLAT_X4(v);\
}\
}
PRED8x8_X(127, (1<<(BIT_DEPTH-1))-1);
PRED8x8_X(128, (1<<(BIT_DEPTH-1))+0);
PRED8x8_X(129, (1<<(BIT_DEPTH-1))+1);
static void FUNCC(pred8x8_left_dc)(uint8_t *p_src, int stride){
int i;
int dc0, dc2;
pixel4 dc0splat, dc2splat;
pixel *src = (pixel*)p_src;
stride >>= sizeof(pixel)-1;
dc0=dc2=0;
for(i=0;i<4; i++){
dc0+= src[-1+i*stride];
dc2+= src[-1+(i+4)*stride];
}
dc0splat = PIXEL_SPLAT_X4((dc0 + 2)>>2);
dc2splat = PIXEL_SPLAT_X4((dc2 + 2)>>2);
for(i=0; i<4; i++){
((pixel4*)(src+i*stride))[0]=
((pixel4*)(src+i*stride))[1]= dc0splat;
}
for(i=4; i<8; i++){
((pixel4*)(src+i*stride))[0]=
((pixel4*)(src+i*stride))[1]= dc2splat;
}
}
static void FUNCC(pred8x8_left_dc_rv40)(uint8_t *p_src, int stride){
int i;
int dc0;
pixel4 dc0splat;
pixel *src = (pixel*)p_src;
stride >>= sizeof(pixel)-1;
dc0=0;
for(i=0;i<8; i++)
dc0+= src[-1+i*stride];
dc0splat = PIXEL_SPLAT_X4((dc0 + 4)>>3);
for(i=0; i<8; i++){
((pixel4*)(src+i*stride))[0]=
((pixel4*)(src+i*stride))[1]= dc0splat;
}
}
static void FUNCC(pred8x8_top_dc)(uint8_t *p_src, int stride){
int i;
int dc0, dc1;
pixel4 dc0splat, dc1splat;
pixel *src = (pixel*)p_src;
stride >>= sizeof(pixel)-1;
dc0=dc1=0;
for(i=0;i<4; i++){
dc0+= src[i-stride];
dc1+= src[4+i-stride];
}
dc0splat = PIXEL_SPLAT_X4((dc0 + 2)>>2);
dc1splat = PIXEL_SPLAT_X4((dc1 + 2)>>2);
for(i=0; i<4; i++){
((pixel4*)(src+i*stride))[0]= dc0splat;
((pixel4*)(src+i*stride))[1]= dc1splat;
}
for(i=4; i<8; i++){
((pixel4*)(src+i*stride))[0]= dc0splat;
((pixel4*)(src+i*stride))[1]= dc1splat;
}
}
static void FUNCC(pred8x8_top_dc_rv40)(uint8_t *p_src, int stride){
int i;
int dc0;
pixel4 dc0splat;
pixel *src = (pixel*)p_src;
stride >>= sizeof(pixel)-1;
dc0=0;
for(i=0;i<8; i++)
dc0+= src[i-stride];
dc0splat = PIXEL_SPLAT_X4((dc0 + 4)>>3);
for(i=0; i<8; i++){
((pixel4*)(src+i*stride))[0]=
((pixel4*)(src+i*stride))[1]= dc0splat;
}
}
static void FUNCC(pred8x8_dc)(uint8_t *p_src, int stride){
int i;
int dc0, dc1, dc2;
pixel4 dc0splat, dc1splat, dc2splat, dc3splat;
pixel *src = (pixel*)p_src;
stride >>= sizeof(pixel)-1;
dc0=dc1=dc2=0;
for(i=0;i<4; i++){
dc0+= src[-1+i*stride] + src[i-stride];
dc1+= src[4+i-stride];
dc2+= src[-1+(i+4)*stride];
}
dc0splat = PIXEL_SPLAT_X4((dc0 + 4)>>3);
dc1splat = PIXEL_SPLAT_X4((dc1 + 2)>>2);
dc2splat = PIXEL_SPLAT_X4((dc2 + 2)>>2);
dc3splat = PIXEL_SPLAT_X4((dc1 + dc2 + 4)>>3);
for(i=0; i<4; i++){
((pixel4*)(src+i*stride))[0]= dc0splat;
((pixel4*)(src+i*stride))[1]= dc1splat;
}
for(i=4; i<8; i++){
((pixel4*)(src+i*stride))[0]= dc2splat;
((pixel4*)(src+i*stride))[1]= dc3splat;
}
}
//the following 4 functions should not be optimized!
static void FUNC(pred8x8_mad_cow_dc_l0t)(uint8_t *src, int stride){
FUNCC(pred8x8_top_dc)(src, stride);
FUNCC(pred4x4_dc)(src, NULL, stride);
}
static void FUNC(pred8x8_mad_cow_dc_0lt)(uint8_t *src, int stride){
FUNCC(pred8x8_dc)(src, stride);
FUNCC(pred4x4_top_dc)(src, NULL, stride);
}
static void FUNC(pred8x8_mad_cow_dc_l00)(uint8_t *src, int stride){
FUNCC(pred8x8_left_dc)(src, stride);
FUNCC(pred4x4_128_dc)(src + 4*stride , NULL, stride);
FUNCC(pred4x4_128_dc)(src + 4*stride + 4*sizeof(pixel), NULL, stride);
}
static void FUNC(pred8x8_mad_cow_dc_0l0)(uint8_t *src, int stride){
FUNCC(pred8x8_left_dc)(src, stride);
FUNCC(pred4x4_128_dc)(src , NULL, stride);
FUNCC(pred4x4_128_dc)(src + 4*sizeof(pixel), NULL, stride);
}
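/* RV40 chroma DC: one average over all 16 top and left neighbours instead of the four per-quadrant DCs used above */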
static void FUNCC(pred8x8_dc_rv40)(uint8_t *p_src, int stride){
int i;
int dc0=0;
pixel4 dc0splat;
pixel *src = (pixel*)p_src;
stride >>= sizeof(pixel)-1;
for(i=0;i<4; i++){
dc0+= src[-1+i*stride] + src[i-stride];
dc0+= src[4+i-stride];
dc0+= src[-1+(i+4)*stride];
}
dc0splat = PIXEL_SPLAT_X4((dc0 + 8)>>4);
for(i=0; i<4; i++){
((pixel4*)(src+i*stride))[0]= dc0splat;
((pixel4*)(src+i*stride))[1]= dc0splat;
}
for(i=4; i<8; i++){
((pixel4*)(src+i*stride))[0]= dc0splat;
((pixel4*)(src+i*stride))[1]= dc0splat;
}
}
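/* 8x8 plane mode: build a linear ramp from weighted gradients (H, V) of the border samples and clip every predicted value to the pixel range */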
static void FUNCC(pred8x8_plane)(uint8_t *p_src, int p_stride){
int j, k;
int a;
INIT_CLIP
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const pixel * const src0 = src +3-stride;
const pixel * src1 = src +4*stride-1;
const pixel * src2 = src1-2*stride; // == src+2*stride-1;
int H = src0[1] - src0[-1];
int V = src1[0] - src2[ 0];
for(k=2; k<=4; ++k) {
src1 += stride; src2 -= stride;
H += k*(src0[k] - src0[-k]);
V += k*(src1[0] - src2[ 0]);
}
H = ( 17*H+16 ) >> 5;
V = ( 17*V+16 ) >> 5;
a = 16*(src1[0] + src2[8]+1) - 3*(V+H);
for(j=8; j>0; --j) {
int b = a;
a += V;
src[0] = CLIP((b ) >> 5);
src[1] = CLIP((b+ H) >> 5);
src[2] = CLIP((b+2*H) >> 5);
src[3] = CLIP((b+3*H) >> 5);
src[4] = CLIP((b+4*H) >> 5);
src[5] = CLIP((b+5*H) >> 5);
src[6] = CLIP((b+6*H) >> 5);
src[7] = CLIP((b+7*H) >> 5);
src += stride;
}
}
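/* 8x8 variant of the VP8 TrueMotion predictor above */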
static void FUNCC(pred8x8_tm_vp8)(uint8_t *p_src, int p_stride){
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
uint8_t *cm = ff_cropTbl + MAX_NEG_CROP - src[-1-stride];
pixel *top = src-stride;
int y;
for (y = 0; y < 8; y++) {
uint8_t *cm_in = cm + src[-1];
src[0] = cm_in[top[0]];
src[1] = cm_in[top[1]];
src[2] = cm_in[top[2]];
src[3] = cm_in[top[3]];
src[4] = cm_in[top[4]];
src[5] = cm_in[top[5]];
src[6] = cm_in[top[6]];
src[7] = cm_in[top[7]];
src += stride;
}
}
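/* helpers for 8x8 luma prediction: load (1,2,1)/4-filtered left/top/top-right/top-left neighbours, replicating edge samples when a neighbouring block is unavailable */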
#define SRC(x,y) src[(x)+(y)*stride]
#define PL(y) \
const int l##y = (SRC(-1,y-1) + 2*SRC(-1,y) + SRC(-1,y+1) + 2) >> 2;
#define PREDICT_8x8_LOAD_LEFT \
const int l0 = ((has_topleft ? SRC(-1,-1) : SRC(-1,0)) \
+ 2*SRC(-1,0) + SRC(-1,1) + 2) >> 2; \
PL(1) PL(2) PL(3) PL(4) PL(5) PL(6) \
const int l7 av_unused = (SRC(-1,6) + 3*SRC(-1,7) + 2) >> 2
#define PT(x) \
const int t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
#define PREDICT_8x8_LOAD_TOP \
const int t0 = ((has_topleft ? SRC(-1,-1) : SRC(0,-1)) \
+ 2*SRC(0,-1) + SRC(1,-1) + 2) >> 2; \
PT(1) PT(2) PT(3) PT(4) PT(5) PT(6) \
const int t7 av_unused = ((has_topright ? SRC(8,-1) : SRC(7,-1)) \
+ 2*SRC(7,-1) + SRC(6,-1) + 2) >> 2
#define PTR(x) \
t##x = (SRC(x-1,-1) + 2*SRC(x,-1) + SRC(x+1,-1) + 2) >> 2;
#define PREDICT_8x8_LOAD_TOPRIGHT \
int t8, t9, t10, t11, t12, t13, t14, t15; \
if(has_topright) { \
PTR(8) PTR(9) PTR(10) PTR(11) PTR(12) PTR(13) PTR(14) \
t15 = (SRC(14,-1) + 3*SRC(15,-1) + 2) >> 2; \
} else t8=t9=t10=t11=t12=t13=t14=t15= SRC(7,-1);
#define PREDICT_8x8_LOAD_TOPLEFT \
const int lt = (SRC(-1,0) + 2*SRC(-1,-1) + SRC(0,-1) + 2) >> 2
#define PREDICT_8x8_DC(v) \
int y; \
for( y = 0; y < 8; y++ ) { \
((pixel4*)src)[0] = \
((pixel4*)src)[1] = v; \
src += stride; \
}
static void FUNCC(pred8x8l_128_dc)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_DC(PIXEL_SPLAT_X4(1<<(BIT_DEPTH-1)));
}
static void FUNCC(pred8x8l_left_dc)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_LEFT;
const pixel4 dc = PIXEL_SPLAT_X4((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3);
PREDICT_8x8_DC(dc);
}
static void FUNCC(pred8x8l_top_dc)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
const pixel4 dc = PIXEL_SPLAT_X4((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3);
PREDICT_8x8_DC(dc);
}
static void FUNCC(pred8x8l_dc)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_LEFT;
PREDICT_8x8_LOAD_TOP;
const pixel4 dc = PIXEL_SPLAT_X4((l0+l1+l2+l3+l4+l5+l6+l7
+t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4);
PREDICT_8x8_DC(dc);
}
static void FUNCC(pred8x8l_horizontal)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_LEFT;
#define ROW(y) ((pixel4*)(src+y*stride))[0] =\
((pixel4*)(src+y*stride))[1] = PIXEL_SPLAT_X4(l##y)
ROW(0); ROW(1); ROW(2); ROW(3); ROW(4); ROW(5); ROW(6); ROW(7);
#undef ROW
}
static void FUNCC(pred8x8l_vertical)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
int y;
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
src[0] = t0;
src[1] = t1;
src[2] = t2;
src[3] = t3;
src[4] = t4;
src[5] = t5;
src[6] = t6;
src[7] = t7;
for( y = 1; y < 8; y++ ) {
((pixel4*)(src+y*stride))[0] = ((pixel4*)src)[0];
((pixel4*)(src+y*stride))[1] = ((pixel4*)src)[1];
}
}
static void FUNCC(pred8x8l_down_left)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_TOPRIGHT;
SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
SRC(0,1)=SRC(1,0)= (t1 + 2*t2 + t3 + 2) >> 2;
SRC(0,2)=SRC(1,1)=SRC(2,0)= (t2 + 2*t3 + t4 + 2) >> 2;
SRC(0,3)=SRC(1,2)=SRC(2,1)=SRC(3,0)= (t3 + 2*t4 + t5 + 2) >> 2;
SRC(0,4)=SRC(1,3)=SRC(2,2)=SRC(3,1)=SRC(4,0)= (t4 + 2*t5 + t6 + 2) >> 2;
SRC(0,5)=SRC(1,4)=SRC(2,3)=SRC(3,2)=SRC(4,1)=SRC(5,0)= (t5 + 2*t6 + t7 + 2) >> 2;
SRC(0,6)=SRC(1,5)=SRC(2,4)=SRC(3,3)=SRC(4,2)=SRC(5,1)=SRC(6,0)= (t6 + 2*t7 + t8 + 2) >> 2;
SRC(0,7)=SRC(1,6)=SRC(2,5)=SRC(3,4)=SRC(4,3)=SRC(5,2)=SRC(6,1)=SRC(7,0)= (t7 + 2*t8 + t9 + 2) >> 2;
SRC(1,7)=SRC(2,6)=SRC(3,5)=SRC(4,4)=SRC(5,3)=SRC(6,2)=SRC(7,1)= (t8 + 2*t9 + t10 + 2) >> 2;
SRC(2,7)=SRC(3,6)=SRC(4,5)=SRC(5,4)=SRC(6,3)=SRC(7,2)= (t9 + 2*t10 + t11 + 2) >> 2;
SRC(3,7)=SRC(4,6)=SRC(5,5)=SRC(6,4)=SRC(7,3)= (t10 + 2*t11 + t12 + 2) >> 2;
SRC(4,7)=SRC(5,6)=SRC(6,5)=SRC(7,4)= (t11 + 2*t12 + t13 + 2) >> 2;
SRC(5,7)=SRC(6,6)=SRC(7,5)= (t12 + 2*t13 + t14 + 2) >> 2;
SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
}
static void FUNCC(pred8x8l_down_right)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_LEFT;
PREDICT_8x8_LOAD_TOPLEFT;
SRC(0,7)= (l7 + 2*l6 + l5 + 2) >> 2;
SRC(0,6)=SRC(1,7)= (l6 + 2*l5 + l4 + 2) >> 2;
SRC(0,5)=SRC(1,6)=SRC(2,7)= (l5 + 2*l4 + l3 + 2) >> 2;
SRC(0,4)=SRC(1,5)=SRC(2,6)=SRC(3,7)= (l4 + 2*l3 + l2 + 2) >> 2;
SRC(0,3)=SRC(1,4)=SRC(2,5)=SRC(3,6)=SRC(4,7)= (l3 + 2*l2 + l1 + 2) >> 2;
SRC(0,2)=SRC(1,3)=SRC(2,4)=SRC(3,5)=SRC(4,6)=SRC(5,7)= (l2 + 2*l1 + l0 + 2) >> 2;
SRC(0,1)=SRC(1,2)=SRC(2,3)=SRC(3,4)=SRC(4,5)=SRC(5,6)=SRC(6,7)= (l1 + 2*l0 + lt + 2) >> 2;
SRC(0,0)=SRC(1,1)=SRC(2,2)=SRC(3,3)=SRC(4,4)=SRC(5,5)=SRC(6,6)=SRC(7,7)= (l0 + 2*lt + t0 + 2) >> 2;
SRC(1,0)=SRC(2,1)=SRC(3,2)=SRC(4,3)=SRC(5,4)=SRC(6,5)=SRC(7,6)= (lt + 2*t0 + t1 + 2) >> 2;
SRC(2,0)=SRC(3,1)=SRC(4,2)=SRC(5,3)=SRC(6,4)=SRC(7,5)= (t0 + 2*t1 + t2 + 2) >> 2;
SRC(3,0)=SRC(4,1)=SRC(5,2)=SRC(6,3)=SRC(7,4)= (t1 + 2*t2 + t3 + 2) >> 2;
SRC(4,0)=SRC(5,1)=SRC(6,2)=SRC(7,3)= (t2 + 2*t3 + t4 + 2) >> 2;
SRC(5,0)=SRC(6,1)=SRC(7,2)= (t3 + 2*t4 + t5 + 2) >> 2;
SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;
}
static void FUNCC(pred8x8l_vertical_right)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_LEFT;
PREDICT_8x8_LOAD_TOPLEFT;
SRC(0,6)= (l5 + 2*l4 + l3 + 2) >> 2;
SRC(0,7)= (l6 + 2*l5 + l4 + 2) >> 2;
SRC(0,4)=SRC(1,6)= (l3 + 2*l2 + l1 + 2) >> 2;
SRC(0,5)=SRC(1,7)= (l4 + 2*l3 + l2 + 2) >> 2;
SRC(0,2)=SRC(1,4)=SRC(2,6)= (l1 + 2*l0 + lt + 2) >> 2;
SRC(0,3)=SRC(1,5)=SRC(2,7)= (l2 + 2*l1 + l0 + 2) >> 2;
SRC(0,1)=SRC(1,3)=SRC(2,5)=SRC(3,7)= (l0 + 2*lt + t0 + 2) >> 2;
SRC(0,0)=SRC(1,2)=SRC(2,4)=SRC(3,6)= (lt + t0 + 1) >> 1;
SRC(1,1)=SRC(2,3)=SRC(3,5)=SRC(4,7)= (lt + 2*t0 + t1 + 2) >> 2;
SRC(1,0)=SRC(2,2)=SRC(3,4)=SRC(4,6)= (t0 + t1 + 1) >> 1;
SRC(2,1)=SRC(3,3)=SRC(4,5)=SRC(5,7)= (t0 + 2*t1 + t2 + 2) >> 2;
SRC(2,0)=SRC(3,2)=SRC(4,4)=SRC(5,6)= (t1 + t2 + 1) >> 1;
SRC(3,1)=SRC(4,3)=SRC(5,5)=SRC(6,7)= (t1 + 2*t2 + t3 + 2) >> 2;
SRC(3,0)=SRC(4,2)=SRC(5,4)=SRC(6,6)= (t2 + t3 + 1) >> 1;
SRC(4,1)=SRC(5,3)=SRC(6,5)=SRC(7,7)= (t2 + 2*t3 + t4 + 2) >> 2;
SRC(4,0)=SRC(5,2)=SRC(6,4)=SRC(7,6)= (t3 + t4 + 1) >> 1;
SRC(5,1)=SRC(6,3)=SRC(7,5)= (t3 + 2*t4 + t5 + 2) >> 2;
SRC(5,0)=SRC(6,2)=SRC(7,4)= (t4 + t5 + 1) >> 1;
SRC(6,1)=SRC(7,3)= (t4 + 2*t5 + t6 + 2) >> 2;
SRC(6,0)=SRC(7,2)= (t5 + t6 + 1) >> 1;
SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
SRC(7,0)= (t6 + t7 + 1) >> 1;
}
static void FUNCC(pred8x8l_horizontal_down)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_LEFT;
PREDICT_8x8_LOAD_TOPLEFT;
SRC(0,7)= (l6 + l7 + 1) >> 1;
SRC(1,7)= (l5 + 2*l6 + l7 + 2) >> 2;
SRC(0,6)=SRC(2,7)= (l5 + l6 + 1) >> 1;
SRC(1,6)=SRC(3,7)= (l4 + 2*l5 + l6 + 2) >> 2;
SRC(0,5)=SRC(2,6)=SRC(4,7)= (l4 + l5 + 1) >> 1;
SRC(1,5)=SRC(3,6)=SRC(5,7)= (l3 + 2*l4 + l5 + 2) >> 2;
SRC(0,4)=SRC(2,5)=SRC(4,6)=SRC(6,7)= (l3 + l4 + 1) >> 1;
SRC(1,4)=SRC(3,5)=SRC(5,6)=SRC(7,7)= (l2 + 2*l3 + l4 + 2) >> 2;
SRC(0,3)=SRC(2,4)=SRC(4,5)=SRC(6,6)= (l2 + l3 + 1) >> 1;
SRC(1,3)=SRC(3,4)=SRC(5,5)=SRC(7,6)= (l1 + 2*l2 + l3 + 2) >> 2;
SRC(0,2)=SRC(2,3)=SRC(4,4)=SRC(6,5)= (l1 + l2 + 1) >> 1;
SRC(1,2)=SRC(3,3)=SRC(5,4)=SRC(7,5)= (l0 + 2*l1 + l2 + 2) >> 2;
SRC(0,1)=SRC(2,2)=SRC(4,3)=SRC(6,4)= (l0 + l1 + 1) >> 1;
SRC(1,1)=SRC(3,2)=SRC(5,3)=SRC(7,4)= (lt + 2*l0 + l1 + 2) >> 2;
SRC(0,0)=SRC(2,1)=SRC(4,2)=SRC(6,3)= (lt + l0 + 1) >> 1;
SRC(1,0)=SRC(3,1)=SRC(5,2)=SRC(7,3)= (l0 + 2*lt + t0 + 2) >> 2;
SRC(2,0)=SRC(4,1)=SRC(6,2)= (t1 + 2*t0 + lt + 2) >> 2;
SRC(3,0)=SRC(5,1)=SRC(7,2)= (t2 + 2*t1 + t0 + 2) >> 2;
SRC(4,0)=SRC(6,1)= (t3 + 2*t2 + t1 + 2) >> 2;
SRC(5,0)=SRC(7,1)= (t4 + 2*t3 + t2 + 2) >> 2;
SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
}
static void FUNCC(pred8x8l_vertical_left)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_TOPRIGHT;
SRC(0,0)= (t0 + t1 + 1) >> 1;
SRC(0,1)= (t0 + 2*t1 + t2 + 2) >> 2;
SRC(0,2)=SRC(1,0)= (t1 + t2 + 1) >> 1;
SRC(0,3)=SRC(1,1)= (t1 + 2*t2 + t3 + 2) >> 2;
SRC(0,4)=SRC(1,2)=SRC(2,0)= (t2 + t3 + 1) >> 1;
SRC(0,5)=SRC(1,3)=SRC(2,1)= (t2 + 2*t3 + t4 + 2) >> 2;
SRC(0,6)=SRC(1,4)=SRC(2,2)=SRC(3,0)= (t3 + t4 + 1) >> 1;
SRC(0,7)=SRC(1,5)=SRC(2,3)=SRC(3,1)= (t3 + 2*t4 + t5 + 2) >> 2;
SRC(1,6)=SRC(2,4)=SRC(3,2)=SRC(4,0)= (t4 + t5 + 1) >> 1;
SRC(1,7)=SRC(2,5)=SRC(3,3)=SRC(4,1)= (t4 + 2*t5 + t6 + 2) >> 2;
SRC(2,6)=SRC(3,4)=SRC(4,2)=SRC(5,0)= (t5 + t6 + 1) >> 1;
SRC(2,7)=SRC(3,5)=SRC(4,3)=SRC(5,1)= (t5 + 2*t6 + t7 + 2) >> 2;
SRC(3,6)=SRC(4,4)=SRC(5,2)=SRC(6,0)= (t6 + t7 + 1) >> 1;
SRC(3,7)=SRC(4,5)=SRC(5,3)=SRC(6,1)= (t6 + 2*t7 + t8 + 2) >> 2;
SRC(4,6)=SRC(5,4)=SRC(6,2)=SRC(7,0)= (t7 + t8 + 1) >> 1;
SRC(4,7)=SRC(5,5)=SRC(6,3)=SRC(7,1)= (t7 + 2*t8 + t9 + 2) >> 2;
SRC(5,6)=SRC(6,4)=SRC(7,2)= (t8 + t9 + 1) >> 1;
SRC(5,7)=SRC(6,5)=SRC(7,3)= (t8 + 2*t9 + t10 + 2) >> 2;
SRC(6,6)=SRC(7,4)= (t9 + t10 + 1) >> 1;
SRC(6,7)=SRC(7,5)= (t9 + 2*t10 + t11 + 2) >> 2;
SRC(7,6)= (t10 + t11 + 1) >> 1;
SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
}
static void FUNCC(pred8x8l_horizontal_up)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_LEFT;
SRC(0,0)= (l0 + l1 + 1) >> 1;
SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
SRC(0,1)=SRC(2,0)= (l1 + l2 + 1) >> 1;
SRC(1,1)=SRC(3,0)= (l1 + 2*l2 + l3 + 2) >> 2;
SRC(0,2)=SRC(2,1)=SRC(4,0)= (l2 + l3 + 1) >> 1;
SRC(1,2)=SRC(3,1)=SRC(5,0)= (l2 + 2*l3 + l4 + 2) >> 2;
SRC(0,3)=SRC(2,2)=SRC(4,1)=SRC(6,0)= (l3 + l4 + 1) >> 1;
SRC(1,3)=SRC(3,2)=SRC(5,1)=SRC(7,0)= (l3 + 2*l4 + l5 + 2) >> 2;
SRC(0,4)=SRC(2,3)=SRC(4,2)=SRC(6,1)= (l4 + l5 + 1) >> 1;
SRC(1,4)=SRC(3,3)=SRC(5,2)=SRC(7,1)= (l4 + 2*l5 + l6 + 2) >> 2;
SRC(0,5)=SRC(2,4)=SRC(4,3)=SRC(6,2)= (l5 + l6 + 1) >> 1;
SRC(1,5)=SRC(3,4)=SRC(5,3)=SRC(7,2)= (l5 + 2*l6 + l7 + 2) >> 2;
SRC(0,6)=SRC(2,5)=SRC(4,4)=SRC(6,3)= (l6 + l7 + 1) >> 1;
SRC(1,6)=SRC(3,5)=SRC(5,4)=SRC(7,3)= (l6 + 3*l7 + 2) >> 2;
SRC(0,7)=SRC(1,7)=SRC(2,6)=SRC(2,7)=SRC(3,6)=
SRC(3,7)=SRC(4,5)=SRC(4,6)=SRC(4,7)=SRC(5,5)=
SRC(5,6)=SRC(5,7)=SRC(6,4)=SRC(6,5)=SRC(6,6)=
SRC(6,7)=SRC(7,4)=SRC(7,5)=SRC(7,6)=SRC(7,7)= l7;
}
#undef PREDICT_8x8_LOAD_LEFT
#undef PREDICT_8x8_LOAD_TOP
#undef PREDICT_8x8_LOAD_TOPLEFT
#undef PREDICT_8x8_LOAD_TOPRIGHT
#undef PREDICT_8x8_DC
#undef PTR
#undef PT
#undef PL
#undef SRC
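/* prediction + residual helpers: each column (or row, for the horizontal variants) starts from the neighbouring edge pixel and accumulates its residual coefficients */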
static void FUNCC(pred4x4_vertical_add)(uint8_t *p_pix, const DCTELEM *p_block, int stride){
int i;
pixel *pix = (pixel*)p_pix;
const dctcoef *block = (const dctcoef*)p_block;
stride >>= sizeof(pixel)-1;
pix -= stride;
for(i=0; i<4; i++){
pixel v = pix[0];
pix[1*stride]= v += block[0];
pix[2*stride]= v += block[4];
pix[3*stride]= v += block[8];
pix[4*stride]= v + block[12];
pix++;
block++;
}
}
static void FUNCC(pred4x4_horizontal_add)(uint8_t *p_pix, const DCTELEM *p_block, int stride){
int i;
pixel *pix = (pixel*)p_pix;
const dctcoef *block = (const dctcoef*)p_block;
stride >>= sizeof(pixel)-1;
for(i=0; i<4; i++){
pixel v = pix[-1];
pix[0]= v += block[0];
pix[1]= v += block[1];
pix[2]= v += block[2];
pix[3]= v + block[3];
pix+= stride;
block+= 4;
}
}
static void FUNCC(pred8x8l_vertical_add)(uint8_t *p_pix, const DCTELEM *p_block, int stride){
int i;
pixel *pix = (pixel*)p_pix;
const dctcoef *block = (const dctcoef*)p_block;
stride >>= sizeof(pixel)-1;
pix -= stride;
for(i=0; i<8; i++){
pixel v = pix[0];
pix[1*stride]= v += block[0];
pix[2*stride]= v += block[8];
pix[3*stride]= v += block[16];
pix[4*stride]= v += block[24];
pix[5*stride]= v += block[32];
pix[6*stride]= v += block[40];
pix[7*stride]= v += block[48];
pix[8*stride]= v + block[56];
pix++;
block++;
}
}
static void FUNCC(pred8x8l_horizontal_add)(uint8_t *p_pix, const DCTELEM *p_block, int stride){
int i;
pixel *pix = (pixel*)p_pix;
const dctcoef *block = (const dctcoef*)p_block;
stride >>= sizeof(pixel)-1;
for(i=0; i<8; i++){
pixel v = pix[-1];
pix[0]= v += block[0];
pix[1]= v += block[1];
pix[2]= v += block[2];
pix[3]= v += block[3];
pix[4]= v += block[4];
pix[5]= v += block[5];
pix[6]= v += block[6];
pix[7]= v + block[7];
pix+= stride;
block+= 8;
}
}
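/* the 16x16 and 8x8 wrappers simply apply the 4x4 add functions to every sub-block listed in block_offset */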
static void FUNCC(pred16x16_vertical_add)(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
int i;
for(i=0; i<16; i++)
FUNCC(pred4x4_vertical_add)(pix + block_offset[i], block + i*16*sizeof(pixel), stride);
}
static void FUNCC(pred16x16_horizontal_add)(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
int i;
for(i=0; i<16; i++)
FUNCC(pred4x4_horizontal_add)(pix + block_offset[i], block + i*16*sizeof(pixel), stride);
}
static void FUNCC(pred8x8_vertical_add)(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
int i;
for(i=0; i<4; i++)
FUNCC(pred4x4_vertical_add)(pix + block_offset[i], block + i*16*sizeof(pixel), stride);
}
static void FUNCC(pred8x8_horizontal_add)(uint8_t *pix, const int *block_offset, const DCTELEM *block, int stride){
int i;
for(i=0; i<4; i++)
FUNCC(pred4x4_horizontal_add)(pix + block_offset[i], block + i*16*sizeof(pixel), stride);
}
@@ -2,20 +2,20 @@
* H.26L/H.264/AVC/JVT/14496-10/... encoder/decoder
* Copyright (c) 2003-2011 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of Libav.
* This file is part of FFmpeg.
*
* Libav is free software; you can redistribute it and/or
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -30,7 +30,7 @@
static void FUNCC(pred4x4_vertical)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const pixel4 a= AV_RN4PA(src-stride);
AV_WN4PA(src+0*stride, a);
@@ -41,7 +41,7 @@ static void FUNCC(pred4x4_vertical)(uint8_t *_src, const uint8_t *topright, int
static void FUNCC(pred4x4_horizontal)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
AV_WN4PA(src+0*stride, PIXEL_SPLAT_X4(src[-1+0*stride]));
AV_WN4PA(src+1*stride, PIXEL_SPLAT_X4(src[-1+1*stride]));
AV_WN4PA(src+2*stride, PIXEL_SPLAT_X4(src[-1+2*stride]));
@@ -50,7 +50,7 @@ static void FUNCC(pred4x4_horizontal)(uint8_t *_src, const uint8_t *topright, in
static void FUNCC(pred4x4_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride]
+ src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 4) >>3;
const pixel4 a = PIXEL_SPLAT_X4(dc);
@@ -63,7 +63,7 @@ static void FUNCC(pred4x4_dc)(uint8_t *_src, const uint8_t *topright, int _strid
static void FUNCC(pred4x4_left_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const int dc= ( src[-1+0*stride] + src[-1+1*stride] + src[-1+2*stride] + src[-1+3*stride] + 2) >>2;
const pixel4 a = PIXEL_SPLAT_X4(dc);
@@ -75,7 +75,7 @@ static void FUNCC(pred4x4_left_dc)(uint8_t *_src, const uint8_t *topright, int _
static void FUNCC(pred4x4_top_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const int dc= ( src[-stride] + src[1-stride] + src[2-stride] + src[3-stride] + 2) >>2;
const pixel4 a = PIXEL_SPLAT_X4(dc);
@@ -87,7 +87,7 @@ static void FUNCC(pred4x4_top_dc)(uint8_t *_src, const uint8_t *topright, int _s
static void FUNCC(pred4x4_128_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const pixel4 a = PIXEL_SPLAT_X4(1<<(BIT_DEPTH-1));
AV_WN4PA(src+0*stride, a);
@@ -98,7 +98,7 @@ static void FUNCC(pred4x4_128_dc)(uint8_t *_src, const uint8_t *topright, int _s
static void FUNCC(pred4x4_127_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const pixel4 a = PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))-1);
AV_WN4PA(src+0*stride, a);
@@ -109,7 +109,7 @@ static void FUNCC(pred4x4_127_dc)(uint8_t *_src, const uint8_t *topright, int _s
static void FUNCC(pred4x4_129_dc)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const pixel4 a = PIXEL_SPLAT_X4((1<<(BIT_DEPTH-1))+1);
AV_WN4PA(src+0*stride, a);
@@ -145,7 +145,7 @@ static void FUNCC(pred4x4_129_dc)(uint8_t *_src, const uint8_t *topright, int _s
static void FUNCC(pred4x4_down_right)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
@@ -171,7 +171,7 @@ static void FUNCC(pred4x4_down_right)(uint8_t *_src, const uint8_t *topright, in
static void FUNCC(pred4x4_down_left)(uint8_t *_src, const uint8_t *_topright, int _stride){
pixel *src = (pixel*)_src;
const pixel *topright = (const pixel*)_topright;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
// LOAD_LEFT_EDGE
@@ -196,7 +196,7 @@ static void FUNCC(pred4x4_down_left)(uint8_t *_src, const uint8_t *_topright, in
static void FUNCC(pred4x4_vertical_right)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
@@ -222,7 +222,7 @@ static void FUNCC(pred4x4_vertical_right)(uint8_t *_src, const uint8_t *topright
static void FUNCC(pred4x4_vertical_left)(uint8_t *_src, const uint8_t *_topright, int _stride){
pixel *src = (pixel*)_src;
const pixel *topright = (const pixel*)_topright;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
LOAD_TOP_EDGE
LOAD_TOP_RIGHT_EDGE
@@ -246,7 +246,7 @@ static void FUNCC(pred4x4_vertical_left)(uint8_t *_src, const uint8_t *_topright
static void FUNCC(pred4x4_horizontal_up)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
LOAD_LEFT_EDGE
src[0+0*stride]=(l0 + l1 + 1)>>1;
@@ -269,7 +269,7 @@ static void FUNCC(pred4x4_horizontal_up)(uint8_t *_src, const uint8_t *topright,
static void FUNCC(pred4x4_horizontal_down)(uint8_t *_src, const uint8_t *topright, int _stride){
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const int lt= src[-1-1*stride];
LOAD_TOP_EDGE
LOAD_LEFT_EDGE
@@ -295,7 +295,7 @@ static void FUNCC(pred4x4_horizontal_down)(uint8_t *_src, const uint8_t *toprigh
static void FUNCC(pred16x16_vertical)(uint8_t *_src, int _stride){
int i;
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const pixel4 a = AV_RN4PA(((pixel4*)(src-stride))+0);
const pixel4 b = AV_RN4PA(((pixel4*)(src-stride))+1);
const pixel4 c = AV_RN4PA(((pixel4*)(src-stride))+2);
@@ -312,7 +312,7 @@ static void FUNCC(pred16x16_vertical)(uint8_t *_src, int _stride){
static void FUNCC(pred16x16_horizontal)(uint8_t *_src, int stride){
int i;
pixel *src = (pixel*)_src;
stride /= sizeof(pixel);
stride >>= sizeof(pixel)-1;
for(i=0; i<16; i++){
const pixel4 a = PIXEL_SPLAT_X4(src[-1+i*stride]);
@@ -337,7 +337,7 @@ static void FUNCC(pred16x16_dc)(uint8_t *_src, int stride){
int i, dc=0;
pixel *src = (pixel*)_src;
pixel4 dcsplat;
stride /= sizeof(pixel);
stride >>= sizeof(pixel)-1;
for(i=0;i<16; i++){
dc+= src[-1+i*stride];
@@ -355,7 +355,7 @@ static void FUNCC(pred16x16_left_dc)(uint8_t *_src, int stride){
int i, dc=0;
pixel *src = (pixel*)_src;
pixel4 dcsplat;
stride /= sizeof(pixel);
stride >>= sizeof(pixel)-1;
for(i=0;i<16; i++){
dc+= src[-1+i*stride];
@@ -369,7 +369,7 @@ static void FUNCC(pred16x16_top_dc)(uint8_t *_src, int stride){
int i, dc=0;
pixel *src = (pixel*)_src;
pixel4 dcsplat;
stride /= sizeof(pixel);
stride >>= sizeof(pixel)-1;
for(i=0;i<16; i++){
dc+= src[i-stride];
@@ -383,7 +383,7 @@ static void FUNCC(pred16x16_top_dc)(uint8_t *_src, int stride){
static void FUNCC(pred16x16_##n##_dc)(uint8_t *_src, int stride){\
int i;\
pixel *src = (pixel*)_src;\
stride /= sizeof(pixel);\
stride >>= sizeof(pixel)-1;\
PREDICT_16x16_DC(PIXEL_SPLAT_X4(v));\
}
@@ -391,12 +391,12 @@ PRED16x16_X(127, (1<<(BIT_DEPTH-1))-1);
PRED16x16_X(128, (1<<(BIT_DEPTH-1))+0);
PRED16x16_X(129, (1<<(BIT_DEPTH-1))+1);
static inline void FUNCC(pred16x16_plane_compat)(uint8_t *_src, int _stride, const int svq3, const int rv40){
static inline void FUNCC(pred16x16_plane_compat)(uint8_t *p_src, int p_stride, const int svq3, const int rv40){
int i, j, k;
int a;
INIT_CLIP
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
const pixel * const src0 = src +7-stride;
const pixel * src1 = src +8*stride-1;
const pixel * src2 = src1-2*stride; // == src+6*stride-1;
@@ -443,7 +443,7 @@ static void FUNCC(pred16x16_plane)(uint8_t *src, int stride){
static void FUNCC(pred8x8_vertical)(uint8_t *_src, int _stride){
int i;
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const pixel4 a= AV_RN4PA(((pixel4*)(src-stride))+0);
const pixel4 b= AV_RN4PA(((pixel4*)(src-stride))+1);
@@ -456,7 +456,7 @@ static void FUNCC(pred8x8_vertical)(uint8_t *_src, int _stride){
static void FUNCC(pred8x8_horizontal)(uint8_t *_src, int stride){
int i;
pixel *src = (pixel*)_src;
stride /= sizeof(pixel);
stride >>= sizeof(pixel)-1;
for(i=0; i<8; i++){
const pixel4 a = PIXEL_SPLAT_X4(src[-1+i*stride]);
@@ -470,7 +470,7 @@ static void FUNCC(pred8x8_##n##_dc)(uint8_t *_src, int stride){\
int i;\
const pixel4 a = PIXEL_SPLAT_X4(v);\
pixel *src = (pixel*)_src;\
stride /= sizeof(pixel);\
stride >>= sizeof(pixel)-1;\
for(i=0; i<8; i++){\
AV_WN4PA(((pixel4*)(src+i*stride))+0, a);\
AV_WN4PA(((pixel4*)(src+i*stride))+1, a);\
@@ -486,7 +486,7 @@ static void FUNCC(pred8x8_left_dc)(uint8_t *_src, int stride){
int dc0, dc2;
pixel4 dc0splat, dc2splat;
pixel *src = (pixel*)_src;
stride /= sizeof(pixel);
stride >>= sizeof(pixel)-1;
dc0=dc2=0;
for(i=0;i<4; i++){
@@ -511,7 +511,7 @@ static void FUNCC(pred8x8_top_dc)(uint8_t *_src, int stride){
int dc0, dc1;
pixel4 dc0splat, dc1splat;
pixel *src = (pixel*)_src;
stride /= sizeof(pixel);
stride >>= sizeof(pixel)-1;
dc0=dc1=0;
for(i=0;i<4; i++){
@@ -536,7 +536,7 @@ static void FUNCC(pred8x8_dc)(uint8_t *_src, int stride){
int dc0, dc1, dc2;
pixel4 dc0splat, dc1splat, dc2splat, dc3splat;
pixel *src = (pixel*)_src;
stride /= sizeof(pixel);
stride >>= sizeof(pixel)-1;
dc0=dc1=dc2=0;
for(i=0;i<4; i++){
@@ -587,7 +587,7 @@ static void FUNCC(pred8x8_plane)(uint8_t *_src, int _stride){
int a;
INIT_CLIP
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
const pixel * const src0 = src +3-stride;
const pixel * src1 = src +4*stride-1;
const pixel * src2 = src1-2*stride; // == src+2*stride-1;
@@ -658,32 +658,32 @@ static void FUNCC(pred8x8_plane)(uint8_t *_src, int _stride){
static void FUNCC(pred8x8l_128_dc)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
{
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
PREDICT_8x8_DC(PIXEL_SPLAT_X4(1<<(BIT_DEPTH-1)));
}
static void FUNCC(pred8x8l_left_dc)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
{
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_LEFT;
const pixel4 dc = PIXEL_SPLAT_X4((l0+l1+l2+l3+l4+l5+l6+l7+4) >> 3);
PREDICT_8x8_DC(dc);
}
static void FUNCC(pred8x8l_top_dc)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
static void FUNCC(pred8x8l_top_dc)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
const pixel4 dc = PIXEL_SPLAT_X4((t0+t1+t2+t3+t4+t5+t6+t7+4) >> 3);
PREDICT_8x8_DC(dc);
}
static void FUNCC(pred8x8l_dc)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
static void FUNCC(pred8x8l_dc)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_LEFT;
PREDICT_8x8_LOAD_TOP;
@@ -691,10 +691,10 @@ static void FUNCC(pred8x8l_dc)(uint8_t *_src, int has_topleft, int has_topright,
+t0+t1+t2+t3+t4+t5+t6+t7+8) >> 4);
PREDICT_8x8_DC(dc);
}
static void FUNCC(pred8x8l_horizontal)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
static void FUNCC(pred8x8l_horizontal)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_LEFT;
#define ROW(y) ((pixel4*)(src+y*stride))[0] =\
@@ -706,7 +706,7 @@ static void FUNCC(pred8x8l_vertical)(uint8_t *_src, int has_topleft, int has_top
{
int y;
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
int stride = _stride>>(sizeof(pixel)-1);
pixel4 a, b;
PREDICT_8x8_LOAD_TOP;
@@ -725,10 +725,10 @@ static void FUNCC(pred8x8l_vertical)(uint8_t *_src, int has_topleft, int has_top
AV_WN4PA(((pixel4*)(src+y*stride))+1, b);
}
}
static void FUNCC(pred8x8l_down_left)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
static void FUNCC(pred8x8l_down_left)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_TOPRIGHT;
SRC(0,0)= (t0 + 2*t1 + t2 + 2) >> 2;
@@ -747,10 +747,10 @@ static void FUNCC(pred8x8l_down_left)(uint8_t *_src, int has_topleft, int has_to
SRC(6,7)=SRC(7,6)= (t13 + 2*t14 + t15 + 2) >> 2;
SRC(7,7)= (t14 + 3*t15 + 2) >> 2;
}
static void FUNCC(pred8x8l_down_right)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
static void FUNCC(pred8x8l_down_right)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_LEFT;
PREDICT_8x8_LOAD_TOPLEFT;
@@ -770,10 +770,10 @@ static void FUNCC(pred8x8l_down_right)(uint8_t *_src, int has_topleft, int has_t
SRC(6,0)=SRC(7,1)= (t4 + 2*t5 + t6 + 2) >> 2;
SRC(7,0)= (t5 + 2*t6 + t7 + 2) >> 2;
}
static void FUNCC(pred8x8l_vertical_right)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
static void FUNCC(pred8x8l_vertical_right)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_LEFT;
PREDICT_8x8_LOAD_TOPLEFT;
@@ -800,10 +800,10 @@ static void FUNCC(pred8x8l_vertical_right)(uint8_t *_src, int has_topleft, int h
SRC(7,1)= (t5 + 2*t6 + t7 + 2) >> 2;
SRC(7,0)= (t6 + t7 + 1) >> 1;
}
static void FUNCC(pred8x8l_horizontal_down)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
static void FUNCC(pred8x8l_horizontal_down)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_LEFT;
PREDICT_8x8_LOAD_TOPLEFT;
@@ -830,10 +830,10 @@ static void FUNCC(pred8x8l_horizontal_down)(uint8_t *_src, int has_topleft, int
SRC(6,0)= (t5 + 2*t4 + t3 + 2) >> 2;
SRC(7,0)= (t6 + 2*t5 + t4 + 2) >> 2;
}
static void FUNCC(pred8x8l_vertical_left)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
static void FUNCC(pred8x8l_vertical_left)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_TOP;
PREDICT_8x8_LOAD_TOPRIGHT;
SRC(0,0)= (t0 + t1 + 1) >> 1;
@@ -859,10 +859,10 @@ static void FUNCC(pred8x8l_vertical_left)(uint8_t *_src, int has_topleft, int ha
SRC(7,6)= (t10 + t11 + 1) >> 1;
SRC(7,7)= (t10 + 2*t11 + t12 + 2) >> 2;
}
static void FUNCC(pred8x8l_horizontal_up)(uint8_t *_src, int has_topleft, int has_topright, int _stride)
static void FUNCC(pred8x8l_horizontal_up)(uint8_t *p_src, int has_topleft, int has_topright, int p_stride)
{
pixel *src = (pixel*)_src;
int stride = _stride/sizeof(pixel);
pixel *src = (pixel*)p_src;
int stride = p_stride>>(sizeof(pixel)-1);
PREDICT_8x8_LOAD_LEFT;
SRC(0,0)= (l0 + l1 + 1) >> 1;
SRC(1,0)= (l0 + 2*l1 + l2 + 2) >> 2;
@@ -893,11 +893,11 @@ static void FUNCC(pred8x8l_horizontal_up)(uint8_t *_src, int has_topleft, int ha
#undef PL
#undef SRC
static void FUNCC(pred4x4_vertical_add)(uint8_t *_pix, const DCTELEM *_block, int stride){
static void FUNCC(pred4x4_vertical_add)(uint8_t *p_pix, const DCTELEM *p_block, int stride){
int i;
pixel *pix = (pixel*)_pix;
const dctcoef *block = (const dctcoef*)_block;
stride /= sizeof(pixel);
pixel *pix = (pixel*)p_pix;
const dctcoef *block = (const dctcoef*)p_block;
stride >>= sizeof(pixel)-1;
pix -= stride;
for(i=0; i<4; i++){
pixel v = pix[0];
@@ -910,11 +910,11 @@ static void FUNCC(pred4x4_vertical_add)(uint8_t *_pix, const DCTELEM *_block, in
}
}
static void FUNCC(pred4x4_horizontal_add)(uint8_t *_pix, const DCTELEM *_block, int stride){
static void FUNCC(pred4x4_horizontal_add)(uint8_t *p_pix, const DCTELEM *p_block, int stride){
int i;
pixel *pix = (pixel*)_pix;
const dctcoef *block = (const dctcoef*)_block;
stride /= sizeof(pixel);
pixel *pix = (pixel*)p_pix;
const dctcoef *block = (const dctcoef*)p_block;
stride >>= sizeof(pixel)-1;
for(i=0; i<4; i++){
pixel v = pix[-1];
pix[0]= v += block[0];
@@ -926,11 +926,11 @@ static void FUNCC(pred4x4_horizontal_add)(uint8_t *_pix, const DCTELEM *_block,
}
}
static void FUNCC(pred8x8l_vertical_add)(uint8_t *_pix, const DCTELEM *_block, int stride){
static void FUNCC(pred8x8l_vertical_add)(uint8_t *p_pix, const DCTELEM *p_block, int stride){
int i;
pixel *pix = (pixel*)_pix;
const dctcoef *block = (const dctcoef*)_block;
stride /= sizeof(pixel);
pixel *pix = (pixel*)p_pix;
const dctcoef *block = (const dctcoef*)p_block;
stride >>= sizeof(pixel)-1;
pix -= stride;
for(i=0; i<8; i++){
pixel v = pix[0];
@@ -947,11 +947,11 @@ static void FUNCC(pred8x8l_vertical_add)(uint8_t *_pix, const DCTELEM *_block, i
}
}
static void FUNCC(pred8x8l_horizontal_add)(uint8_t *_pix, const DCTELEM *_block, int stride){
static void FUNCC(pred8x8l_horizontal_add)(uint8_t *p_pix, const DCTELEM *p_block, int stride){
int i;
pixel *pix = (pixel*)_pix;
const dctcoef *block = (const dctcoef*)_block;
stride /= sizeof(pixel);
pixel *pix = (pixel*)p_pix;
const dctcoef *block = (const dctcoef*)p_block;
stride >>= sizeof(pixel)-1;
for(i=0; i<8; i++){
pixel v = pix[-1];
pix[0]= v += block[0];