Commit bb198e19 authored by Michael Niedermayer

interlaced motion estimation

interlaced mpeg2 encoding
  P & B frames
  rate distorted interlaced mb decision
  alternate scantable support
4mv encoding fixes (that's also why the regression tests change)
passing height to most dsp functions
interlaced mpeg4 encoding (no direct mode MBs yet)
various related cleanups
disabled old motion estimation algorithms (log, full, ...); they will either be fixed or removed

Originally committed as revision 2638 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent b846b231
......@@ -119,8 +119,10 @@ static int use_obmc = 0;
static int use_aic = 0;
static int use_aiv = 0;
static int use_umv = 0;
static int use_alt_scan = 0;
static int do_deinterlace = 0;
static int do_interlace = 0;
static int do_interlace_dct = 0;
static int do_interlace_me = 0;
static int workaround_bugs = FF_BUG_AUTODETECT;
static int error_resilience = 2;
static int error_concealment = 3;
......@@ -130,6 +132,8 @@ static int use_part = 0;
static int packet_size = 0;
static int error_rate = 0;
static int strict = 0;
static int top_field_first = -1;
static int noise_reduction = 0;
static int debug = 0;
static int debug_mv = 0;
extern int loop_input; /* currently a hack */
......@@ -635,7 +639,12 @@ static void do_video_out(AVFormatContext *s,
/* better than nothing: use input picture interlaced
settings */
big_picture.interlaced_frame = in_picture->interlaced_frame;
big_picture.top_field_first = in_picture->top_field_first;
if(do_interlace_me || do_interlace_dct){
if(top_field_first == -1)
big_picture.top_field_first = in_picture->top_field_first;
else
big_picture.top_field_first = 1;
}
/* handles sameq here. This is not correct because it may
not be a global option */
......@@ -1946,6 +1955,16 @@ static void opt_strict(const char *arg)
strict= atoi(arg);
}
static void opt_top_field_first(const char *arg)
{
top_field_first= atoi(arg);
}
static void opt_noise_reduction(const char *arg)
{
noise_reduction= atoi(arg);
}
static void opt_audio_bitrate(const char *arg)
{
audio_bit_rate = atoi(arg) * 1000;
......@@ -2373,14 +2392,20 @@ static void opt_output_file(const char *filename)
if(use_part) {
video_enc->flags |= CODEC_FLAG_PART;
}
if (use_alt_scan) {
video_enc->flags |= CODEC_FLAG_ALT_SCAN;
}
if (b_frames) {
video_enc->max_b_frames = b_frames;
video_enc->b_frame_strategy = 0;
video_enc->b_quant_factor = 2.0;
}
if (do_interlace) {
if (do_interlace_dct) {
video_enc->flags |= CODEC_FLAG_INTERLACED_DCT;
}
if (do_interlace_me) {
video_enc->flags |= CODEC_FLAG_INTERLACED_ME;
}
video_enc->qmin = video_qmin;
video_enc->qmax = video_qmax;
video_enc->mb_qmin = video_mb_qmin;
......@@ -2430,6 +2455,7 @@ static void opt_output_file(const char *filename)
video_enc->idct_algo = idct_algo;
video_enc->strict_std_compliance = strict;
video_enc->error_rate = error_rate;
video_enc->noise_reduction= noise_reduction;
if(packet_size){
video_enc->rtp_mode= 1;
video_enc->rtp_payload_size= packet_size;
......@@ -2992,16 +3018,21 @@ const OptionDef options[] = {
{ "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename}, "select two pass log file name", "file" },
{ "deinterlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_deinterlace},
"deinterlace pictures" },
{ "interlace", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_interlace},
"force interlacing support in encoder (MPEG2/MPEG4)" },
{ "ildct", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_interlace_dct},
"force interlaced dct support in encoder (MPEG2/MPEG4)" },
{ "ilme", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_interlace_me},
"force interlacied me support in encoder MPEG2" },
{ "psnr", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_psnr}, "calculate PSNR of compressed frames" },
{ "vstats", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&do_vstats}, "dump video coding statistics to file" },
{ "vhook", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)add_frame_hooker}, "insert video processing module", "module" },
{ "aic", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&use_aic}, "enable Advanced intra coding (h263+)" },
{ "aiv", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&use_aiv}, "enable Alternative inter vlc (h263+)" },
{ "umv", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&use_umv}, "enable Unlimited Motion Vector (h263+)" },
{ "alt", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, {(void*)&use_alt_scan}, "enable alternate scantable (mpeg2)" },
{ "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_intra_matrix}, "specify intra matrix coeffs", "matrix" },
{ "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_inter_matrix}, "specify inter matrix coeffs", "matrix" },
{ "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_top_field_first}, "top=1/bottom=0/auto=-1 field first", "" },
{ "nr", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_noise_reduction}, "noise reduction", "" },
/* audio options */
{ "ab", HAS_ARG | OPT_AUDIO, {(void*)opt_audio_bitrate}, "set audio bitrate (in kbit/s)", "bitrate", },
......
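
For orientation, a minimal sketch (not code from this commit) of how the three new encoder flags from the hunks above combine on a libavcodec context; the helper name and the already-configured AVCodecContext "enc" are assumptions, mirroring the -ildct/-ilme/-alt handling in opt_output_file():

/* hedged sketch: mirrors the option handling shown above */
static void set_interlace_flags(AVCodecContext *enc, int ildct, int ilme, int alt_scan)
{
    if (ildct)
        enc->flags |= CODEC_FLAG_INTERLACED_DCT;  /* interlaced DCT (MPEG2/MPEG4) */
    if (ilme)
        enc->flags |= CODEC_FLAG_INTERLACED_ME;   /* new in this commit: interlaced ME */
    if (alt_scan)
        enc->flags |= CODEC_FLAG_ALT_SCAN;        /* alternate scantable (MPEG2) */
}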
......@@ -39,11 +39,11 @@ void get_pixels_mvi(DCTELEM *restrict block,
const uint8_t *restrict pixels, int line_size);
void diff_pixels_mvi(DCTELEM *block, const uint8_t *s1, const uint8_t *s2,
int stride);
int pix_abs8x8_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_mvi_asm(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs16x16_x2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs16x16_y2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs16x16_xy2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size);
int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
#if 0
/* These functions were the base for the optimized assembler routines,
......@@ -290,11 +290,6 @@ static int sad16x16_mvi(void *s, uint8_t *a, uint8_t *b, int stride)
return pix_abs16x16_mvi_asm(a, b, stride);
}
static int sad8x8_mvi(void *s, uint8_t *a, uint8_t *b, int stride)
{
return pix_abs8x8_mvi(a, b, stride);
}
void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
{
c->put_pixels_tab[0][0] = put_pixels16_axp_asm;
......@@ -347,12 +342,13 @@ void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx)
c->get_pixels = get_pixels_mvi;
c->diff_pixels = diff_pixels_mvi;
c->sad[0] = sad16x16_mvi;
c->sad[1] = sad8x8_mvi;
c->pix_abs8x8 = pix_abs8x8_mvi;
c->pix_abs16x16 = pix_abs16x16_mvi_asm;
c->pix_abs16x16_x2 = pix_abs16x16_x2_mvi;
c->pix_abs16x16_y2 = pix_abs16x16_y2_mvi;
c->pix_abs16x16_xy2 = pix_abs16x16_xy2_mvi;
c->sad[1] = pix_abs8x8_mvi;
// c->pix_abs[0][0] = pix_abs16x16_mvi_asm; //FIXME function arguments for the asm must be fixed
c->pix_abs[0][0] = sad16x16_mvi;
c->pix_abs[1][0] = pix_abs8x8_mvi;
c->pix_abs[0][1] = pix_abs16x16_x2_mvi;
c->pix_abs[0][2] = pix_abs16x16_y2_mvi;
c->pix_abs[0][3] = pix_abs16x16_xy2_mvi;
}
put_pixels_clamped_axp_p = c->put_pixels_clamped;
......
......@@ -84,10 +84,9 @@ static inline uint64_t avg4(uint64_t l1, uint64_t l2, uint64_t l3, uint64_t l4)
return r1 + r2;
}
int pix_abs8x8_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_abs8x8_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int result = 0;
int h = 8;
if ((size_t) pix2 & 0x7) {
/* works only when pix2 is actually unaligned */
......@@ -160,10 +159,9 @@ int pix_abs16x16_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
}
#endif
int pix_abs16x16_x2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_abs16x16_x2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int result = 0;
int h = 16;
uint64_t disalign = (size_t) pix2 & 0x7;
switch (disalign) {
......@@ -234,10 +232,9 @@ int pix_abs16x16_x2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
return result;
}
int pix_abs16x16_y2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_abs16x16_y2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int result = 0;
int h = 16;
if ((size_t) pix2 & 0x7) {
uint64_t t, p2_l, p2_r;
......@@ -288,10 +285,9 @@ int pix_abs16x16_y2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
return result;
}
int pix_abs16x16_xy2_mvi(uint8_t *pix1, uint8_t *pix2, int line_size)
int pix_abs16x16_xy2_mvi(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int result = 0;
int h = 16;
uint64_t p1_l, p1_r;
uint64_t p2_l, p2_r, p2_x;
......
......@@ -17,7 +17,7 @@ extern "C" {
#define FFMPEG_VERSION_INT 0x000408
#define FFMPEG_VERSION "0.4.8"
#define LIBAVCODEC_BUILD 4697
#define LIBAVCODEC_BUILD 4698
#define LIBAVCODEC_VERSION_INT FFMPEG_VERSION_INT
#define LIBAVCODEC_VERSION FFMPEG_VERSION
......@@ -263,7 +263,8 @@ static const __attribute__((unused)) int Motion_Est_QTab[] =
#define CODEC_FLAG_H263P_AIV 0x00000008 ///< H263 Alternative inter vlc
#define CODEC_FLAG_OBMC 0x00000001 ///< OBMC
#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter
#define CODEC_FLAG_H263P_SLICE_STRUCT 0x10000000
#define CODEC_FLAG_H263P_SLICE_STRUCT 0x10000000
#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation
/* Unsupported options :
* Syntax Arithmetic coding (SAC)
* Reference Picture Selection
......
......@@ -110,9 +110,7 @@ static void a(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
/* motion estimation */
typedef int (*op_pixels_abs_func)(uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size)/* __attribute__ ((const))*/;
typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size)/* __attribute__ ((const))*/;
typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size, int h)/* __attribute__ ((const))*/;
/**
......@@ -136,19 +134,21 @@ typedef struct DSPContext {
void (*clear_blocks)(DCTELEM *blocks/*align 16*/);
int (*pix_sum)(uint8_t * pix, int line_size);
int (*pix_norm1)(uint8_t * pix, int line_size);
me_cmp_func sad[2]; /* identical to pix_absAxA except additional void * */
me_cmp_func sse[2];
me_cmp_func hadamard8_diff[2];
me_cmp_func dct_sad[2];
me_cmp_func quant_psnr[2];
me_cmp_func bit[2];
me_cmp_func rd[2];
// 16x16 8x8 4x4 2x2 16x8 8x4 4x2 8x16 4x8 2x4
me_cmp_func sad[4]; /* identical to pix_absAxA except additional void * */
me_cmp_func sse[4];
me_cmp_func hadamard8_diff[4];
me_cmp_func dct_sad[4];
me_cmp_func quant_psnr[4];
me_cmp_func bit[4];
me_cmp_func rd[4];
int (*hadamard8_abs )(uint8_t *src, int stride, int mean);
me_cmp_func me_pre_cmp[11];
me_cmp_func me_cmp[11];
me_cmp_func me_sub_cmp[11];
me_cmp_func mb_cmp[11];
me_cmp_func me_pre_cmp[5];
me_cmp_func me_cmp[5];
me_cmp_func me_sub_cmp[5];
me_cmp_func mb_cmp[5];
/* maybe create an array for 16/8/4/2 functions */
/**
......@@ -226,14 +226,7 @@ typedef struct DSPContext {
qpel_mc_func put_h264_qpel_pixels_tab[3][16];
qpel_mc_func avg_h264_qpel_pixels_tab[3][16];
op_pixels_abs_func pix_abs16x16;
op_pixels_abs_func pix_abs16x16_x2;
op_pixels_abs_func pix_abs16x16_y2;
op_pixels_abs_func pix_abs16x16_xy2;
op_pixels_abs_func pix_abs8x8;
op_pixels_abs_func pix_abs8x8_x2;
op_pixels_abs_func pix_abs8x8_y2;
op_pixels_abs_func pix_abs8x8_xy2;
me_cmp_func pix_abs[2][4];
/* huffyuv specific */
void (*add_bytes)(uint8_t *dst/*align 16*/, uint8_t *src/*align 16*/, int w);
......@@ -484,12 +477,24 @@ void ff_mdct_calc(MDCTContext *s, FFTSample *out,
const FFTSample *input, FFTSample *tmp);
void ff_mdct_end(MDCTContext *s);
#define WARPER88_1616(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride){\
return name8(s, dst , src , stride)\
+name8(s, dst+8 , src+8 , stride)\
+name8(s, dst +8*stride, src +8*stride, stride)\
+name8(s, dst+8+8*stride, src+8+8*stride, stride);\
#define WARPER8_16(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
return name8(s, dst , src , stride, h)\
+name8(s, dst+8 , src+8 , stride, h);\
}
#define WARPER8_16_SQ(name8, name16)\
static int name16(void /*MpegEncContext*/ *s, uint8_t *dst, uint8_t *src, int stride, int h){\
int score=0;\
score +=name8(s, dst , src , stride, 8);\
score +=name8(s, dst+8 , src+8 , stride, 8);\
if(h==16){\
dst += 8*stride;\
src += 8*stride;\
score +=name8(s, dst , src , stride, 8);\
score +=name8(s, dst+8 , src+8 , stride, 8);\
}\
return score;\
}
#ifndef HAVE_LRINTF
......
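
To make the widened comparison signature concrete, here is a hedged plain-C sketch of an 8-pixel-wide SAD matching the new me_cmp_func prototype; the name sad8_c_sketch and its body are illustrative only (the commit's real implementations are the C/MMX/MVI/AltiVec routines elsewhere in this diff). The point is that the block height h is now a parameter instead of being hard-coded, which is what the interlaced (field-based) motion estimation needs:

static int sad8_c_sketch(void *s /* MpegEncContext, unused here */,
                         uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    int x, y, sum = 0;
    for (y = 0; y < h; y++) {            /* h rows, no longer assumed to be a fixed 8 or 16 */
        for (x = 0; x < 8; x++) {
            int d = blk1[x] - blk2[x];
            sum += d < 0 ? -d : d;       /* sum of absolute differences */
        }
        blk1 += stride;
        blk2 += stride;
    }
    return sum;
}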
......@@ -582,8 +582,8 @@ static int is_intra_more_likely(MpegEncContext *s){
uint8_t *mb_ptr = s->current_picture.data[0] + mb_x*16 + mb_y*16*s->linesize;
uint8_t *last_mb_ptr= s->last_picture.data [0] + mb_x*16 + mb_y*16*s->linesize;
is_intra_likely += s->dsp.pix_abs16x16(last_mb_ptr, mb_ptr , s->linesize);
is_intra_likely -= s->dsp.pix_abs16x16(last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize);
is_intra_likely += s->dsp.sad[0](NULL, last_mb_ptr, mb_ptr , s->linesize, 16);
is_intra_likely -= s->dsp.sad[0](NULL, last_mb_ptr, last_mb_ptr+s->linesize*16, s->linesize, 16);
}else{
if(IS_INTRA(s->current_picture.mb_type[mb_xy]))
is_intra_likely++;
......
......@@ -61,8 +61,8 @@ static const int h263_mb_type_b_map[15]= {
MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_16x16,
MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_QUANT | MB_TYPE_16x16,
0, //stuffing
MB_TYPE_INTRA | MB_TYPE_CBP,
MB_TYPE_INTRA | MB_TYPE_CBP | MB_TYPE_QUANT,
MB_TYPE_INTRA4x4 | MB_TYPE_CBP,
MB_TYPE_INTRA4x4 | MB_TYPE_CBP | MB_TYPE_QUANT,
};
const uint8_t cbpc_b_tab[4][2] = {
......
......@@ -687,10 +687,10 @@ static int pix_norm1_mmx(uint8_t *pix, int line_size) {
return tmp;
}
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size) {
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
int tmp;
asm volatile (
"movl $16,%%ecx\n"
"movl %4,%%ecx\n"
"pxor %%mm0,%%mm0\n" /* mm0 = 0 */
"pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
"1:\n"
......@@ -741,7 +741,9 @@ static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size) {
"psrlq $32, %%mm7\n" /* shift hi dword to lo */
"paddd %%mm7,%%mm1\n"
"movd %%mm1,%2\n"
: "+r" (pix1), "+r" (pix2), "=r"(tmp) : "r" (line_size) : "%ecx");
: "+r" (pix1), "+r" (pix2), "=r"(tmp)
: "r" (line_size) , "m" (h)
: "%ecx");
return tmp;
}
......@@ -866,9 +868,11 @@ static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t
"movq "#c", "#o"+32(%1) \n\t"\
"movq "#d", "#o"+48(%1) \n\t"\
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride){
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
uint64_t temp[16] __align8;
int sum=0;
assert(h==8);
diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
......@@ -951,9 +955,11 @@ static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride)
return sum&0xFFFF;
}
static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride){
static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
uint64_t temp[16] __align8;
int sum=0;
assert(h==8);
diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
......@@ -1037,8 +1043,8 @@ static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride
}
WARPER88_1616(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER88_1616(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
#endif //CONFIG_ENCODERS
#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
......
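
For readability, this is roughly what WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx) expands to (hand-expanded from the macro added in dsputil.h above, not literal code from the commit). The 8x8 MMX kernel asserts h==8, so the 16-pixel-wide wrapper always calls it with h=8 and only touches the lower 16x8 half when the full height is requested:

static int hadamard8_diff16_mmx(void *s, uint8_t *dst, uint8_t *src, int stride, int h)
{
    int score = 0;
    score += hadamard8_diff_mmx(s, dst    , src    , stride, 8);
    score += hadamard8_diff_mmx(s, dst + 8, src + 8, stride, 8);
    if (h == 16) {                        /* lower 16x8 half only for full-height blocks */
        dst += 8 * stride;
        src += 8 * stride;
        score += hadamard8_diff_mmx(s, dst    , src    , stride, 8);
        score += hadamard8_diff_mmx(s, dst + 8, src + 8, stride, 8);
    }
    return score;
}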
......@@ -28,9 +28,9 @@ static const __attribute__ ((aligned(8))) uint64_t round_tab[3]={
static __attribute__ ((aligned(8), unused)) uint64_t bone= 0x0101010101010101LL;
static inline void sad8_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
static inline void sad8_1_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
int len= -(stride<<h);
int len= -(stride*h);
asm volatile(
".balign 16 \n\t"
"1: \n\t"
......@@ -64,9 +64,9 @@ static inline void sad8_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
);
}
static inline void sad8_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
static inline void sad8_1_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
int len= -(stride<<h);
int len= -(stride*h);
asm volatile(
".balign 16 \n\t"
"1: \n\t"
......@@ -88,7 +88,7 @@ static inline void sad8_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
static inline void sad8_2_mmx2(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
int len= -(stride<<h);
int len= -(stride*h);
asm volatile(
".balign 16 \n\t"
"1: \n\t"
......@@ -114,7 +114,7 @@ static inline void sad8_2_mmx2(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, in
static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{ //FIXME reuse src
int len= -(stride<<h);
int len= -(stride*h);
asm volatile(
".balign 16 \n\t"
"movq "MANGLE(bone)", %%mm5 \n\t"
......@@ -151,7 +151,7 @@ static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
int len= -(stride<<h);
int len= -(stride*h);
asm volatile(
".balign 16 \n\t"
"1: \n\t"
......@@ -189,7 +189,7 @@ static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int
static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
int len= -(stride<<h);
int len= -(stride*h);
asm volatile(
".balign 16 \n\t"
"1: \n\t"
......@@ -265,85 +265,69 @@ static inline int sum_mmx2(void)
#define PIX_SAD(suf)\
static int pix_abs8x8_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad8_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
assert(h==8);\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t":);\
\
sad8_ ## suf(blk1, blk2, stride, 3);\
sad8_1_ ## suf(blk1, blk2, stride, 8);\
\
return sum_ ## suf();\
}\
static int sad8x8_ ## suf(void *s, uint8_t *blk2, uint8_t *blk1, int stride)\
{\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t":);\
\
sad8_ ## suf(blk1, blk2, stride, 3);\
\
return sum_ ## suf();\
}\
\
static int pix_abs8x8_x2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad8_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
assert(h==8);\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
"movq %0, %%mm5 \n\t"\
:: "m"(round_tab[1]) \
);\
\
sad8_2_ ## suf(blk1, blk1+1, blk2, stride, 3);\
sad8_2_ ## suf(blk1, blk1+1, blk2, stride, 8);\
\
return sum_ ## suf();\
}\
\
static int pix_abs8x8_y2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad8_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
assert(h==8);\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
"movq %0, %%mm5 \n\t"\
:: "m"(round_tab[1]) \
);\
\
sad8_2_ ## suf(blk1, blk1+stride, blk2, stride, 3);\
sad8_2_ ## suf(blk1, blk1+stride, blk2, stride, 8);\
\
return sum_ ## suf();\
}\
\
static int pix_abs8x8_xy2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad8_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
assert(h==8);\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
"movq %0, %%mm5 \n\t"\
:: "m"(round_tab[2]) \
);\
\
sad8_4_ ## suf(blk1, blk2, stride, 3);\
sad8_4_ ## suf(blk1, blk2, stride, 8);\
\
return sum_ ## suf();\
}\
\
static int pix_abs16x16_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
{\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t":);\
\
sad8_ ## suf(blk1 , blk2 , stride, 4);\
sad8_ ## suf(blk1+8, blk2+8, stride, 4);\
\
return sum_ ## suf();\
}\
static int sad16x16_ ## suf(void *s, uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad16_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t":);\
\
sad8_ ## suf(blk1 , blk2 , stride, 4);\
sad8_ ## suf(blk1+8, blk2+8, stride, 4);\
sad8_1_ ## suf(blk1 , blk2 , stride, h);\
sad8_1_ ## suf(blk1+8, blk2+8, stride, h);\
\
return sum_ ## suf();\
}\
static int pix_abs16x16_x2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad16_x2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
......@@ -351,12 +335,12 @@ static int pix_abs16x16_x2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
:: "m"(round_tab[1]) \
);\
\
sad8_2_ ## suf(blk1 , blk1+1, blk2 , stride, 4);\
sad8_2_ ## suf(blk1+8, blk1+9, blk2+8, stride, 4);\
sad8_2_ ## suf(blk1 , blk1+1, blk2 , stride, h);\
sad8_2_ ## suf(blk1+8, blk1+9, blk2+8, stride, h);\
\
return sum_ ## suf();\
}\
static int pix_abs16x16_y2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad16_y2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
......@@ -364,12 +348,12 @@ static int pix_abs16x16_y2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
:: "m"(round_tab[1]) \
);\
\
sad8_2_ ## suf(blk1 , blk1+stride, blk2 , stride, 4);\
sad8_2_ ## suf(blk1+8, blk1+stride+8,blk2+8, stride, 4);\
sad8_2_ ## suf(blk1 , blk1+stride, blk2 , stride, h);\
sad8_2_ ## suf(blk1+8, blk1+stride+8,blk2+8, stride, h);\
\
return sum_ ## suf();\
}\
static int pix_abs16x16_xy2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
static int sad16_xy2_ ## suf(void *v, uint8_t *blk2, uint8_t *blk1, int stride, int h)\
{\
asm volatile("pxor %%mm7, %%mm7 \n\t"\
"pxor %%mm6, %%mm6 \n\t"\
......@@ -377,8 +361,8 @@ static int pix_abs16x16_xy2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
:: "m"(round_tab[2]) \
);\
\
sad8_4_ ## suf(blk1 , blk2 , stride, 4);\
sad8_4_ ## suf(blk1+8, blk2+8, stride, 4);\
sad8_4_ ## suf(blk1 , blk2 , stride, h);\
sad8_4_ ## suf(blk1+8, blk2+8, stride, h);\
\
return sum_ ## suf();\
}\
......@@ -389,32 +373,32 @@ PIX_SAD(mmx2)
void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
{
if (mm_flags & MM_MMX) {
c->pix_abs16x16 = pix_abs16x16_mmx;
c->pix_abs16x16_x2 = pix_abs16x16_x2_mmx;
c->pix_abs16x16_y2 = pix_abs16x16_y2_mmx;
c->pix_abs16x16_xy2 = pix_abs16x16_xy2_mmx;
c->pix_abs8x8 = pix_abs8x8_mmx;
c->pix_abs8x8_x2 = pix_abs8x8_x2_mmx;
c->pix_abs8x8_y2 = pix_abs8x8_y2_mmx;
c->pix_abs8x8_xy2 = pix_abs8x8_xy2_mmx;
c->pix_abs[0][0] = sad16_mmx;
c->pix_abs[0][1] = sad16_x2_mmx;
c->pix_abs[0][2] = sad16_y2_mmx;
c->pix_abs[0][3] = sad16_xy2_mmx;
c->pix_abs[1][0] = sad8_mmx;
c->pix_abs[1][1] = sad8_x2_mmx;
c->pix_abs[1][2] = sad8_y2_mmx;
c->pix_abs[1][3] = sad8_xy2_mmx;
c->sad[0]= sad16x16_mmx;
c->sad[1]= sad8x8_mmx;
c->sad[0]= sad16_mmx;
c->sad[1]= sad8_mmx;
}
if (mm_flags & MM_MMXEXT) {
c->pix_abs16x16 = pix_abs16x16_mmx2;
c->pix_abs8x8 = pix_abs8x8_mmx2;
c->pix_abs[0][0] = sad16_mmx2;
c->pix_abs[1][0] = sad8_mmx2;
c->sad[0]= sad16x16_mmx2;
c->sad[1]= sad8x8_mmx2;
c->sad[0]= sad16_mmx2;
c->sad[1]= sad8_mmx2;
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
c->pix_abs16x16_x2 = pix_abs16x16_x2_mmx2;
c->pix_abs16x16_y2 = pix_abs16x16_y2_mmx2;
c->pix_abs16x16_xy2 = pix_abs16x16_xy2_mmx2;
c->pix_abs8x8_x2 = pix_abs8x8_x2_mmx2;
c->pix_abs8x8_y2 = pix_abs8x8_y2_mmx2;
c->pix_abs8x8_xy2 = pix_abs8x8_xy2_mmx2;
c->pix_abs[0][1] = sad16_x2_mmx2;
c->pix_abs[0][2] = sad16_y2_mmx2;
c->pix_abs[0][3] = sad16_xy2_mmx2;
c->pix_abs[1][1] = sad8_x2_mmx2;
c->pix_abs[1][2] = sad8_y2_mmx2;
c->pix_abs[1][3] = sad8_xy2_mmx2;
}
}
}
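
The len= -(stride<<h) to len= -(stride*h) change above reflects that h is now the actual row count rather than its base-2 logarithm. As a hedged C analogue (an assumption about the asm bodies, which are not shown in full here), the negative offset lets the inner loop run its counter up toward zero while the block pointers stay fixed just past the end of the data:

/* hedged C analogue of the negative-index loop suggested by len = -(stride * h) */
static int sad8_negative_index_sketch(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    int len = -(stride * h);             /* negative byte offset covering h rows */
    int sum = 0;
    blk1 -= len;                         /* point one full block past the data... */
    blk2 -= len;
    do {
        int x;
        for (x = 0; x < 8; x++) {
            int d = blk1[len + x] - blk2[len + x];
            sum += d < 0 ? -d : d;
        }
        len += stride;                   /* ...so the loop counter can run up to 0 */
    } while (len < 0);
    return sum;
}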
......@@ -137,6 +137,7 @@ typedef struct Picture{
int16_t (*motion_val_base[2])[2];
int8_t *ref_index[2];
uint32_t *mb_type_base;
#define MB_TYPE_INTRA MB_TYPE_INTRA4x4 //default mb_type if there's just one type
#define IS_INTRA4x4(a) ((a)&MB_TYPE_INTRA4x4)
#define IS_INTRA16x16(a) ((a)&MB_TYPE_INTRA16x16)
#define IS_PCM(a) ((a)&MB_TYPE_INTRA_PCM)
......@@ -206,23 +207,28 @@ typedef struct MotionEstContext{
int mb_penalty_factor;
int pre_pass; ///< = 1 for the pre pass
int dia_size;
int xmin;
int xmax;
int ymin;
int ymax;
uint8_t (*mv_penalty)[MAX_MV*2+1]; ///< amount of bits needed to encode a MV
int (*sub_motion_search)(struct MpegEncContext * s,
int *mx_ptr, int *my_ptr, int dmin,
int xmin, int ymin, int xmax, int ymax,
int pred_x, int pred_y, Picture *ref_picture,
int n, int size, uint8_t * const mv_penalty);
int (*motion_search[7])(struct MpegEncContext * s, int block,
int pred_x, int pred_y, uint8_t *src_data[3],
uint8_t *ref_data[6], int stride, int uvstride,
int size, int h, uint8_t * const mv_penalty);
int (*motion_search[7])(struct MpegEncContext * s,
int *mx_ptr, int *my_ptr,
int P[10][2], int pred_x, int pred_y,
int xmin, int ymin, int xmax, int ymax, Picture *ref_picture, int16_t (*last_mv)[2],
int P[10][2], int pred_x, int pred_y, uint8_t *src_data[3],
uint8_t *ref_data[6], int stride, int uvstride, int16_t (*last_mv)[2],
int ref_mv_scale, uint8_t * const mv_penalty);
int (*pre_motion_search)(struct MpegEncContext * s, int block,
int (*pre_motion_search)(struct MpegEncContext * s,
int *mx_ptr, int *my_ptr,
int P[10][2], int pred_x, int pred_y,
int xmin, int ymin, int xmax, int ymax, Picture *ref_picture, int16_t (*last_mv)[2],
int P[10][2], int pred_x, int pred_y, uint8_t *src_data[3],
uint8_t *ref_data[6], int stride, int uvstride, int16_t (*last_mv)[2],
int ref_mv_scale, uint8_t * const mv_penalty);
int (*get_mb_score)(struct MpegEncContext * s, int mx, int my, int pred_x, int pred_y, Picture *ref_picture,
int (*get_mb_score)(struct MpegEncContext * s, int mx, int my, int pred_x, int pred_y, uint8_t *src_data[3],
uint8_t *ref_data[6], int stride, int uvstride,
uint8_t * const mv_penalty);
}MotionEstContext;
......@@ -351,12 +357,18 @@ typedef struct MpegEncContext {
int16_t (*b_bidir_forw_mv_table_base)[2];
int16_t (*b_bidir_back_mv_table_base)[2];
int16_t (*b_direct_mv_table_base)[2];
int16_t (*p_field_mv_table_base[2][2])[2];
int16_t (*b_field_mv_table_base[2][2][2])[2];
int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) p-frame encoding
int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode b-frame encoding
int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode b-frame encoding
int16_t (*b_bidir_forw_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding
int16_t (*b_bidir_back_mv_table)[2]; ///< MV table (1MV per MB) bidir mode b-frame encoding
int16_t (*b_direct_mv_table)[2]; ///< MV table (1MV per MB) direct mode b-frame encoding
int16_t (*p_field_mv_table[2][2])[2]; ///< MV table (2MV per MB) interlaced p-frame encoding
int16_t (*b_field_mv_table[2][2][2])[2];///< MV table (4MV per MB) interlaced b-frame encoding
uint8_t (*p_field_select_table[2]);
uint8_t (*b_field_select_table[2][2]);
int me_method; ///< ME algorithm
int scene_change_score;
int mv_dir;
......@@ -391,17 +403,22 @@ typedef struct MpegEncContext {
int mb_x, mb_y;
int mb_skip_run;
int mb_intra;
uint8_t *mb_type; ///< Table for MB type FIXME remove and use picture->mb_type
#define MB_TYPE_INTRA 0x01
#define MB_TYPE_INTER 0x02
#define MB_TYPE_INTER4V 0x04
#define MB_TYPE_SKIPED 0x08
uint16_t *mb_type; ///< Table for candidate MB types for encoding
#define CANDIDATE_MB_TYPE_INTRA 0x01
#define CANDIDATE_MB_TYPE_INTER 0x02
#define CANDIDATE_MB_TYPE_INTER4V 0x04
#define CANDIDATE_MB_TYPE_SKIPED 0x08
//#define MB_TYPE_GMC 0x10
#define MB_TYPE_DIRECT 0x10
#define MB_TYPE_FORWARD 0x20
#define MB_TYPE_BACKWARD 0x40
#define MB_TYPE_BIDIR 0x80
#define CANDIDATE_MB_TYPE_DIRECT 0x10
#define CANDIDATE_MB_TYPE_FORWARD 0x20
#define CANDIDATE_MB_TYPE_BACKWARD 0x40
#define CANDIDATE_MB_TYPE_BIDIR 0x80
#define CANDIDATE_MB_TYPE_INTER_I 0x100
#define CANDIDATE_MB_TYPE_FORWARD_I 0x200
#define CANDIDATE_MB_TYPE_BACKWARD_I 0x400
#define CANDIDATE_MB_TYPE_BIDIR_I 0x800
int block_index[6]; ///< index to current MB in block based arrays with edges
int block_wrap[6];
......@@ -551,8 +568,6 @@ typedef struct MpegEncContext {
uint8_t *tex_pb_buffer;
uint8_t *pb2_buffer;
int mpeg_quant;
int16_t (*field_mv_table)[2][2]; ///< used for interlaced b frame decoding
int8_t (*field_select_table)[2]; ///< wtf, no really another table for interlaced b frames
int t_frame; ///< time distance of first I -> B, used for interlaced b frames
int padding_bug_score; ///< used to detect the VERY common padding bug in MPEG4
......@@ -748,7 +763,8 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
int mb_x, int mb_y);
int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type);
void ff_fix_long_p_mvs(MpegEncContext * s);
void ff_fix_long_b_mvs(MpegEncContext * s, int16_t (*mv_table)[2], int f_code, int type);
void ff_fix_long_mvs(MpegEncContext * s, uint8_t *field_select_table, int field_select,
int16_t (*mv_table)[2], int f_code, int type, int truncate);
void ff_init_me(MpegEncContext *s);
int ff_pre_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y);
......
......@@ -45,7 +45,7 @@ static void sigill_handler (int sig)
}
#endif /* CONFIG_DARWIN */
int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -57,7 +57,7 @@ int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
s = 0;
sad = (vector unsigned int)vec_splat_u32(0);
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
/*
Read unaligned pixels into our vectors. The vectors are as follows:
pix1v: pix1[0]-pix1[15]
......@@ -92,7 +92,7 @@ int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -118,7 +118,7 @@ int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
tv = (vector unsigned char *) &pix2[0];
pix2v = vec_perm(tv[0], tv[1], vec_lvsl(0, &pix2[0]));
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
/*
Read unaligned pixels into our vectors. The vectors are as follows:
pix1v: pix1[0]-pix1[15]
......@@ -152,7 +152,7 @@ int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -194,7 +194,7 @@ int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
t1 = vec_add(pix2hv, pix2ihv);
t2 = vec_add(pix2lv, pix2ilv);
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
/*
Read unaligned pixels into our vectors. The vectors are as follows:
pix1v: pix1[0]-pix1[15]
......@@ -253,7 +253,7 @@ int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -266,7 +266,7 @@ int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
sad = (vector unsigned int)vec_splat_u32(0);
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
/* Read potentially unaligned pixels into t1 and t2 */
perm1 = vec_lvsl(0, pix1);
pix1v = (vector unsigned char *) pix1;
......@@ -295,7 +295,7 @@ int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
return s;
}
int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -309,7 +309,7 @@ int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
for(i=0;i<8;i++) {
for(i=0;i<h;i++) {
/* Read potentially unaligned pixels into t1 and t2
Since we're reading 16 pixels, and actually only want 8,
mask out the last 8 pixels. The 0s don't change the sum. */
......@@ -374,9 +374,9 @@ int pix_norm1_altivec(uint8_t *pix, int line_size)
/**
* Sum of Squared Errors for a 8x8 block.
* AltiVec-enhanced.
* It's the pix_abs8x8_altivec code above w/ squaring added.
* It's the sad8_altivec code above w/ squaring added.
*/
int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -391,7 +391,7 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
for(i=0;i<8;i++) {
for(i=0;i<h;i++) {
/* Read potentially unaligned pixels into t1 and t2
Since we're reading 16 pixels, and actually only want 8,
mask out the last 8 pixels. The 0s don't change the sum. */
......@@ -430,9 +430,9 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
/**
* Sum of Squared Errors for a 16x16 block.
* AltiVec-enhanced.
* It's the pix_abs16x16_altivec code above w/ squaring added.
* It's the sad16_altivec code above w/ squaring added.
*/
int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
{
int i;
int s __attribute__((aligned(16)));
......@@ -444,7 +444,7 @@ int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size)
sum = (vector unsigned int)vec_splat_u32(0);
for(i=0;i<16;i++) {
for(i=0;i<h;i++) {
/* Read potentially unaligned pixels into t1 and t2 */
perm1 = vec_lvsl(0, pix1);
pix1v = (vector unsigned char *) pix1;
......@@ -609,14 +609,6 @@ void diff_pixels_altivec(DCTELEM *restrict block, const uint8_t *s1,
}
}
int sad16x16_altivec(void *s, uint8_t *a, uint8_t *b, int stride) {
return pix_abs16x16_altivec(a,b,stride);
}
int sad8x8_altivec(void *s, uint8_t *a, uint8_t *b, int stride) {
return pix_abs8x8_altivec(a,b,stride);
}
void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w) {
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
int i;
......
......@@ -24,16 +24,14 @@
#ifdef HAVE_ALTIVEC
extern int pix_abs16x16_x2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs16x16_y2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs16x16_xy2_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int sad16x16_altivec(void *s, uint8_t *a, uint8_t *b, int stride);
extern int sad8x8_altivec(void *s, uint8_t *a, uint8_t *b, int stride);
extern int sad16_x2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int sad16_y2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int sad16_xy2_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int sad16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int pix_norm1_altivec(uint8_t *pix, int line_size);
extern int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size);
extern int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size);
extern int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int sse16_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h);
extern int pix_sum_altivec(uint8_t * pix, int line_size);
extern void diff_pixels_altivec(DCTELEM* block, const uint8_t* s1, const uint8_t* s2, int stride);
extern void get_pixels_altivec(DCTELEM* block, const uint8_t * pixels, int line_size);
......
......@@ -240,13 +240,13 @@ void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx)
mm_flags |= MM_ALTIVEC;
// Altivec specific optimisations
c->pix_abs16x16_x2 = pix_abs16x16_x2_altivec;
c->pix_abs16x16_y2 = pix_abs16x16_y2_altivec;
c->pix_abs16x16_xy2 = pix_abs16x16_xy2_altivec;
c->pix_abs16x16 = pix_abs16x16_altivec;
c->pix_abs8x8 = pix_abs8x8_altivec;
c->sad[0]= sad16x16_altivec;
c->sad[1]= sad8x8_altivec;
c->pix_abs[0][1] = sad16_x2_altivec;
c->pix_abs[0][2] = sad16_y2_altivec;
c->pix_abs[0][3] = sad16_xy2_altivec;
c->pix_abs[0][0] = sad16_altivec;
c->pix_abs[1][0] = sad8_altivec;
c->sad[0]= sad16_altivec;
c->sad[1]= sad8_altivec;
c->pix_norm1 = pix_norm1_altivec;
c->sse[1]= sse8_altivec;
c->sse[0]= sse16_altivec;
......
......@@ -520,7 +520,7 @@ static void adaptive_quantization(MpegEncContext *s, double q){
if(spat_cplx < 4) spat_cplx= 4; //FIXME finetune
if(temp_cplx < 4) temp_cplx= 4; //FIXME finetune
if((s->mb_type[mb_xy]&MB_TYPE_INTRA)){//FIXME hq mode
if((s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_INTRA)){//FIXME hq mode
cplx= spat_cplx;
factor= 1.0 + p_masking;
}else{
......
......@@ -26,21 +26,21 @@ stddev: 8.18 bytes:7602176
920a0a8a0063655d1f34dcaad7857f98 *./data/a-h263p.avi
0eb167c9dfcbeeecbf3debed8af8f811 *./data/out.yuv
stddev: 2.08 bytes:7602176
a8cc41cd5016bbb821e7c2691f5090ea *./data/a-odivx.mp4
e48114a50ef4cfb4fe2016fa5b34ae4c *./data/out.yuv
stddev: 8.02 bytes:7602176
66f8b4b5b4f0655cff7bdbc44969cab3 *./data/a-odivx.mp4
5bd332c77ef45e58b7017e06a0467dd3 *./data/out.yuv
stddev: 7.94 bytes:7602176
5704a082cc5c5970620123ae20566286 *./data/a-huffyuv.avi
799d3db687f6cdd7a837ec156efc171f *./data/out.yuv
stddev: 0.00 bytes:7602176
e9f63126859b97cd23cd1413038f8f7b *./data/a-mpeg4-rc.avi
90a159074b1b109569914ee63f387860 *./data/out.yuv
stddev: 10.18 bytes:7145472
b3f1425e266569d5d726b88eadc13dd4 *./data/a-mpeg4-adv.avi
fb61365b22c947adbaeab74478579020 *./data/out.yuv
stddev: 7.31 bytes:7602176
25ec5ab399fd4db0c8aaea78cb692611 *./data/a-error-mpeg4-adv.avi
bd441fc1e2fb9a3c0bdc9c5f1ed25ef0 *./data/out.yuv
stddev: 13.57 bytes:7602176
d7d295f97a1e07b633f973d2325880ce *./data/a-mpeg4-adv.avi
612f79510c8098f1421aa154047e2bf2 *./data/out.yuv
stddev: 7.25 bytes:7602176
f863f4198521bd76930ea33991b47273 *./data/a-error-mpeg4-adv.avi
ba7fcd126c7c9fead5a5de71aaaf0624 *./data/out.yuv
stddev: 16.80 bytes:7602176
328ebd044362116e274739e23c482ee7 *./data/a-mpeg1b.mpg
788a9d500dc8986231a18076fc80fd73 *./data/out.yuv
stddev: 10.07 bytes:7145472
......
......@@ -138,7 +138,7 @@ do_ffmpeg $raw_dst -y -i $file -f rawvideo $raw_dst
# mpeg2 encoding interlaced
file=${outfile}mpeg2i.mpg
do_ffmpeg $file -y -qscale 10 -f pgmyuv -i $raw_src -vcodec mpeg2video -f mpeg1video -interlace $file
do_ffmpeg $file -y -qscale 10 -f pgmyuv -i $raw_src -vcodec mpeg2video -f mpeg1video -ildct $file
# mpeg2 decoding
do_ffmpeg $raw_dst -y -i $file -f rawvideo $raw_dst
......
......@@ -26,21 +26,21 @@ stddev: 5.41 bytes:7602176
f7828488c31ccb6787367ef4e4a2ad42 *./data/a-h263p.avi
7d39d1f272205a6a231d0e0baf32ff9d *./data/out.yuv
stddev: 1.91 bytes:7602176
f17dc7346f5d1d4307ecf4507f10fcc6 *./data/a-odivx.mp4
ff7ddb57d9038b94f08c43bae7e1329f *./data/out.yuv
stddev: 5.28 bytes:7602176
a831828595e5764e6ee30c2d9e548385 *./data/a-odivx.mp4
ad75d173bd30d642147f00da21df0012 *./data/out.yuv
stddev: 5.27 bytes:7602176
242a7a18c2793e115007bc163861ef4e *./data/a-huffyuv.avi
dde5895817ad9d219f79a52d0bdfb001 *./data/out.yuv
stddev: 0.00 bytes:7602176
6a469f42ce6946dd4c708f9e51e3da6a *./data/a-mpeg4-rc.avi
df9de7134d961119705b4e0cabca1f12 *./data/out.yuv
stddev: 4.20 bytes:7145472
742ffadf3c309d2c4ac888a6a0905bf9 *./data/a-mpeg4-adv.avi
b02f71e91e9368ce94814ab3d74f91ba *./data/out.yuv
stddev: 4.97 bytes:7602176
f2888ab759ac28aba85a16d3d54b80d0 *./data/a-error-mpeg4-adv.avi
93ab926aad2e658a5bb00c25b7cefdab *./data/out.yuv
stddev: 5.22 bytes:7602176
483504d060b0bd8ac1acfa3a823c2ad7 *./data/a-mpeg4-adv.avi
08d24bdd7da80cffaf8abaa3e71b1843 *./data/out.yuv
stddev: 4.96 bytes:7602176
03ff35856faefb4882eaf4d86d95bea7 *./data/a-error-mpeg4-adv.avi
8550acff0851ee915bd5800f1e20f37c *./data/out.yuv
stddev: 9.66 bytes:7602176
671802a2c5078e69f7f422765ea87f2a *./data/a-mpeg1b.mpg
d3d5876cef34b728602d5a22eee9249f *./data/out.yuv
stddev: 5.93 bytes:7145472
......