Commit b40cd4e0 authored by Michael Niedermayer

rv20 (h263) b frame decoding support

Originally committed as revision 2561 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 6952f123
@@ -49,6 +49,8 @@
#define SPRITE_TRAJ_VLC_BITS 6
#define MB_TYPE_B_VLC_BITS 4
#define TEX_VLC_BITS 9
#define H263_MBTYPE_B_VLC_BITS 6
#define CBPC_B_VLC_BITS 3
#ifdef CONFIG_ENCODERS
static void h263_encode_block(MpegEncContext * s, DCTELEM * block,
@@ -1092,8 +1094,8 @@ void h263_encode_mb(MpegEncContext * s,
h263_pred_motion(s, 0, &pred_x, &pred_y);
if (!s->umvplus) {
h263_encode_motion(s, motion_x - pred_x, s->f_code);
h263_encode_motion(s, motion_y - pred_y, s->f_code);
h263_encode_motion(s, motion_x - pred_x, 1);
h263_encode_motion(s, motion_y - pred_y, 1);
}
else {
h263p_encode_umotion(s, motion_x - pred_x);
@@ -1121,8 +1123,8 @@ void h263_encode_mb(MpegEncContext * s,
motion_x= s->motion_val[ s->block_index[i] ][0];
motion_y= s->motion_val[ s->block_index[i] ][1];
if (!s->umvplus) {
h263_encode_motion(s, motion_x - pred_x, s->f_code);
h263_encode_motion(s, motion_y - pred_y, s->f_code);
h263_encode_motion(s, motion_x - pred_x, 1);
h263_encode_motion(s, motion_y - pred_y, 1);
}
else {
h263p_encode_umotion(s, motion_x - pred_x);
@@ -1513,6 +1515,67 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block,
return mot_val;
}
// identical to above but with s->current_picture->motion_val, the above one will be removed, and this renamed to it
int16_t *h263_pred_motion2(MpegEncContext * s, int block, int dir,
int *px, int *py)
{
int xy, wrap;
int16_t *A, *B, *C, (*mot_val)[2];
static const int off[4]= {2, 1, 1, -1};
wrap = s->b8_stride;
xy = s->mb_x + s->mb_y * wrap;
mot_val = s->current_picture.motion_val[dir] + xy;
A = mot_val[ - 1];
/* special case for first (slice) line */
if (s->first_slice_line && block<3) {
// we can't just change some MVs to simulate that as we need them for the B frames (and ME)
// and if we ever support non-rectangular objects then we need to do a few ifs here anyway :(
if(block==0){ //most common case
if(s->mb_x == s->resync_mb_x){ //rare
*px= *py = 0;
}else if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
C = mot_val[off[block] - wrap];
if(s->mb_x==0){
*px = C[0];
*py = C[1];
}else{
*px = mid_pred(A[0], 0, C[0]);
*py = mid_pred(A[1], 0, C[1]);
}
}else{
*px = A[0];
*py = A[1];
}
}else if(block==1){
if(s->mb_x + 1 == s->resync_mb_x && s->h263_pred){ //rare
C = mot_val[off[block] - wrap];
*px = mid_pred(A[0], 0, C[0]);
*py = mid_pred(A[1], 0, C[1]);
}else{
*px = A[0];
*py = A[1];
}
}else{ /* block==2*/
B = mot_val[ - wrap];
C = mot_val[off[block] - wrap];
if(s->mb_x == s->resync_mb_x) //rare
A[0]=A[1]=0;
*px = mid_pred(A[0], B[0], C[0]);
*py = mid_pred(A[1], B[1], C[1]);
}
} else {
B = mot_val[ - wrap];
C = mot_val[off[block] - wrap];
*px = mid_pred(A[0], B[0], C[0]);
*py = mid_pred(A[1], B[1], C[1]);
}
return *mot_val;
}
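
The predictor above takes the per-component median of the left (A), top (B) and top-right (C) neighbours via mid_pred(). For reference, a minimal stand-alone sketch of median-of-three selection that gives the same result as libavcodec's helper (illustrative only, not the optimized version in the tree):

    /* Median of three: return the value that is neither the smallest nor
     * the largest; MV prediction applies this per component. */
    static inline int median3(int a, int b, int c)
    {
        if (a > b) {
            if (c > b) b = (c > a) ? a : c;
        } else {
            if (b > c) b = (c > a) ? c : a;
        }
        return b;
    }
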
#ifdef CONFIG_ENCODERS
static void h263_encode_motion(MpegEncContext * s, int val, int f_code)
{
@@ -2606,6 +2669,8 @@ static VLC mv_vlc;
static VLC dc_lum, dc_chrom;
static VLC sprite_trajectory;
static VLC mb_type_b_vlc;
static VLC h263_mbtype_b_vlc;
static VLC cbpc_b_vlc;
void init_vlc_rl(RLTable *rl)
{
@@ -2698,6 +2763,12 @@ void h263_decode_init_vlc(MpegEncContext *s)
init_vlc(&mb_type_b_vlc, MB_TYPE_B_VLC_BITS, 4,
&mb_type_b_tab[0][1], 2, 1,
&mb_type_b_tab[0][0], 2, 1);
init_vlc(&h263_mbtype_b_vlc, H263_MBTYPE_B_VLC_BITS, 15,
&h263_mbtype_b_tab[0][1], 2, 1,
&h263_mbtype_b_tab[0][0], 2, 1);
init_vlc(&cbpc_b_vlc, CBPC_B_VLC_BITS, 4,
&cbpc_b_tab[0][1], 2, 1,
&cbpc_b_tab[0][0], 2, 1);
}
}
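
The two new tables follow the usual libavcodec layout: each row is a {code, length-in-bits} pair, and init_vlc() is pointed at the length column and the code column with a 2-byte stride, so get_vlc2() later returns the row index. Purely as an illustration of that pairing (this is not how the decoder works; the helper name and its use of the GetBitContext reader are assumptions), a prefix-free table such as h263_mbtype_b_tab could also be matched by a linear scan:

    /* Linear scan over a {code, bits} table; valid because the code set is
     * prefix-free, so at most one row can match the upcoming bits. */
    static int slow_vlc_lookup(const uint8_t tab[][2], int rows, GetBitContext *gb)
    {
        int i;
        for (i = 0; i < rows; i++) {
            if (show_bits(gb, tab[i][1]) == tab[i][0]) {
                skip_bits(gb, tab[i][1]);
                return i;          /* same index get_vlc2() would return */
            }
        }
        return -1;                 /* damaged bitstream */
    }
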
@@ -3511,12 +3582,12 @@ static void preview_obmc(MpegEncContext *s){
if (s->umvplus)
mx = h263p_decode_umotion(s, pred_x);
else
mx = h263_decode_motion(s, pred_x, s->f_code);
mx = h263_decode_motion(s, pred_x, 1);
if (s->umvplus)
my = h263p_decode_umotion(s, pred_y);
else
my = h263_decode_motion(s, pred_y, s->f_code);
my = h263_decode_motion(s, pred_y, 1);
mot_val[0 ]= mot_val[2 ]=
mot_val[0+stride]= mot_val[2+stride]= mx;
@@ -3529,12 +3600,12 @@ static void preview_obmc(MpegEncContext *s){
if (s->umvplus)
mx = h263p_decode_umotion(s, pred_x);
else
mx = h263_decode_motion(s, pred_x, s->f_code);
mx = h263_decode_motion(s, pred_x, 1);
if (s->umvplus)
my = h263p_decode_umotion(s, pred_y);
else
my = h263_decode_motion(s, pred_y, s->f_code);
my = h263_decode_motion(s, pred_y, 1);
if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
mot_val[0] = mx;
@@ -3616,7 +3687,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
if (s->umvplus)
mx = h263p_decode_umotion(s, pred_x);
else
mx = h263_decode_motion(s, pred_x, s->f_code);
mx = h263_decode_motion(s, pred_x, 1);
if (mx >= 0xffff)
return -1;
@@ -3624,7 +3695,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
if (s->umvplus)
my = h263p_decode_umotion(s, pred_y);
else
my = h263_decode_motion(s, pred_y, s->f_code);
my = h263_decode_motion(s, pred_y, 1);
if (my >= 0xffff)
return -1;
@@ -3641,14 +3712,14 @@ int ff_h263_decode_mb(MpegEncContext *s,
if (s->umvplus)
mx = h263p_decode_umotion(s, pred_x);
else
mx = h263_decode_motion(s, pred_x, s->f_code);
mx = h263_decode_motion(s, pred_x, 1);
if (mx >= 0xffff)
return -1;
if (s->umvplus)
my = h263p_decode_umotion(s, pred_y);
else
my = h263_decode_motion(s, pred_y, s->f_code);
my = h263_decode_motion(s, pred_y, 1);
if (my >= 0xffff)
return -1;
s->mv[0][i][0] = mx;
@@ -3659,6 +3730,103 @@ int ff_h263_decode_mb(MpegEncContext *s,
mot_val[1] = my;
}
}
if(s->obmc){
if(s->pict_type == P_TYPE && s->mb_x+1<s->mb_width)
preview_obmc(s);
}
} else if(s->pict_type==B_TYPE) {
int mb_type;
const int stride= s->b8_stride;
int16_t *mot_val0 = s->current_picture.motion_val[0][ s->mb_x + s->mb_y*stride ];
int16_t *mot_val1 = s->current_picture.motion_val[1][ s->mb_x + s->mb_y*stride ];
// const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride;
//FIXME ugly
mot_val0[0 ]= mot_val0[2 ]= mot_val0[0+stride]= mot_val0[2+stride]= 0;
mot_val0[1 ]= mot_val0[3 ]= mot_val0[1+stride]= mot_val0[3+stride]= 0;
mot_val1[0 ]= mot_val1[2 ]= mot_val1[0+stride]= mot_val1[2+stride]= 0;
mot_val1[1 ]= mot_val1[3 ]= mot_val1[1+stride]= mot_val1[3+stride]= 0;
do{
mb_type= get_vlc2(&s->gb, h263_mbtype_b_vlc.table, H263_MBTYPE_B_VLC_BITS, 2);
if (mb_type < 0){
av_log(s->avctx, AV_LOG_ERROR, "b mb_type damaged at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
mb_type= h263_mb_type_b_map[ mb_type ];
}while(!mb_type);
s->mb_intra = IS_INTRA(mb_type);
if(HAS_CBP(mb_type)){
cbpc = get_vlc2(&s->gb, cbpc_b_vlc.table, CBPC_B_VLC_BITS, 1);
if(s->mb_intra){
dquant = IS_QUANT(mb_type);
goto intra;
}
cbpy = get_vlc2(&s->gb, cbpy_vlc.table, CBPY_VLC_BITS, 1);
if (cbpy < 0){
av_log(s->avctx, AV_LOG_ERROR, "b cbpy damaged at %d %d\n", s->mb_x, s->mb_y);
return -1;
}
if(s->alt_inter_vlc==0 || (cbpc & 3)!=3)
cbpy ^= 0xF;
cbp = (cbpc & 3) | (cbpy << 2);
}else
cbp=0;
assert(!s->mb_intra);
if(IS_QUANT(mb_type)){
if(s->modified_quant){ //FIXME factorize
if(get_bits1(&s->gb))
s->qscale= modified_quant_tab[get_bits1(&s->gb)][ s->qscale ];
else
s->qscale= get_bits(&s->gb, 5);
}else
s->qscale += quant_tab[get_bits(&s->gb, 2)];
ff_set_qscale(s, s->qscale);
}
if(IS_DIRECT(mb_type)){
s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
mb_type |= ff_mpeg4_set_direct_mv(s, 0, 0);
}else{
s->mv_dir = 0;
s->mv_type= MV_TYPE_16X16;
//FIXME UMV
if(USES_LIST(mb_type, 0)){
int16_t *mot_val= h263_pred_motion2(s, 0, 0, &mx, &my);
s->mv_dir = MV_DIR_FORWARD;
mx = h263_decode_motion(s, mx, 1);
my = h263_decode_motion(s, my, 1);
s->mv[0][0][0] = mx;
s->mv[0][0][1] = my;
mot_val[0 ]= mot_val[2 ]= mot_val[0+stride]= mot_val[2+stride]= mx;
mot_val[1 ]= mot_val[3 ]= mot_val[1+stride]= mot_val[3+stride]= my;
}
if(USES_LIST(mb_type, 1)){
int16_t *mot_val= h263_pred_motion2(s, 0, 1, &mx, &my);
s->mv_dir |= MV_DIR_BACKWARD;
mx = h263_decode_motion(s, mx, 1);
my = h263_decode_motion(s, my, 1);
s->mv[1][0][0] = mx;
s->mv[1][0][1] = my;
mot_val[0 ]= mot_val[2 ]= mot_val[0+stride]= mot_val[2+stride]= mx;
mot_val[1 ]= mot_val[3 ]= mot_val[1+stride]= mot_val[3+stride]= my;
}
}
s->current_picture.mb_type[xy]= mb_type;
} else { /* I-Frame */
do{
cbpc = get_vlc2(&s->gb, intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
@@ -3698,14 +3866,6 @@ intra:
s->qscale += quant_tab[get_bits(&s->gb, 2)];
ff_set_qscale(s, s->qscale);
}
/* decode each block */
for (i = 0; i < 6; i++) {
if (h263_decode_block(s, block[i], i, cbp&32) < 0)
return -1;
cbp+=cbp;
}
goto end;
}
/* decode each block */
@@ -3715,10 +3875,6 @@ intra:
cbp+=cbp;
}
end:
if(s->obmc){
if(s->pict_type == P_TYPE && s->mb_x+1<s->mb_width)
preview_obmc(s);
}
/* per-MB end of slice check */
{
@@ -4692,12 +4848,15 @@ int h263_decode_picture_header(MpegEncContext *s)
}
/* MPPTYPE */
s->pict_type = get_bits(&s->gb, 3) + I_TYPE;
if (s->pict_type == 8 && s->avctx->codec_tag == ff_get_fourcc("ZYGO"))
s->pict_type = I_TYPE;
if (s->pict_type != I_TYPE &&
s->pict_type != P_TYPE)
s->pict_type = get_bits(&s->gb, 3);
switch(s->pict_type){
case 0: s->pict_type= I_TYPE;break;
case 1: s->pict_type= P_TYPE;break;
case 3: s->pict_type= B_TYPE;break;
case 7: s->pict_type= I_TYPE;break; //ZYGO
default:
return -1;
}
skip_bits(&s->gb, 2);
s->no_rounding = get_bits1(&s->gb);
skip_bits(&s->gb, 4);
@@ -29,6 +29,49 @@ const uint8_t inter_MCBPC_bits[28] = {
11, 13, 13, 13,/* inter4Q*/
};
static const uint8_t h263_mbtype_b_tab[15][2] = {
{1, 1},
{3, 3},
{1, 5},
{4, 4},
{5, 4},
{6, 6},
{2, 4},
{3, 4},
{7, 6},
{4, 6},
{5, 6},
{1, 6},
{1,10},
{1, 7},
{1, 8},
};
static const int h263_mb_type_b_map[15]= {
MB_TYPE_DIRECT2 | MB_TYPE_L0L1,
MB_TYPE_DIRECT2 | MB_TYPE_L0L1 | MB_TYPE_CBP,
MB_TYPE_DIRECT2 | MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_QUANT,
MB_TYPE_L0,
MB_TYPE_L0 | MB_TYPE_CBP,
MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_QUANT,
MB_TYPE_L1,
MB_TYPE_L1 | MB_TYPE_CBP,
MB_TYPE_L1 | MB_TYPE_CBP | MB_TYPE_QUANT,
MB_TYPE_L0L1,
MB_TYPE_L0L1 | MB_TYPE_CBP,
MB_TYPE_L0L1 | MB_TYPE_CBP | MB_TYPE_QUANT,
0, //stuffing
MB_TYPE_INTRA | MB_TYPE_CBP,
MB_TYPE_INTRA | MB_TYPE_CBP | MB_TYPE_QUANT,
};
const uint8_t cbpc_b_tab[4][2] = {
{0, 1},
{2, 2},
{7, 3},
{6, 3},
};
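
h263_mb_type_b_map[] translates the raw VLC row index into the shared mb_type flag word; row 12 (stuffing) maps to 0, which is why the B-frame path in ff_h263_decode_mb() earlier in this diff loops until a non-zero type comes back. A small worked example of what a decoded index implies (values read off the tables above; the assert-style checks are only illustrative):

    uint32_t mb_type = h263_mb_type_b_map[4];    /* MB_TYPE_L0 | MB_TYPE_CBP */
    /* forward-predicted and coded: one forward MV and a coded block pattern follow */
    assert( USES_LIST(mb_type, 0) && !USES_LIST(mb_type, 1));
    assert( HAS_CBP(mb_type)      && !IS_QUANT(mb_type));
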
const uint8_t cbpy_tab[16][2] =
{
{3,4}, {5,5}, {4,5}, {9,4}, {3,5}, {7,4}, {2,6}, {11,4},
@@ -1109,7 +1109,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
}
} else {
if (mb_type & MB_TYPE_ZERO_MV){
assert(mb_type & MB_TYPE_PAT);
assert(mb_type & MB_TYPE_CBP);
/* compute dct type */
if (s->picture_structure == PICT_FRAME && //FIXME add a interlaced_dct coded var?
@@ -1140,7 +1140,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
/* compute dct type */
if (s->picture_structure == PICT_FRAME && //FIXME add a interlaced_dct coded var?
!s->frame_pred_frame_dct && IS_PAT(mb_type)) {
!s->frame_pred_frame_dct && HAS_CBP(mb_type)) {
s->interlaced_dct = get_bits1(&s->gb);
}
@@ -1266,7 +1266,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
s->mb_intra = 0;
if (IS_PAT(mb_type)) {
if (HAS_CBP(mb_type)) {
cbp = get_vlc2(&s->gb, mb_pat_vlc.table, MB_PAT_VLC_BITS, 1);
if (cbp < 0){
av_log(s->avctx, AV_LOG_ERROR, "invalid cbp at %d %d\n", s->mb_x, s->mb_y);
@@ -283,10 +283,8 @@ static const uint8_t mbPatTable[63][2] = {
{0xc, 6}
};
#define MB_TYPE_PAT 0x40000000
#define MB_TYPE_ZERO_MV 0x20000000
#define IS_ZERO_MV(a) ((a)&MB_TYPE_ZERO_MV)
#define IS_PAT(a) ((a)&MB_TYPE_PAT)
static const uint8_t table_mb_ptype[7][2] = {
{ 3, 5 }, // 0x01 MB_INTRA
@@ -300,12 +298,12 @@ static const uint8_t table_mb_ptype[7][2] = {
static const uint32_t ptype2mb_type[7] = {
MB_TYPE_INTRA,
MB_TYPE_L0 | MB_TYPE_PAT | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
MB_TYPE_L0,
MB_TYPE_L0 | MB_TYPE_PAT,
MB_TYPE_L0 | MB_TYPE_CBP,
MB_TYPE_QUANT | MB_TYPE_INTRA,
MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_PAT | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_PAT,
MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP | MB_TYPE_ZERO_MV | MB_TYPE_16x16,
MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
};
static const uint8_t table_mb_btype[11][2] = {
@@ -325,15 +323,15 @@ static const uint8_t table_mb_btype[11][2] = {
static const uint32_t btype2mb_type[11] = {
MB_TYPE_INTRA,
MB_TYPE_L1,
MB_TYPE_L1 | MB_TYPE_PAT,
MB_TYPE_L1 | MB_TYPE_CBP,
MB_TYPE_L0,
MB_TYPE_L0 | MB_TYPE_PAT,
MB_TYPE_L0 | MB_TYPE_CBP,
MB_TYPE_L0L1,
MB_TYPE_L0L1 | MB_TYPE_PAT,
MB_TYPE_L0L1 | MB_TYPE_CBP,
MB_TYPE_QUANT | MB_TYPE_INTRA,
MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_PAT,
MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_PAT,
MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_PAT,
MB_TYPE_QUANT | MB_TYPE_L1 | MB_TYPE_CBP,
MB_TYPE_QUANT | MB_TYPE_L0 | MB_TYPE_CBP,
MB_TYPE_QUANT | MB_TYPE_L0L1 | MB_TYPE_CBP,
};
static const uint8_t mbMotionVectorTable[17][2] = {
@@ -265,6 +265,8 @@ static void copy_picture(Picture *dst, Picture *src){
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) doesnt sig11
const int mb_array_size= s->mb_stride*s->mb_height;
const int b8_array_size= s->b8_stride*s->mb_height*2;
const int b4_array_size= s->b4_stride*s->mb_height*4;
int i;
if(shared){
@@ -310,8 +312,14 @@ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared){
pic->mb_type= pic->mb_type_base + s->mb_stride+1;
if(s->out_format == FMT_H264){
for(i=0; i<2; i++){
CHECKED_ALLOCZ(pic->motion_val[i], 2 * 16 * s->mb_num * sizeof(uint16_t))
CHECKED_ALLOCZ(pic->ref_index[i] , 4 * s->mb_num * sizeof(uint8_t))
CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b4_array_size+1) * sizeof(uint16_t))
pic->motion_val[i]= pic->motion_val_base[i]+1;
CHECKED_ALLOCZ(pic->ref_index[i] , b8_array_size * sizeof(uint8_t))
}
}else if(s->out_format == FMT_H263){
for(i=0; i<2; i++){
CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+1) * sizeof(uint16_t))
pic->motion_val[i]= pic->motion_val_base[i]+1;
}
}
pic->qstride= s->mb_stride;
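
The +1 applied to motion_val_base in both branches leaves one zero-initialised guard element in front of the published pointer, so predictors that read motion_val[xy - 1] at the left picture edge stay inside the allocation (see the Picture field comment later in this commit). A minimal sketch of the same pattern with hypothetical names (error handling omitted):

    /* Allocate n+1 MV pairs; publish a pointer starting at element 1 so
     * that index -1 still lands on the zeroed guard entry. */
    int16_t (*mv_base)[2] = av_mallocz((n + 1) * sizeof(*mv_base));
    int16_t (*mv)[2]      = mv_base + 1;
    /* ... mv[xy] and mv[xy - 1] are safe for 0 <= xy < n ... */
    av_free(mv_base);      /* free the base pointer, not the shifted one */
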
@@ -348,7 +356,7 @@ static void free_picture(MpegEncContext *s, Picture *pic){
av_freep(&pic->pan_scan);
pic->mb_type= NULL;
for(i=0; i<2; i++){
av_freep(&pic->motion_val[i]);
av_freep(&pic->motion_val_base[i]);
av_freep(&pic->ref_index[i]);
}
@@ -374,6 +382,8 @@ int MPV_common_init(MpegEncContext *s)
s->mb_width = (s->width + 15) / 16;
s->mb_height = (s->height + 15) / 16;
s->mb_stride = s->mb_width + 1;
s->b8_stride = s->mb_width*2 + 1;
s->b4_stride = s->mb_width*4 + 1;
mb_array_size= s->mb_height * s->mb_stride;
/* set default edge pos, will be overriden in decode_header if needed */
@@ -2364,7 +2374,7 @@ static inline void MPV_motion(MpegEncContext *s,
mb_x = s->mb_x;
mb_y = s->mb_y;
if(s->obmc){
if(s->obmc && s->pict_type != B_TYPE){
int16_t mv_cache[4][4][2];
const int xy= s->mb_x + s->mb_y*s->mb_stride;
const int mot_stride= s->mb_width*2 + 2;
@@ -135,7 +135,8 @@ typedef struct Picture{
*/
uint8_t *interpolated[3];
int16_t (*motion_val[2])[2];
int16_t (*motion_val_base[2])[2];
int16_t (*motion_val[2])[2]; ///< motion_val_base+1, so motion_val[][-1] doesnt segfault
int8_t *ref_index[2];
uint32_t *mb_type_base;
uint32_t *mb_type; ///< mb_type_base + mb_width + 2, note: only used for decoding currently
@@ -149,7 +150,7 @@ typedef struct Picture{
#define MB_TYPE_INTERLACED 0x0080
#define MB_TYPE_DIRECT2 0x0100 //FIXME
#define MB_TYPE_ACPRED 0x0200
#define MB_TYPE_GMC 0x0400 //FIXME mpeg4 specific
#define MB_TYPE_GMC 0x0400
#define MB_TYPE_SKIP 0x0800
#define MB_TYPE_P0L0 0x1000
#define MB_TYPE_P1L0 0x2000
@@ -159,7 +160,8 @@ typedef struct Picture{
#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1)
#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1)
#define MB_TYPE_QUANT 0x00010000
//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 pat, ...)
#define MB_TYPE_CBP 0x00020000
//Note bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)
#define IS_INTRA4x4(a) ((a)&MB_TYPE_INTRA4x4)
#define IS_INTRA16x16(a) ((a)&MB_TYPE_INTRA16x16)
@@ -183,6 +185,7 @@ typedef struct Picture{
#define IS_QUANT(a) ((a)&MB_TYPE_QUANT)
#define IS_DIR(a, part, list) ((a) & (MB_TYPE_P0L0<<((part)+2*(list))))
#define USES_LIST(a, list) ((a) & ((MB_TYPE_P0L0|MB_TYPE_P1L0)<<(2*(list)))) ///< does this mb use listX, note doesnt work if subMBs
#define HAS_CBP(a) ((a)&MB_TYPE_CBP)
int field_poc[2]; ///< h264 top/bottom POC
@@ -296,6 +299,8 @@ typedef struct MpegEncContext {
int b_frames_since_non_b; ///< used for encoding, relative to not yet reordered input
int mb_width, mb_height; ///< number of MBs horizontally & vertically
int mb_stride; ///< mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11
int b8_stride; ///< 2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
int b4_stride; ///< 4*mb_width+1 used for some 4x4 block arrays to allow simple addressing
int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replication)
int mb_num; ///< number of MBs of a picture
int linesize; ///< line size, in bytes, may be different from width
@@ -341,6 +341,13 @@ static int rv20_decode_picture_header(MpegEncContext *s)
{
int seq, mb_pos, i;
if(s->avctx->sub_id == 0x30202002 || s->avctx->sub_id == 0x30203002){
if (get_bits(&s->gb, 3)){
av_log(s->avctx, AV_LOG_ERROR, "unknown triplet set\n");
return -1;
}
}
i= get_bits(&s->gb, 2);
switch(i){
case 0: s->pict_type= I_TYPE; break;
@@ -362,11 +369,40 @@ static int rv20_decode_picture_header(MpegEncContext *s)
av_log(s->avctx, AV_LOG_ERROR, "error, qscale:0\n");
return -1;
}
if(s->avctx->sub_id == 0x30203002){
if (get_bits(&s->gb, 1)){
av_log(s->avctx, AV_LOG_ERROR, "unknown bit2 set\n");
return -1;
}
}
if(s->avctx->sub_id == 0x20200002)
seq= get_bits(&s->gb, 16);
else
seq= get_bits(&s->gb, 8);
if(s->avctx->sub_id == 0x20200002 || s->avctx->sub_id == 0x30202002 || s->avctx->sub_id == 0x30203002){
if (get_bits(&s->gb, 1)){
av_log(s->avctx, AV_LOG_ERROR, "unknown bit3 set\n");
return -1;
}
seq= get_bits(&s->gb, 15);
}else
seq= get_bits(&s->gb, 8)*128;
//printf("%d\n", seq);
seq |= s->time &~0x7FFF;
if(seq - s->time > 0x4000) seq -= 0x8000;
if(seq - s->time < -0x4000) seq += 0x8000;
if(seq != s->time){
if(s->pict_type!=B_TYPE){
s->time= seq;
s->pp_time= s->time - s->last_non_b_time;
s->last_non_b_time= s->time;
}else{
s->time= seq;
s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
if(s->pp_time <=s->pb_time || s->pp_time <= s->pp_time - s->pb_time || s->pp_time<=0){
printf("messed up order, seeking?, skiping current b frame\n");
return FRAME_SKIPED;
}
}
}
// printf("%d %d %d %d %d\n", seq, (int)s->time, (int)s->last_non_b_time, s->pp_time, s->pb_time);
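
The extended RV20 header carries only a 15-bit temporal reference, so the code above re-extends it around the running s->time and then derives pp_time (distance between the two surrounding reference frames) and pb_time (distance from the previous reference to this B-frame); these are the same quantities ff_mpeg4_set_direct_mv() uses to scale direct-mode vectors. A self-contained sketch of the re-extension step, assuming the same 15-bit field width (the helper name is hypothetical):

    /* Re-extend a 15-bit temporal reference around a running time value;
     * the +/-0x4000 window resolves wraparound, as in the code above. */
    static int extend_seq15(int seq15, int cur_time)
    {
        int seq = (cur_time & ~0x7FFF) | (seq15 & 0x7FFF);
        if (seq - cur_time >  0x4000) seq -= 0x8000;
        if (seq - cur_time < -0x4000) seq += 0x8000;
        return seq;
    }
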
for(i=0; i<6; i++){
if(s->mb_width*s->mb_height < ff_mba_max[i]) break;
@@ -390,10 +426,7 @@ static int rv20_decode_picture_header(MpegEncContext *s)
seq, s->mb_x, s->mb_y, s->pict_type, s->qscale, s->no_rounding);
}
if (s->pict_type == B_TYPE){
av_log(s->avctx, AV_LOG_ERROR, "b frame not supported\n");
return -1;
}
assert(s->pict_type != B_TYPE || !s->low_delay);
return s->mb_width*s->mb_height - mb_pos;
}
@@ -414,14 +447,17 @@ static int rv10_decode_init(AVCodecContext *avctx)
case 0x10000000:
s->rv10_version= 0;
s->h263_long_vectors=0;
s->low_delay=1;
break;
case 0x10003000:
s->rv10_version= 3;
s->h263_long_vectors=1;
s->low_delay=1;
break;
case 0x10003001:
s->rv10_version= 3;
s->h263_long_vectors=0;
s->low_delay=1;
break;
case 0x20001000:
case 0x20100001: //ok
@@ -614,7 +650,13 @@ static int rv10_decode_frame(AVCodecContext *avctx,
if(s->mb_y>=s->mb_height){
MPV_frame_end(s);
if(s->pict_type==B_TYPE || s->low_delay){
*pict= *(AVFrame*)&s->current_picture;
ff_print_debug_info(s, s->current_picture_ptr);
} else {
*pict= *(AVFrame*)&s->last_picture;
ff_print_debug_info(s, s->last_picture_ptr);
}
*data_size = sizeof(AVFrame);
}else{
@@ -647,3 +689,4 @@ AVCodec rv20_decoder = {
rv10_decode_frame,
CODEC_CAP_DR1
};
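
With B-frames possible in the stream, rv10_decode_frame() above now hands pictures back in display order: a B-frame (or any frame in low-delay mode) is output immediately, while after a reference frame the previously decoded reference is the one whose display time has arrived. A short sketch of that reordering rule (same fields as in the diff; the decode/display timeline is just an example):

    /* Decode order :  I0  P3  B1  B2  P6  B4 ...
     * Display order:  I0  B1  B2  P3  B4  ...
     * so after decoding a reference frame, the previous reference is due. */
    if (s->pict_type == B_TYPE || s->low_delay)
        *pict = *(AVFrame*)&s->current_picture;   /* show right away      */
    else
        *pict = *(AVFrame*)&s->last_picture;      /* show delayed frame   */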