Commit 657ccb5a authored by Diego Biurrun

Eliminate FF_COMMON_FRAME macro.

FF_COMMON_FRAME holds the contents of the AVFrame structure, duplicating them
inside struct Picture. Replace it with an embedded AVFrame structure in struct Picture.
parent 142e76f1
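The rewrite is mechanical at every call site: each direct access to an AVFrame field of a Picture gains an extra ".f". A minimal sketch of the struct-level change follows (illustrative only: the trimmed AVFrame, the OldPicture name, and the mbaff tail field are stand-ins, and the real field lists in avcodec.h are far longer):

#include <stdint.h>

/* Illustrative stand-in: the real AVFrame in libavcodec/avcodec.h has many
 * more members; only the two touched most often in this diff are shown. */
typedef struct AVFrame {
    uint8_t *data[4];
    int      linesize[4];
} AVFrame;

/* Old layout (sketch): the macro pasted the shared field list into Picture,
 * so AVFrame and the head of Picture had to be kept in sync by hand. */
#define FF_COMMON_FRAME \
    uint8_t *data[4];   \
    int      linesize[4];

typedef struct OldPicture {
    FF_COMMON_FRAME
    int mbaff;               /* stand-in for the codec-private tail */
} OldPicture;

/* New layout: a single embedded AVFrame; accesses gain an extra ".f". */
typedef struct Picture {
    struct AVFrame f;
    int mbaff;
} Picture;

static int has_data(const OldPicture *op, const Picture *np)
{
    return op->data[0]   != 0    /* old call-site spelling */
        && np->f.data[0] != 0;   /* new call-site spelling */
}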
......@@ -333,9 +333,9 @@ static inline void mc_dir_part(AVSContext *h,Picture *pic,int square,
const int mx= mv->x + src_x_offset*8;
const int my= mv->y + src_y_offset*8;
const int luma_xy= (mx&3) + ((my&3)<<2);
uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->l_stride;
uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->c_stride;
uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->c_stride;
uint8_t * src_y = pic->f.data[0] + (mx >> 2) + (my >> 2) * h->l_stride;
uint8_t * src_cb = pic->f.data[1] + (mx >> 3) + (my >> 3) * h->c_stride;
uint8_t * src_cr = pic->f.data[2] + (mx >> 3) + (my >> 3) * h->c_stride;
int extra_width= 0; //(s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
int extra_height= extra_width;
int emu=0;
......@@ -344,7 +344,7 @@ static inline void mc_dir_part(AVSContext *h,Picture *pic,int square,
const int pic_width = 16*h->mb_width;
const int pic_height = 16*h->mb_height;
if(!pic->data[0])
if(!pic->f.data[0])
return;
if(mx&7) extra_width -= 3;
if(my&7) extra_height -= 3;
......@@ -602,9 +602,9 @@ int ff_cavs_next_mb(AVSContext *h) {
h->mbx = 0;
h->mby++;
/* re-calculate sample pointers */
h->cy = h->picture.data[0] + h->mby*16*h->l_stride;
h->cu = h->picture.data[1] + h->mby*8*h->c_stride;
h->cv = h->picture.data[2] + h->mby*8*h->c_stride;
h->cy = h->picture.f.data[0] + h->mby * 16 * h->l_stride;
h->cu = h->picture.f.data[1] + h->mby * 8 * h->c_stride;
h->cv = h->picture.f.data[2] + h->mby * 8 * h->c_stride;
if(h->mby == h->mb_height) { //frame end
return 0;
}
......@@ -629,11 +629,11 @@ void ff_cavs_init_pic(AVSContext *h) {
h->mv[MV_FWD_X0] = ff_cavs_dir_mv;
set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
h->cy = h->picture.data[0];
h->cu = h->picture.data[1];
h->cv = h->picture.data[2];
h->l_stride = h->picture.linesize[0];
h->c_stride = h->picture.linesize[1];
h->cy = h->picture.f.data[0];
h->cu = h->picture.f.data[1];
h->cv = h->picture.f.data[2];
h->l_stride = h->picture.f.linesize[0];
h->c_stride = h->picture.f.linesize[1];
h->luma_scan[2] = 8*h->l_stride;
h->luma_scan[3] = 8*h->l_stride+8;
h->mbx = h->mby = h->mbidx = 0;
......
......@@ -476,8 +476,8 @@ static int decode_pic(AVSContext *h) {
return -1;
}
/* make sure we have the reference frames we need */
if(!h->DPB[0].data[0] ||
(!h->DPB[1].data[0] && h->pic_type == AV_PICTURE_TYPE_B))
if(!h->DPB[0].f.data[0] ||
(!h->DPB[1].f.data[0] && h->pic_type == AV_PICTURE_TYPE_B))
return -1;
} else {
h->pic_type = AV_PICTURE_TYPE_I;
......@@ -494,7 +494,7 @@ static int decode_pic(AVSContext *h) {
skip_bits(&s->gb,1); //marker_bit
}
/* release last B frame */
if(h->picture.data[0])
if(h->picture.f.data[0])
s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture);
s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture);
......@@ -585,7 +585,7 @@ static int decode_pic(AVSContext *h) {
} while(ff_cavs_next_mb(h));
}
if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].data[0])
if(h->DPB[1].f.data[0])
s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
h->DPB[1] = h->DPB[0];
h->DPB[0] = h->picture;
......@@ -648,7 +648,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
s->avctx = avctx;
if (buf_size == 0) {
if(!s->low_delay && h->DPB[0].data[0]) {
if (!s->low_delay && h->DPB[0].f.data[0]) {
*data_size = sizeof(AVPicture);
*picture = *(AVFrame *) &h->DPB[0];
}
......@@ -669,9 +669,9 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
break;
case PIC_I_START_CODE:
if(!h->got_keyframe) {
if(h->DPB[0].data[0])
if(h->DPB[0].f.data[0])
avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]);
if(h->DPB[1].data[0])
if(h->DPB[1].f.data[0])
avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]);
h->got_keyframe = 1;
}
......@@ -685,7 +685,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
break;
*data_size = sizeof(AVPicture);
if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].data[0]) {
if(h->DPB[1].f.data[0]) {
*picture = *(AVFrame *) &h->DPB[1];
} else {
*data_size = 0;
......
......@@ -215,7 +215,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 )
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = 1;
......@@ -323,14 +323,14 @@ static int h261_decode_mb(H261Context *h){
}
if(s->mb_intra){
s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
goto intra;
}
//set motion vectors
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation
s->mv[0][0][1] = h->current_mv_y * 2;
......@@ -464,7 +464,7 @@ static int h261_decode_picture_header(H261Context *h){
s->picture_number = (s->picture_number&~31) + i;
s->avctx->time_base= (AVRational){1001, 30000};
s->current_picture.pts= s->picture_number;
s->current_picture.f.pts = s->picture_number;
/* PTYPE starts here */
......@@ -570,7 +570,7 @@ retry:
}
//we need to set current_picture_ptr before reading the header, otherwise we cannot store anything in there
if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
int i= ff_find_unused_picture(s, 0);
s->current_picture_ptr= &s->picture[i];
}
......@@ -596,8 +596,8 @@ retry:
}
// for skipping the frame
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
s->current_picture.f.pict_type = s->pict_type;
s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
......
......@@ -52,7 +52,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
const int wrap = s->b8_stride;
const int xy = s->block_index[0];
s->current_picture.mbskip_table[mb_xy]= s->mb_skipped;
s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped;
if(s->mv_type != MV_TYPE_8X8){
int motion_x, motion_y;
......@@ -71,30 +71,30 @@ void ff_h263_update_motion_val(MpegEncContext * s){
s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
}
s->current_picture.ref_index[0][4*mb_xy ]=
s->current_picture.ref_index[0][4*mb_xy + 1]= s->field_select[0][0];
s->current_picture.ref_index[0][4*mb_xy + 2]=
s->current_picture.ref_index[0][4*mb_xy + 3]= s->field_select[0][1];
s->current_picture.f.ref_index[0][4*mb_xy ] =
s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
s->current_picture.f.ref_index[0][4*mb_xy + 2] =
s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
}
/* no update if 8X8 because it has been done during parsing */
s->current_picture.motion_val[0][xy][0] = motion_x;
s->current_picture.motion_val[0][xy][1] = motion_y;
s->current_picture.motion_val[0][xy + 1][0] = motion_x;
s->current_picture.motion_val[0][xy + 1][1] = motion_y;
s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
s->current_picture.f.motion_val[0][xy][0] = motion_x;
s->current_picture.f.motion_val[0][xy][1] = motion_y;
s->current_picture.f.motion_val[0][xy + 1][0] = motion_x;
s->current_picture.f.motion_val[0][xy + 1][1] = motion_y;
s->current_picture.f.motion_val[0][xy + wrap][0] = motion_x;
s->current_picture.f.motion_val[0][xy + wrap][1] = motion_y;
s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x;
s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y;
}
if(s->encoding){ //FIXME encoding MUST be cleaned up
if (s->mv_type == MV_TYPE_8X8)
s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
else if(s->mb_intra)
s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA;
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA;
else
s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
}
}
......@@ -154,7 +154,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
Diag Top
Left Center
*/
if(!IS_SKIP(s->current_picture.mb_type[xy])){
if (!IS_SKIP(s->current_picture.f.mb_type[xy])) {
qp_c= s->qscale;
s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c);
s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
......@@ -164,10 +164,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
if(s->mb_y){
int qp_dt, qp_tt, qp_tc;
if(IS_SKIP(s->current_picture.mb_type[xy-s->mb_stride]))
if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride]))
qp_tt=0;
else
qp_tt= s->current_picture.qscale_table[xy-s->mb_stride];
qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride];
if(qp_c)
qp_tc= qp_c;
......@@ -187,10 +187,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
if(s->mb_x){
if(qp_tt || IS_SKIP(s->current_picture.mb_type[xy-1-s->mb_stride]))
if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride]))
qp_dt= qp_tt;
else
qp_dt= s->current_picture.qscale_table[xy-1-s->mb_stride];
qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride];
if(qp_dt){
const int chroma_qp= s->chroma_qscale_table[qp_dt];
......@@ -209,10 +209,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
if(s->mb_x){
int qp_lc;
if(qp_c || IS_SKIP(s->current_picture.mb_type[xy-1]))
if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1]))
qp_lc= qp_c;
else
qp_lc= s->current_picture.qscale_table[xy-1];
qp_lc = s->current_picture.f.qscale_table[xy - 1];
if(qp_lc){
s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
......@@ -321,7 +321,7 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
static const int off[4]= {2, 1, 1, -1};
wrap = s->b8_stride;
mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block];
A = mot_val[ - 1];
/* special case for first (slice) line */
......
......@@ -394,7 +394,7 @@ retry:
/* We need to set current_picture_ptr before reading the header,
* otherwise we cannot store anything in there */
if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
int i= ff_find_unused_picture(s, 0);
s->current_picture_ptr= &s->picture[i];
}
......@@ -581,8 +581,8 @@ retry:
s->gob_index = ff_h263_get_gob_height(s);
// for skipping the frame
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
s->current_picture.f.pict_type = s->pict_type;
s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
/* skip B-frames if we don't have reference frames */
if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)) return get_consumed_bytes(s, buf_size);
......
......@@ -795,8 +795,8 @@ static void fill_decode_neighbors(H264Context *h, int mb_type){
left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
h->left_block = left_block_options[0];
if(FRAME_MBAFF){
const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
if(s->mb_y&1){
if (left_mb_field_flag != curr_mb_field_flag) {
left_xy[LBOT] = left_xy[LTOP] = mb_xy - s->mb_stride - 1;
......@@ -812,9 +812,9 @@ static void fill_decode_neighbors(H264Context *h, int mb_type){
}
}else{
if(curr_mb_field_flag){
topleft_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy - 1]>>7)&1)-1);
topright_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy + 1]>>7)&1)-1);
top_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy ]>>7)&1)-1);
topleft_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy - 1] >> 7) & 1) - 1);
topright_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy + 1] >> 7) & 1) - 1);
top_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy ] >> 7) & 1) - 1);
}
if (left_mb_field_flag != curr_mb_field_flag) {
if (curr_mb_field_flag) {
......@@ -834,11 +834,11 @@ static void fill_decode_neighbors(H264Context *h, int mb_type){
h->left_mb_xy[LBOT] = left_xy[LBOT];
//FIXME do we need all in the context?
h->topleft_type = s->current_picture.mb_type[topleft_xy] ;
h->top_type = s->current_picture.mb_type[top_xy] ;
h->topright_type= s->current_picture.mb_type[topright_xy];
h->left_type[LTOP] = s->current_picture.mb_type[left_xy[LTOP]] ;
h->left_type[LBOT] = s->current_picture.mb_type[left_xy[LBOT]] ;
h->topleft_type = s->current_picture.f.mb_type[topleft_xy];
h->top_type = s->current_picture.f.mb_type[top_xy];
h->topright_type = s->current_picture.f.mb_type[topright_xy];
h->left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
h->left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];
if(FMO){
if(h->slice_table[topleft_xy ] != h->slice_num) h->topleft_type = 0;
......@@ -898,7 +898,7 @@ static void fill_decode_caches(H264Context *h, int mb_type){
h->left_samples_available&= 0xFF5F;
}
}else{
int left_typei = s->current_picture.mb_type[left_xy[LTOP] + s->mb_stride];
int left_typei = s->current_picture.f.mb_type[left_xy[LTOP] + s->mb_stride];
assert(left_xy[LTOP] == left_xy[LBOT]);
if(!((left_typei & type_mask) && (left_type[LTOP] & type_mask))){
......@@ -1016,9 +1016,9 @@ static void fill_decode_caches(H264Context *h, int mb_type){
int b_stride = h->b_stride;
for(list=0; list<h->list_count; list++){
int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
int8_t *ref = s->current_picture.ref_index[list];
int8_t *ref = s->current_picture.f.ref_index[list];
int16_t (*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
int16_t (*mv)[2] = s->current_picture.motion_val[list];
int16_t (*mv)[2] = s->current_picture.f.motion_val[list];
if(!USES_LIST(mb_type, list)){
continue;
}
......@@ -1240,7 +1240,7 @@ static av_always_inline void write_back_non_zero_count(H264Context *h){
static av_always_inline void write_back_motion_list(H264Context *h, MpegEncContext * const s, int b_stride,
int b_xy, int b8_xy, int mb_type, int list )
{
int16_t (*mv_dst)[2] = &s->current_picture.motion_val[list][b_xy];
int16_t (*mv_dst)[2] = &s->current_picture.f.motion_val[list][b_xy];
int16_t (*mv_src)[2] = &h->mv_cache[list][scan8[0]];
AV_COPY128(mv_dst + 0*b_stride, mv_src + 8*0);
AV_COPY128(mv_dst + 1*b_stride, mv_src + 8*1);
......@@ -1260,7 +1260,7 @@ static av_always_inline void write_back_motion_list(H264Context *h, MpegEncConte
}
{
int8_t *ref_index = &s->current_picture.ref_index[list][b8_xy];
int8_t *ref_index = &s->current_picture.f.ref_index[list][b8_xy];
int8_t *ref_cache = h->ref_cache[list];
ref_index[0+0*2]= ref_cache[scan8[0]];
ref_index[1+0*2]= ref_cache[scan8[4]];
......@@ -1278,7 +1278,8 @@ static av_always_inline void write_back_motion(H264Context *h, int mb_type){
if(USES_LIST(mb_type, 0)){
write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 0);
}else{
fill_rectangle(&s->current_picture.ref_index[0][b8_xy], 2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
fill_rectangle(&s->current_picture.f.ref_index[0][b8_xy],
2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
}
if(USES_LIST(mb_type, 1)){
write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 1);
......@@ -1334,8 +1335,8 @@ static void av_unused decode_mb_skip(H264Context *h){
}
write_back_motion(h, mb_type);
s->current_picture.mb_type[mb_xy]= mb_type;
s->current_picture.qscale_table[mb_xy]= s->qscale;
s->current_picture.f.mb_type[mb_xy] = mb_type;
s->current_picture.f.qscale_table[mb_xy] = s->qscale;
h->slice_table[ mb_xy ]= h->slice_num;
h->prev_mb_skipped= 1;
}
......
......@@ -1284,8 +1284,8 @@ static int decode_cabac_field_decoding_flag(H264Context *h) {
unsigned long ctx = 0;
ctx += h->mb_field_decoding_flag & !!s->mb_x; //for FMO:(s->current_picture.mb_type[mba_xy]>>7)&(h->slice_table[mba_xy] == h->slice_num);
ctx += (s->current_picture.mb_type[mbb_xy]>>7)&(h->slice_table[mbb_xy] == h->slice_num);
ctx += h->mb_field_decoding_flag & !!s->mb_x; //for FMO:(s->current_picture.f.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
ctx += (s->current_picture.f.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num);
return get_cabac_noinline( &h->cabac, &(h->cabac_state+70)[ctx] );
}
......@@ -1330,13 +1330,13 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
mba_xy = mb_xy - 1;
if( (mb_y&1)
&& h->slice_table[mba_xy] == h->slice_num
&& MB_FIELD == !!IS_INTERLACED( s->current_picture.mb_type[mba_xy] ) )
&& MB_FIELD == !!IS_INTERLACED( s->current_picture.f.mb_type[mba_xy] ) )
mba_xy += s->mb_stride;
if( MB_FIELD ){
mbb_xy = mb_xy - s->mb_stride;
if( !(mb_y&1)
&& h->slice_table[mbb_xy] == h->slice_num
&& IS_INTERLACED( s->current_picture.mb_type[mbb_xy] ) )
&& IS_INTERLACED( s->current_picture.f.mb_type[mbb_xy] ) )
mbb_xy -= s->mb_stride;
}else
mbb_xy = mb_x + (mb_y-1)*s->mb_stride;
......@@ -1346,9 +1346,9 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
mbb_xy = mb_xy - (s->mb_stride << FIELD_PICTURE);
}
if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mba_xy] ))
if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.f.mb_type[mba_xy] ))
ctx++;
if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] ))
if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.f.mb_type[mbb_xy] ))
ctx++;
if( h->slice_type_nos == AV_PICTURE_TYPE_B )
......@@ -1849,7 +1849,7 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
/* read skip flags */
if( skip ) {
if( FRAME_MBAFF && (s->mb_y&1)==0 ){
s->current_picture.mb_type[mb_xy] = MB_TYPE_SKIP;
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_SKIP;
h->next_mb_skipped = decode_cabac_mb_skip( h, s->mb_x, s->mb_y+1 );
if(!h->next_mb_skipped)
h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
......@@ -1965,10 +1965,10 @@ decode_intra_mb:
h->cbp_table[mb_xy] = 0xf7ef;
h->chroma_pred_mode_table[mb_xy] = 0;
// In deblocking, the quantizer is 0
s->current_picture.qscale_table[mb_xy]= 0;
s->current_picture.f.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
s->current_picture.mb_type[mb_xy]= mb_type;
s->current_picture.f.mb_type[mb_xy] = mb_type;
h->last_qscale_diff = 0;
return 0;
}
......@@ -2265,7 +2265,7 @@ decode_intra_mb:
AV_WN32A(&nnz_cache[4+8*10], top_empty);
}
}
s->current_picture.mb_type[mb_xy]= mb_type;
s->current_picture.f.mb_type[mb_xy] = mb_type;
if( cbp || IS_INTRA16x16( mb_type ) ) {
const uint8_t *scan, *scan8x8;
......@@ -2344,7 +2344,7 @@ decode_intra_mb:
h->last_qscale_diff = 0;
}
s->current_picture.qscale_table[mb_xy]= s->qscale;
s->current_picture.f.qscale_table[mb_xy] = s->qscale;
write_back_non_zero_count(h);
if(MB_MBAFF){
......
......@@ -689,11 +689,11 @@ decode_intra_mb:
}
// In deblocking, the quantizer is 0
s->current_picture.qscale_table[mb_xy]= 0;
s->current_picture.f.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
s->current_picture.mb_type[mb_xy]= mb_type;
s->current_picture.f.mb_type[mb_xy] = mb_type;
return 0;
}
......@@ -990,7 +990,7 @@ decode_intra_mb:
}
h->cbp=
h->cbp_table[mb_xy]= cbp;
s->current_picture.mb_type[mb_xy]= mb_type;
s->current_picture.f.mb_type[mb_xy] = mb_type;
if(cbp || IS_INTRA16x16(mb_type)){
int i4x4, chroma_idx;
......@@ -1063,7 +1063,7 @@ decode_intra_mb:
fill_rectangle(&h->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
fill_rectangle(&h->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
}
s->current_picture.qscale_table[mb_xy]= s->qscale;
s->current_picture.f.qscale_table[mb_xy] = s->qscale;
write_back_non_zero_count(h);
if(MB_MBAFF){
......
......@@ -87,7 +87,7 @@ static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field,
poc= (poc&~3) + rfield + 1;
for(j=start; j<end; j++){
if(4*h->ref_list[0][j].frame_num + (h->ref_list[0][j].reference&3) == poc){
if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) {
int cur_ref= mbafi ? (j-16)^field : j;
map[list][2*old_ref + (rfield^field) + 16] = cur_ref;
if(rfield == field || !interl)
......@@ -105,12 +105,12 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
Picture * const cur = s->current_picture_ptr;
int list, j, field;
int sidx= (s->picture_structure&1)^1;
int ref1sidx= (ref1->reference&1)^1;
int ref1sidx = (ref1->f.reference&1)^1;
for(list=0; list<2; list++){
cur->ref_count[sidx][list] = h->ref_count[list];
for(j=0; j<h->ref_count[list]; j++)
cur->ref_poc[sidx][list][j] = 4*h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference&3);
cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3);
}
if(s->picture_structure == PICT_FRAME){
......@@ -126,11 +126,11 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
int *col_poc = h->ref_list[1]->field_poc;
h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc));
ref1sidx=sidx= h->col_parity;
}else if(!(s->picture_structure & h->ref_list[1][0].reference) && !h->ref_list[1][0].mbaff){ // FL -> FL & differ parity
h->col_fieldoff= 2*(h->ref_list[1][0].reference) - 3;
} else if (!(s->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3;
}
if(cur->pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
if (cur->f.pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
return;
for(list=0; list<2; list++){
......@@ -143,7 +143,7 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
{
int ref_field = ref->reference - 1;
int ref_field = ref->f.reference - 1;
int ref_field_picture = ref->field_picture;
int ref_height = 16*h->s.mb_height >> ref_field_picture;
......@@ -234,8 +234,8 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
return;
}
if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL
if(!IS_INTERLACED(*mb_type)){ // AFR/FR -> AFL/FL
if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
mb_y = (s->mb_y&~1) + h->col_parity;
mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
b8_stride = 0;
......@@ -248,8 +248,8 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
mb_y = s->mb_y&~1;
mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride];
mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
b8_stride = 2+4*s->mb_stride;
b4_stride *= 6;
......@@ -264,7 +264,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
}else{ // AFR/FR -> AFR/FR
single_col:
mb_type_col[0] =
mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];
sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
......@@ -284,10 +284,10 @@ single_col:
await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
l1ref0 = &h->ref_list[1][0].ref_index [0][4*mb_xy];
l1ref1 = &h->ref_list[1][0].ref_index [1][4*mb_xy];
l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
if(!b8_stride){
if(s->mb_y&1){
l1ref0 += 2;
......@@ -420,8 +420,8 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type));
if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL
if(!IS_INTERLACED(*mb_type)){ // AFR/FR -> AFL/FL
if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
mb_y = (s->mb_y&~1) + h->col_parity;
mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
b8_stride = 0;
......@@ -434,8 +434,8 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
mb_y = s->mb_y&~1;
mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride];
mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
b8_stride = 2+4*s->mb_stride;
b4_stride *= 6;
......@@ -451,7 +451,7 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
}else{ // AFR/FR -> AFR/FR
single_col:
mb_type_col[0] =
mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
......@@ -471,10 +471,10 @@ single_col:
await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
l1ref0 = &h->ref_list[1][0].ref_index [0][4*mb_xy];
l1ref1 = &h->ref_list[1][0].ref_index [1][4*mb_xy];
l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
if(!b8_stride){
if(s->mb_y&1){
l1ref0 += 2;
......
......@@ -230,10 +230,10 @@ void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y,
left_type= h->left_type[LTOP];
top_type= h->top_type;
mb_type = s->current_picture.mb_type[mb_xy];
qp = s->current_picture.qscale_table[mb_xy];
qp0 = s->current_picture.qscale_table[mb_xy-1];
qp1 = s->current_picture.qscale_table[h->top_mb_xy];
mb_type = s->current_picture.f.mb_type[mb_xy];
qp = s->current_picture.f.qscale_table[mb_xy];
qp0 = s->current_picture.f.qscale_table[mb_xy - 1];
qp1 = s->current_picture.f.qscale_table[h->top_mb_xy];
qpc = get_chroma_qp( h, 0, qp );
qpc0 = get_chroma_qp( h, 0, qp0 );
qpc1 = get_chroma_qp( h, 0, qp1 );
......@@ -435,10 +435,10 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
for(j=0; j<2; j++, mbn_xy += s->mb_stride){
DECLARE_ALIGNED(8, int16_t, bS)[4];
int qp;
if( IS_INTRA(mb_type|s->current_picture.mb_type[mbn_xy]) ) {
if (IS_INTRA(mb_type | s->current_picture.f.mb_type[mbn_xy])) {
AV_WN64A(bS, 0x0003000300030003ULL);
} else {
if(!CABAC && IS_8x8DCT(s->current_picture.mb_type[mbn_xy])){
if (!CABAC && IS_8x8DCT(s->current_picture.f.mb_type[mbn_xy])) {
bS[0]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+0]);
bS[1]= 1+((h->cbp_table[mbn_xy] & 0x4000)||h->non_zero_count_cache[scan8[0]+1]);
bS[2]= 1+((h->cbp_table[mbn_xy] & 0x8000)||h->non_zero_count_cache[scan8[0]+2]);
......@@ -453,12 +453,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
}
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbn_xy] + 1 ) >> 1;
qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbn_xy] + 1) >> 1;
tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, h );
chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbn_xy] ) + 1 ) >> 1;
chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1;
chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbn_xy]) + 1) >> 1;
if (chroma) {
if (chroma444) {
filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], h);
......@@ -518,12 +518,12 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
if(bS[0]+bS[1]+bS[2]+bS[3]){
qp = ( s->current_picture.qscale_table[mb_xy] + s->current_picture.qscale_table[mbm_xy] + 1 ) >> 1;
qp = (s->current_picture.f.qscale_table[mb_xy] + s->current_picture.f.qscale_table[mbm_xy] + 1) >> 1;
//tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
//{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp( h, 0, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1;
chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp( h, 1, s->current_picture.qscale_table[mbm_xy] ) + 1 ) >> 1;
chroma_qp_avg[0] = (h->chroma_qp[0] + get_chroma_qp(h, 0, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1;
chroma_qp_avg[1] = (h->chroma_qp[1] + get_chroma_qp(h, 1, s->current_picture.f.qscale_table[mbm_xy]) + 1) >> 1;
if( dir == 0 ) {
filter_mb_edgev( &img_y[0], linesize, bS, qp, h );
if (chroma) {
......@@ -602,7 +602,7 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
/* Filter edge */
// Do not use s->qscale as luma quantizer because it has not the same
// value in IPCM macroblocks.
qp = s->current_picture.qscale_table[mb_xy];
qp = s->current_picture.f.qscale_table[mb_xy];
//tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d, QPc:%d, QPcn:%d\n", mb_x, mb_y, dir, edge, qp, h->chroma_qp[0], s->current_picture.qscale_table[mbn_xy]);
tprintf(s->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
//{ int i; for (i = 0; i < 4; i++) tprintf(s->avctx, " bS[%d]:%d", i, bS[i]); tprintf(s->avctx, "\n"); }
......@@ -635,7 +635,7 @@ static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, u
void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize) {
MpegEncContext * const s = &h->s;
const int mb_xy= mb_x + mb_y*s->mb_stride;
const int mb_type = s->current_picture.mb_type[mb_xy];
const int mb_type = s->current_picture.f.mb_type[mb_xy];
const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
int first_vertical_edge_done = 0;
av_unused int dir;
......@@ -688,9 +688,9 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
}
}
mb_qp = s->current_picture.qscale_table[mb_xy];
mbn0_qp = s->current_picture.qscale_table[h->left_mb_xy[0]];
mbn1_qp = s->current_picture.qscale_table[h->left_mb_xy[1]];
mb_qp = s->current_picture.f.qscale_table[mb_xy];
mbn0_qp = s->current_picture.f.qscale_table[h->left_mb_xy[0]];
mbn1_qp = s->current_picture.f.qscale_table[h->left_mb_xy[1]];
qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;
......
......@@ -48,15 +48,15 @@ static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C,
const int mb_type = mb_types[xy+(y4>>2)*s->mb_stride];\
if(!USES_LIST(mb_type,list))\
return LIST_NOT_USED;\
mv = s->current_picture_ptr->motion_val[list][h->mb2b_xy[xy]+3 + y4*h->b_stride];\
mv = s->current_picture_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4*h->b_stride];\
h->mv_cache[list][scan8[0]-2][0] = mv[0];\
h->mv_cache[list][scan8[0]-2][1] = mv[1] MV_OP;\
return s->current_picture_ptr->ref_index[list][4*xy+1 + (y4&~1)] REF_OP;
return s->current_picture_ptr->f.ref_index[list][4*xy + 1 + (y4 & ~1)] REF_OP;
if(topright_ref == PART_NOT_AVAILABLE
&& i >= scan8[0]+8 && (i&7)==4
&& h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){
const uint32_t *mb_types = s->current_picture_ptr->mb_type;
const uint32_t *mb_types = s->current_picture_ptr->f.mb_type;
const int16_t *mv;
AV_ZERO32(h->mv_cache[list][scan8[0]-2]);
*C = h->mv_cache[list][scan8[0]-2];
......@@ -236,8 +236,8 @@ static av_always_inline void pred_pskip_motion(H264Context * const h){
DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = {0};
DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
MpegEncContext * const s = &h->s;
int8_t *ref = s->current_picture.ref_index[0];
int16_t (*mv)[2] = s->current_picture.motion_val[0];
int8_t *ref = s->current_picture.f.ref_index[0];
int16_t (*mv)[2] = s->current_picture.f.motion_val[0];
int top_ref, left_ref, diagonal_ref, match_count, mx, my;
const int16_t *A, *B, *C;
int b_stride = h->b_stride;
......
......@@ -39,16 +39,16 @@ static void pic_as_field(Picture *pic, const int parity){
int i;
for (i = 0; i < 4; ++i) {
if (parity == PICT_BOTTOM_FIELD)
pic->data[i] += pic->linesize[i];
pic->reference = parity;
pic->linesize[i] *= 2;
pic->f.data[i] += pic->f.linesize[i];
pic->f.reference = parity;
pic->f.linesize[i] *= 2;
}
pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD];
}
static int split_field_copy(Picture *dest, Picture *src,
int parity, int id_add){
int match = !!(src->reference & parity);
int match = !!(src->f.reference & parity);
if (match) {
*dest = *src;
......@@ -67,9 +67,9 @@ static int build_def_list(Picture *def, Picture **in, int len, int is_long, int
int index=0;
while(i[0]<len || i[1]<len){
while(i[0]<len && !(in[ i[0] ] && (in[ i[0] ]->reference & sel)))
while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->f.reference & sel)))
i[0]++;
while(i[1]<len && !(in[ i[1] ] && (in[ i[1] ]->reference & (sel^3))))
while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3))))
i[1]++;
if(i[0] < len){
in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
......@@ -133,7 +133,7 @@ int ff_h264_fill_default_ref_list(H264Context *h){
}
if(lens[0] == lens[1] && lens[1] > 1){
for(i=0; h->default_ref_list[0][i].data[0] == h->default_ref_list[1][i].data[0] && i<lens[0]; i++);
for (i = 0; h->default_ref_list[0][i].f.data[0] == h->default_ref_list[1][i].f.data[0] && i < lens[0]; i++);
if(i == lens[0])
FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
}
......@@ -229,11 +229,11 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
for(i= h->short_ref_count-1; i>=0; i--){
ref = h->short_ref[i];
assert(ref->reference);
assert(ref->f.reference);
assert(!ref->long_ref);
if(
ref->frame_num == frame_num &&
(ref->reference & pic_structure)
(ref->f.reference & pic_structure)
)
break;
}
......@@ -250,8 +250,8 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
return -1;
}
ref = h->long_ref[long_idx];
assert(!(ref && !ref->reference));
if(ref && (ref->reference & pic_structure)){
assert(!(ref && !ref->f.reference));
if (ref && (ref->f.reference & pic_structure)) {
ref->pic_id= pic_id;
assert(ref->long_ref);
i=0;
......@@ -285,9 +285,9 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
}
for(list=0; list<h->list_count; list++){
for(index= 0; index < h->ref_count[list]; index++){
if(!h->ref_list[list][index].data[0]){
if (!h->ref_list[list][index].f.data[0]) {
av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture\n");
if(h->default_ref_list[list][0].data[0])
if (h->default_ref_list[list][0].f.data[0])
h->ref_list[list][index]= h->default_ref_list[list][0];
else
return -1;
......@@ -306,13 +306,13 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){
Picture *field = &h->ref_list[list][16+2*i];
field[0] = *frame;
for(j=0; j<3; j++)
field[0].linesize[j] <<= 1;
field[0].reference = PICT_TOP_FIELD;
field[0].f.linesize[j] <<= 1;
field[0].f.reference = PICT_TOP_FIELD;
field[0].poc= field[0].field_poc[0];
field[1] = field[0];
for(j=0; j<3; j++)
field[1].data[j] += frame->linesize[j];
field[1].reference = PICT_BOTTOM_FIELD;
field[1].f.data[j] += frame->f.linesize[j];
field[1].f.reference = PICT_BOTTOM_FIELD;
field[1].poc= field[1].field_poc[1];
h->luma_weight[16+2*i][list][0] = h->luma_weight[16+2*i+1][list][0] = h->luma_weight[i][list][0];
......@@ -338,12 +338,12 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){
*/
static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){
int i;
if (pic->reference &= refmask) {
if (pic->f.reference &= refmask) {
return 0;
} else {
for(i = 0; h->delayed_pic[i]; i++)
if(pic == h->delayed_pic[i]){
pic->reference=DELAYED_PIC_REF;
pic->f.reference = DELAYED_PIC_REF;
break;
}
return 1;
......@@ -453,7 +453,8 @@ static void print_short_term(H264Context *h) {
av_log(h->s.avctx, AV_LOG_DEBUG, "short term list:\n");
for(i=0; i<h->short_ref_count; i++){
Picture *pic= h->short_ref[i];
av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n",
i, pic->frame_num, pic->poc, pic->f.data[0]);
}
}
}
......@@ -468,7 +469,8 @@ static void print_long_term(H264Context *h) {
for(i = 0; i < 16; i++){
Picture *pic= h->long_ref[i];
if (pic) {
av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n",
i, pic->frame_num, pic->poc, pic->f.data[0]);
}
}
}
......@@ -480,7 +482,7 @@ void ff_generate_sliding_window_mmcos(H264Context *h) {
h->mmco_index= 0;
if(h->short_ref_count && h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
!(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->reference)) {
!(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->f.reference)) {
h->mmco[0].opcode= MMCO_SHORT2UNUSED;
h->mmco[0].short_pic_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num;
h->mmco_index= 1;
......@@ -561,7 +563,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
h->long_ref_count++;
}
s->current_picture_ptr->reference |= s->picture_structure;
s->current_picture_ptr->f.reference |= s->picture_structure;
current_ref_assigned=1;
break;
case MMCO_SET_MAX_LONG:
......@@ -600,7 +602,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
*/
if (h->short_ref_count && h->short_ref[0] == s->current_picture_ptr) {
/* Just mark the second field valid */
s->current_picture_ptr->reference = PICT_FRAME;
s->current_picture_ptr->f.reference = PICT_FRAME;
} else if (s->current_picture_ptr->long_ref) {
av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term reference "
"assignment for second field "
......@@ -617,7 +619,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
h->short_ref[0]= s->current_picture_ptr;
h->short_ref_count++;
s->current_picture_ptr->reference |= s->picture_structure;
s->current_picture_ptr->f.reference |= s->picture_structure;
}
}
......
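Several of the h264_refs.c hunks above treat f.reference as a bitmask of field parities rather than a boolean, which is why expressions like (src->f.reference & parity) and (ref->f.reference & pic_structure) work. A small self-contained sketch using the PICT_* values from libavcodec (top = 1, bottom = 2, frame = 3); the holds_field helper is hypothetical:

/* Parity bitmask values, as defined in libavcodec/avcodec.h. */
#define PICT_TOP_FIELD     1
#define PICT_BOTTOM_FIELD  2
#define PICT_FRAME         3   /* == PICT_TOP_FIELD | PICT_BOTTOM_FIELD */

/* Hypothetical helper: does a picture whose f.reference has this value
 * hold the field of the requested parity? */
static int holds_field(int reference, int parity)
{
    return !!(reference & parity);
}

/* A frame reference (3) matches either parity; a bottom-field reference (2)
 * does not satisfy a top-field request (1):
 *   holds_field(PICT_FRAME, PICT_TOP_FIELD)        -> 1
 *   holds_field(PICT_BOTTOM_FIELD, PICT_TOP_FIELD) -> 0
 */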
......@@ -304,7 +304,7 @@ static int x8_setup_spatial_predictor(IntraX8Context * const w, const int chroma
int quant;
s->dsp.x8_setup_spatial_compensation(s->dest[chroma], s->edge_emu_buffer,
s->current_picture.linesize[chroma>0],
s->current_picture.f.linesize[chroma>0],
&range, &sum, w->edges);
if(chroma){
w->orient=w->chroma_orient;
......@@ -613,7 +613,7 @@ static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){
dc_level+= (w->predicted_dc*divide_quant + (1<<12) )>>13;
dsp_x8_put_solidcolor( av_clip_uint8((dc_level*dc_quant+4)>>3),
s->dest[chroma], s->current_picture.linesize[!!chroma]);
s->dest[chroma], s->current_picture.f.linesize[!!chroma]);
goto block_placed;
}
......@@ -637,15 +637,15 @@ static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){
}
if(w->flat_dc){
dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.linesize[!!chroma]);
dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.f.linesize[!!chroma]);
}else{
s->dsp.x8_spatial_compensation[w->orient]( s->edge_emu_buffer,
s->dest[chroma],
s->current_picture.linesize[!!chroma] );
s->current_picture.f.linesize[!!chroma] );
}
if(!zeros_only)
s->dsp.idct_add ( s->dest[chroma],
s->current_picture.linesize[!!chroma],
s->current_picture.f.linesize[!!chroma],
s->block[0] );
block_placed:
......@@ -656,7 +656,7 @@ block_placed:
if(s->loop_filter){
uint8_t* ptr = s->dest[chroma];
int linesize = s->current_picture.linesize[!!chroma];
int linesize = s->current_picture.f.linesize[!!chroma];
if(!( (w->edges&2) || ( zeros_only && (w->orient|4)==4 ) )){
s->dsp.x8_h_loop_filter(ptr, linesize, w->quant);
......@@ -671,12 +671,12 @@ block_placed:
static void x8_init_block_index(MpegEncContext *s){ //FIXME maybe merge with ff_*
//not s->linesize as this would be wrong for field pics
//not that IntraX8 has interlacing support ;)
const int linesize = s->current_picture.linesize[0];
const int uvlinesize= s->current_picture.linesize[1];
const int linesize = s->current_picture.f.linesize[0];
const int uvlinesize = s->current_picture.f.linesize[1];
s->dest[0] = s->current_picture.data[0];
s->dest[1] = s->current_picture.data[1];
s->dest[2] = s->current_picture.data[2];
s->dest[0] = s->current_picture.f.data[0];
s->dest[1] = s->current_picture.f.data[1];
s->dest[2] = s->current_picture.f.data[2];
s->dest[0] += s->mb_y * linesize << 3;
s->dest[1] += ( s->mb_y&(~1) ) * uvlinesize << 2;//chroma blocks are on add rows
......@@ -771,7 +771,7 @@ int ff_intrax8_decode_picture(IntraX8Context * const w, int dquant, int quant_of
/*emulate MB info in the relevant tables*/
s->mbskip_table [mb_xy]=0;
s->mbintra_table[mb_xy]=1;
s->current_picture.qscale_table[mb_xy]=w->quant;
s->current_picture.f.qscale_table[mb_xy] = w->quant;
mb_xy++;
}
s->dest[0]+= 8;
......
......@@ -353,20 +353,20 @@ static void preview_obmc(MpegEncContext *s){
do{
if (get_bits1(&s->gb)) {
/* skip mb */
mot_val = s->current_picture.motion_val[0][ s->block_index[0] ];
mot_val = s->current_picture.f.motion_val[0][s->block_index[0]];
mot_val[0 ]= mot_val[2 ]=
mot_val[0+stride]= mot_val[2+stride]= 0;
mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= 0;
s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
goto end;
}
cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
}while(cbpc == 20);
if(cbpc & 4){
s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
}else{
get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if (cbpc & 8) {
......@@ -378,7 +378,7 @@ static void preview_obmc(MpegEncContext *s){
}
if ((cbpc & 16) == 0) {
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus)
......@@ -396,7 +396,7 @@ static void preview_obmc(MpegEncContext *s){
mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= my;
} else {
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
if (s->umvplus)
......@@ -618,7 +618,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->block_last_index[i] = -1;
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = !(s->obmc | s->loop_filter);
......@@ -651,7 +651,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->mv_dir = MV_DIR_FORWARD;
if ((cbpc & 16) == 0) {
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
......@@ -676,7 +676,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
} else {
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
......@@ -704,8 +704,8 @@ int ff_h263_decode_mb(MpegEncContext *s,
} else if(s->pict_type==AV_PICTURE_TYPE_B) {
int mb_type;
const int stride= s->b8_stride;
int16_t *mot_val0 = s->current_picture.motion_val[0][ 2*(s->mb_x + s->mb_y*stride) ];
int16_t *mot_val1 = s->current_picture.motion_val[1][ 2*(s->mb_x + s->mb_y*stride) ];
int16_t *mot_val0 = s->current_picture.f.motion_val[0][2 * (s->mb_x + s->mb_y * stride)];
int16_t *mot_val1 = s->current_picture.f.motion_val[1][2 * (s->mb_x + s->mb_y * stride)];
// const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride;
//FIXME ugly
......@@ -788,7 +788,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
}
}
s->current_picture.mb_type[xy]= mb_type;
s->current_picture.f.mb_type[xy] = mb_type;
} else { /* I-Frame */
do{
cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
......@@ -803,11 +803,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
dquant = cbpc & 4;
s->mb_intra = 1;
intra:
s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
if (s->h263_aic) {
s->ac_pred = get_bits1(&s->gb);
if(s->ac_pred){
s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED;
s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
s->h263_aic_dir = get_bits1(&s->gb);
}
......@@ -889,7 +889,7 @@ int h263_decode_picture_header(MpegEncContext *s)
i = get_bits(&s->gb, 8); /* picture timestamp */
if( (s->picture_number&~0xFF)+i < s->picture_number)
i+= 256;
s->current_picture_ptr->pts=
s->current_picture_ptr->f.pts =
s->picture_number= (s->picture_number&~0xFF) + i;
/* PTYPE starts here */
......
......@@ -275,7 +275,7 @@ void h263_encode_gob_header(MpegEncContext * s, int mb_line)
*/
void ff_clean_h263_qscales(MpegEncContext *s){
int i;
int8_t * const qscale_table= s->current_picture.qscale_table;
int8_t * const qscale_table = s->current_picture.f.qscale_table;
ff_init_qscale_tab(s);
......@@ -529,8 +529,8 @@ void h263_encode_mb(MpegEncContext * s,
/* motion vectors: 8x8 mode*/
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
motion_x= s->current_picture.motion_val[0][ s->block_index[i] ][0];
motion_y= s->current_picture.motion_val[0][ s->block_index[i] ][1];
motion_x = s->current_picture.f.motion_val[0][s->block_index[i]][0];
motion_y = s->current_picture.f.motion_val[0][s->block_index[i]][1];
if (!s->umvplus) {
ff_h263_encode_motion_vector(s, motion_x - pred_x,
motion_y - pred_y, 1);
......
......@@ -221,20 +221,20 @@ static int mpeg_decode_mb(MpegEncContext *s,
if (s->mb_skip_run-- != 0) {
if (s->pict_type == AV_PICTURE_TYPE_P) {
s->mb_skipped = 1;
s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
} else {
int mb_type;
if(s->mb_x)
mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1];
mb_type = s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
else
mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
mb_type = s->current_picture.f.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
if(IS_INTRA(mb_type))
return -1;
s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]=
s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
mb_type | MB_TYPE_SKIP;
// assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
// assert(s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0)
s->mb_skipped = 1;
......@@ -577,7 +577,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
}
}
s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type;
s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
return 0;
}
......@@ -1385,8 +1385,8 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->mpeg_f_code[1][0] = f_code;
s->mpeg_f_code[1][1] = f_code;
}
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
s->current_picture.f.pict_type = s->pict_type;
s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
if(avctx->debug & FF_DEBUG_PICT_INFO)
av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
......@@ -1539,8 +1539,8 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
s->pict_type= AV_PICTURE_TYPE_P;
}else
s->pict_type= AV_PICTURE_TYPE_B;
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
s->current_picture.f.pict_type = s->pict_type;
s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
}
s->intra_dc_precision = get_bits(&s->gb, 2);
s->picture_structure = get_bits(&s->gb, 2);
......@@ -1618,19 +1618,19 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
ff_er_frame_start(s);
/* first check if we must repeat the frame */
s->current_picture_ptr->repeat_pict = 0;
s->current_picture_ptr->f.repeat_pict = 0;
if (s->repeat_first_field) {
if (s->progressive_sequence) {
if (s->top_field_first)
s->current_picture_ptr->repeat_pict = 4;
s->current_picture_ptr->f.repeat_pict = 4;
else
s->current_picture_ptr->repeat_pict = 2;
s->current_picture_ptr->f.repeat_pict = 2;
} else if (s->progressive_frame) {
s->current_picture_ptr->repeat_pict = 1;
s->current_picture_ptr->f.repeat_pict = 1;
}
}
*s->current_picture_ptr->pan_scan= s1->pan_scan;
*s->current_picture_ptr->f.pan_scan = s1->pan_scan;
if (HAVE_PTHREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_finish_setup(avctx);
......@@ -1643,9 +1643,9 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
}
for(i=0; i<4; i++){
s->current_picture.data[i] = s->current_picture_ptr->data[i];
s->current_picture.f.data[i] = s->current_picture_ptr->f.data[i];
if(s->picture_structure == PICT_BOTTOM_FIELD){
s->current_picture.data[i] += s->current_picture_ptr->linesize[i];
s->current_picture.f.data[i] += s->current_picture_ptr->f.linesize[i];
}
}
}
......@@ -1767,7 +1767,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
if(mpeg_decode_mb(s, s->block) < 0)
return -1;
if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs
if (s->current_picture.f.motion_val[0] && !s->encoding) { //note motion_val is normally NULL unless we want to extract the MVs
const int wrap = s->b8_stride;
int xy = s->mb_x*2 + s->mb_y*2*wrap;
int b8_xy= 4*(s->mb_x + s->mb_y*s->mb_stride);
......@@ -1785,12 +1785,12 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
motion_y = s->mv[dir][i][1];
}
s->current_picture.motion_val[dir][xy ][0] = motion_x;
s->current_picture.motion_val[dir][xy ][1] = motion_y;
s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
s->current_picture.ref_index [dir][b8_xy ]=
s->current_picture.ref_index [dir][b8_xy + 1]= s->field_select[dir][i];
s->current_picture.f.motion_val[dir][xy ][0] = motion_x;
s->current_picture.f.motion_val[dir][xy ][1] = motion_y;
s->current_picture.f.motion_val[dir][xy + 1][0] = motion_x;
s->current_picture.f.motion_val[dir][xy + 1][1] = motion_y;
s->current_picture.f.ref_index [dir][b8_xy ] =
s->current_picture.f.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1);
}
xy += wrap;
......@@ -1954,7 +1954,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) {
/* end of image */
s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2;
s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_MPEG2;
ff_er_frame_end(s);
......
......@@ -200,7 +200,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA)
if (s->current_picture.key_frame) {
if (s->current_picture.f.key_frame) {
AVRational framerate= ff_frame_rate_tab[s->frame_rate_index];
/* mpeg1 header repeated every gop */
......@@ -287,9 +287,9 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
/* time code : we must convert from the real frame rate to a
fake mpeg frame rate in case of low frame rate */
fps = (framerate.num + framerate.den/2)/ framerate.den;
time_code = s->current_picture_ptr->coded_picture_number + s->avctx->timecode_frame_start;
time_code = s->current_picture_ptr->f.coded_picture_number + s->avctx->timecode_frame_start;
s->gop_picture_number = s->current_picture_ptr->coded_picture_number;
s->gop_picture_number = s->current_picture_ptr->f.coded_picture_number;
if (s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) {
/* only works for NTSC 29.97 */
int d = time_code / 17982;
......@@ -396,7 +396,7 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
if (s->progressive_sequence) {
put_bits(&s->pb, 1, 0); /* no repeat */
} else {
put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first);
put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
}
/* XXX: optimize the generation of this flag with entropy
measures */
......
......@@ -89,7 +89,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my,
uint16_t time_pb= s->pb_time;
int p_mx, p_my;
p_mx= s->next_picture.motion_val[0][xy][0];
p_mx = s->next_picture.f.motion_val[0][xy][0];
if((unsigned)(p_mx + tab_bias) < tab_size){
s->mv[0][i][0] = s->direct_scale_mv[0][p_mx + tab_bias] + mx;
s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
......@@ -99,7 +99,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my,
s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
: p_mx*(time_pb - time_pp)/time_pp;
}
p_my= s->next_picture.motion_val[0][xy][1];
p_my = s->next_picture.f.motion_val[0][xy][1];
if((unsigned)(p_my + tab_bias) < tab_size){
s->mv[0][i][1] = s->direct_scale_mv[0][p_my + tab_bias] + my;
s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
......@@ -120,7 +120,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my,
*/
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
const int mb_index= s->mb_x + s->mb_y*s->mb_stride;
const int colocated_mb_type= s->next_picture.mb_type[mb_index];
const int colocated_mb_type = s->next_picture.f.mb_type[mb_index];
uint16_t time_pp;
uint16_t time_pb;
int i;
......@@ -137,7 +137,7 @@ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
} else if(IS_INTERLACED(colocated_mb_type)){
s->mv_type = MV_TYPE_FIELD;
for(i=0; i<2; i++){
int field_select= s->next_picture.ref_index[0][4*mb_index + 2*i];
int field_select = s->next_picture.f.ref_index[0][4 * mb_index + 2 * i];
s->field_select[0][i]= field_select;
s->field_select[1][i]= i;
if(s->top_field_first){
......
......@@ -124,7 +124,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], const
{
int score= 0;
int i, n;
int8_t * const qscale_table= s->current_picture.qscale_table;
int8_t * const qscale_table = s->current_picture.f.qscale_table;
memcpy(zigzag_last_index, s->block_last_index, sizeof(int)*6);
......@@ -201,7 +201,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], const
*/
void ff_clean_mpeg4_qscales(MpegEncContext *s){
int i;
int8_t * const qscale_table= s->current_picture.qscale_table;
int8_t * const qscale_table = s->current_picture.f.qscale_table;
ff_clean_h263_qscales(s);
......@@ -457,7 +457,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
assert(mb_type>=0);
/* nothing to do if this MB was skipped in the next P Frame */
if(s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]){ //FIXME avoid DCT & ...
if (s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { //FIXME avoid DCT & ...
s->skip_count++;
s->mv[0][0][0]=
s->mv[0][0][1]=
......@@ -589,7 +589,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
if(y+16 > s->height) y= s->height-16;
offset= x + y*s->linesize;
p_pic= s->new_picture.data[0] + offset;
p_pic = s->new_picture.f.data[0] + offset;
s->mb_skipped=1;
for(i=0; i<s->max_b_frames; i++){
......@@ -597,10 +597,11 @@ void mpeg4_encode_mb(MpegEncContext * s,
int diff;
Picture *pic= s->reordered_input_picture[i+1];
if(pic==NULL || pic->pict_type!=AV_PICTURE_TYPE_B) break;
if (pic == NULL || pic->f.pict_type != AV_PICTURE_TYPE_B)
break;
b_pic= pic->data[0] + offset;
if(pic->type != FF_BUFFER_TYPE_SHARED)
b_pic = pic->f.data[0] + offset;
if (pic->f.type != FF_BUFFER_TYPE_SHARED)
b_pic+= INPLACE_OFFSET;
diff= s->dsp.sad[0](NULL, p_pic, b_pic, s->linesize, 16);
if(diff>s->qscale*70){ //FIXME check that 70 is optimal
......@@ -704,8 +705,8 @@ void mpeg4_encode_mb(MpegEncContext * s,
/* motion vectors: 8x8 mode*/
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
ff_h263_encode_motion_vector(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x,
s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
ff_h263_encode_motion_vector(s, s->current_picture.f.motion_val[0][ s->block_index[i] ][0] - pred_x,
s->current_picture.f.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
}
}
......@@ -814,9 +815,9 @@ static void mpeg4_encode_gop_header(MpegEncContext * s){
put_bits(&s->pb, 16, 0);
put_bits(&s->pb, 16, GOP_STARTCODE);
time= s->current_picture_ptr->pts;
time = s->current_picture_ptr->f.pts;
if(s->reordered_input_picture[1])
time= FFMIN(time, s->reordered_input_picture[1]->pts);
time = FFMIN(time, s->reordered_input_picture[1]->f.pts);
time= time*s->avctx->time_base.num;
seconds= time/s->avctx->time_base.den;
......@@ -1026,7 +1027,7 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
}
put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
if(!s->progressive_sequence){
put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first);
put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
put_bits(&s->pb, 1, s->alternate_scan);
}
//FIXME sprite stuff
......
......@@ -28,6 +28,7 @@
#ifndef AVCODEC_MPEGVIDEO_H
#define AVCODEC_MPEGVIDEO_H
#include "avcodec.h"
#include "dsputil.h"
#include "get_bits.h"
#include "put_bits.h"
......@@ -82,7 +83,7 @@ struct MpegEncContext;
* Picture.
*/
typedef struct Picture{
FF_COMMON_FRAME
struct AVFrame f;
/**
* halfpel luma planes.
......
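The hunk above is the heart of the change: the fields formerly pasted into Picture by FF_COMMON_FRAME now live in an embedded AVFrame named f, which is why every access elsewhere in the commit gains an .f step. A minimal standalone sketch of the layout guarantee this relies on, using illustrative stand-in types rather than the real headers: because f is the first member, a Picture pointer and a pointer to its embedded frame share an address, so casting one to the other stays well defined.

#include <stdint.h>

/* Stand-ins for AVFrame and Picture; only the layout matters here. */
typedef struct FrameLike {
    uint8_t *data[4];      /* plane pointers */
    int      linesize[4];  /* bytes per row of each plane */
} FrameLike;

typedef struct PictureLike {
    FrameLike f;           /* embedded frame, deliberately first */
    uint16_t *mb_var;      /* codec-private fields stay direct members */
} PictureLike;

int main(void)
{
    PictureLike pic = { 0 };
    /* With f first, (FrameLike *)&pic aliases &pic.f, which is what
     * lets callers keep treating a Picture as a frame. */
    FrameLike *frame = (FrameLike *)&pic;
    return frame == &pic.f ? 0 : 1;
}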
......@@ -255,8 +255,8 @@ if(s->quarter_sample)
#endif
v_edge_pos = s->v_edge_pos >> field_based;
linesize = s->current_picture.linesize[0] << field_based;
uvlinesize = s->current_picture.linesize[1] << field_based;
linesize = s->current_picture.f.linesize[0] << field_based;
uvlinesize = s->current_picture.f.linesize[1] << field_based;
dxy = ((motion_y & 1) << 1) | (motion_x & 1);
src_x = s->mb_x* 16 + (motion_x >> 1);
......@@ -657,30 +657,30 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
assert(!s->mb_skipped);
memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4);
memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
memcpy(mv_cache[1][1], s->current_picture.f.motion_val[0][mot_xy ], sizeof(int16_t) * 4);
memcpy(mv_cache[2][1], s->current_picture.f.motion_val[0][mot_xy + mot_stride], sizeof(int16_t) * 4);
memcpy(mv_cache[3][1], s->current_picture.f.motion_val[0][mot_xy + mot_stride], sizeof(int16_t) * 4);
if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
if (mb_y == 0 || IS_INTRA(s->current_picture.f.mb_type[xy - s->mb_stride])) {
memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
}else{
memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
memcpy(mv_cache[0][1], s->current_picture.f.motion_val[0][mot_xy - mot_stride], sizeof(int16_t) * 4);
}
if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
if (mb_x == 0 || IS_INTRA(s->current_picture.f.mb_type[xy - 1])) {
AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
}else{
AV_COPY32(mv_cache[1][0], s->current_picture.motion_val[0][mot_xy-1]);
AV_COPY32(mv_cache[2][0], s->current_picture.motion_val[0][mot_xy-1+mot_stride]);
AV_COPY32(mv_cache[1][0], s->current_picture.f.motion_val[0][mot_xy - 1]);
AV_COPY32(mv_cache[2][0], s->current_picture.f.motion_val[0][mot_xy - 1 + mot_stride]);
}
if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
if (mb_x + 1 >= s->mb_width || IS_INTRA(s->current_picture.f.mb_type[xy + 1])) {
AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
}else{
AV_COPY32(mv_cache[1][3], s->current_picture.motion_val[0][mot_xy+2]);
AV_COPY32(mv_cache[2][3], s->current_picture.motion_val[0][mot_xy+2+mot_stride]);
AV_COPY32(mv_cache[1][3], s->current_picture.f.motion_val[0][mot_xy + 2]);
AV_COPY32(mv_cache[2][3], s->current_picture.f.motion_val[0][mot_xy + 2 + mot_stride]);
}
mx = 0;
......@@ -817,7 +817,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
}
} else {
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
ref_picture= s->current_picture_ptr->data;
ref_picture = s->current_picture_ptr->f.data;
}
mpeg_motion(s, dest_y, dest_cb, dest_cr,
......@@ -834,7 +834,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
|| s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
ref2picture= ref_picture;
}else{
ref2picture= s->current_picture_ptr->data;
ref2picture = s->current_picture_ptr->f.data;
}
mpeg_motion(s, dest_y, dest_cb, dest_cr,
......@@ -871,7 +871,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
//opposite parity is always in the same frame if this is second field
if(!s->first_field){
ref_picture = s->current_picture_ptr->data;
ref_picture = s->current_picture_ptr->f.data;
}
}
}
......
......@@ -780,10 +780,10 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
}else{
if(n<4){
wrap= s->linesize;
dest= s->current_picture.data[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8;
dest= s->current_picture.f.data[0] + (((n >> 1) + 2*s->mb_y) * 8* wrap ) + ((n & 1) + 2*s->mb_x) * 8;
}else{
wrap= s->uvlinesize;
dest= s->current_picture.data[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
dest= s->current_picture.f.data[n - 3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
}
if(s->mb_x==0) a= (1024 + (scale>>1))/scale;
else a= get_dc(dest-8, wrap, scale*8);
......@@ -1172,7 +1172,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
int cbp, code, i;
uint8_t *coded_val;
uint32_t * const mb_type_ptr= &s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ];
uint32_t * const mb_type_ptr = &s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride];
if (s->pict_type == AV_PICTURE_TYPE_P) {
if (s->use_skip_mb_code) {
......
......@@ -44,9 +44,9 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f
void ff_write_pass1_stats(MpegEncContext *s){
snprintf(s->avctx->stats_out, 256, "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d;\n",
s->current_picture_ptr->display_picture_number, s->current_picture_ptr->coded_picture_number, s->pict_type,
s->current_picture.quality, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits,
s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count, s->skip_count, s->header_bits);
s->current_picture_ptr->f.display_picture_number, s->current_picture_ptr->f.coded_picture_number, s->pict_type,
s->current_picture.f.quality, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits,
s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count, s->skip_count, s->header_bits);
}
static inline double qp2bits(RateControlEntry *rce, double qp){
......@@ -707,10 +707,10 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
//if(dts_pic)
// av_log(NULL, AV_LOG_ERROR, "%Ld %Ld %Ld %d\n", s->current_picture_ptr->pts, s->user_specified_pts, dts_pic->pts, picture_number);
if(!dts_pic || dts_pic->pts == AV_NOPTS_VALUE)
if (!dts_pic || dts_pic->f.pts == AV_NOPTS_VALUE)
wanted_bits= (uint64_t)(s->bit_rate*(double)picture_number/fps);
else
wanted_bits= (uint64_t)(s->bit_rate*(double)dts_pic->pts/fps);
wanted_bits = (uint64_t)(s->bit_rate*(double)dts_pic->f.pts / fps);
}
diff= s->total_bits - wanted_bits;
......
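The branch above sets the bit budget the encoder should have consumed by now: proportional to elapsed time, taken from the picture's pts when a valid decode-order timestamp exists and from the plain picture count otherwise. Restated compactly under an assumed standalone signature:

#include <stdint.h>

static uint64_t wanted_bits(double bit_rate, double fps, int64_t pts,
                            int have_pts, int picture_number)
{
    double t = have_pts ? (double)pts : (double)picture_number;
    return (uint64_t)(bit_rate * t / fps);
}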
......@@ -142,7 +142,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
int mbtype = s->current_picture_ptr->mb_type[mb_pos];
int mbtype = s->current_picture_ptr->f.mb_type[mb_pos];
if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
r->deblock_coefs[mb_pos] = 0xFFFF;
if(IS_INTRA(mbtype))
......@@ -154,11 +154,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
*/
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]];
if(mb_x)
left_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - 1]];
left_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - 1]];
for(j = 0; j < 16; j += 4){
Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
for(i = !mb_x; i < 4; i++, Y += 4){
int ij = i + j;
loc_lim = 0;
......@@ -178,7 +178,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
if(mb_x)
left_cbp = (r->cbp_chroma[mb_pos - 1] >> (k*4)) & 0xF;
for(j = 0; j < 8; j += 4){
C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
for(i = !mb_x; i < 2; i++, C += 4){
int ij = i + (j >> 1);
loc_lim = 0;
......@@ -196,11 +196,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
}
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]];
if(row)
top_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - s->mb_stride]];
top_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - s->mb_stride]];
for(j = 4*!row; j < 16; j += 4){
Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
for(i = 0; i < 4; i++, Y += 4){
int ij = i + j;
loc_lim = 0;
......@@ -220,7 +220,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
if(row)
top_cbp = (r->cbp_chroma[mb_pos - s->mb_stride] >> (k*4)) & 0xF;
for(j = 4*!row; j < 8; j += 4){
C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
C = s->current_picture_ptr->f.data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
for(i = 0; i < 2; i++, C += 4){
int ij = i + (j >> 1);
loc_lim = 0;
......
......@@ -475,7 +475,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
int mbtype = s->current_picture_ptr->mb_type[mb_pos];
int mbtype = s->current_picture_ptr->f.mb_type[mb_pos];
if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
r->cbp_luma [mb_pos] = r->deblock_coefs[mb_pos] = 0xFFFF;
if(IS_INTRA(mbtype))
......@@ -489,7 +489,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
int avail[4];
int y_to_deblock, c_to_deblock[2];
q = s->current_picture_ptr->qscale_table[mb_pos];
q = s->current_picture_ptr->f.qscale_table[mb_pos];
alpha = rv40_alpha_tab[q];
beta = rv40_beta_tab [q];
betaY = betaC = beta * 3;
......@@ -504,7 +504,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
if(avail[i]){
int pos = mb_pos + neighbour_offs_x[i] + neighbour_offs_y[i]*s->mb_stride;
mvmasks[i] = r->deblock_coefs[pos];
mbtype [i] = s->current_picture_ptr->mb_type[pos];
mbtype [i] = s->current_picture_ptr->f.mb_type[pos];
cbp [i] = r->cbp_luma[pos];
uvcbp[i][0] = r->cbp_chroma[pos] & 0xF;
uvcbp[i][1] = r->cbp_chroma[pos] >> 4;
......@@ -563,7 +563,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
}
for(j = 0; j < 16; j += 4){
Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
for(i = 0; i < 4; i++, Y += 4){
int ij = i + j;
int clip_cur = y_to_deblock & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
......@@ -607,7 +607,7 @@ static void rv40_loop_filter(RV34DecContext *r, int row)
}
for(k = 0; k < 2; k++){
for(j = 0; j < 2; j++){
C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j*4) * s->uvlinesize;
for(i = 0; i < 2; i++, C += 4){
int ij = i + j*2;
int clip_cur = c_to_deblock[k] & (MASK_CUR << ij) ? clip[POS_CUR] : 0;
......
......@@ -3441,8 +3441,8 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
frame_start(s);
s->m.current_picture_ptr= &s->m.current_picture;
s->m.last_picture.pts= s->m.current_picture.pts;
s->m.current_picture.pts= pict->pts;
s->m.last_picture.f.pts = s->m.current_picture.f.pts;
s->m.current_picture.f.pts = pict->pts;
if(pict->pict_type == AV_PICTURE_TYPE_P){
int block_width = (width +15)>>4;
int block_height= (height+15)>>4;
......@@ -3452,14 +3452,14 @@ static int encode_frame(AVCodecContext *avctx, unsigned char *buf, int buf_size,
assert(s->last_picture[0].data[0]);
s->m.avctx= s->avctx;
s->m.current_picture.data[0]= s->current_picture.data[0];
s->m. last_picture.data[0]= s->last_picture[0].data[0];
s->m. new_picture.data[0]= s-> input_picture.data[0];
s->m.current_picture.f.data[0] = s->current_picture.data[0];
s->m. last_picture.f.data[0] = s->last_picture[0].data[0];
s->m. new_picture.f.data[0] = s-> input_picture.data[0];
s->m. last_picture_ptr= &s->m. last_picture;
s->m.linesize=
s->m. last_picture.linesize[0]=
s->m. new_picture.linesize[0]=
s->m.current_picture.linesize[0]= stride;
s->m. last_picture.f.linesize[0] =
s->m. new_picture.f.linesize[0] =
s->m.current_picture.f.linesize[0] = stride;
s->m.uvlinesize= s->current_picture.linesize[1];
s->m.width = width;
s->m.height= height;
......@@ -3646,9 +3646,9 @@ redo_frame:
s->current_picture.quality = pict->quality;
s->m.frame_bits = 8*(s->c.bytestream - s->c.bytestream_start);
s->m.p_tex_bits = s->m.frame_bits - s->m.misc_bits - s->m.mv_bits;
s->m.current_picture.display_picture_number =
s->m.current_picture.coded_picture_number = avctx->frame_number;
s->m.current_picture.quality = pict->quality;
s->m.current_picture.f.display_picture_number =
s->m.current_picture.f.coded_picture_number = avctx->frame_number;
s->m.current_picture.f.quality = pict->quality;
s->m.total_bits += 8*(s->c.bytestream - s->c.bytestream_start);
if(s->pass1_rc)
if (ff_rate_estimate_qscale(&s->m, 0) < 0)
......
......@@ -689,12 +689,12 @@ static int svq1_decode_frame(AVCodecContext *avctx,
linesize= s->uvlinesize;
}
current = s->current_picture.data[i];
current = s->current_picture.f.data[i];
if(s->pict_type==AV_PICTURE_TYPE_B){
previous = s->next_picture.data[i];
previous = s->next_picture.f.data[i];
}else{
previous = s->last_picture.data[i];
previous = s->last_picture.f.data[i];
}
if (s->pict_type == AV_PICTURE_TYPE_I) {
......
......@@ -284,11 +284,11 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
s->m.avctx= s->avctx;
s->m.current_picture_ptr= &s->m.current_picture;
s->m.last_picture_ptr = &s->m.last_picture;
s->m.last_picture.data[0]= ref_plane;
s->m.last_picture.f.data[0] = ref_plane;
s->m.linesize=
s->m.last_picture.linesize[0]=
s->m.new_picture.linesize[0]=
s->m.current_picture.linesize[0]= stride;
s->m.last_picture.f.linesize[0] =
s->m.new_picture.f.linesize[0] =
s->m.current_picture.f.linesize[0] = stride;
s->m.width= width;
s->m.height= height;
s->m.mb_width= block_width;
......@@ -318,9 +318,9 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
s->m.current_picture.mb_mean= (uint8_t *)s->dummy;
s->m.current_picture.mb_var= (uint16_t*)s->dummy;
s->m.current_picture.mc_mb_var= (uint16_t*)s->dummy;
s->m.current_picture.mb_type= s->dummy;
s->m.current_picture.f.mb_type = s->dummy;
s->m.current_picture.motion_val[0]= s->motion_val8[plane] + 2;
s->m.current_picture.f.motion_val[0] = s->motion_val8[plane] + 2;
s->m.p_mv_table= s->motion_val16[plane] + s->m.mb_stride + 1;
s->m.dsp= s->dsp; //move
ff_init_me(&s->m);
......@@ -328,7 +328,7 @@ static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane
s->m.me.dia_size= s->avctx->dia_size;
s->m.first_slice_line=1;
for (y = 0; y < block_height; y++) {
s->m.new_picture.data[0]= src - y*16*stride; //ugly
s->m.new_picture.f.data[0] = src - y*16*stride; //ugly
s->m.mb_y= y;
for(i=0; i<16 && i + 16*y<height; i++){
......
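The Snow and SVQ1 encoder hunks above share one pattern: a private MpegEncContext is borrowed for motion estimation by aiming its picture plane pointers and strides at externally owned buffers, and after this commit those pointers live in each Picture's embedded frame. A hypothetical helper condensing the repeated assignments; the function name is assumed, the fields mirror the diff:

#include "mpegvideo.h"

/* Point the borrowed context's pictures at caller-owned planes
 * before running the shared motion-estimation code. */
static void borrow_planes(MpegEncContext *m, uint8_t *cur,
                          uint8_t *last, uint8_t *next, int stride)
{
    m->current_picture.f.data[0]     = cur;
    m->last_picture.f.data[0]        = last;
    m->new_picture.f.data[0]         = next;
    m->linesize                      =
    m->last_picture.f.linesize[0]    =
    m->new_picture.f.linesize[0]     =
    m->current_picture.f.linesize[0] = stride;
}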
......@@ -32,7 +32,7 @@
static void parse_mb_skip(Wmv2Context * w){
int mb_x, mb_y;
MpegEncContext * const s= &w->s;
uint32_t * const mb_type= s->current_picture_ptr->mb_type;
uint32_t * const mb_type = s->current_picture_ptr->f.mb_type;
w->skip_type= get_bits(&s->gb, 2);
switch(w->skip_type){
......@@ -257,11 +257,11 @@ static int16_t *wmv2_pred_motion(Wmv2Context *w, int *px, int *py){
wrap = s->b8_stride;
xy = s->block_index[0];
mot_val = s->current_picture.motion_val[0][xy];
mot_val = s->current_picture.f.motion_val[0][xy];
A = s->current_picture.motion_val[0][xy - 1];
B = s->current_picture.motion_val[0][xy - wrap];
C = s->current_picture.motion_val[0][xy + 2 - wrap];
A = s->current_picture.f.motion_val[0][xy - 1];
B = s->current_picture.f.motion_val[0][xy - wrap];
C = s->current_picture.f.motion_val[0][xy + 2 - wrap];
if(s->mb_x && !s->first_slice_line && !s->mspel && w->top_left_mv_flag)
diff= FFMAX(FFABS(A[0] - B[0]), FFABS(A[1] - B[1]));
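wmv2_pred_motion above gathers the left (A), top (B) and top-right (C) neighbour vectors; H.263-family codecs then predict each component as the median of the three. A self-contained sketch of that median, assuming the real code uses the shared mid_pred helper for the same job:

/* Median of three ints, applied per MV component. */
static inline int median3(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b */
    if (b > c)
        b = c;                               /* b = min(b, c) */
    return a > b ? a : b;                    /* max(a, min(b, c)) */
}
/* px = median3(A[0], B[0], C[0]); py = median3(A[1], B[1], C[1]); */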
......@@ -343,7 +343,7 @@ int ff_wmv2_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
if(w->j_type) return 0;
if (s->pict_type == AV_PICTURE_TYPE_P) {
if(IS_SKIP(s->current_picture.mb_type[s->mb_y * s->mb_stride + s->mb_x])){
if (IS_SKIP(s->current_picture.f.mb_type[s->mb_y * s->mb_stride + s->mb_x])) {
/* skip mb */
s->mb_intra = 0;
for(i=0;i<6;i++)
......