Commit e10979ff authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master:
  changelog: misc typo and wording fixes
  H.264: add filter_mb_fast support for >8-bit decoding
  doc: Remove outdated comments about gcc 2.95 and gcc 3.3 support.
  lls: use av_lfg instead of rand() in test program
  build: remove unnecessary dependency on libs from 'all' target
  H.264: avoid redundant alpha/beta calculations in loopfilter
  H.264: optimize intra/inter loopfilter decision
  mpegts: fix Continuity Counter error detection
  build: remove unnecessary FFLDFLAGS variable
  vp8/mt: flush worker thread, not application thread context, on seek.
  mt: proper locking around release_buffer calls.
  DxVA2: unbreak build after [657ccb5a]
  hwaccel: unbreak build
  Eliminate FF_COMMON_FRAME macro.

Conflicts:
	Changelog
	Makefile
	doc/developer.texi
	libavcodec/avcodec.h
	libavcodec/h264.c
	libavcodec/mpeg4videodec.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents a8c2ff2c b047941d
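
Most of the hunks below are mechanical fallout from the last commit in the list above, "Eliminate FF_COMMON_FRAME macro" ([657ccb5a]): Picture previously had its AVFrame fields pasted in by the FF_COMMON_FRAME macro, and now embeds an AVFrame member named 'f' instead, so accesses such as pic->data[0], pic->linesize[i] or pic->reference become pic->f.data[0], pic->f.linesize[i] or pic->f.reference. A minimal C sketch of the pattern, with the field lists heavily trimmed relative to the real libavcodec headers:

#include <stdint.h>

/* Trimmed stand-ins for the real libavcodec structs; only fields
 * touched by the hunks below are shown. */
typedef struct AVFrame {
    uint8_t *data[4];      /* plane pointers */
    int      linesize[4];  /* plane strides */
    int      reference;    /* reference-picture flags */
} AVFrame;

typedef struct Picture {
    AVFrame f;             /* was: FF_COMMON_FRAME field expansion */
    int     field_poc[2];  /* codec-private state stays in Picture */
    int     long_ref;
} Picture;

/* Old style: return pic->data[0];  new style: */
static uint8_t *luma_plane(Picture *pic)
{
    return pic->f.data[0];
}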
@@ -8,6 +8,7 @@ version next:
version 0.8:
- many many things we forgot because we rather write code than changelogs
- WebM support in Matroska de/muxer
- low overhead Ogg muxing
@@ -57,10 +58,10 @@ version 0.8:
- demuxer for receiving raw rtp:// URLs without an SDP description
- single stream LATM/LOAS decoder
- setpts filter added
- Win64 support for optimized asm functions
- Win64 support for optimized x86 assembly functions
- MJPEG/AVI1 to JPEG/JFIF bitstream filter
- ASS subtitle encoder and decoder
- IEC 61937 encapsulation for E-AC3, TrueHD, DTS-HD (for HDMI passthrough)
- IEC 61937 encapsulation for E-AC-3, TrueHD, DTS-HD (for HDMI passthrough)
- overlay filter added
- rename aspect filter to setdar, and pixelaspect to setsar
- IEC 61937 demuxer
@@ -8,6 +8,7 @@ vpath %.asm $(SRC_PATH)
vpath %.v $(SRC_PATH)
vpath %.texi $(SRC_PATH)
PROGS-$(CONFIG_FFMPEG) += ffmpeg
PROGS-$(CONFIG_FFPLAY) += ffplay
PROGS-$(CONFIG_FFPROBE) += ffprobe
@@ -43,11 +44,10 @@ SKIPHEADERS = cmdutils_common_opts.h
include $(SRC_PATH)/common.mak
FF_LDFLAGS := $(FFLDFLAGS)
FF_EXTRALIBS := $(FFEXTRALIBS)
FF_DEP_LIBS := $(DEP_LIBS)
all: $(FF_DEP_LIBS) $(PROGS)
all: $(PROGS)
$(PROGS): %$(EXESUF): %_g$(EXESUF)
$(CP) $< $@
@@ -83,10 +83,10 @@ $(foreach D,$(FFLIBS),$(eval $(call DOSUBDIR,lib$(D))))
ffplay.o: CFLAGS += $(SDL_CFLAGS)
ffplay_g$(EXESUF): FF_EXTRALIBS += $(SDL_LIBS)
ffserver_g$(EXESUF): FF_LDFLAGS += $(FFSERVERLDFLAGS)
ffserver_g$(EXESUF): LDFLAGS += $(FFSERVERLDFLAGS)
%_g$(EXESUF): %.o cmdutils.o $(FF_DEP_LIBS)
$(LD) $(FF_LDFLAGS) -o $@ $< cmdutils.o $(FF_EXTRALIBS)
$(LD) $(LDFLAGS) -o $@ $< cmdutils.o $(FF_EXTRALIBS)
OBJDIRS += tools
@@ -27,6 +27,7 @@ CFLAGS += $(ECFLAGS)
CCFLAGS = $(CFLAGS)
YASMFLAGS += $(IFLAGS) -Pconfig.asm
HOSTCFLAGS += $(IFLAGS)
LDFLAGS += $(ALLFFLIBS:%=-Llib%)
define COMPILE
$($(1)DEP)
@@ -69,7 +70,6 @@ FFLIBS := $(FFLIBS-yes) $(FFLIBS)
TESTPROGS += $(TESTPROGS-yes)
FFEXTRALIBS := $(FFLIBS:%=-l%$(BUILDSUF)) $(EXTRALIBS)
FFLDFLAGS := $(ALLFFLIBS:%=-Llib%) $(LDFLAGS)
EXAMPLES := $(EXAMPLES:%=$(SUBDIR)%-example$(EXESUF))
OBJS := $(sort $(OBJS:%=$(SUBDIR)%))
@@ -54,10 +54,8 @@ These features are supported by all compilers we care about, so we will not
accept patches to remove their use unless they absolutely do not impair
clarity and performance.
All code must compile with GCC 3.3. Currently, FFmpeg also
compiles with several other compilers, such as the Compaq ccc compiler
or Sun Studio 9, and we would like to keep it that way unless it would
be exceedingly involved. To ensure compatibility, please do not use any
All code must compile with recent versions of GCC and a number of other
currently supported compilers. To ensure compatibility, please do not use
additional C99 features or GCC extensions. Especially watch out for:
@itemize @bullet
@item
This diff is collapsed.
@@ -333,9 +333,9 @@ static inline void mc_dir_part(AVSContext *h,Picture *pic,int square,
const int mx= mv->x + src_x_offset*8;
const int my= mv->y + src_y_offset*8;
const int luma_xy= (mx&3) + ((my&3)<<2);
uint8_t * src_y = pic->data[0] + (mx>>2) + (my>>2)*h->l_stride;
uint8_t * src_cb= pic->data[1] + (mx>>3) + (my>>3)*h->c_stride;
uint8_t * src_cr= pic->data[2] + (mx>>3) + (my>>3)*h->c_stride;
uint8_t * src_y = pic->f.data[0] + (mx >> 2) + (my >> 2) * h->l_stride;
uint8_t * src_cb = pic->f.data[1] + (mx >> 3) + (my >> 3) * h->c_stride;
uint8_t * src_cr = pic->f.data[2] + (mx >> 3) + (my >> 3) * h->c_stride;
int extra_width= 0; //(s->flags&CODEC_FLAG_EMU_EDGE) ? 0 : 16;
int extra_height= extra_width;
int emu=0;
@@ -344,7 +344,7 @@ static inline void mc_dir_part(AVSContext *h,Picture *pic,int square,
const int pic_width = 16*h->mb_width;
const int pic_height = 16*h->mb_height;
if(!pic->data[0])
if(!pic->f.data[0])
return;
if(mx&7) extra_width -= 3;
if(my&7) extra_height -= 3;
@@ -602,9 +602,9 @@ int ff_cavs_next_mb(AVSContext *h) {
h->mbx = 0;
h->mby++;
/* re-calculate sample pointers */
h->cy = h->picture.data[0] + h->mby*16*h->l_stride;
h->cu = h->picture.data[1] + h->mby*8*h->c_stride;
h->cv = h->picture.data[2] + h->mby*8*h->c_stride;
h->cy = h->picture.f.data[0] + h->mby * 16 * h->l_stride;
h->cu = h->picture.f.data[1] + h->mby * 8 * h->c_stride;
h->cv = h->picture.f.data[2] + h->mby * 8 * h->c_stride;
if(h->mby == h->mb_height) { //frame end
return 0;
}
@@ -629,11 +629,11 @@ void ff_cavs_init_pic(AVSContext *h) {
h->mv[MV_FWD_X0] = ff_cavs_dir_mv;
set_mvs(&h->mv[MV_FWD_X0], BLK_16X16);
h->pred_mode_Y[3] = h->pred_mode_Y[6] = NOT_AVAIL;
h->cy = h->picture.data[0];
h->cu = h->picture.data[1];
h->cv = h->picture.data[2];
h->l_stride = h->picture.linesize[0];
h->c_stride = h->picture.linesize[1];
h->cy = h->picture.f.data[0];
h->cu = h->picture.f.data[1];
h->cv = h->picture.f.data[2];
h->l_stride = h->picture.f.linesize[0];
h->c_stride = h->picture.f.linesize[1];
h->luma_scan[2] = 8*h->l_stride;
h->luma_scan[3] = 8*h->l_stride+8;
h->mbx = h->mby = h->mbidx = 0;
@@ -476,8 +476,8 @@ static int decode_pic(AVSContext *h) {
return -1;
}
/* make sure we have the reference frames we need */
if(!h->DPB[0].data[0] ||
(!h->DPB[1].data[0] && h->pic_type == AV_PICTURE_TYPE_B))
if(!h->DPB[0].f.data[0] ||
(!h->DPB[1].f.data[0] && h->pic_type == AV_PICTURE_TYPE_B))
return -1;
} else {
h->pic_type = AV_PICTURE_TYPE_I;
@@ -494,7 +494,7 @@ static int decode_pic(AVSContext *h) {
skip_bits(&s->gb,1); //marker_bit
}
/* release last B frame */
if(h->picture.data[0])
if(h->picture.f.data[0])
s->avctx->release_buffer(s->avctx, (AVFrame *)&h->picture);
s->avctx->get_buffer(s->avctx, (AVFrame *)&h->picture);
@@ -585,7 +585,7 @@ static int decode_pic(AVSContext *h) {
} while(ff_cavs_next_mb(h));
}
if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].data[0])
if(h->DPB[1].f.data[0])
s->avctx->release_buffer(s->avctx, (AVFrame *)&h->DPB[1]);
h->DPB[1] = h->DPB[0];
h->DPB[0] = h->picture;
@@ -648,7 +648,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
s->avctx = avctx;
if (buf_size == 0) {
if(!s->low_delay && h->DPB[0].data[0]) {
if (!s->low_delay && h->DPB[0].f.data[0]) {
*data_size = sizeof(AVPicture);
*picture = *(AVFrame *) &h->DPB[0];
}
@@ -669,9 +669,9 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
break;
case PIC_I_START_CODE:
if(!h->got_keyframe) {
if(h->DPB[0].data[0])
if(h->DPB[0].f.data[0])
avctx->release_buffer(avctx, (AVFrame *)&h->DPB[0]);
if(h->DPB[1].data[0])
if(h->DPB[1].f.data[0])
avctx->release_buffer(avctx, (AVFrame *)&h->DPB[1]);
h->got_keyframe = 1;
}
@@ -685,7 +685,7 @@ static int cavs_decode_frame(AVCodecContext * avctx,void *data, int *data_size,
break;
*data_size = sizeof(AVPicture);
if(h->pic_type != AV_PICTURE_TYPE_B) {
if(h->DPB[1].data[0]) {
if(h->DPB[1].f.data[0]) {
*picture = *(AVFrame *) &h->DPB[1];
} else {
*data_size = 0;
@@ -24,7 +24,7 @@
void *ff_dxva2_get_surface(const Picture *picture)
{
return picture->data[3];
return picture->f.data[3];
}
unsigned ff_dxva2_get_surface_index(const struct dxva_context *ctx,
@@ -70,15 +70,15 @@ static void fill_picture_parameters(struct dxva_context *ctx, const H264Context
ff_dxva2_get_surface_index(ctx, r),
r->long_ref != 0);
if ((r->reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
if ((r->f.reference & PICT_TOP_FIELD) && r->field_poc[0] != INT_MAX)
pp->FieldOrderCntList[i][0] = r->field_poc[0];
if ((r->reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX)
if ((r->f.reference & PICT_BOTTOM_FIELD) && r->field_poc[1] != INT_MAX)
pp->FieldOrderCntList[i][1] = r->field_poc[1];
pp->FrameNumList[i] = r->long_ref ? r->pic_id : r->frame_num;
if (r->reference & PICT_TOP_FIELD)
if (r->f.reference & PICT_TOP_FIELD)
pp->UsedForReferenceFlags |= 1 << (2*i + 0);
if (r->reference & PICT_BOTTOM_FIELD)
if (r->f.reference & PICT_BOTTOM_FIELD)
pp->UsedForReferenceFlags |= 1 << (2*i + 1);
} else {
pp->RefFrameList[i].bPicEntry = 0xff;
@@ -229,7 +229,7 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
unsigned plane;
fill_picture_entry(&slice->RefPicList[list][i],
ff_dxva2_get_surface_index(ctx, r),
r->reference == PICT_BOTTOM_FIELD);
r->f.reference == PICT_BOTTOM_FIELD);
for (plane = 0; plane < 3; plane++) {
int w, o;
if (plane == 0 && h->luma_weight_flag[list]) {
@@ -278,7 +278,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
const unsigned mb_count = s->mb_width * s->mb_height;
struct dxva_context *ctx = avctx->hwaccel_context;
const Picture *current_picture = h->s.current_picture_ptr;
struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;
struct dxva2_picture_context *ctx_pic = current_picture->f.hwaccel_picture_private;
DXVA_Slice_H264_Short *slice = NULL;
uint8_t *dxva_data, *current, *end;
unsigned dxva_size;
@@ -373,7 +373,7 @@ static int start_frame(AVCodecContext *avctx,
{
const H264Context *h = avctx->priv_data;
struct dxva_context *ctx = avctx->hwaccel_context;
struct dxva2_picture_context *ctx_pic = h->s.current_picture_ptr->hwaccel_picture_private;
struct dxva2_picture_context *ctx_pic = h->s.current_picture_ptr->f.hwaccel_picture_private;
if (!ctx->decoder || !ctx->cfg || ctx->surface_count <= 0)
return -1;
@@ -397,7 +397,7 @@ static int decode_slice(AVCodecContext *avctx,
const H264Context *h = avctx->priv_data;
struct dxva_context *ctx = avctx->hwaccel_context;
const Picture *current_picture = h->s.current_picture_ptr;
struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;
struct dxva2_picture_context *ctx_pic = current_picture->f.hwaccel_picture_private;
unsigned position;
if (ctx_pic->slice_count >= MAX_SLICES)
@@ -426,7 +426,7 @@ static int end_frame(AVCodecContext *avctx)
H264Context *h = avctx->priv_data;
MpegEncContext *s = &h->s;
struct dxva2_picture_context *ctx_pic =
h->s.current_picture_ptr->hwaccel_picture_private;
h->s.current_picture_ptr->f.hwaccel_picture_private;
if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
return -1;
@@ -151,7 +151,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
const struct MpegEncContext *s = avctx->priv_data;
struct dxva_context *ctx = avctx->hwaccel_context;
struct dxva2_picture_context *ctx_pic =
s->current_picture_ptr->hwaccel_picture_private;
s->current_picture_ptr->f.hwaccel_picture_private;
const int is_field = s->picture_structure != PICT_FRAME;
const unsigned mb_count = s->mb_width * (s->mb_height >> is_field);
uint8_t *dxva_data, *current, *end;
@@ -210,7 +210,7 @@ static int start_frame(AVCodecContext *avctx,
const struct MpegEncContext *s = avctx->priv_data;
struct dxva_context *ctx = avctx->hwaccel_context;
struct dxva2_picture_context *ctx_pic =
s->current_picture_ptr->hwaccel_picture_private;
s->current_picture_ptr->f.hwaccel_picture_private;
if (!ctx->decoder || !ctx->cfg || ctx->surface_count <= 0)
return -1;
@@ -230,7 +230,7 @@ static int decode_slice(AVCodecContext *avctx,
{
const struct MpegEncContext *s = avctx->priv_data;
struct dxva2_picture_context *ctx_pic =
s->current_picture_ptr->hwaccel_picture_private;
s->current_picture_ptr->f.hwaccel_picture_private;
unsigned position;
if (ctx_pic->slice_count >= MAX_SLICES)
@@ -250,7 +250,7 @@ static int end_frame(AVCodecContext *avctx)
{
struct MpegEncContext *s = avctx->priv_data;
struct dxva2_picture_context *ctx_pic =
s->current_picture_ptr->hwaccel_picture_private;
s->current_picture_ptr->f.hwaccel_picture_private;
if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
return -1;
@@ -161,7 +161,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
const VC1Context *v = avctx->priv_data;
struct dxva_context *ctx = avctx->hwaccel_context;
const MpegEncContext *s = &v->s;
struct dxva2_picture_context *ctx_pic = s->current_picture_ptr->hwaccel_picture_private;
struct dxva2_picture_context *ctx_pic = s->current_picture_ptr->f.hwaccel_picture_private;
DXVA_SliceInfo *slice = &ctx_pic->si;
@@ -213,7 +213,7 @@ static int start_frame(AVCodecContext *avctx,
{
const VC1Context *v = avctx->priv_data;
struct dxva_context *ctx = avctx->hwaccel_context;
struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;
struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->f.hwaccel_picture_private;
if (!ctx->decoder || !ctx->cfg || ctx->surface_count <= 0)
return -1;
@@ -231,7 +231,7 @@ static int decode_slice(AVCodecContext *avctx,
{
const VC1Context *v = avctx->priv_data;
const Picture *current_picture = v->s.current_picture_ptr;
struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;
struct dxva2_picture_context *ctx_pic = current_picture->f.hwaccel_picture_private;
if (ctx_pic->bitstream_size > 0)
return -1;
@@ -252,7 +252,7 @@ static int decode_slice(AVCodecContext *avctx,
static int end_frame(AVCodecContext *avctx)
{
VC1Context *v = avctx->priv_data;
struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;
struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->f.hwaccel_picture_private;
if (ctx_pic->bitstream_size <= 0)
return -1;
This diff is collapsed.
@@ -215,7 +215,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2 )
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = 1;
@@ -323,14 +323,14 @@ static int h261_decode_mb(H261Context *h){
}
if(s->mb_intra){
s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
goto intra;
}
//set motion vectors
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = h->current_mv_x * 2;//gets divided by 2 in motion compensation
s->mv[0][0][1] = h->current_mv_y * 2;
@@ -464,7 +464,7 @@ static int h261_decode_picture_header(H261Context *h){
s->picture_number = (s->picture_number&~31) + i;
s->avctx->time_base= (AVRational){1001, 30000};
s->current_picture.pts= s->picture_number;
s->current_picture.f.pts = s->picture_number;
/* PTYPE starts here */
@@ -570,7 +570,7 @@ retry:
}
//we need to set current_picture_ptr before reading the header, otherwise we cannot store anything in there
if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
int i= ff_find_unused_picture(s, 0);
s->current_picture_ptr= &s->picture[i];
}
@@ -596,8 +596,8 @@ retry:
}
// for skipping the frame
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
s->current_picture.f.pict_type = s->pict_type;
s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==AV_PICTURE_TYPE_B)
||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=AV_PICTURE_TYPE_I)
@@ -52,7 +52,7 @@ void ff_h263_update_motion_val(MpegEncContext * s){
const int wrap = s->b8_stride;
const int xy = s->block_index[0];
s->current_picture.mbskip_table[mb_xy]= s->mb_skipped;
s->current_picture.f.mbskip_table[mb_xy] = s->mb_skipped;
if(s->mv_type != MV_TYPE_8X8){
int motion_x, motion_y;
@@ -71,30 +71,30 @@ void ff_h263_update_motion_val(MpegEncContext * s){
s->p_field_mv_table[i][0][mb_xy][0]= s->mv[0][i][0];
s->p_field_mv_table[i][0][mb_xy][1]= s->mv[0][i][1];
}
s->current_picture.ref_index[0][4*mb_xy ]=
s->current_picture.ref_index[0][4*mb_xy + 1]= s->field_select[0][0];
s->current_picture.ref_index[0][4*mb_xy + 2]=
s->current_picture.ref_index[0][4*mb_xy + 3]= s->field_select[0][1];
s->current_picture.f.ref_index[0][4*mb_xy ] =
s->current_picture.f.ref_index[0][4*mb_xy + 1] = s->field_select[0][0];
s->current_picture.f.ref_index[0][4*mb_xy + 2] =
s->current_picture.f.ref_index[0][4*mb_xy + 3] = s->field_select[0][1];
}
/* no update if 8X8 because it has been done during parsing */
s->current_picture.motion_val[0][xy][0] = motion_x;
s->current_picture.motion_val[0][xy][1] = motion_y;
s->current_picture.motion_val[0][xy + 1][0] = motion_x;
s->current_picture.motion_val[0][xy + 1][1] = motion_y;
s->current_picture.motion_val[0][xy + wrap][0] = motion_x;
s->current_picture.motion_val[0][xy + wrap][1] = motion_y;
s->current_picture.motion_val[0][xy + 1 + wrap][0] = motion_x;
s->current_picture.motion_val[0][xy + 1 + wrap][1] = motion_y;
s->current_picture.f.motion_val[0][xy][0] = motion_x;
s->current_picture.f.motion_val[0][xy][1] = motion_y;
s->current_picture.f.motion_val[0][xy + 1][0] = motion_x;
s->current_picture.f.motion_val[0][xy + 1][1] = motion_y;
s->current_picture.f.motion_val[0][xy + wrap][0] = motion_x;
s->current_picture.f.motion_val[0][xy + wrap][1] = motion_y;
s->current_picture.f.motion_val[0][xy + 1 + wrap][0] = motion_x;
s->current_picture.f.motion_val[0][xy + 1 + wrap][1] = motion_y;
}
if(s->encoding){ //FIXME encoding MUST be cleaned up
if (s->mv_type == MV_TYPE_8X8)
s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_8x8;
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_8x8;
else if(s->mb_intra)
s->current_picture.mb_type[mb_xy]= MB_TYPE_INTRA;
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_INTRA;
else
s->current_picture.mb_type[mb_xy]= MB_TYPE_L0 | MB_TYPE_16x16;
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_L0 | MB_TYPE_16x16;
}
}
@@ -154,7 +154,7 @@ void ff_h263_loop_filter(MpegEncContext * s){
Diag Top
Left Center
*/
if(!IS_SKIP(s->current_picture.mb_type[xy])){
if (!IS_SKIP(s->current_picture.f.mb_type[xy])) {
qp_c= s->qscale;
s->dsp.h263_v_loop_filter(dest_y+8*linesize , linesize, qp_c);
s->dsp.h263_v_loop_filter(dest_y+8*linesize+8, linesize, qp_c);
@@ -164,10 +164,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
if(s->mb_y){
int qp_dt, qp_tt, qp_tc;
if(IS_SKIP(s->current_picture.mb_type[xy-s->mb_stride]))
if (IS_SKIP(s->current_picture.f.mb_type[xy - s->mb_stride]))
qp_tt=0;
else
qp_tt= s->current_picture.qscale_table[xy-s->mb_stride];
qp_tt = s->current_picture.f.qscale_table[xy - s->mb_stride];
if(qp_c)
qp_tc= qp_c;
@@ -187,10 +187,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
s->dsp.h263_h_loop_filter(dest_y-8*linesize+8 , linesize, qp_tt);
if(s->mb_x){
if(qp_tt || IS_SKIP(s->current_picture.mb_type[xy-1-s->mb_stride]))
if (qp_tt || IS_SKIP(s->current_picture.f.mb_type[xy - 1 - s->mb_stride]))
qp_dt= qp_tt;
else
qp_dt= s->current_picture.qscale_table[xy-1-s->mb_stride];
qp_dt = s->current_picture.f.qscale_table[xy - 1 - s->mb_stride];
if(qp_dt){
const int chroma_qp= s->chroma_qscale_table[qp_dt];
@@ -209,10 +209,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
if(s->mb_x){
int qp_lc;
if(qp_c || IS_SKIP(s->current_picture.mb_type[xy-1]))
if (qp_c || IS_SKIP(s->current_picture.f.mb_type[xy - 1]))
qp_lc= qp_c;
else
qp_lc= s->current_picture.qscale_table[xy-1];
qp_lc = s->current_picture.f.qscale_table[xy - 1];
if(qp_lc){
s->dsp.h263_h_loop_filter(dest_y, linesize, qp_lc);
@@ -321,7 +321,7 @@ int16_t *h263_pred_motion(MpegEncContext * s, int block, int dir,
static const int off[4]= {2, 1, 1, -1};
wrap = s->b8_stride;
mot_val = s->current_picture.motion_val[dir] + s->block_index[block];
mot_val = s->current_picture.f.motion_val[dir] + s->block_index[block];
A = mot_val[ - 1];
/* special case for first (slice) line */
@@ -406,7 +406,7 @@ retry:
/* We need to set current_picture_ptr before reading the header,
* otherwise we cannot store anything in there */
if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
if (s->current_picture_ptr == NULL || s->current_picture_ptr->f.data[0]) {
int i= ff_find_unused_picture(s, 0);
s->current_picture_ptr= &s->picture[i];
}
@@ -593,8 +593,8 @@ retry:
s->gob_index = ff_h263_get_gob_height(s);
// for skipping the frame
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
s->current_picture.f.pict_type = s->pict_type;
s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
/* skip B-frames if we don't have reference frames */
if(s->last_picture_ptr==NULL && (s->pict_type==AV_PICTURE_TYPE_B || s->dropable)) return get_consumed_bytes(s, buf_size);
This diff is collapsed.
@@ -795,8 +795,8 @@ static void fill_decode_neighbors(H264Context *h, int mb_type){
left_xy[LBOT] = left_xy[LTOP] = mb_xy-1;
h->left_block = left_block_options[0];
if(FRAME_MBAFF){
const int left_mb_field_flag = IS_INTERLACED(s->current_picture.mb_type[mb_xy-1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
const int left_mb_field_flag = IS_INTERLACED(s->current_picture.f.mb_type[mb_xy - 1]);
const int curr_mb_field_flag = IS_INTERLACED(mb_type);
if(s->mb_y&1){
if (left_mb_field_flag != curr_mb_field_flag) {
left_xy[LBOT] = left_xy[LTOP] = mb_xy - s->mb_stride - 1;
@@ -812,9 +812,9 @@ static void fill_decode_neighbors(H264Context *h, int mb_type){
}
}else{
if(curr_mb_field_flag){
topleft_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy - 1]>>7)&1)-1);
topright_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy + 1]>>7)&1)-1);
top_xy += s->mb_stride & (((s->current_picture.mb_type[top_xy ]>>7)&1)-1);
topleft_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy - 1] >> 7) & 1) - 1);
topright_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy + 1] >> 7) & 1) - 1);
top_xy += s->mb_stride & (((s->current_picture.f.mb_type[top_xy ] >> 7) & 1) - 1);
}
if (left_mb_field_flag != curr_mb_field_flag) {
if (curr_mb_field_flag) {
@@ -834,11 +834,11 @@ static void fill_decode_neighbors(H264Context *h, int mb_type){
h->left_mb_xy[LBOT] = left_xy[LBOT];
//FIXME do we need all in the context?
h->topleft_type = s->current_picture.mb_type[topleft_xy] ;
h->top_type = s->current_picture.mb_type[top_xy] ;
h->topright_type= s->current_picture.mb_type[topright_xy];
h->left_type[LTOP] = s->current_picture.mb_type[left_xy[LTOP]] ;
h->left_type[LBOT] = s->current_picture.mb_type[left_xy[LBOT]] ;
h->topleft_type = s->current_picture.f.mb_type[topleft_xy];
h->top_type = s->current_picture.f.mb_type[top_xy];
h->topright_type = s->current_picture.f.mb_type[topright_xy];
h->left_type[LTOP] = s->current_picture.f.mb_type[left_xy[LTOP]];
h->left_type[LBOT] = s->current_picture.f.mb_type[left_xy[LBOT]];
if(FMO){
if(h->slice_table[topleft_xy ] != h->slice_num) h->topleft_type = 0;
@@ -898,7 +898,7 @@ static void fill_decode_caches(H264Context *h, int mb_type){
h->left_samples_available&= 0xFF5F;
}
}else{
int left_typei = s->current_picture.mb_type[left_xy[LTOP] + s->mb_stride];
int left_typei = s->current_picture.f.mb_type[left_xy[LTOP] + s->mb_stride];
assert(left_xy[LTOP] == left_xy[LBOT]);
if(!((left_typei & type_mask) && (left_type[LTOP] & type_mask))){
@@ -1016,9 +1016,9 @@ static void fill_decode_caches(H264Context *h, int mb_type){
int b_stride = h->b_stride;
for(list=0; list<h->list_count; list++){
int8_t *ref_cache = &h->ref_cache[list][scan8[0]];
int8_t *ref = s->current_picture.ref_index[list];
int8_t *ref = s->current_picture.f.ref_index[list];
int16_t (*mv_cache)[2] = &h->mv_cache[list][scan8[0]];
int16_t (*mv)[2] = s->current_picture.motion_val[list];
int16_t (*mv)[2] = s->current_picture.f.motion_val[list];
if(!USES_LIST(mb_type, list)){
continue;
}
@@ -1240,7 +1240,7 @@ static av_always_inline void write_back_non_zero_count(H264Context *h){
static av_always_inline void write_back_motion_list(H264Context *h, MpegEncContext * const s, int b_stride,
int b_xy, int b8_xy, int mb_type, int list )
{
int16_t (*mv_dst)[2] = &s->current_picture.motion_val[list][b_xy];
int16_t (*mv_dst)[2] = &s->current_picture.f.motion_val[list][b_xy];
int16_t (*mv_src)[2] = &h->mv_cache[list][scan8[0]];
AV_COPY128(mv_dst + 0*b_stride, mv_src + 8*0);
AV_COPY128(mv_dst + 1*b_stride, mv_src + 8*1);
@@ -1260,7 +1260,7 @@ static av_always_inline void write_back_motion_list(H264Context *h, MpegEncConte
}
{
int8_t *ref_index = &s->current_picture.ref_index[list][b8_xy];
int8_t *ref_index = &s->current_picture.f.ref_index[list][b8_xy];
int8_t *ref_cache = h->ref_cache[list];
ref_index[0+0*2]= ref_cache[scan8[0]];
ref_index[1+0*2]= ref_cache[scan8[4]];
@@ -1278,7 +1278,8 @@ static av_always_inline void write_back_motion(H264Context *h, int mb_type){
if(USES_LIST(mb_type, 0)){
write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 0);
}else{
fill_rectangle(&s->current_picture.ref_index[0][b8_xy], 2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
fill_rectangle(&s->current_picture.f.ref_index[0][b8_xy],
2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
}
if(USES_LIST(mb_type, 1)){
write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 1);
@@ -1334,8 +1335,8 @@ static void av_unused decode_mb_skip(H264Context *h){
}
write_back_motion(h, mb_type);
s->current_picture.mb_type[mb_xy]= mb_type;
s->current_picture.qscale_table[mb_xy]= s->qscale;
s->current_picture.f.mb_type[mb_xy] = mb_type;
s->current_picture.f.qscale_table[mb_xy] = s->qscale;
h->slice_table[ mb_xy ]= h->slice_num;
h->prev_mb_skipped= 1;
}
@@ -1284,8 +1284,8 @@ static int decode_cabac_field_decoding_flag(H264Context *h) {
unsigned long ctx = 0;
ctx += h->mb_field_decoding_flag & !!s->mb_x; //for FMO:(s->current_picture.mb_type[mba_xy]>>7)&(h->slice_table[mba_xy] == h->slice_num);
ctx += (s->current_picture.mb_type[mbb_xy]>>7)&(h->slice_table[mbb_xy] == h->slice_num);
ctx += h->mb_field_decoding_flag & !!s->mb_x; //for FMO:(s->current_picture.f.mb_type[mba_xy] >> 7) & (h->slice_table[mba_xy] == h->slice_num);
ctx += (s->current_picture.f.mb_type[mbb_xy] >> 7) & (h->slice_table[mbb_xy] == h->slice_num);
return get_cabac_noinline( &h->cabac, &(h->cabac_state+70)[ctx] );
}
@@ -1330,13 +1330,13 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
mba_xy = mb_xy - 1;
if( (mb_y&1)
&& h->slice_table[mba_xy] == h->slice_num
&& MB_FIELD == !!IS_INTERLACED( s->current_picture.mb_type[mba_xy] ) )
&& MB_FIELD == !!IS_INTERLACED( s->current_picture.f.mb_type[mba_xy] ) )
mba_xy += s->mb_stride;
if( MB_FIELD ){
mbb_xy = mb_xy - s->mb_stride;
if( !(mb_y&1)
&& h->slice_table[mbb_xy] == h->slice_num
&& IS_INTERLACED( s->current_picture.mb_type[mbb_xy] ) )
&& IS_INTERLACED( s->current_picture.f.mb_type[mbb_xy] ) )
mbb_xy -= s->mb_stride;
}else
mbb_xy = mb_x + (mb_y-1)*s->mb_stride;
@@ -1346,9 +1346,9 @@ static int decode_cabac_mb_skip( H264Context *h, int mb_x, int mb_y ) {
mbb_xy = mb_xy - (s->mb_stride << FIELD_PICTURE);
}
if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mba_xy] ))
if( h->slice_table[mba_xy] == h->slice_num && !IS_SKIP( s->current_picture.f.mb_type[mba_xy] ))
ctx++;
if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.mb_type[mbb_xy] ))
if( h->slice_table[mbb_xy] == h->slice_num && !IS_SKIP( s->current_picture.f.mb_type[mbb_xy] ))
ctx++;
if( h->slice_type_nos == AV_PICTURE_TYPE_B )
@@ -1850,7 +1850,7 @@ int ff_h264_decode_mb_cabac(H264Context *h) {
/* read skip flags */
if( skip ) {
if( FRAME_MBAFF && (s->mb_y&1)==0 ){
s->current_picture.mb_type[mb_xy] = MB_TYPE_SKIP;
s->current_picture.f.mb_type[mb_xy] = MB_TYPE_SKIP;
h->next_mb_skipped = decode_cabac_mb_skip( h, s->mb_x, s->mb_y+1 );
if(!h->next_mb_skipped)
h->mb_mbaff = h->mb_field_decoding_flag = decode_cabac_field_decoding_flag(h);
@@ -1966,10 +1966,10 @@ decode_intra_mb:
h->cbp_table[mb_xy] = 0xf7ef;
h->chroma_pred_mode_table[mb_xy] = 0;
// In deblocking, the quantizer is 0
s->current_picture.qscale_table[mb_xy]= 0;
s->current_picture.f.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
s->current_picture.mb_type[mb_xy]= mb_type;
s->current_picture.f.mb_type[mb_xy] = mb_type;
h->last_qscale_diff = 0;
return 0;
}
@@ -2266,7 +2266,7 @@ decode_intra_mb:
AV_WN32A(&nnz_cache[4+8*10], top_empty);
}
}
s->current_picture.mb_type[mb_xy]= mb_type;
s->current_picture.f.mb_type[mb_xy] = mb_type;
if( cbp || IS_INTRA16x16( mb_type ) ) {
const uint8_t *scan, *scan8x8;
@@ -2345,7 +2345,7 @@ decode_intra_mb:
h->last_qscale_diff = 0;
}
s->current_picture.qscale_table[mb_xy]= s->qscale;
s->current_picture.f.qscale_table[mb_xy] = s->qscale;
write_back_non_zero_count(h);
if(MB_MBAFF){
@@ -689,11 +689,11 @@ decode_intra_mb:
}
// In deblocking, the quantizer is 0
s->current_picture.qscale_table[mb_xy]= 0;
s->current_picture.f.qscale_table[mb_xy] = 0;
// All coeffs are present
memset(h->non_zero_count[mb_xy], 16, 48);
s->current_picture.mb_type[mb_xy]= mb_type;
s->current_picture.f.mb_type[mb_xy] = mb_type;
return 0;
}
@@ -990,7 +990,7 @@ decode_intra_mb:
}
h->cbp=
h->cbp_table[mb_xy]= cbp;
s->current_picture.mb_type[mb_xy]= mb_type;
s->current_picture.f.mb_type[mb_xy] = mb_type;
if(cbp || IS_INTRA16x16(mb_type)){
int i4x4, chroma_idx;
@@ -1063,7 +1063,7 @@ decode_intra_mb:
fill_rectangle(&h->non_zero_count_cache[scan8[16]], 4, 4, 8, 0, 1);
fill_rectangle(&h->non_zero_count_cache[scan8[32]], 4, 4, 8, 0, 1);
}
s->current_picture.qscale_table[mb_xy]= s->qscale;
s->current_picture.f.qscale_table[mb_xy] = s->qscale;
write_back_non_zero_count(h);
if(MB_MBAFF){
@@ -87,7 +87,7 @@ static void fill_colmap(H264Context *h, int map[2][16+32], int list, int field,
poc= (poc&~3) + rfield + 1;
for(j=start; j<end; j++){
if(4*h->ref_list[0][j].frame_num + (h->ref_list[0][j].reference&3) == poc){
if (4 * h->ref_list[0][j].frame_num + (h->ref_list[0][j].f.reference & 3) == poc) {
int cur_ref= mbafi ? (j-16)^field : j;
map[list][2*old_ref + (rfield^field) + 16] = cur_ref;
if(rfield == field || !interl)
@@ -105,12 +105,12 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
Picture * const cur = s->current_picture_ptr;
int list, j, field;
int sidx= (s->picture_structure&1)^1;
int ref1sidx= (ref1->reference&1)^1;
int ref1sidx = (ref1->f.reference&1)^1;
for(list=0; list<2; list++){
cur->ref_count[sidx][list] = h->ref_count[list];
for(j=0; j<h->ref_count[list]; j++)
cur->ref_poc[sidx][list][j] = 4*h->ref_list[list][j].frame_num + (h->ref_list[list][j].reference&3);
cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num + (h->ref_list[list][j].f.reference & 3);
}
if(s->picture_structure == PICT_FRAME){
@@ -126,11 +126,11 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
int *col_poc = h->ref_list[1]->field_poc;
h->col_parity= (FFABS(col_poc[0] - cur_poc) >= FFABS(col_poc[1] - cur_poc));
ref1sidx=sidx= h->col_parity;
}else if(!(s->picture_structure & h->ref_list[1][0].reference) && !h->ref_list[1][0].mbaff){ // FL -> FL & differ parity
h->col_fieldoff= 2*(h->ref_list[1][0].reference) - 3;
} else if (!(s->picture_structure & h->ref_list[1][0].f.reference) && !h->ref_list[1][0].mbaff) { // FL -> FL & differ parity
h->col_fieldoff = 2 * h->ref_list[1][0].f.reference - 3;
}
if(cur->pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
if (cur->f.pict_type != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
return;
for(list=0; list<2; list++){
@@ -143,7 +143,7 @@ void ff_h264_direct_ref_list_init(H264Context * const h){
static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
{
int ref_field = ref->reference - 1;
int ref_field = ref->f.reference - 1;
int ref_field_picture = ref->field_picture;
int ref_height = 16*h->s.mb_height >> ref_field_picture;
@@ -234,8 +234,8 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
return;
}
if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL
if(!IS_INTERLACED(*mb_type)){ // AFR/FR -> AFL/FL
if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
mb_y = (s->mb_y&~1) + h->col_parity;
mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
b8_stride = 0;
@@ -248,8 +248,8 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
mb_y = s->mb_y&~1;
mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride];
mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
b8_stride = 2+4*s->mb_stride;
b4_stride *= 6;
@@ -264,7 +264,7 @@ static void pred_spatial_direct_motion(H264Context * const h, int *mb_type){
}else{ // AFR/FR -> AFR/FR
single_col:
mb_type_col[0] =
mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];
sub_mb_type |= MB_TYPE_16x16|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
@@ -284,10 +284,10 @@ single_col:
await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
l1ref0 = &h->ref_list[1][0].ref_index [0][4*mb_xy];
l1ref1 = &h->ref_list[1][0].ref_index [1][4*mb_xy];
l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
if(!b8_stride){
if(s->mb_y&1){
l1ref0 += 2;
@@ -420,8 +420,8 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
await_reference_mb_row(h, &h->ref_list[1][0], s->mb_y + !!IS_INTERLACED(*mb_type));
if(IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])){ // AFL/AFR/FR/FL -> AFL/FL
if(!IS_INTERLACED(*mb_type)){ // AFR/FR -> AFL/FL
if (IS_INTERLACED(h->ref_list[1][0].f.mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
mb_y = (s->mb_y&~1) + h->col_parity;
mb_xy= s->mb_x + ((s->mb_y&~1) + h->col_parity)*s->mb_stride;
b8_stride = 0;
@@ -434,8 +434,8 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
if(IS_INTERLACED(*mb_type)){ // AFL /FL -> AFR/FR
mb_y = s->mb_y&~1;
mb_xy= s->mb_x + (s->mb_y&~1)*s->mb_stride;
mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + s->mb_stride];
mb_type_col[0] = h->ref_list[1][0].f.mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy + s->mb_stride];
b8_stride = 2+4*s->mb_stride;
b4_stride *= 6;
@@ -451,7 +451,7 @@ static void pred_temp_direct_motion(H264Context * const h, int *mb_type){
}else{ // AFR/FR -> AFR/FR
single_col:
mb_type_col[0] =
mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];
mb_type_col[1] = h->ref_list[1][0].f.mb_type[mb_xy];
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if(!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)){
@@ -471,10 +471,10 @@ single_col:
await_reference_mb_row(h, &h->ref_list[1][0], mb_y);
l1mv0 = &h->ref_list[1][0].motion_val[0][h->mb2b_xy [mb_xy]];
l1mv1 = &h->ref_list[1][0].motion_val[1][h->mb2b_xy [mb_xy]];
l1ref0 = &h->ref_list[1][0].ref_index [0][4*mb_xy];
l1ref1 = &h->ref_list[1][0].ref_index [1][4*mb_xy];
l1mv0 = &h->ref_list[1][0].f.motion_val[0][h->mb2b_xy [mb_xy]];
l1mv1 = &h->ref_list[1][0].f.motion_val[1][h->mb2b_xy [mb_xy]];
l1ref0 = &h->ref_list[1][0].f.ref_index [0][4 * mb_xy];
l1ref1 = &h->ref_list[1][0].f.ref_index [1][4 * mb_xy];
if(!b8_stride){
if(s->mb_y&1){
l1ref0 += 2;
This diff is collapsed.
@@ -48,15 +48,15 @@ static av_always_inline int fetch_diagonal_mv(H264Context *h, const int16_t **C,
const int mb_type = mb_types[xy+(y4>>2)*s->mb_stride];\
if(!USES_LIST(mb_type,list))\
return LIST_NOT_USED;\
mv = s->current_picture_ptr->motion_val[list][h->mb2b_xy[xy]+3 + y4*h->b_stride];\
mv = s->current_picture_ptr->f.motion_val[list][h->mb2b_xy[xy] + 3 + y4*h->b_stride];\
h->mv_cache[list][scan8[0]-2][0] = mv[0];\
h->mv_cache[list][scan8[0]-2][1] = mv[1] MV_OP;\
return s->current_picture_ptr->ref_index[list][4*xy+1 + (y4&~1)] REF_OP;
return s->current_picture_ptr->f.ref_index[list][4*xy + 1 + (y4 & ~1)] REF_OP;
if(topright_ref == PART_NOT_AVAILABLE
&& i >= scan8[0]+8 && (i&7)==4
&& h->ref_cache[list][scan8[0]-1] != PART_NOT_AVAILABLE){
const uint32_t *mb_types = s->current_picture_ptr->mb_type;
const uint32_t *mb_types = s->current_picture_ptr->f.mb_type;
const int16_t *mv;
AV_ZERO32(h->mv_cache[list][scan8[0]-2]);
*C = h->mv_cache[list][scan8[0]-2];
@@ -236,8 +236,8 @@ static av_always_inline void pred_pskip_motion(H264Context * const h){
DECLARE_ALIGNED(4, static const int16_t, zeromv)[2] = {0};
DECLARE_ALIGNED(4, int16_t, mvbuf)[3][2];
MpegEncContext * const s = &h->s;
int8_t *ref = s->current_picture.ref_index[0];
int16_t (*mv)[2] = s->current_picture.motion_val[0];
int8_t *ref = s->current_picture.f.ref_index[0];
int16_t (*mv)[2] = s->current_picture.f.motion_val[0];
int top_ref, left_ref, diagonal_ref, match_count, mx, my;
const int16_t *A, *B, *C;
int b_stride = h->b_stride;
@@ -40,16 +40,16 @@ static void pic_as_field(Picture *pic, const int parity){
int i;
for (i = 0; i < 4; ++i) {
if (parity == PICT_BOTTOM_FIELD)
pic->data[i] += pic->linesize[i];
pic->reference = parity;
pic->linesize[i] *= 2;
pic->f.data[i] += pic->f.linesize[i];
pic->f.reference = parity;
pic->f.linesize[i] *= 2;
}
pic->poc= pic->field_poc[parity == PICT_BOTTOM_FIELD];
}
static int split_field_copy(Picture *dest, Picture *src,
int parity, int id_add){
int match = !!(src->reference & parity);
int match = !!(src->f.reference & parity);
if (match) {
*dest = *src;
@@ -68,9 +68,9 @@ static int build_def_list(Picture *def, Picture **in, int len, int is_long, int
int index=0;
while(i[0]<len || i[1]<len){
while(i[0]<len && !(in[ i[0] ] && (in[ i[0] ]->reference & sel)))
while (i[0] < len && !(in[ i[0] ] && (in[ i[0] ]->f.reference & sel)))
i[0]++;
while(i[1]<len && !(in[ i[1] ] && (in[ i[1] ]->reference & (sel^3))))
while (i[1] < len && !(in[ i[1] ] && (in[ i[1] ]->f.reference & (sel^3))))
i[1]++;
if(i[0] < len){
in[ i[0] ]->pic_id= is_long ? i[0] : in[ i[0] ]->frame_num;
@@ -134,7 +134,7 @@ int ff_h264_fill_default_ref_list(H264Context *h){
}
if(lens[0] == lens[1] && lens[1] > 1){
for(i=0; h->default_ref_list[0][i].data[0] == h->default_ref_list[1][i].data[0] && i<lens[0]; i++);
for (i = 0; h->default_ref_list[0][i].f.data[0] == h->default_ref_list[1][i].f.data[0] && i < lens[0]; i++);
if(i == lens[0])
FFSWAP(Picture, h->default_ref_list[1][0], h->default_ref_list[1][1]);
}
@@ -230,11 +230,11 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
for(i= h->short_ref_count-1; i>=0; i--){
ref = h->short_ref[i];
assert(ref->reference);
assert(ref->f.reference);
assert(!ref->long_ref);
if(
ref->frame_num == frame_num &&
(ref->reference & pic_structure)
(ref->f.reference & pic_structure)
)
break;
}
@@ -251,8 +251,8 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
return -1;
}
ref = h->long_ref[long_idx];
assert(!(ref && !ref->reference));
if(ref && (ref->reference & pic_structure)){
assert(!(ref && !ref->f.reference));
if (ref && (ref->f.reference & pic_structure)) {
ref->pic_id= pic_id;
assert(ref->long_ref);
i=0;
@@ -286,9 +286,9 @@ int ff_h264_decode_ref_pic_list_reordering(H264Context *h){
}
for(list=0; list<h->list_count; list++){
for(index= 0; index < h->ref_count[list]; index++){
if(!h->ref_list[list][index].data[0]){
if (!h->ref_list[list][index].f.data[0]) {
av_log(h->s.avctx, AV_LOG_ERROR, "Missing reference picture\n");
if(h->default_ref_list[list][0].data[0])
if (h->default_ref_list[list][0].f.data[0])
h->ref_list[list][index]= h->default_ref_list[list][0];
else
return -1;
@@ -307,13 +307,13 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){
Picture *field = &h->ref_list[list][16+2*i];
field[0] = *frame;
for(j=0; j<3; j++)
field[0].linesize[j] <<= 1;
field[0].reference = PICT_TOP_FIELD;
field[0].f.linesize[j] <<= 1;
field[0].f.reference = PICT_TOP_FIELD;
field[0].poc= field[0].field_poc[0];
field[1] = field[0];
for(j=0; j<3; j++)
field[1].data[j] += frame->linesize[j];
field[1].reference = PICT_BOTTOM_FIELD;
field[1].f.data[j] += frame->f.linesize[j];
field[1].f.reference = PICT_BOTTOM_FIELD;
field[1].poc= field[1].field_poc[1];
h->luma_weight[16+2*i][list][0] = h->luma_weight[16+2*i+1][list][0] = h->luma_weight[i][list][0];
@@ -339,12 +339,12 @@ void ff_h264_fill_mbaff_ref_list(H264Context *h){
*/
static inline int unreference_pic(H264Context *h, Picture *pic, int refmask){
int i;
if (pic->reference &= refmask) {
if (pic->f.reference &= refmask) {
return 0;
} else {
for(i = 0; h->delayed_pic[i]; i++)
if(pic == h->delayed_pic[i]){
pic->reference=DELAYED_PIC_REF;
pic->f.reference = DELAYED_PIC_REF;
break;
}
return 1;
@@ -454,7 +454,8 @@ static void print_short_term(H264Context *h) {
av_log(h->s.avctx, AV_LOG_DEBUG, "short term list:\n");
for(i=0; i<h->short_ref_count; i++){
Picture *pic= h->short_ref[i];
av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n",
i, pic->frame_num, pic->poc, pic->f.data[0]);
}
}
}
@@ -469,7 +470,8 @@ static void print_long_term(H264Context *h) {
for(i = 0; i < 16; i++){
Picture *pic= h->long_ref[i];
if (pic) {
av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n", i, pic->frame_num, pic->poc, pic->data[0]);
av_log(h->s.avctx, AV_LOG_DEBUG, "%d fn:%d poc:%d %p\n",
i, pic->frame_num, pic->poc, pic->f.data[0]);
}
}
}
@@ -481,7 +483,7 @@ void ff_generate_sliding_window_mmcos(H264Context *h) {
h->mmco_index= 0;
if(h->short_ref_count && h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
!(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->reference)) {
!(FIELD_PICTURE && !s->first_field && s->current_picture_ptr->f.reference)) {
h->mmco[0].opcode= MMCO_SHORT2UNUSED;
h->mmco[0].short_pic_num= h->short_ref[ h->short_ref_count - 1 ]->frame_num;
h->mmco_index= 1;
@@ -562,7 +564,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
h->long_ref_count++;
}
s->current_picture_ptr->reference |= s->picture_structure;
s->current_picture_ptr->f.reference |= s->picture_structure;
current_ref_assigned=1;
break;
case MMCO_SET_MAX_LONG:
@@ -601,7 +603,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
*/
if (h->short_ref_count && h->short_ref[0] == s->current_picture_ptr) {
/* Just mark the second field valid */
s->current_picture_ptr->reference = PICT_FRAME;
s->current_picture_ptr->f.reference = PICT_FRAME;
} else if (s->current_picture_ptr->long_ref) {
av_log(h->s.avctx, AV_LOG_ERROR, "illegal short term reference "
"assignment for second field "
@@ -618,7 +620,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count){
h->short_ref[0]= s->current_picture_ptr;
h->short_ref_count++;
s->current_picture_ptr->reference |= s->picture_structure;
s->current_picture_ptr->f.reference |= s->picture_structure;
}
}
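
One detail in the libavcodec/h264_refs.c hunks above that is more than a rename: pic_as_field() and ff_h264_fill_mbaff_ref_list() view one parity of a frame buffer as a standalone field by offsetting the data pointer one row for the bottom field and doubling the linesize, so no pixels are copied. A self-contained C sketch of that stride trick (illustrative names, not the libavcodec API):

#include <stdint.h>

struct plane_view { uint8_t *data; int linesize; };

/* View one field of an interleaved frame: the bottom field starts
 * one row in, and both fields advance two frame rows per field row,
 * i.e. with doubled stride. */
static struct plane_view field_view(uint8_t *frame, int linesize,
                                    int bottom_field)
{
    struct plane_view v;
    v.data     = frame + (bottom_field ? linesize : 0);
    v.linesize = 2 * linesize;
    return v;
}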
@@ -304,7 +304,7 @@ static int x8_setup_spatial_predictor(IntraX8Context * const w, const int chroma
int quant;
s->dsp.x8_setup_spatial_compensation(s->dest[chroma], s->edge_emu_buffer,
s->current_picture.linesize[chroma>0],
s->current_picture.f.linesize[chroma>0],
&range, &sum, w->edges);
if(chroma){
w->orient=w->chroma_orient;
@@ -613,7 +613,7 @@ static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){
dc_level+= (w->predicted_dc*divide_quant + (1<<12) )>>13;
dsp_x8_put_solidcolor( av_clip_uint8((dc_level*dc_quant+4)>>3),
s->dest[chroma], s->current_picture.linesize[!!chroma]);
s->dest[chroma], s->current_picture.f.linesize[!!chroma]);
goto block_placed;
}
@@ -637,15 +637,15 @@ static int x8_decode_intra_mb(IntraX8Context* const w, const int chroma){
}
if(w->flat_dc){
dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.linesize[!!chroma]);
dsp_x8_put_solidcolor(w->predicted_dc, s->dest[chroma], s->current_picture.f.linesize[!!chroma]);
}else{
s->dsp.x8_spatial_compensation[w->orient]( s->edge_emu_buffer,
s->dest[chroma],
s->current_picture.linesize[!!chroma] );
s->current_picture.f.linesize[!!chroma] );
}
if(!zeros_only)
s->dsp.idct_add ( s->dest[chroma],
s->current_picture.linesize[!!chroma],
s->current_picture.f.linesize[!!chroma],
s->block[0] );
block_placed:
@@ -656,7 +656,7 @@ block_placed:
if(s->loop_filter){
uint8_t* ptr = s->dest[chroma];
int linesize = s->current_picture.linesize[!!chroma];
int linesize = s->current_picture.f.linesize[!!chroma];
if(!( (w->edges&2) || ( zeros_only && (w->orient|4)==4 ) )){
s->dsp.x8_h_loop_filter(ptr, linesize, w->quant);
@@ -671,12 +671,12 @@ block_placed:
static void x8_init_block_index(MpegEncContext *s){ //FIXME maybe merge with ff_*
//not s->linesize as this would be wrong for field pics
//not that IntraX8 has interlacing support ;)
const int linesize = s->current_picture.linesize[0];
const int uvlinesize= s->current_picture.linesize[1];
const int linesize = s->current_picture.f.linesize[0];
const int uvlinesize = s->current_picture.f.linesize[1];
s->dest[0] = s->current_picture.data[0];
s->dest[1] = s->current_picture.data[1];
s->dest[2] = s->current_picture.data[2];
s->dest[0] = s->current_picture.f.data[0];
s->dest[1] = s->current_picture.f.data[1];
s->dest[2] = s->current_picture.f.data[2];
s->dest[0] += s->mb_y * linesize << 3;
s->dest[1] += ( s->mb_y&(~1) ) * uvlinesize << 2;//chroma blocks are on add rows
@@ -771,7 +771,7 @@ int ff_intrax8_decode_picture(IntraX8Context * const w, int dquant, int quant_of
/*emulate MB info in the relevant tables*/
s->mbskip_table [mb_xy]=0;
s->mbintra_table[mb_xy]=1;
s->current_picture.qscale_table[mb_xy]=w->quant;
s->current_picture.f.qscale_table[mb_xy] = w->quant;
mb_xy++;
}
s->dest[0]+= 8;
@@ -353,20 +353,20 @@ static void preview_obmc(MpegEncContext *s){
do{
if (get_bits1(&s->gb)) {
/* skip mb */
mot_val = s->current_picture.motion_val[0][ s->block_index[0] ];
mot_val = s->current_picture.f.motion_val[0][s->block_index[0]];
mot_val[0 ]= mot_val[2 ]=
mot_val[0+stride]= mot_val[2+stride]= 0;
mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= 0;
s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
goto end;
}
cbpc = get_vlc2(&s->gb, ff_h263_inter_MCBPC_vlc.table, INTER_MCBPC_VLC_BITS, 2);
}while(cbpc == 20);
if(cbpc & 4){
s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
}else{
get_vlc2(&s->gb, ff_h263_cbpy_vlc.table, CBPY_VLC_BITS, 1);
if (cbpc & 8) {
@@ -378,7 +378,7 @@ static void preview_obmc(MpegEncContext *s){
}
if ((cbpc & 16) == 0) {
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
mot_val= h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
if (s->umvplus)
@@ -396,7 +396,7 @@ static void preview_obmc(MpegEncContext *s){
mot_val[1 ]= mot_val[3 ]=
mot_val[1+stride]= mot_val[3+stride]= my;
} else {
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
if (s->umvplus)
@@ -618,7 +618,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->block_last_index[i] = -1;
s->mv_dir = MV_DIR_FORWARD;
s->mv_type = MV_TYPE_16X16;
s->current_picture.mb_type[xy]= MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_SKIP | MB_TYPE_16x16 | MB_TYPE_L0;
s->mv[0][0][0] = 0;
s->mv[0][0][1] = 0;
s->mb_skipped = !(s->obmc | s->loop_filter);
@@ -651,7 +651,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
s->mv_dir = MV_DIR_FORWARD;
if ((cbpc & 16) == 0) {
s->current_picture.mb_type[xy]= MB_TYPE_16x16 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_16x16 | MB_TYPE_L0;
/* 16x16 motion prediction */
s->mv_type = MV_TYPE_16X16;
h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
@@ -676,7 +676,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
if (s->umvplus && (mx - pred_x) == 1 && (my - pred_y) == 1)
skip_bits1(&s->gb); /* Bit stuffing to prevent PSC */
} else {
s->current_picture.mb_type[xy]= MB_TYPE_8x8 | MB_TYPE_L0;
s->current_picture.f.mb_type[xy] = MB_TYPE_8x8 | MB_TYPE_L0;
s->mv_type = MV_TYPE_8X8;
for(i=0;i<4;i++) {
mot_val = h263_pred_motion(s, i, 0, &pred_x, &pred_y);
@@ -704,8 +704,8 @@ int ff_h263_decode_mb(MpegEncContext *s,
} else if(s->pict_type==AV_PICTURE_TYPE_B) {
int mb_type;
const int stride= s->b8_stride;
int16_t *mot_val0 = s->current_picture.motion_val[0][ 2*(s->mb_x + s->mb_y*stride) ];
int16_t *mot_val1 = s->current_picture.motion_val[1][ 2*(s->mb_x + s->mb_y*stride) ];
int16_t *mot_val0 = s->current_picture.f.motion_val[0][2 * (s->mb_x + s->mb_y * stride)];
int16_t *mot_val1 = s->current_picture.f.motion_val[1][2 * (s->mb_x + s->mb_y * stride)];
// const int mv_xy= s->mb_x + 1 + s->mb_y * s->mb_stride;
//FIXME ugly
@@ -788,7 +788,7 @@ int ff_h263_decode_mb(MpegEncContext *s,
}
}
s->current_picture.mb_type[xy]= mb_type;
s->current_picture.f.mb_type[xy] = mb_type;
} else { /* I-Frame */
do{
cbpc = get_vlc2(&s->gb, ff_h263_intra_MCBPC_vlc.table, INTRA_MCBPC_VLC_BITS, 2);
@@ -803,11 +803,11 @@ int ff_h263_decode_mb(MpegEncContext *s,
dquant = cbpc & 4;
s->mb_intra = 1;
intra:
s->current_picture.mb_type[xy]= MB_TYPE_INTRA;
s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA;
if (s->h263_aic) {
s->ac_pred = get_bits1(&s->gb);
if(s->ac_pred){
s->current_picture.mb_type[xy]= MB_TYPE_INTRA | MB_TYPE_ACPRED;
s->current_picture.f.mb_type[xy] = MB_TYPE_INTRA | MB_TYPE_ACPRED;
s->h263_aic_dir = get_bits1(&s->gb);
}
@@ -889,7 +889,7 @@ int h263_decode_picture_header(MpegEncContext *s)
i = get_bits(&s->gb, 8); /* picture timestamp */
if( (s->picture_number&~0xFF)+i < s->picture_number)
i+= 256;
s->current_picture_ptr->pts=
s->current_picture_ptr->f.pts =
s->picture_number= (s->picture_number&~0xFF) + i;
/* PTYPE starts here */
@@ -275,7 +275,7 @@ void h263_encode_gob_header(MpegEncContext * s, int mb_line)
*/
void ff_clean_h263_qscales(MpegEncContext *s){
int i;
int8_t * const qscale_table= s->current_picture.qscale_table;
int8_t * const qscale_table = s->current_picture.f.qscale_table;
ff_init_qscale_tab(s);
@@ -529,8 +529,8 @@ void h263_encode_mb(MpegEncContext * s,
/* motion vectors: 8x8 mode*/
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
motion_x= s->current_picture.motion_val[0][ s->block_index[i] ][0];
motion_y= s->current_picture.motion_val[0][ s->block_index[i] ][1];
motion_x = s->current_picture.f.motion_val[0][s->block_index[i]][0];
motion_y = s->current_picture.f.motion_val[0][s->block_index[i]][1];
if (!s->umvplus) {
ff_h263_encode_motion_vector(s, motion_x - pred_x,
motion_y - pred_y, 1);
This diff is collapsed.
@@ -217,20 +217,20 @@ static int mpeg_decode_mb(MpegEncContext *s,
if (s->mb_skip_run-- != 0) {
if (s->pict_type == AV_PICTURE_TYPE_P) {
s->mb_skipped = 1;
s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = MB_TYPE_SKIP | MB_TYPE_L0 | MB_TYPE_16x16;
} else {
int mb_type;
if(s->mb_x)
mb_type= s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1];
mb_type = s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1];
else
mb_type= s->current_picture.mb_type[ s->mb_width + (s->mb_y-1)*s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
mb_type = s->current_picture.f.mb_type[s->mb_width + (s->mb_y - 1) * s->mb_stride - 1]; // FIXME not sure if this is allowed in MPEG at all
if(IS_INTRA(mb_type))
return -1;
s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]=
s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride] =
mb_type | MB_TYPE_SKIP;
// assert(s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
// assert(s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride - 1]&(MB_TYPE_16x16|MB_TYPE_16x8));
if((s->mv[0][0][0]|s->mv[0][0][1]|s->mv[1][0][0]|s->mv[1][0][1])==0)
s->mb_skipped = 1;
@@ -581,7 +581,7 @@ static int mpeg_decode_mb(MpegEncContext *s,
}
}
s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ]= mb_type;
s->current_picture.f.mb_type[s->mb_x + s->mb_y * s->mb_stride] = mb_type;
return 0;
}
@@ -1423,8 +1423,8 @@ static int mpeg1_decode_picture(AVCodecContext *avctx,
s->mpeg_f_code[1][0] = f_code;
s->mpeg_f_code[1][1] = f_code;
}
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
s->current_picture.f.pict_type = s->pict_type;
s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
if(avctx->debug & FF_DEBUG_PICT_INFO)
av_log(avctx, AV_LOG_DEBUG, "vbv_delay %d, ref %d type:%d\n", vbv_delay, ref, s->pict_type);
@@ -1577,8 +1577,8 @@ static void mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
s->pict_type= AV_PICTURE_TYPE_P;
}else
s->pict_type= AV_PICTURE_TYPE_B;
s->current_picture.pict_type= s->pict_type;
s->current_picture.key_frame= s->pict_type == AV_PICTURE_TYPE_I;
s->current_picture.f.pict_type = s->pict_type;
s->current_picture.f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
}
s->intra_dc_precision = get_bits(&s->gb, 2);
s->picture_structure = get_bits(&s->gb, 2);
@@ -1655,19 +1655,19 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
ff_er_frame_start(s);
/* first check if we must repeat the frame */
s->current_picture_ptr->repeat_pict = 0;
s->current_picture_ptr->f.repeat_pict = 0;
if (s->repeat_first_field) {
if (s->progressive_sequence) {
if (s->top_field_first)
s->current_picture_ptr->repeat_pict = 4;
s->current_picture_ptr->f.repeat_pict = 4;
else
s->current_picture_ptr->repeat_pict = 2;
s->current_picture_ptr->f.repeat_pict = 2;
} else if (s->progressive_frame) {
s->current_picture_ptr->repeat_pict = 1;
s->current_picture_ptr->f.repeat_pict = 1;
}
}
*s->current_picture_ptr->pan_scan= s1->pan_scan;
*s->current_picture_ptr->f.pan_scan = s1->pan_scan;
if (HAVE_PTHREADS && (avctx->active_thread_type & FF_THREAD_FRAME))
ff_thread_finish_setup(avctx);
@@ -1680,9 +1680,9 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
}
for(i=0; i<4; i++){
s->current_picture.data[i] = s->current_picture_ptr->data[i];
s->current_picture.f.data[i] = s->current_picture_ptr->f.data[i];
if(s->picture_structure == PICT_BOTTOM_FIELD){
s->current_picture.data[i] += s->current_picture_ptr->linesize[i];
s->current_picture.f.data[i] += s->current_picture_ptr->f.linesize[i];
}
}
}
@@ -1804,7 +1804,7 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
if(mpeg_decode_mb(s, s->block) < 0)
return -1;
if(s->current_picture.motion_val[0] && !s->encoding){ //note motion_val is normally NULL unless we want to extract the MVs
if (s->current_picture.f.motion_val[0] && !s->encoding) { //note motion_val is normally NULL unless we want to extract the MVs
const int wrap = s->b8_stride;
int xy = s->mb_x*2 + s->mb_y*2*wrap;
int b8_xy= 4*(s->mb_x + s->mb_y*s->mb_stride);
@@ -1822,12 +1822,12 @@ static int mpeg_decode_slice(Mpeg1Context *s1, int mb_y,
motion_y = s->mv[dir][i][1];
}
s->current_picture.motion_val[dir][xy ][0] = motion_x;
s->current_picture.motion_val[dir][xy ][1] = motion_y;
s->current_picture.motion_val[dir][xy + 1][0] = motion_x;
s->current_picture.motion_val[dir][xy + 1][1] = motion_y;
s->current_picture.ref_index [dir][b8_xy ]=
s->current_picture.ref_index [dir][b8_xy + 1]= s->field_select[dir][i];
s->current_picture.f.motion_val[dir][xy ][0] = motion_x;
s->current_picture.f.motion_val[dir][xy ][1] = motion_y;
s->current_picture.f.motion_val[dir][xy + 1][0] = motion_x;
s->current_picture.f.motion_val[dir][xy + 1][1] = motion_y;
s->current_picture.f.ref_index [dir][b8_xy ] =
s->current_picture.f.ref_index [dir][b8_xy + 1] = s->field_select[dir][i];
assert(s->field_select[dir][i]==0 || s->field_select[dir][i]==1);
}
xy += wrap;
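The stores above spread one 16x16 motion vector over the 2x2 grid of 8x8 motion_val slots it covers, advancing by the b8-row stride between rows. A self-contained version of the same store pattern (names are illustrative):

#include <stdint.h>

static void fill_mv_16x16(int16_t (*motion_val)[2], int xy, int wrap,
                          int motion_x, int motion_y)
{
    for (int row = 0; row < 2; row++, xy += wrap) {
        motion_val[xy    ][0] = motion_x;   /* left 8x8 block  */
        motion_val[xy    ][1] = motion_y;
        motion_val[xy + 1][0] = motion_x;   /* right 8x8 block */
        motion_val[xy + 1][1] = motion_y;
    }
}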
......@@ -1991,7 +1991,7 @@ static int slice_end(AVCodecContext *avctx, AVFrame *pict)
if (/*s->mb_y<<field_pic == s->mb_height &&*/ !s->first_field) {
/* end of image */
s->current_picture_ptr->qscale_type= FF_QSCALE_TYPE_MPEG2;
s->current_picture_ptr->f.qscale_type = FF_QSCALE_TYPE_MPEG2;
ff_er_frame_end(s);
......
......@@ -200,7 +200,7 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA)
if (s->current_picture.key_frame) {
if (s->current_picture.f.key_frame) {
AVRational framerate= ff_frame_rate_tab[s->frame_rate_index];
/* mpeg1 header repeated every gop */
......@@ -287,9 +287,9 @@ static void mpeg1_encode_sequence_header(MpegEncContext *s)
/* time code : we must convert from the real frame rate to a
fake mpeg frame rate in case of low frame rate */
fps = (framerate.num + framerate.den/2)/ framerate.den;
time_code = s->current_picture_ptr->coded_picture_number + s->avctx->timecode_frame_start;
time_code = s->current_picture_ptr->f.coded_picture_number + s->avctx->timecode_frame_start;
s->gop_picture_number = s->current_picture_ptr->coded_picture_number;
s->gop_picture_number = s->current_picture_ptr->f.coded_picture_number;
if (s->avctx->flags2 & CODEC_FLAG2_DROP_FRAME_TIMECODE) {
/* only works for NTSC 29.97 */
int d = time_code / 17982;
......@@ -396,7 +396,7 @@ void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
if (s->progressive_sequence) {
put_bits(&s->pb, 1, 0); /* no repeat */
} else {
put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first);
put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
}
/* XXX: optimize the generation of this flag with entropy
measures */
......
......@@ -89,7 +89,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my,
uint16_t time_pb= s->pb_time;
int p_mx, p_my;
p_mx= s->next_picture.motion_val[0][xy][0];
p_mx = s->next_picture.f.motion_val[0][xy][0];
if((unsigned)(p_mx + tab_bias) < tab_size){
s->mv[0][i][0] = s->direct_scale_mv[0][p_mx + tab_bias] + mx;
s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
......@@ -99,7 +99,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my,
s->mv[1][i][0] = mx ? s->mv[0][i][0] - p_mx
: p_mx*(time_pb - time_pp)/time_pp;
}
p_my= s->next_picture.motion_val[0][xy][1];
p_my = s->next_picture.f.motion_val[0][xy][1];
if((unsigned)(p_my + tab_bias) < tab_size){
s->mv[0][i][1] = s->direct_scale_mv[0][p_my + tab_bias] + my;
s->mv[1][i][1] = my ? s->mv[0][i][1] - p_my
......@@ -120,7 +120,7 @@ static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx, int my,
*/
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
const int mb_index= s->mb_x + s->mb_y*s->mb_stride;
const int colocated_mb_type= s->next_picture.mb_type[mb_index];
const int colocated_mb_type = s->next_picture.f.mb_type[mb_index];
uint16_t time_pp;
uint16_t time_pb;
int i;
......@@ -137,7 +137,7 @@ int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my){
} else if(IS_INTERLACED(colocated_mb_type)){
s->mv_type = MV_TYPE_FIELD;
for(i=0; i<2; i++){
int field_select= s->next_picture.ref_index[0][4*mb_index + 2*i];
int field_select = s->next_picture.f.ref_index[0][4 * mb_index + 2 * i];
s->field_select[0][i]= field_select;
s->field_select[1][i]= i;
if(s->top_field_first){
......
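The direct_scale_mv tables used above are a precomputed form of the MPEG-4 direct-mode scaling that the fallback branches spell out. A plain-arithmetic sketch of one component, matching the fallback expressions in the hunk (function and parameter names are illustrative, not the codec's API):

/* Scale the co-located P-frame vector component by the temporal
 * distances, then apply the coded delta. */
static void direct_mv_component(int p_mv, int delta,
                                int time_pb, int time_pp,
                                int *fwd, int *bwd)
{
    *fwd = p_mv * time_pb / time_pp + delta;
    *bwd = delta ? *fwd - p_mv
                 : p_mv * (time_pb - time_pp) / time_pp;
}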

......@@ -124,7 +124,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], const
{
int score= 0;
int i, n;
int8_t * const qscale_table= s->current_picture.qscale_table;
int8_t * const qscale_table = s->current_picture.f.qscale_table;
memcpy(zigzag_last_index, s->block_last_index, sizeof(int)*6);
......@@ -201,7 +201,7 @@ static inline int decide_ac_pred(MpegEncContext * s, DCTELEM block[6][64], const
*/
void ff_clean_mpeg4_qscales(MpegEncContext *s){
int i;
int8_t * const qscale_table= s->current_picture.qscale_table;
int8_t * const qscale_table = s->current_picture.f.qscale_table;
ff_clean_h263_qscales(s);
......@@ -457,7 +457,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
assert(mb_type>=0);
/* nothing to do if this MB was skipped in the next P Frame */
if(s->next_picture.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]){ //FIXME avoid DCT & ...
if (s->next_picture.f.mbskip_table[s->mb_y * s->mb_stride + s->mb_x]) { //FIXME avoid DCT & ...
s->skip_count++;
s->mv[0][0][0]=
s->mv[0][0][1]=
......@@ -587,7 +587,7 @@ void mpeg4_encode_mb(MpegEncContext * s,
y= s->mb_y*16;
offset= x + y*s->linesize;
p_pic= s->new_picture.data[0] + offset;
p_pic = s->new_picture.f.data[0] + offset;
s->mb_skipped=1;
for(i=0; i<s->max_b_frames; i++){
......@@ -595,10 +595,11 @@ void mpeg4_encode_mb(MpegEncContext * s,
int diff;
Picture *pic= s->reordered_input_picture[i+1];
if(pic==NULL || pic->pict_type!=AV_PICTURE_TYPE_B) break;
if (pic == NULL || pic->f.pict_type != AV_PICTURE_TYPE_B)
break;
b_pic= pic->data[0] + offset;
if(pic->type != FF_BUFFER_TYPE_SHARED)
b_pic = pic->f.data[0] + offset;
if (pic->f.type != FF_BUFFER_TYPE_SHARED)
b_pic+= INPLACE_OFFSET;
if(x+16 > s->width || y+16 > s->height){
......@@ -716,8 +717,8 @@ void mpeg4_encode_mb(MpegEncContext * s,
/* motion vectors: 8x8 mode*/
h263_pred_motion(s, i, 0, &pred_x, &pred_y);
ff_h263_encode_motion_vector(s, s->current_picture.motion_val[0][ s->block_index[i] ][0] - pred_x,
s->current_picture.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
ff_h263_encode_motion_vector(s, s->current_picture.f.motion_val[0][ s->block_index[i] ][0] - pred_x,
s->current_picture.f.motion_val[0][ s->block_index[i] ][1] - pred_y, s->f_code);
}
}
......@@ -826,9 +827,9 @@ static void mpeg4_encode_gop_header(MpegEncContext * s){
put_bits(&s->pb, 16, 0);
put_bits(&s->pb, 16, GOP_STARTCODE);
time= s->current_picture_ptr->pts;
time = s->current_picture_ptr->f.pts;
if(s->reordered_input_picture[1])
time= FFMIN(time, s->reordered_input_picture[1]->pts);
time = FFMIN(time, s->reordered_input_picture[1]->f.pts);
time= time*s->avctx->time_base.num;
s->last_time_base= FFUDIV(time, s->avctx->time_base.den);
......@@ -1036,7 +1037,7 @@ void mpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
}
put_bits(&s->pb, 3, 0); /* intra dc VLC threshold */
if(!s->progressive_sequence){
put_bits(&s->pb, 1, s->current_picture_ptr->top_field_first);
put_bits(&s->pb, 1, s->current_picture_ptr->f.top_field_first);
put_bits(&s->pb, 1, s->alternate_scan);
}
//FIXME sprite stuff
......
......@@ -28,6 +28,7 @@
#ifndef AVCODEC_MPEGVIDEO_H
#define AVCODEC_MPEGVIDEO_H
#include "avcodec.h"
#include "dsputil.h"
#include "get_bits.h"
#include "put_bits.h"
......@@ -82,7 +83,7 @@ struct MpegEncContext;
* Picture.
*/
typedef struct Picture{
FF_COMMON_FRAME
struct AVFrame f;
/**
* halfpel luma planes.
......
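This one-line change in mpegvideo.h is what the rest of the commit follows from: the fields that FF_COMMON_FRAME used to paste into Picture now live in an embedded AVFrame named f, so every former pic->field access becomes pic->f.field. A reduced sketch of the pattern with stand-in structs (not the real definitions):

#include <stdint.h>

typedef struct AVFrame {        /* stand-in for the real AVFrame */
    uint8_t *data[4];
    int      key_frame;
} AVFrame;

typedef struct Picture {
    struct AVFrame f;           /* replaces the FF_COMMON_FRAME macro */
    uint8_t *halfpel_planes[4]; /* codec-private extras stay outside f */
} Picture;

static int is_key(const Picture *pic)
{
    return pic->f.key_frame;    /* was pic->key_frame before this commit */
}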
......@@ -255,8 +255,8 @@ if(s->quarter_sample)
#endif
v_edge_pos = s->v_edge_pos >> field_based;
linesize = s->current_picture.linesize[0] << field_based;
uvlinesize = s->current_picture.linesize[1] << field_based;
linesize = s->current_picture.f.linesize[0] << field_based;
uvlinesize = s->current_picture.f.linesize[1] << field_based;
dxy = ((motion_y & 1) << 1) | (motion_x & 1);
src_x = s->mb_x* 16 + (motion_x >> 1);
......@@ -657,30 +657,30 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
assert(!s->mb_skipped);
memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy ], sizeof(int16_t)*4);
memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
memcpy(mv_cache[1][1], s->current_picture.f.motion_val[0][mot_xy ], sizeof(int16_t) * 4);
memcpy(mv_cache[2][1], s->current_picture.f.motion_val[0][mot_xy + mot_stride], sizeof(int16_t) * 4);
memcpy(mv_cache[3][1], s->current_picture.f.motion_val[0][mot_xy + mot_stride], sizeof(int16_t) * 4);
if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
if (mb_y == 0 || IS_INTRA(s->current_picture.f.mb_type[xy - s->mb_stride])) {
memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
}else{
memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
memcpy(mv_cache[0][1], s->current_picture.f.motion_val[0][mot_xy - mot_stride], sizeof(int16_t) * 4);
}
if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
if (mb_x == 0 || IS_INTRA(s->current_picture.f.mb_type[xy - 1])) {
AV_COPY32(mv_cache[1][0], mv_cache[1][1]);
AV_COPY32(mv_cache[2][0], mv_cache[2][1]);
}else{
AV_COPY32(mv_cache[1][0], s->current_picture.motion_val[0][mot_xy-1]);
AV_COPY32(mv_cache[2][0], s->current_picture.motion_val[0][mot_xy-1+mot_stride]);
AV_COPY32(mv_cache[1][0], s->current_picture.f.motion_val[0][mot_xy - 1]);
AV_COPY32(mv_cache[2][0], s->current_picture.f.motion_val[0][mot_xy - 1 + mot_stride]);
}
if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
if (mb_x + 1 >= s->mb_width || IS_INTRA(s->current_picture.f.mb_type[xy + 1])) {
AV_COPY32(mv_cache[1][3], mv_cache[1][2]);
AV_COPY32(mv_cache[2][3], mv_cache[2][2]);
}else{
AV_COPY32(mv_cache[1][3], s->current_picture.motion_val[0][mot_xy+2]);
AV_COPY32(mv_cache[2][3], s->current_picture.motion_val[0][mot_xy+2+mot_stride]);
AV_COPY32(mv_cache[1][3], s->current_picture.f.motion_val[0][mot_xy + 2]);
AV_COPY32(mv_cache[2][3], s->current_picture.f.motion_val[0][mot_xy + 2 + mot_stride]);
}
mx = 0;
......@@ -817,7 +817,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
}
} else {
if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
ref_picture= s->current_picture_ptr->data;
ref_picture = s->current_picture_ptr->f.data;
}
mpeg_motion(s, dest_y, dest_cb, dest_cr,
......@@ -834,7 +834,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
|| s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
ref2picture= ref_picture;
}else{
ref2picture= s->current_picture_ptr->data;
ref2picture = s->current_picture_ptr->f.data;
}
mpeg_motion(s, dest_y, dest_cb, dest_cr,
......@@ -871,7 +871,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
//opposite parity is always in the same frame if this is second field
if(!s->first_field){
ref_picture = s->current_picture_ptr->data;
ref_picture = s->current_picture_ptr->f.data;
}
}
}
......
......@@ -41,7 +41,7 @@
*/
void ff_xvmc_init_block(MpegEncContext *s)
{
struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.data[2];
struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
assert(render && render->xvmc_id == AV_XVMC_ID);
s->block = (DCTELEM (*)[64])(render->data_blocks + render->next_free_data_block_num * 64);
......@@ -73,7 +73,7 @@ void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
*/
int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
{
struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.data[2];
struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
const int mb_block_count = 4 + (1 << s->chroma_format);
assert(avctx);
......@@ -113,7 +113,7 @@ int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
case AV_PICTURE_TYPE_I:
return 0; // no prediction from other frames
case AV_PICTURE_TYPE_B:
next = (struct xvmc_pix_fmt*)s->next_picture.data[2];
next = (struct xvmc_pix_fmt*)s->next_picture.f.data[2];
if (!next)
return -1;
if (next->xvmc_id != AV_XVMC_ID)
......@@ -121,7 +121,7 @@ int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
render->p_future_surface = next->p_surface;
// no return here, going to set forward prediction
case AV_PICTURE_TYPE_P:
last = (struct xvmc_pix_fmt*)s->last_picture.data[2];
last = (struct xvmc_pix_fmt*)s->last_picture.f.data[2];
if (!last)
last = render; // predict second field from the first
if (last->xvmc_id != AV_XVMC_ID)
......@@ -141,7 +141,7 @@ return -1;
*/
void ff_xvmc_field_end(MpegEncContext *s)
{
struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.data[2];
struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
assert(render);
if (render->filled_mv_blocks_num > 0)
......@@ -179,10 +179,10 @@ void ff_xvmc_decode_mb(MpegEncContext *s)
// Do I need to export quant when I could not perform postprocessing?
// Anyway, it doesn't hurt.
s->current_picture.qscale_table[mb_xy] = s->qscale;
s->current_picture.f.qscale_table[mb_xy] = s->qscale;
// start of XVMC-specific code
render = (struct xvmc_pix_fmt*)s->current_picture.data[2];
render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
assert(render);
assert(render->xvmc_id == AV_XVMC_ID);
assert(render->mv_blocks);
......
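Background for the XvMC hunks: in this hwaccel path, data[2] carries a pointer to the renderer state rather than pixel data, so each function begins by casting it back and checking the magic id. A trimmed, self-contained sketch (the struct is a stand-in; the id value is the guard constant from libavcodec/xvmc.h):

#include <assert.h>
#include <stdint.h>

#define AV_XVMC_ID 0x1DC711C0            /* detects a corrupted struct */
struct xvmc_pix_fmt { unsigned xvmc_id; /* ... */ };

static struct xvmc_pix_fmt *get_render(uint8_t *frame_data[4])
{
    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt *) frame_data[2];
    assert(render && render->xvmc_id == AV_XVMC_ID);
    return render;
}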
......@@ -780,10 +780,10 @@ static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
}else{
if(n<4){
wrap= s->linesize;
dest= s->current_picture.data[0] + (((n>>1) + 2*s->mb_y) * 8* wrap ) + ((n&1) + 2*s->mb_x) * 8;
dest= s->current_picture.f.data[0] + (((n >> 1) + 2*s->mb_y) * 8* wrap ) + ((n & 1) + 2*s->mb_x) * 8;
}else{
wrap= s->uvlinesize;
dest= s->current_picture.data[n-3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
dest= s->current_picture.f.data[n - 3] + (s->mb_y * 8 * wrap) + s->mb_x * 8;
}
if(s->mb_x==0) a= (1024 + (scale>>1))/scale;
else a= get_dc(dest-8, wrap, scale*8);
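The pointer arithmetic in this hunk maps a block index n (0-3 luma, 4-5 chroma) to the top-left of its 8x8 destination in the current frame. A standalone restatement of that mapping (simplified; names are illustrative):

#include <stdint.h>

static uint8_t *block_dest(uint8_t *data[3], int n, int mb_x, int mb_y,
                           int linesize, int uvlinesize)
{
    if (n < 4)  /* luma: 2x2 grid of 8x8 blocks inside the 16x16 MB */
        return data[0] + ((n >> 1) + 2 * mb_y) * 8 * linesize
                       + ((n & 1)  + 2 * mb_x) * 8;
    /* chroma: one 8x8 block per plane (data[1] = Cb, data[2] = Cr) */
    return data[n - 3] + mb_y * 8 * uvlinesize + mb_x * 8;
}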
......@@ -1172,7 +1172,7 @@ static int msmpeg4v34_decode_mb(MpegEncContext *s, DCTELEM block[6][64])
{
int cbp, code, i;
uint8_t *coded_val;
uint32_t * const mb_type_ptr= &s->current_picture.mb_type[ s->mb_x + s->mb_y*s->mb_stride ];
uint32_t * const mb_type_ptr = &s->current_picture.f.mb_type[s->mb_x + s->mb_y*s->mb_stride];
if (s->pict_type == AV_PICTURE_TYPE_P) {
if (s->use_skip_mb_code) {
......
......@@ -408,9 +408,10 @@ static void release_delayed_buffers(PerThreadContext *p)
FrameThreadContext *fctx = p->parent;
while (p->num_released_buffers > 0) {
AVFrame *f = &p->released_buffers[--p->num_released_buffers];
AVFrame *f;
pthread_mutex_lock(&fctx->buffer_mutex);
f = &p->released_buffers[--p->num_released_buffers];
free_progress(f);
f->thread_opaque = NULL;
......@@ -746,9 +747,12 @@ void ff_thread_flush(AVCodecContext *avctx)
if (!avctx->thread_opaque) return;
park_frame_worker_threads(fctx, avctx->thread_count);
if (fctx->prev_thread)
update_context_from_thread(fctx->threads->avctx, fctx->prev_thread->avctx, 0);
if (fctx->prev_thread) {
if (fctx->prev_thread != &fctx->threads[0])
update_context_from_thread(fctx->threads[0].avctx, fctx->prev_thread->avctx, 0);
if (avctx->codec->flush)
avctx->codec->flush(fctx->threads[0].avctx);
}
fctx->next_decoding = fctx->next_finished = 0;
fctx->delaying = 1;
......@@ -836,6 +840,7 @@ int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f)
void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
{
PerThreadContext *p = avctx->thread_opaque;
FrameThreadContext *fctx;
if (!(avctx->active_thread_type&FF_THREAD_FRAME)) {
avctx->release_buffer(avctx, f);
......@@ -851,7 +856,10 @@ void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p, %d buffers used\n",
f, f->owner->internal_buffer_count);
fctx = p->parent;
pthread_mutex_lock(&fctx->buffer_mutex);
p->released_buffers[p->num_released_buffers++] = *f;
pthread_mutex_unlock(&fctx->buffer_mutex);
memset(f->data, 0, sizeof(f->data));
}
......
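These pthread.c hunks implement the "proper locking around release_buffer calls" item: the producer (ff_thread_release_buffer, user thread) and the consumer (release_delayed_buffers, worker thread) now serialize access to released_buffers through the same buffer_mutex. A reduced sketch of the pattern with simplified types (the real code locks per loop iteration):

#include <pthread.h>
#include <string.h>

typedef struct { void *data[4]; } Frame;     /* stand-in for AVFrame */

static pthread_mutex_t buffer_mutex = PTHREAD_MUTEX_INITIALIZER;
static Frame released_buffers[34];
static int   num_released_buffers;

/* Producer: the user thread queues a frame for delayed release. */
static void queue_release(const Frame *f)
{
    pthread_mutex_lock(&buffer_mutex);
    released_buffers[num_released_buffers++] = *f;
    pthread_mutex_unlock(&buffer_mutex);
}

/* Consumer: the worker thread drains the queue under the same lock. */
static void drain_released(void)
{
    pthread_mutex_lock(&buffer_mutex);
    while (num_released_buffers > 0) {
        Frame *f = &released_buffers[--num_released_buffers];
        memset(f->data, 0, sizeof(f->data)); /* real release goes here */
    }
    pthread_mutex_unlock(&buffer_mutex);
}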
......@@ -44,9 +44,9 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f
void ff_write_pass1_stats(MpegEncContext *s){
snprintf(s->avctx->stats_out, 256, "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%d var:%d icount:%d skipcount:%d hbits:%d;\n",
s->current_picture_ptr->display_picture_number, s->current_picture_ptr->coded_picture_number, s->pict_type,
s->current_picture.quality, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits,
s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count, s->skip_count, s->header_bits);
s->current_picture_ptr->f.display_picture_number, s->current_picture_ptr->f.coded_picture_number, s->pict_type,
s->current_picture.f.quality, s->i_tex_bits, s->p_tex_bits, s->mv_bits, s->misc_bits,
s->f_code, s->b_code, s->current_picture.mc_mb_var_sum, s->current_picture.mb_var_sum, s->i_count, s->skip_count, s->header_bits);
}
static inline double qp2bits(RateControlEntry *rce, double qp){
......@@ -707,10 +707,10 @@ float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
//if(dts_pic)
// av_log(NULL, AV_LOG_ERROR, "%Ld %Ld %Ld %d\n", s->current_picture_ptr->pts, s->user_specified_pts, dts_pic->pts, picture_number);
if(!dts_pic || dts_pic->pts == AV_NOPTS_VALUE)
if (!dts_pic || dts_pic->f.pts == AV_NOPTS_VALUE)
wanted_bits= (uint64_t)(s->bit_rate*(double)picture_number/fps);
else
wanted_bits= (uint64_t)(s->bit_rate*(double)dts_pic->pts/fps);
wanted_bits = (uint64_t)(s->bit_rate*(double)dts_pic->f.pts / fps);
}
diff= s->total_bits - wanted_bits;
......
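The ratecontrol hunk only moves pts behind the f member; the target itself is unchanged: the bits the encoder should have spent by a given timestamp are the bitrate times the elapsed stream time. A toy restatement (names are illustrative):

#include <stdint.h>

static uint64_t wanted_bits(int64_t bit_rate, double pts, double fps)
{
    return (uint64_t)(bit_rate * pts / fps);
}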
......@@ -142,7 +142,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
int mbtype = s->current_picture_ptr->mb_type[mb_pos];
int mbtype = s->current_picture_ptr->f.mb_type[mb_pos];
if(IS_INTRA(mbtype) || IS_SEPARATE_DC(mbtype))
r->deblock_coefs[mb_pos] = 0xFFFF;
if(IS_INTRA(mbtype))
......@@ -154,11 +154,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
*/
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]];
if(mb_x)
left_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - 1]];
left_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - 1]];
for(j = 0; j < 16; j += 4){
Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize + 4 * !mb_x;
for(i = !mb_x; i < 4; i++, Y += 4){
int ij = i + j;
loc_lim = 0;
......@@ -178,7 +178,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
if(mb_x)
left_cbp = (r->cbp_chroma[mb_pos - 1] >> (k*4)) & 0xF;
for(j = 0; j < 8; j += 4){
C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
C = s->current_picture_ptr->f.data[k + 1] + mb_x*8 + (row*8 + j) * s->uvlinesize + 4 * !mb_x;
for(i = !mb_x; i < 2; i++, C += 4){
int ij = i + (j >> 1);
loc_lim = 0;
......@@ -196,11 +196,11 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
}
mb_pos = row * s->mb_stride;
for(mb_x = 0; mb_x < s->mb_width; mb_x++, mb_pos++){
cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos]];
cur_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos]];
if(row)
top_lim = rv30_loop_filt_lim[s->current_picture_ptr->qscale_table[mb_pos - s->mb_stride]];
top_lim = rv30_loop_filt_lim[s->current_picture_ptr->f.qscale_table[mb_pos - s->mb_stride]];
for(j = 4*!row; j < 16; j += 4){
Y = s->current_picture_ptr->data[0] + mb_x*16 + (row*16 + j) * s->linesize;
Y = s->current_picture_ptr->f.data[0] + mb_x*16 + (row*16 + j) * s->linesize;
for(i = 0; i < 4; i++, Y += 4){
int ij = i + j;
loc_lim = 0;
......@@ -220,7 +220,7 @@ static void rv30_loop_filter(RV34DecContext *r, int row)
if(row)
top_cbp = (r->cbp_chroma[mb_pos - s->mb_stride] >> (k*4)) & 0xF;
for(j = 4*!row; j < 8; j += 4){
C = s->current_picture_ptr->data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
C = s->current_picture_ptr->f.data[k+1] + mb_x*8 + (row*8 + j) * s->uvlinesize;
for(i = 0; i < 2; i++, C += 4){
int ij = i + (j >> 1);
loc_lim = 0;
......
......@@ -689,12 +689,12 @@ static int svq1_decode_frame(AVCodecContext *avctx,
linesize= s->uvlinesize;
}
current = s->current_picture.data[i];
current = s->current_picture.f.data[i];
if(s->pict_type==AV_PICTURE_TYPE_B){
previous = s->next_picture.data[i];
previous = s->next_picture.f.data[i];
}else{
previous = s->last_picture.data[i];
previous = s->last_picture.f.data[i];
}
if (s->pict_type == AV_PICTURE_TYPE_I) {
......
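The svq1 hunk selects the prediction source per plane: B-frames read from the future reference, everything else from the past one. A trivial standalone version of that selection (the struct is a stand-in):

#include <stdint.h>

typedef struct { uint8_t *data[4]; } Planes;  /* stand-in for AVFrame */

static uint8_t *prediction_source(Planes *next, Planes *last,
                                  int is_b_frame, int plane)
{
    return is_b_frame ? next->data[plane] : last->data[plane];
}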