Commit cf06dee5 authored by Michael Niedermayer

libavcodec/snow: change AVFrame to pointers to AVFrame for ABI stability

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
parent 1cefdc39
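
The motivation, per the commit message, is ABI stability: with an AVFrame embedded by value, sizeof(AVFrame) (defined in libavutil) becomes part of SnowContext's layout, so any growth of AVFrame shifts the offsets of every member that follows; storing only an AVFrame * keeps the context layout independent of the frame's size. A minimal illustration of that layout dependence (hypothetical struct names, not code from this commit):

    /* Hypothetical illustration only: an embedded frame ties a struct's
     * layout to sizeof(AVFrame); a pointer member does not. */
    #include <libavutil/frame.h>

    struct embedded_ctx { AVFrame  frame; int counter; };  /* offsetof(counter) changes if AVFrame grows */
    struct pointer_ctx  { AVFrame *frame; int counter; };  /* offsetof(counter) stays fixed */
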
@@ -337,7 +337,7 @@ void ff_snow_pred_block(SnowContext *s, uint8_t *dst, uint8_t *tmp, int stride,
             }
         }
     }else{
-        uint8_t *src= s->last_picture[block->ref].data[plane_index];
+        uint8_t *src= s->last_picture[block->ref]->data[plane_index];
         const int scale= plane_index ? (2*s->mv_scale)>>s->chroma_h_shift : 2*s->mv_scale;
         int mx= block->mx*scale;
         int my= block->my*scale;
@@ -461,11 +461,11 @@ av_cold int ff_snow_common_init(AVCodecContext *avctx){
     for(i=0; i<MAX_REF_FRAMES; i++) {
         for(j=0; j<MAX_REF_FRAMES; j++)
             ff_scale_mv_ref[i][j] = 256*(i+1)/(j+1);
-        avcodec_get_frame_defaults(&s->last_picture[i]);
+        s->last_picture[i] = av_frame_alloc();
     }

-    avcodec_get_frame_defaults(&s->mconly_picture);
-    avcodec_get_frame_defaults(&s->current_picture);
+    s->mconly_picture = av_frame_alloc();
+    s->current_picture = av_frame_alloc();

     return 0;
 fail:
@@ -478,15 +478,15 @@ int ff_snow_common_init_after_header(AVCodecContext *avctx) {
     int ret, emu_buf_size;

     if(!s->scratchbuf) {
-        if ((ret = ff_get_buffer(s->avctx, &s->mconly_picture,
+        if ((ret = ff_get_buffer(s->avctx, s->mconly_picture,
                                  AV_GET_BUFFER_FLAG_REF)) < 0)
             return ret;
-        FF_ALLOCZ_OR_GOTO(avctx, s->scratchbuf, FFMAX(s->mconly_picture.linesize[0], 2*avctx->width+256)*7*MB_SIZE, fail);
-        emu_buf_size = FFMAX(s->mconly_picture.linesize[0], 2*avctx->width+256) * (2 * MB_SIZE + HTAPS_MAX - 1);
+        FF_ALLOCZ_OR_GOTO(avctx, s->scratchbuf, FFMAX(s->mconly_picture->linesize[0], 2*avctx->width+256)*7*MB_SIZE, fail);
+        emu_buf_size = FFMAX(s->mconly_picture->linesize[0], 2*avctx->width+256) * (2 * MB_SIZE + HTAPS_MAX - 1);
         FF_ALLOC_OR_GOTO(avctx, s->emu_edge_buffer, emu_buf_size, fail);
     }

-    if(s->mconly_picture.format != avctx->pix_fmt) {
+    if(s->mconly_picture->format != avctx->pix_fmt) {
         av_log(avctx, AV_LOG_ERROR, "pixel format changed\n");
         return AVERROR_INVALIDDATA;
     }
@@ -596,51 +596,51 @@ void ff_snow_release_buffer(AVCodecContext *avctx)
     SnowContext *s = avctx->priv_data;
     int i;

-    if(s->last_picture[s->max_ref_frames-1].data[0]){
-        av_frame_unref(&s->last_picture[s->max_ref_frames-1]);
+    if(s->last_picture[s->max_ref_frames-1]->data[0]){
+        av_frame_unref(s->last_picture[s->max_ref_frames-1]);
         for(i=0; i<9; i++)
             if(s->halfpel_plane[s->max_ref_frames-1][1+i/3][i%3])
-                av_free(s->halfpel_plane[s->max_ref_frames-1][1+i/3][i%3] - EDGE_WIDTH*(1+s->current_picture.linesize[i%3]));
+                av_free(s->halfpel_plane[s->max_ref_frames-1][1+i/3][i%3] - EDGE_WIDTH*(1+s->current_picture->linesize[i%3]));
     }
 }

 int ff_snow_frame_start(SnowContext *s){
-   AVFrame tmp;
+   AVFrame *tmp;
    int i, ret;
    int w= s->avctx->width; //FIXME round up to x16 ?
    int h= s->avctx->height;

-    if (s->current_picture.data[0] && !(s->avctx->flags&CODEC_FLAG_EMU_EDGE)) {
-        s->dsp.draw_edges(s->current_picture.data[0],
-                          s->current_picture.linesize[0], w , h ,
+    if (s->current_picture->data[0] && !(s->avctx->flags&CODEC_FLAG_EMU_EDGE)) {
+        s->dsp.draw_edges(s->current_picture->data[0],
+                          s->current_picture->linesize[0], w , h ,
                           EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
-        s->dsp.draw_edges(s->current_picture.data[1],
-                          s->current_picture.linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
+        s->dsp.draw_edges(s->current_picture->data[1],
+                          s->current_picture->linesize[1], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
                           EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
-        s->dsp.draw_edges(s->current_picture.data[2],
-                          s->current_picture.linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
+        s->dsp.draw_edges(s->current_picture->data[2],
+                          s->current_picture->linesize[2], w>>s->chroma_h_shift, h>>s->chroma_v_shift,
                           EDGE_WIDTH>>s->chroma_h_shift, EDGE_WIDTH>>s->chroma_v_shift, EDGE_TOP | EDGE_BOTTOM);
     }

     ff_snow_release_buffer(s->avctx);

-    av_frame_move_ref(&tmp, &s->last_picture[s->max_ref_frames-1]);
+    tmp= s->last_picture[s->max_ref_frames-1];
     for(i=s->max_ref_frames-1; i>0; i--)
-        av_frame_move_ref(&s->last_picture[i], &s->last_picture[i-1]);
+        s->last_picture[i] = s->last_picture[i-1];
     memmove(s->halfpel_plane+1, s->halfpel_plane, (s->max_ref_frames-1)*sizeof(void*)*4*4);
-    if(USE_HALFPEL_PLANE && s->current_picture.data[0]) {
-        if((ret = halfpel_interpol(s, s->halfpel_plane[0], &s->current_picture)) < 0)
+    if(USE_HALFPEL_PLANE && s->current_picture->data[0]) {
+        if((ret = halfpel_interpol(s, s->halfpel_plane[0], s->current_picture)) < 0)
             return ret;
     }
-    av_frame_move_ref(&s->last_picture[0], &s->current_picture);
-    av_frame_move_ref(&s->current_picture, &tmp);
+    s->last_picture[0] = s->current_picture;
+    s->current_picture = tmp;

     if(s->keyframe){
         s->ref_frames= 0;
     }else{
         int i;
-        for(i=0; i<s->max_ref_frames && s->last_picture[i].data[0]; i++)
-            if(i && s->last_picture[i-1].key_frame)
+        for(i=0; i<s->max_ref_frames && s->last_picture[i]->data[0]; i++)
+            if(i && s->last_picture[i-1]->key_frame)
                 break;
         s->ref_frames= i;
         if(s->ref_frames==0){
@@ -649,10 +649,10 @@ int ff_snow_frame_start(SnowContext *s){
         }
     }

-    if ((ret = ff_get_buffer(s->avctx, &s->current_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
+    if ((ret = ff_get_buffer(s->avctx, s->current_picture, AV_GET_BUFFER_FLAG_REF)) < 0)
         return ret;

-    s->current_picture.key_frame= s->keyframe;
+    s->current_picture->key_frame= s->keyframe;

     return 0;
 }
@@ -680,10 +680,10 @@ av_cold void ff_snow_common_end(SnowContext *s)
     for(i=0; i<MAX_REF_FRAMES; i++){
         av_freep(&s->ref_mvs[i]);
         av_freep(&s->ref_scores[i]);
-        if(s->last_picture[i].data[0]) {
-            av_assert0(s->last_picture[i].data[0] != s->current_picture.data[0]);
-            av_frame_unref(&s->last_picture[i]);
+        if(s->last_picture[i]->data[0]) {
+            av_assert0(s->last_picture[i]->data[0] != s->current_picture->data[0]);
         }
+        av_frame_free(&s->last_picture[i]);
     }

     for(plane_index=0; plane_index<3; plane_index++){
@@ -695,6 +695,6 @@ av_cold void ff_snow_common_end(SnowContext *s)
             }
         }
     }
-    av_frame_unref(&s->mconly_picture);
-    av_frame_unref(&s->current_picture);
+    av_frame_free(&s->mconly_picture);
+    av_frame_free(&s->current_picture);
 }
@@ -114,12 +114,12 @@ typedef struct SnowContext{
     VideoDSPContext vdsp;
     H264QpelContext h264qpel;
     SnowDWTContext dwt;
-    AVFrame new_picture;
-    AVFrame input_picture;              ///< new_picture with the internal linesizes
-    AVFrame current_picture;
-    AVFrame last_picture[MAX_REF_FRAMES];
+    AVFrame *new_picture;
+    AVFrame *input_picture;             ///< new_picture with the internal linesizes
+    AVFrame *current_picture;
+    AVFrame *last_picture[MAX_REF_FRAMES];
     uint8_t *halfpel_plane[MAX_REF_FRAMES][4][4];
-    AVFrame mconly_picture;
+    AVFrame *mconly_picture;
 // uint8_t q_context[16];
     uint8_t header_state[32];
     uint8_t block_state[128 + 32*128];
@@ -414,8 +414,8 @@ static av_always_inline void predict_slice(SnowContext *s, IDWTELEM *buf, int pl
     int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
     const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
     const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
-    int ref_stride= s->current_picture.linesize[plane_index];
-    uint8_t *dst8= s->current_picture.data[plane_index];
+    int ref_stride= s->current_picture->linesize[plane_index];
+    uint8_t *dst8= s->current_picture->data[plane_index];
     int w= p->width;
     int h= p->height;
     av_assert2(s->chroma_h_shift == s->chroma_v_shift); // obmc params assume squares
...
@@ -43,8 +43,8 @@ static av_always_inline void predict_slice_buffered(SnowContext *s, slice_buffer
     int block_h = plane_index ? block_size>>s->chroma_v_shift : block_size;
     const uint8_t *obmc = plane_index ? ff_obmc_tab[s->block_max_depth+s->chroma_h_shift] : ff_obmc_tab[s->block_max_depth];
     int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
-    int ref_stride= s->current_picture.linesize[plane_index];
-    uint8_t *dst8= s->current_picture.data[plane_index];
+    int ref_stride= s->current_picture->linesize[plane_index];
+    uint8_t *dst8= s->current_picture->data[plane_index];
     int w= p->width;
     int h= p->height;
@@ -403,7 +403,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     ff_init_range_decoder(c, buf, buf_size);
     ff_build_rac_states(c, 0.05*(1LL<<32), 256-8);

-    s->current_picture.pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
+    s->current_picture->pict_type= AV_PICTURE_TYPE_I; //FIXME I vs. P
     if(decode_header(s)<0)
         return -1;
     if ((res=ff_snow_common_init_after_header(avctx)) < 0)
@@ -449,8 +449,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
             for(y=0; y<h; y++){
                 for(x=0; x<w; x++){
-                    int v= s->current_picture.data[plane_index][y*s->current_picture.linesize[plane_index] + x];
-                    s->mconly_picture.data[plane_index][y*s->mconly_picture.linesize[plane_index] + x]= v;
+                    int v= s->current_picture->data[plane_index][y*s->current_picture->linesize[plane_index] + x];
+                    s->mconly_picture->data[plane_index][y*s->mconly_picture->linesize[plane_index] + x]= v;
                 }
             }
         }
@@ -548,9 +548,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
     ff_snow_release_buffer(avctx);

     if(!(s->avctx->debug&2048))
-        av_frame_ref(picture, &s->current_picture);
+        av_frame_ref(picture, s->current_picture);
     else
-        av_frame_ref(picture, &s->mconly_picture);
+        av_frame_ref(picture, s->mconly_picture);

     *got_frame = 1;
...
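
Taken together, the hunks above move SnowContext's frames to the heap-allocated AVFrame lifecycle: av_frame_alloc() at init, av_frame_unref()/av_frame_ref() while decoding, and av_frame_free() at teardown, with the old av_frame_move_ref() rotation replaced by plain pointer swaps. A condensed sketch of that lifecycle, using a made-up MyContext rather than the real SnowContext:

    /* Condensed, hypothetical sketch of the allocate/rotate/free pattern the
     * patch adopts; MyContext and its fields are illustrative, not Snow's. */
    #include <libavutil/error.h>
    #include <libavutil/frame.h>

    typedef struct MyContext {
        AVFrame *cur;
        AVFrame *last;
    } MyContext;

    static int my_init(MyContext *c)
    {
        c->cur  = av_frame_alloc();          /* replaces avcodec_get_frame_defaults(&...) */
        c->last = av_frame_alloc();
        if (!c->cur || !c->last) {
            av_frame_free(&c->cur);          /* safe on NULL, avoids leaking the other frame */
            av_frame_free(&c->last);
            return AVERROR(ENOMEM);
        }
        return 0;
    }

    static void my_rotate(MyContext *c)
    {
        AVFrame *tmp = c->last;              /* pointer swap replaces av_frame_move_ref() */
        c->last = c->cur;
        c->cur  = tmp;
        av_frame_unref(c->cur);              /* drop old references before reuse */
    }

    static void my_close(MyContext *c)
    {
        av_frame_free(&c->cur);              /* frees the frame structs themselves */
        av_frame_free(&c->last);
    }
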