Commit 642a655f authored by Michael Niedermayer

Merge commit 'f1d8763a'

* commit 'f1d8763a':
  mpegvideo: allocate scratch buffers after linesize is known

Conflicts:
	libavcodec/mpegvideo.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents e84194f0 f1d8763a
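
For orientation: the merged change stops sizing the mpegvideo scratch buffers (edge_emu_buffer, me.scratchpad, rd_scratchpad, b_scratchpad, obmc_scratchpad) from s->width at init time and instead allocates them through the new ff_mpv_frame_size_alloc(s, linesize) on first use, once the real linesize is known (i.e. after get_buffer()). Callers that previously returned void (ff_cavs_init_pic, clone_slice, ff_update_duplicate_context) now return an error code so an allocation failure can propagate. Below is a condensed sketch of the caller-side pattern; the wrapper use_scratch_buffers() is hypothetical and merely stands in for alloc_frame_buffer(), clone_slice() and ff_update_duplicate_context() in the diff.

#include "mpegvideo.h"

/* Hypothetical helper illustrating the lazy, linesize-based allocation
 * pattern this merge introduces; every other identifier comes from the
 * diff below. Error handling is abbreviated. */
static int use_scratch_buffers(MpegEncContext *s, int linesize)
{
    int ret;

    /* Allocate on first use, from the real linesize known after
     * get_buffer(), instead of guessing from s->width at init time. */
    if (!s->edge_emu_buffer &&
        (ret = ff_mpv_frame_size_alloc(s, linesize)) < 0) {
        av_log(s->avctx, AV_LOG_ERROR,
               "failed to allocate context scratch buffers.\n");
        return ret;   /* callers now propagate the failure */
    }

    /* s->edge_emu_buffer, s->me.scratchpad, etc. are valid from here on. */
    return 0;
}
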
libavcodec/cavs.c
@@ -655,7 +655,9 @@ int ff_cavs_next_mb(AVSContext *h) {
  *
  ****************************************************************************/
 
-void ff_cavs_init_pic(AVSContext *h) {
+int ff_cavs_init_pic(AVSContext *h) {
+    MpegEncContext *s = &h->s;
+    int ret;
     int i;
 
     /* clear some predictors */
@@ -675,6 +677,14 @@ void ff_cavs_init_pic(AVSContext *h) {
     h->luma_scan[3] = 8*h->l_stride+8;
     h->mbx = h->mby = h->mbidx = 0;
     h->flags = 0;
+
+    if (!s->edge_emu_buffer &&
+        (ret = ff_mpv_frame_size_alloc(s, h->picture.f.linesize[0])) < 0) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "get_buffer() failed to allocate context scratch buffers.\n");
+        return ret;
+    }
+
+    return 0;
 }
 
 /*****************************************************************************
libavcodec/cavs.h
@@ -253,7 +253,7 @@ void ff_cavs_mv(AVSContext *h, enum cavs_mv_loc nP, enum cavs_mv_loc nC,
                 enum cavs_mv_pred mode, enum cavs_block size, int ref);
 void ff_cavs_init_mb(AVSContext *h);
 int  ff_cavs_next_mb(AVSContext *h);
-void ff_cavs_init_pic(AVSContext *h);
+int  ff_cavs_init_pic(AVSContext *h);
 void ff_cavs_init_top_lines(AVSContext *h);
 int  ff_cavs_init(AVCodecContext *avctx);
 int  ff_cavs_end (AVCodecContext *avctx);
libavcodec/cavsdec.c
@@ -966,7 +966,8 @@ static int decode_pic(AVSContext *h) {
     if ((ret = ff_get_buffer(s->avctx, &h->picture.f)) < 0)
         return ret;
-    ff_cavs_init_pic(h);
+    if ((ret = ff_cavs_init_pic(h)) < 0)
+        return ret;
     h->picture.poc = get_bits(&s->gb,8)*2;
 
     /* get temporal distances and MV scaling factors */
libavcodec/h264.c
@@ -2300,8 +2300,10 @@ static int field_end(H264Context *h, int in_setup)
 /**
  * Replicate H264 "master" context to thread contexts.
  */
-static void clone_slice(H264Context *dst, H264Context *src)
+static int clone_slice(H264Context *dst, H264Context *src)
 {
+    int ret;
+
     memcpy(dst->block_offset, src->block_offset, sizeof(dst->block_offset));
     dst->s.current_picture_ptr = src->s.current_picture_ptr;
     dst->s.current_picture     = src->s.current_picture;
@@ -2309,6 +2311,13 @@ static void clone_slice(H264Context *dst, H264Context *src)
     dst->s.uvlinesize          = src->s.uvlinesize;
     dst->s.first_field         = src->s.first_field;
 
+    if (!dst->s.edge_emu_buffer &&
+        (ret = ff_mpv_frame_size_alloc(&dst->s, dst->s.linesize))) {
+        av_log(dst->s.avctx, AV_LOG_ERROR,
+               "Failed to allocate scratch buffers\n");
+        return ret;
+    }
+
     dst->prev_poc_msb          = src->prev_poc_msb;
     dst->prev_poc_lsb          = src->prev_poc_lsb;
     dst->prev_frame_num_offset = src->prev_frame_num_offset;
@@ -2322,6 +2331,8 @@ static void clone_slice(H264Context *dst, H264Context *src)
     memcpy(dst->dequant4_coeff, src->dequant4_coeff, sizeof(src->dequant4_coeff));
     memcpy(dst->dequant8_coeff, src->dequant8_coeff, sizeof(src->dequant8_coeff));
+
+    return 0;
 }
 
 /**
@@ -2901,8 +2912,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
             ff_release_unused_pictures(s, 0);
         }
     }
-    if (h != h0)
-        clone_slice(h, h0);
+    if (h != h0 && (ret = clone_slice(h, h0)) < 0)
+        return ret;
 
     s->current_picture_ptr->frame_num = h->frame_num; // FIXME frame_num cleanup
libavcodec/mpeg12.c
@@ -2492,7 +2492,10 @@ static int decode_chunks(AVCodecContext *avctx,
                     thread_context->end_mb_y = s2->mb_height;
                     if (s->slice_count) {
                         s2->thread_context[s->slice_count-1]->end_mb_y = mb_y;
-                        ff_update_duplicate_context(thread_context, s2);
+                        ret = ff_update_duplicate_context(thread_context,
+                                                          s2);
+                        if (ret < 0)
+                            return ret;
                     }
                     init_get_bits(&thread_context->gb, buf_ptr, input_size*8);
                     s->slice_count++;
libavcodec/mpegvideo.c
@@ -232,12 +232,35 @@ static void free_frame_buffer(MpegEncContext *s, Picture *pic)
     av_freep(&pic->f.hwaccel_picture_private);
 }
 
+int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize)
+{
+    int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
+
+    // edge emu needs blocksize + filter length - 1
+    // (= 17x17 for halfpel / 21x21 for h264)
+    // linesize * interlaced * MBsize
+    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer, alloc_size * 4 * 21,
+                      fail);
+
+    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, alloc_size * 2 * 16 * 2,
+                      fail)
+    s->me.temp         = s->me.scratchpad;
+    s->rd_scratchpad   = s->me.scratchpad;
+    s->b_scratchpad    = s->me.scratchpad;
+    s->obmc_scratchpad = s->me.scratchpad + 16;
+
+    return 0;
+fail:
+    av_freep(&s->edge_emu_buffer);
+    return AVERROR(ENOMEM);
+}
+
 /**
  * Allocate a frame buffer
  */
 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
 {
-    int r;
+    int r, ret;
 
     if (s->avctx->hwaccel) {
         assert(!pic->f.hwaccel_picture_private);
@@ -279,6 +302,14 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
             return -1;
         }
     }
 
+    if (!s->edge_emu_buffer &&
+        (ret = ff_mpv_frame_size_alloc(s, pic->f.linesize[0])) < 0) {
+        av_log(s->avctx, AV_LOG_ERROR,
+               "get_buffer() failed to allocate context scratch buffers.\n");
+        free_frame_buffer(s, pic);
+        return ret;
+    }
+
     return 0;
 }
 
@@ -416,19 +447,13 @@ static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base)
     int yc_size = y_size + 2 * c_size;
     int i;
 
-    // edge emu needs blocksize + filter length - 1
-    // (= 17x17 for halfpel / 21x21 for h264)
-    FF_ALLOCZ_OR_GOTO(s->avctx, s->edge_emu_buffer,
-                      (s->width + 95) * 2 * 21 * 4, fail); // (width + edge + align)*interlaced*MBsize*tolerance
-    // FIXME should be linesize instead of s->width * 2
-    // but that is not known before get_buffer()
-    FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad,
-                      (s->width + 95) * 4 * 16 * 2 * sizeof(uint8_t), fail)
-    s->me.temp         = s->me.scratchpad;
-    s->rd_scratchpad   = s->me.scratchpad;
-    s->b_scratchpad    = s->me.scratchpad;
-    s->obmc_scratchpad = s->me.scratchpad + 16;
+    s->edge_emu_buffer =
+    s->me.scratchpad   =
+    s->me.temp         =
+    s->rd_scratchpad   =
+    s->b_scratchpad    =
+    s->obmc_scratchpad = NULL;
 
     if (s->encoding) {
         FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map,
                           ME_MAP_SIZE * sizeof(uint32_t), fail)
@@ -507,10 +532,10 @@ static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
 #undef COPY
 }
 
-void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
+int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
 {
     MpegEncContext bak;
-    int i;
+    int i, ret;
     // FIXME copy only needed parts
     // START_TIMER
     backup_duplicate_context(&bak, dst);
@@ -519,8 +544,15 @@ void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
     for (i = 0; i < 12; i++) {
         dst->pblocks[i] = &dst->block[i];
     }
 
+    if (!dst->edge_emu_buffer &&
+        (ret = ff_mpv_frame_size_alloc(dst, dst->linesize)) < 0) {
+        av_log(dst->avctx, AV_LOG_ERROR, "failed to allocate context "
+               "scratch buffers.\n");
+        return ret;
+    }
     // STOP_TIMER("update_duplicate_context")
     // about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
 
+    return 0;
 }
@@ -612,6 +644,19 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
                FF_INPUT_BUFFER_PADDING_SIZE);
     }
 
+    // linesize dependend scratch buffer allocation
+    if (!s->edge_emu_buffer)
+        if (s1->linesize) {
+            if (ff_mpv_frame_size_alloc(s, s1->linesize) < 0) {
+                av_log(s->avctx, AV_LOG_ERROR, "Failed to allocate context "
+                       "scratch buffers.\n");
+                return AVERROR(ENOMEM);
+            }
+        } else {
+            av_log(s->avctx, AV_LOG_ERROR, "Context scratch buffers could not "
+                   "be allocated due to unknown size.\n");
+        }
+
     // MPEG2/interlacing info
     memcpy(&s->progressive_sequence, &s1->progressive_sequence,
            (char *) &s1->rtp_mode - (char *) &s1->progressive_sequence);
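
As a rough illustration of the sizes ff_mpv_frame_size_alloc() produces (illustrative arithmetic with a hypothetical linesize of 1920 bytes; not part of the commit):

int linesize   = 1920;                                /* hypothetical   */
int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);   /* = 1984         */
int edge_emu   = alloc_size * 4 * 21;                 /* = 166656 bytes */
int me_scratch = alloc_size * 2 * 16 * 2;             /* = 126976 bytes */

Because the size now follows the buffer's actual linesize, it replaces the width-based over-allocation ((s->width + 95) * ...) that the old init_duplicate_context() code had to use before the linesize was known.
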
libavcodec/mpegvideo.h
@@ -771,6 +771,7 @@ void ff_MPV_common_defaults(MpegEncContext *s);
 void ff_MPV_decode_defaults(MpegEncContext *s);
 int ff_MPV_common_init(MpegEncContext *s);
+int ff_mpv_frame_size_alloc(MpegEncContext *s, int linesize);
 int ff_MPV_common_frame_size_change(MpegEncContext *s);
 void ff_MPV_common_end(MpegEncContext *s);
 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]);
@@ -794,7 +795,7 @@ void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix);
 void ff_release_unused_pictures(MpegEncContext *s, int remove_current);
 int ff_find_unused_picture(MpegEncContext *s, int shared);
 void ff_denoise_dct(MpegEncContext *s, DCTELEM *block);
-void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
+int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
 int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir);
 void ff_MPV_report_decode_progress(MpegEncContext *s);
 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src);
libavcodec/mpegvideo_enc.c
@@ -3228,7 +3228,7 @@ static void set_frame_distances(MpegEncContext * s){
 static int encode_picture(MpegEncContext *s, int picture_number)
 {
-    int i;
+    int i, ret;
     int bits;
     int context_count = s->slice_context_count;
@@ -3278,7 +3278,9 @@ static int encode_picture(MpegEncContext *s, int picture_number)
         s->mb_intra=0; //for the rate distortion & bit compare functions
         for(i=1; i<context_count; i++){
-            ff_update_duplicate_context(s->thread_context[i], s);
+            ret = ff_update_duplicate_context(s->thread_context[i], s);
+            if (ret < 0)
+                return ret;
         }
 
         if(ff_init_me(s)<0)