Commit aec25b1c authored by Anton Khirnov

mpegvideo: split the encoding-only parts of frame_start() into a separate function

This introduces some code duplication. However, much of it should go
away once the decoders stop using MpegEncContext.
parent b318106f
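Schematically, the commit moves the s->encoding branches out of the shared ff_MPV_frame_start() and gives the encoder its own static frame_start(), accepting some duplicated picture setup for now. The toy program below is a minimal sketch of that pattern, not Libav code: the ToyContext struct, the toy_* names and the printf bodies are hypothetical stand-ins for MpegEncContext and the real picture-setup and noise-reduction logic.

#include <stdio.h>

/* Hypothetical stand-in for MpegEncContext. */
typedef struct ToyContext {
    int noise_reduction;   /* encoder-only option */
} ToyContext;

/* Shared/decoder path: after the split it no longer needs any
 * "if (s->encoding)" branches or an update_noise_reduction() call. */
static int toy_MPV_frame_start(ToyContext *s)
{
    (void)s;
    printf("decode: pick or allocate the current picture\n");
    return 0;
}

/* Encoder-local duplicate of the picture setup, plus the encoding-only
 * work that used to hide behind s->encoding checks; this is the
 * duplication the commit message accepts until the decoders stop
 * using MpegEncContext. */
static int toy_frame_start(ToyContext *s)
{
    printf("encode: pick or allocate the current picture\n");
    if (s->noise_reduction)
        printf("encode: update_noise_reduction()\n");
    return 0;
}

int main(void)
{
    ToyContext enc = { .noise_reduction = 1 };
    ToyContext dec = { .noise_reduction = 0 };

    /* Mirrors the new call site in ff_MPV_encode_picture(): the encoder
     * calls its own frame_start() and propagates the error code. */
    if (toy_frame_start(&enc) < 0)
        return 1;
    if (toy_MPV_frame_start(&dec) < 0)
        return 1;
    return 0;
}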
@@ -1401,30 +1401,9 @@ int ff_find_unused_picture(MpegEncContext *s, int shared)
     return ret;
 }
 
-static void update_noise_reduction(MpegEncContext *s)
-{
-    int intra, i;
-
-    for (intra = 0; intra < 2; intra++) {
-        if (s->dct_count[intra] > (1 << 16)) {
-            for (i = 0; i < 64; i++) {
-                s->dct_error_sum[intra][i] >>= 1;
-            }
-            s->dct_count[intra] >>= 1;
-        }
-
-        for (i = 0; i < 64; i++) {
-            s->dct_offset[intra][i] = (s->avctx->noise_reduction *
-                                       s->dct_count[intra] +
-                                       s->dct_error_sum[intra][i] / 2) /
-                                      (s->dct_error_sum[intra][i] + 1);
-        }
-    }
-}
-
 /**
- * generic function for encode/decode called after coding/decoding
- * the header and before a frame is coded/decoded.
+ * generic function called after decoding
+ * the header and before a frame is decoded.
  */
 int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 {
@@ -1441,62 +1420,58 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
     /* release forgotten pictures */
     /* if (mpeg124/h263) */
-    if (!s->encoding) {
-        for (i = 0; i < MAX_PICTURE_COUNT; i++) {
-            if (&s->picture[i] != s->last_picture_ptr &&
-                &s->picture[i] != s->next_picture_ptr &&
-                s->picture[i].reference && !s->picture[i].needs_realloc) {
-                if (!(avctx->active_thread_type & FF_THREAD_FRAME))
-                    av_log(avctx, AV_LOG_ERROR,
-                           "releasing zombie picture\n");
-                ff_mpeg_unref_picture(s, &s->picture[i]);
-            }
-        }
-    }
+    for (i = 0; i < MAX_PICTURE_COUNT; i++) {
+        if (&s->picture[i] != s->last_picture_ptr &&
+            &s->picture[i] != s->next_picture_ptr &&
+            s->picture[i].reference && !s->picture[i].needs_realloc) {
+            if (!(avctx->active_thread_type & FF_THREAD_FRAME))
+                av_log(avctx, AV_LOG_ERROR,
+                       "releasing zombie picture\n");
+            ff_mpeg_unref_picture(s, &s->picture[i]);
+        }
+    }
 
     ff_mpeg_unref_picture(s, &s->current_picture);
 
-    if (!s->encoding) {
-        release_unused_pictures(s);
-
-        if (s->current_picture_ptr &&
-            s->current_picture_ptr->f.buf[0] == NULL) {
-            // we already have a unused image
-            // (maybe it was set before reading the header)
-            pic = s->current_picture_ptr;
-        } else {
-            i = ff_find_unused_picture(s, 0);
-            if (i < 0) {
-                av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
-                return i;
-            }
-            pic = &s->picture[i];
-        }
-
-        pic->reference = 0;
-        if (!s->droppable) {
-            if (s->pict_type != AV_PICTURE_TYPE_B)
-                pic->reference = 3;
-        }
-
-        pic->f.coded_picture_number = s->coded_picture_number++;
-
-        if (ff_alloc_picture(s, pic, 0) < 0)
-            return -1;
-
-        s->current_picture_ptr = pic;
-        // FIXME use only the vars from current_pic
-        s->current_picture_ptr->f.top_field_first = s->top_field_first;
-        if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
-            s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
-            if (s->picture_structure != PICT_FRAME)
-                s->current_picture_ptr->f.top_field_first =
-                    (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
-        }
-        s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
-                                                     !s->progressive_sequence;
-        s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
-    }
+    release_unused_pictures(s);
+
+    if (s->current_picture_ptr &&
+        s->current_picture_ptr->f.buf[0] == NULL) {
+        // we already have a unused image
+        // (maybe it was set before reading the header)
+        pic = s->current_picture_ptr;
+    } else {
+        i = ff_find_unused_picture(s, 0);
+        if (i < 0) {
+            av_log(s->avctx, AV_LOG_ERROR, "no frame buffer available\n");
+            return i;
+        }
+        pic = &s->picture[i];
+    }
+
+    pic->reference = 0;
+    if (!s->droppable) {
+        if (s->pict_type != AV_PICTURE_TYPE_B)
+            pic->reference = 3;
+    }
+
+    pic->f.coded_picture_number = s->coded_picture_number++;
+
+    if (ff_alloc_picture(s, pic, 0) < 0)
+        return -1;
+
+    s->current_picture_ptr = pic;
+    // FIXME use only the vars from current_pic
+    s->current_picture_ptr->f.top_field_first = s->top_field_first;
+    if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
+        s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
+        if (s->picture_structure != PICT_FRAME)
+            s->current_picture_ptr->f.top_field_first =
+                (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
+    }
+    s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame &&
+                                                 !s->progressive_sequence;
+    s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
 
     s->current_picture_ptr->f.pict_type = s->pict_type;
     // if (s->flags && CODEC_FLAG_QSCALE)
@@ -1634,11 +1609,6 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
         s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
     }
 
-    if (s->dct_error_sum) {
-        assert(s->avctx->noise_reduction && s->encoding);
-        update_noise_reduction(s);
-    }
-
 #if FF_API_XVMC
 FF_DISABLE_DEPRECATION_WARNINGS
     if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
......
@@ -1385,6 +1385,99 @@ static void frame_end(MpegEncContext *s)
 }
 
+static void update_noise_reduction(MpegEncContext *s)
+{
+    int intra, i;
+
+    for (intra = 0; intra < 2; intra++) {
+        if (s->dct_count[intra] > (1 << 16)) {
+            for (i = 0; i < 64; i++) {
+                s->dct_error_sum[intra][i] >>= 1;
+            }
+            s->dct_count[intra] >>= 1;
+        }
+
+        for (i = 0; i < 64; i++) {
+            s->dct_offset[intra][i] = (s->avctx->noise_reduction *
+                                       s->dct_count[intra] +
+                                       s->dct_error_sum[intra][i] / 2) /
+                                      (s->dct_error_sum[intra][i] + 1);
+        }
+    }
+}
+
+static int frame_start(MpegEncContext *s)
+{
+    int ret;
+
+    /* mark & release old frames */
+    if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
+        s->last_picture_ptr != s->next_picture_ptr &&
+        s->last_picture_ptr->f.buf[0]) {
+        ff_mpeg_unref_picture(s, s->last_picture_ptr);
+    }
+
+    s->current_picture_ptr->f.pict_type = s->pict_type;
+    s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
+
+    ff_mpeg_unref_picture(s, &s->current_picture);
+    if ((ret = ff_mpeg_ref_picture(s, &s->current_picture,
+                                   s->current_picture_ptr)) < 0)
+        return ret;
+
+    if (s->pict_type != AV_PICTURE_TYPE_B) {
+        s->last_picture_ptr = s->next_picture_ptr;
+        if (!s->droppable)
+            s->next_picture_ptr = s->current_picture_ptr;
+    }
+
+    if (s->last_picture_ptr) {
+        ff_mpeg_unref_picture(s, &s->last_picture);
+        if (s->last_picture_ptr->f.buf[0] &&
+            (ret = ff_mpeg_ref_picture(s, &s->last_picture,
+                                       s->last_picture_ptr)) < 0)
+            return ret;
+    }
+    if (s->next_picture_ptr) {
+        ff_mpeg_unref_picture(s, &s->next_picture);
+        if (s->next_picture_ptr->f.buf[0] &&
+            (ret = ff_mpeg_ref_picture(s, &s->next_picture,
+                                       s->next_picture_ptr)) < 0)
+            return ret;
+    }
+
+    if (s->picture_structure != PICT_FRAME) {
+        int i;
+        for (i = 0; i < 4; i++) {
+            if (s->picture_structure == PICT_BOTTOM_FIELD) {
+                s->current_picture.f.data[i] +=
+                    s->current_picture.f.linesize[i];
+            }
+            s->current_picture.f.linesize[i] *= 2;
+            s->last_picture.f.linesize[i]    *= 2;
+            s->next_picture.f.linesize[i]    *= 2;
+        }
+    }
+
+    if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
+        s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
+        s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
+    } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
+        s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
+        s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
+    } else {
+        s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
+        s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
+    }
+
+    if (s->dct_error_sum) {
+        assert(s->avctx->noise_reduction && s->encoding);
+        update_noise_reduction(s);
+    }
+
+    return 0;
+}
+
 int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                           const AVFrame *pic_arg, int *got_packet)
 {
@@ -1425,7 +1518,9 @@ int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
         s->pict_type = s->new_picture.f.pict_type;
         //emms_c();
-        ff_MPV_frame_start(s, avctx);
+        ret = frame_start(s);
+        if (ret < 0)
+            return ret;
 vbv_retry:
         if (encode_picture(s, s->picture_number) < 0)
             return -1;
@@ -1461,7 +1556,7 @@ vbv_retry:
                               s->lambda_table[i] * (s->qscale + 1) /
                               s->qscale);
             }
-            s->mb_skipped = 0; // done in MPV_frame_start()
+            s->mb_skipped = 0; // done in frame_start()
             // done in encode_picture() so we must undo it
             if (s->pict_type == AV_PICTURE_TYPE_P) {
                 if (s->flipflop_rounding ||
......