Commit 835f798c authored by Diego Biurrun

mpegvideo: cosmetics: Lowercase ugly uppercase MPV_ function name prefixes

parent a6a27fed
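
The commit does not say how the rename was carried out; the sketch below only illustrates the kind of mechanical, tree-wide prefix rewrite the diff applies (the path, file glob and regular expression are assumptions, not part of the commit):

#!/usr/bin/env python3
# Hypothetical helper (not the tool actually used): lowercase the MPV_ prefixes,
# turning ff_MPV_xxx into ff_mpv_xxx and static MPV_xxx into mpv_xxx.
import pathlib
import re

# Match an optional "ff_" followed by "MPV_" when another identifier character follows.
PATTERN = re.compile(r'\b(ff_)?MPV_(?=[A-Za-z0-9_])')

def lowercase_prefix(match):
    # Keep the optional leading "ff_" and lowercase only the "MPV_" part.
    return (match.group(1) or '') + 'mpv_'

for path in pathlib.Path('libavcodec').rglob('*.[ch]'):
    text = path.read_text()
    new_text = PATTERN.sub(lowercase_prefix, text)
    if new_text != text:
        path.write_text(new_text)
        print('rewrote', path)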
@@ -40,12 +40,12 @@ void ff_dct_unquantize_h263_inter_neon(MpegEncContext *s, int16_t *block,
 void ff_dct_unquantize_h263_intra_neon(MpegEncContext *s, int16_t *block,
 int n, int qscale);
-av_cold void ff_MPV_common_init_arm(MpegEncContext *s)
+av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
 {
 int cpu_flags = av_get_cpu_flags();
 if (have_armv5te(cpu_flags))
-ff_MPV_common_init_armv5te(s);
+ff_mpv_common_init_armv5te(s);
 if (have_neon(cpu_flags)) {
 s->dct_unquantize_h263_intra = ff_dct_unquantize_h263_intra_neon;
...
@@ -21,6 +21,6 @@
 #include "libavcodec/mpegvideo.h"
-void ff_MPV_common_init_armv5te(MpegEncContext *s);
+void ff_mpv_common_init_armv5te(MpegEncContext *s);
 #endif /* AVCODEC_ARM_MPEGVIDEO_ARM_H */
@@ -94,7 +94,7 @@ static void dct_unquantize_h263_inter_armv5te(MpegEncContext *s,
 ff_dct_unquantize_h263_armv5te(block, qmul, qadd, nCoeffs + 1);
 }
-av_cold void ff_MPV_common_init_armv5te(MpegEncContext *s)
+av_cold void ff_mpv_common_init_armv5te(MpegEncContext *s)
 {
 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_armv5te;
 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_armv5te;
...
@@ -92,9 +92,9 @@ AVCodec ff_flv_encoder = {
 .type = AVMEDIA_TYPE_VIDEO,
 .id = AV_CODEC_ID_FLV1,
 .priv_data_size = sizeof(MpegEncContext),
-.init = ff_MPV_encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.init = ff_mpv_encode_init,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
 .priv_class = &flv_class,
 };
@@ -74,7 +74,7 @@ static av_cold int h261_decode_init(AVCodecContext *avctx)
 MpegEncContext *const s = &h->s;
 // set defaults
-ff_MPV_decode_defaults(s);
+ff_mpv_decode_defaults(s);
 s->avctx = avctx;
 s->width = s->avctx->coded_width;
 s->height = s->avctx->coded_height;
@@ -218,7 +218,7 @@ static int h261_decode_mb_skipped(H261Context *h, int mba1, int mba2)
 s->mb_skipped = 1;
 h->mtype &= ~MB_TYPE_H261_FIL;
-ff_MPV_decode_mb(s, s->block);
+ff_mpv_decode_mb(s, s->block);
 }
 return 0;
@@ -445,7 +445,7 @@ intra:
 s->block_last_index[i] = -1;
 }
-ff_MPV_decode_mb(s, s->block);
+ff_mpv_decode_mb(s, s->block);
 return SLICE_OK;
 }
@@ -596,12 +596,12 @@ retry:
 if (s->width != avctx->coded_width || s->height != avctx->coded_height) {
 ParseContext pc = s->parse_context; // FIXME move this demuxing hack to libavformat
 s->parse_context.buffer = 0;
-ff_MPV_common_end(s);
+ff_mpv_common_end(s);
 s->parse_context = pc;
 }
 if (!s->context_initialized)
-if ((ret = ff_MPV_common_init(s)) < 0)
+if ((ret = ff_mpv_common_init(s)) < 0)
 return ret;
 if (!s->context_initialized) {
@@ -621,7 +621,7 @@ retry:
 avctx->skip_frame >= AVDISCARD_ALL)
 return get_consumed_bytes(s, buf_size);
-if (ff_MPV_frame_start(s, avctx) < 0)
+if (ff_mpv_frame_start(s, avctx) < 0)
 return -1;
 ff_mpeg_er_frame_start(s);
@@ -635,7 +635,7 @@ retry:
 break;
 h261_decode_gob(h);
 }
-ff_MPV_frame_end(s);
+ff_mpv_frame_end(s);
 assert(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type);
 assert(s->current_picture.f->pict_type == s->pict_type);
@@ -654,7 +654,7 @@ static av_cold int h261_decode_end(AVCodecContext *avctx)
 H261Context *h = avctx->priv_data;
 MpegEncContext *s = &h->s;
-ff_MPV_common_end(s);
+ff_mpv_common_end(s);
 return 0;
 }
...
@@ -331,9 +331,9 @@ AVCodec ff_h261_encoder = {
 .type = AVMEDIA_TYPE_VIDEO,
 .id = AV_CODEC_ID_H261,
 .priv_data_size = sizeof(H261Context),
-.init = ff_MPV_encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.init = ff_mpv_encode_init,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
 AV_PIX_FMT_NONE },
 .priv_class = &h261_class,
...
@@ -52,7 +52,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
 s->workaround_bugs = avctx->workaround_bugs;
 // set defaults
-ff_MPV_decode_defaults(s);
+ff_mpv_decode_defaults(s);
 s->quant_precision = 5;
 s->decode_mb = ff_h263_decode_mb;
 s->low_delay = 1;
@@ -115,7 +115,7 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
 if (avctx->codec->id != AV_CODEC_ID_H263 &&
 avctx->codec->id != AV_CODEC_ID_MPEG4) {
 ff_mpv_idct_init(s);
-if ((ret = ff_MPV_common_init(s)) < 0)
+if ((ret = ff_mpv_common_init(s)) < 0)
 return ret;
 }
@@ -130,7 +130,7 @@ av_cold int ff_h263_decode_end(AVCodecContext *avctx)
 {
 MpegEncContext *s = avctx->priv_data;
-ff_MPV_common_end(s);
+ff_mpv_common_end(s);
 return 0;
 }
@@ -239,7 +239,7 @@ static int decode_slice(MpegEncContext *s)
 if (ret < 0) {
 const int xy = s->mb_x + s->mb_y * s->mb_stride;
 if (ret == SLICE_END) {
-ff_MPV_decode_mb(s, s->block);
+ff_mpv_decode_mb(s, s->block);
 if (s->loop_filter)
 ff_h263_loop_filter(s);
@@ -251,7 +251,7 @@ static int decode_slice(MpegEncContext *s)
 if (++s->mb_x >= s->mb_width) {
 s->mb_x = 0;
 ff_mpeg_draw_horiz_band(s, s->mb_y * mb_size, mb_size);
-ff_MPV_report_decode_progress(s);
+ff_mpv_report_decode_progress(s);
 s->mb_y++;
 }
 return 0;
@@ -270,13 +270,13 @@ static int decode_slice(MpegEncContext *s)
 return AVERROR_INVALIDDATA;
 }
-ff_MPV_decode_mb(s, s->block);
+ff_mpv_decode_mb(s, s->block);
 if (s->loop_filter)
 ff_h263_loop_filter(s);
 }
 ff_mpeg_draw_horiz_band(s, s->mb_y * mb_size, mb_size);
-ff_MPV_report_decode_progress(s);
+ff_mpv_report_decode_progress(s);
 s->mb_x = 0;
 }
@@ -454,7 +454,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 }
 if (!s->context_initialized)
-if ((ret = ff_MPV_common_init(s)) < 0)
+if ((ret = ff_mpv_common_init(s)) < 0)
 return ret;
 if (s->current_picture_ptr == NULL || s->current_picture_ptr->f->data[0]) {
@@ -503,7 +503,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 ff_set_sar(avctx, avctx->sample_aspect_ratio);
-if ((ret = ff_MPV_common_frame_size_change(s)))
+if ((ret = ff_mpv_common_frame_size_change(s)))
 return ret;
 }
@@ -542,7 +542,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;
 }
-if ((ret = ff_MPV_frame_start(s, avctx)) < 0)
+if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
 return ret;
 if (!s->divx_packed && !avctx->hwaccel)
@@ -559,7 +559,7 @@ int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 /* the second part of the wmv2 header contains the MB skip bits which
 * are stored in current_picture->mb_type which is not available before
-* ff_MPV_frame_start() */
+* ff_mpv_frame_start() */
 if (CONFIG_WMV2_DECODER && s->msmpeg4_version == 5) {
 ret = ff_wmv2_decode_secondary_picture_header(s);
 if (ret < 0)
@@ -613,7 +613,7 @@ intrax8_decoded:
 return ret;
 }
-ff_MPV_frame_end(s);
+ff_mpv_frame_end(s);
 if (!s->divx_packed && avctx->hwaccel)
 ff_thread_finish_setup(avctx);
...
@@ -733,7 +733,7 @@ static int h264_frame_start(H264Context *h)
 /* We mark the current picture as non-reference after allocating it, so
 * that if we break out due to an error it can be released automatically
-* in the next ff_MPV_frame_start().
+* in the next ff_mpv_frame_start().
 */
 h->cur_pic_ptr->reference = 0;
...
@@ -718,9 +718,9 @@ av_cold void ff_intrax8_common_end(IntraX8Context * w)
 /**
 * Decode single IntraX8 frame.
 * The parent codec must fill s->loopfilter and s->gb (bitstream).
-* The parent codec must call MPV_frame_start(), ff_er_frame_start() before calling this function.
-* The parent codec must call ff_er_frame_end(), MPV_frame_end() after calling this function.
-* This function does not use MPV_decode_mb().
+* The parent codec must call ff_mpv_frame_start(), ff_er_frame_start() before calling this function.
+* The parent codec must call ff_er_frame_end(), ff_mpv_frame_end() after calling this function.
+* This function does not use ff_mpv_decode_mb().
 * @param w pointer to IntraX8Context
 * @param dquant doubled quantizer, it would be odd in case of VC-1 halfpq==1.
 * @param quant_offset offset away from zero
...
@@ -159,9 +159,9 @@ AVCodec ff_mjpeg_encoder = {
 .type = AVMEDIA_TYPE_VIDEO,
 .id = AV_CODEC_ID_MJPEG,
 .priv_data_size = sizeof(MpegEncContext),
-.init = ff_MPV_encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.init = ff_mpv_encode_init,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .pix_fmts = (const enum AVPixelFormat[]){
 AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_NONE
 },
...
@@ -1098,7 +1098,7 @@ static av_cold int mpeg_decode_init(AVCodecContext *avctx)
 Mpeg1Context *s = avctx->priv_data;
 MpegEncContext *s2 = &s->mpeg_enc_ctx;
-ff_MPV_decode_defaults(s2);
+ff_mpv_decode_defaults(s2);
 s->mpeg_enc_ctx.avctx = avctx;
 s->mpeg_enc_ctx.flags = avctx->flags;
@@ -1221,7 +1221,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
 if (s1->mpeg_enc_ctx_allocated) {
 ParseContext pc = s->parse_context;
 s->parse_context.buffer = 0;
-ff_MPV_common_end(s);
+ff_mpv_common_end(s);
 s->parse_context = pc;
 }
@@ -1312,7 +1312,7 @@ static int mpeg_decode_postinit(AVCodecContext *avctx)
 memcpy(old_permutation, s->idsp.idct_permutation, 64 * sizeof(uint8_t));
 ff_mpv_idct_init(s);
-if (ff_MPV_common_init(s) < 0)
+if (ff_mpv_common_init(s) < 0)
 return -2;
 quant_matrix_rebuild(s->intra_matrix, old_permutation, s->idsp.idct_permutation);
@@ -1590,7 +1590,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
 if (s->first_field || s->picture_structure == PICT_FRAME) {
 AVFrameSideData *pan_scan;
-if (ff_MPV_frame_start(s, avctx) < 0)
+if (ff_mpv_frame_start(s, avctx) < 0)
 return -1;
 ff_mpeg_er_frame_start(s);
@@ -1676,7 +1676,7 @@ static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
 #if FF_API_XVMC
 FF_DISABLE_DEPRECATION_WARNINGS
-// MPV_frame_start will call this function too,
+// ff_mpv_frame_start will call this function too,
 // but we need to call it on every field
 if (CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
 if (ff_xvmc_field_start(s, avctx) < 0)
@@ -1841,13 +1841,13 @@ FF_ENABLE_DEPRECATION_WARNINGS
 s->dest[1] += 16 >> s->chroma_x_shift;
 s->dest[2] += 16 >> s->chroma_x_shift;
-ff_MPV_decode_mb(s, s->block);
+ff_mpv_decode_mb(s, s->block);
 if (++s->mb_x >= s->mb_width) {
 const int mb_size = 16;
 ff_mpeg_draw_horiz_band(s, mb_size * (s->mb_y >> field_pic), mb_size);
-ff_MPV_report_decode_progress(s);
+ff_mpv_report_decode_progress(s);
 s->mb_x = 0;
 s->mb_y += 1 << field_pic;
@@ -2016,7 +2016,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
 ff_er_frame_end(&s->er);
-ff_MPV_frame_end(s);
+ff_mpv_frame_end(s);
 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
 int ret = av_frame_ref(pict, s->current_picture_ptr->f);
@@ -2133,7 +2133,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
 /* start new MPEG-1 context decoding */
 s->out_format = FMT_MPEG1;
 if (s1->mpeg_enc_ctx_allocated) {
-ff_MPV_common_end(s);
+ff_mpv_common_end(s);
 }
 s->width = avctx->coded_width;
 s->height = avctx->coded_height;
@@ -2151,7 +2151,7 @@ static int vcr2_init_sequence(AVCodecContext *avctx)
 avctx->idct_algo = FF_IDCT_SIMPLE;
 ff_mpv_idct_init(s);
-if (ff_MPV_common_init(s) < 0)
+if (ff_mpv_common_init(s) < 0)
 return -1;
 s1->mpeg_enc_ctx_allocated = 1;
@@ -2653,7 +2653,7 @@ static av_cold int mpeg_decode_end(AVCodecContext *avctx)
 Mpeg1Context *s = avctx->priv_data;
 if (s->mpeg_enc_ctx_allocated)
-ff_MPV_common_end(&s->mpeg_enc_ctx);
+ff_mpv_common_end(&s->mpeg_enc_ctx);
 av_freep(&s->a53_caption);
 return 0;
 }
...
@@ -130,7 +130,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
 {
 MpegEncContext *s = avctx->priv_data;
-if (ff_MPV_encode_init(avctx) < 0)
+if (ff_mpv_encode_init(avctx) < 0)
 return -1;
 if (find_frame_rate_index(s) < 0) {
@@ -1082,8 +1082,8 @@ AVCodec ff_mpeg1video_encoder = {
 .id = AV_CODEC_ID_MPEG1VIDEO,
 .priv_data_size = sizeof(MpegEncContext),
 .init = encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .supported_framerates = ff_mpeg12_frame_rate_tab + 1,
 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
 AV_PIX_FMT_NONE },
@@ -1098,8 +1098,8 @@ AVCodec ff_mpeg2video_encoder = {
 .id = AV_CODEC_ID_MPEG2VIDEO,
 .priv_data_size = sizeof(MpegEncContext),
 .init = encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .supported_framerates = ff_mpeg12_frame_rate_tab + 1,
 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
 AV_PIX_FMT_YUV422P,
...
@@ -1287,7 +1287,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
 int ret;
 static int done = 0;
-if ((ret = ff_MPV_encode_init(avctx)) < 0)
+if ((ret = ff_mpv_encode_init(avctx)) < 0)
 return ret;
 if (!done) {
@@ -1401,8 +1401,8 @@ AVCodec ff_mpeg4_encoder = {
 .id = AV_CODEC_ID_MPEG4,
 .priv_data_size = sizeof(MpegEncContext),
 .init = encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
 .capabilities = CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
 .priv_class = &mpeg4enc_class,
...
@@ -371,7 +371,7 @@ static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
 s->dest[2] = s->current_picture.f->data[2] + (s->mb_y * (16 >> s->chroma_y_shift) * s->uvlinesize) + s->mb_x * (16 >> s->chroma_x_shift);
 assert(ref == 0);
-ff_MPV_decode_mb(s, s->block);
+ff_mpv_decode_mb(s, s->block);
 }
 /* init common dct for both encoder and decoder */
@@ -393,14 +393,14 @@ static av_cold int dct_init(MpegEncContext *s)
 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
 if (HAVE_INTRINSICS_NEON)
-ff_MPV_common_init_neon(s);
+ff_mpv_common_init_neon(s);
 if (ARCH_ARM)
-ff_MPV_common_init_arm(s);
+ff_mpv_common_init_arm(s);
 if (ARCH_PPC)
-ff_MPV_common_init_ppc(s);
+ff_mpv_common_init_ppc(s);
 if (ARCH_X86)
-ff_MPV_common_init_x86(s);
+ff_mpv_common_init_x86(s);
 return 0;
 }
@@ -821,7 +821,7 @@ static int init_duplicate_context(MpegEncContext *s)
 return 0;
 fail:
-return -1; // free() through ff_MPV_common_end()
+return -1; // free() through ff_mpv_common_end()
 }
 static void free_duplicate_context(MpegEncContext *s)
@@ -915,7 +915,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
 ff_mpv_idct_init(s);
-ff_MPV_common_init(s);
+ff_mpv_common_init(s);
 }
 if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
@@ -923,7 +923,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
 s->context_reinit = 0;
 s->height = s1->height;
 s->width = s1->width;
-if ((err = ff_MPV_common_frame_size_change(s)) < 0)
+if ((err = ff_mpv_common_frame_size_change(s)) < 0)
 return err;
 }
@@ -1024,7 +1024,7 @@ do {\
 * The changed fields will not depend upon the
 * prior state of the MpegEncContext.
 */
-void ff_MPV_common_defaults(MpegEncContext *s)
+void ff_mpv_common_defaults(MpegEncContext *s)
 {
 s->y_dc_scale_table =
 s->c_dc_scale_table = ff_mpeg1_dc_scale_table;
@@ -1047,9 +1047,9 @@ void ff_MPV_common_defaults(MpegEncContext *s)
 * the changed fields will not depend upon
 * the prior state of the MpegEncContext.
 */
-void ff_MPV_decode_defaults(MpegEncContext *s)
+void ff_mpv_decode_defaults(MpegEncContext *s)
 {
-ff_MPV_common_defaults(s);
+ff_mpv_common_defaults(s);
 }
 static int init_er(MpegEncContext *s)
@@ -1232,7 +1232,7 @@ fail:
 * init common structure for both encoder and decoder.
 * this assumes that some variables like width/height are already set
 */
-av_cold int ff_MPV_common_init(MpegEncContext *s)
+av_cold int ff_mpv_common_init(MpegEncContext *s)
 {
 int i;
 int nb_slices = (HAVE_THREADS &&
@@ -1343,7 +1343,7 @@ av_cold int ff_MPV_common_init(MpegEncContext *s)
 return 0;
 fail:
-ff_MPV_common_end(s);
+ff_mpv_common_end(s);
 return -1;
 }
@@ -1402,7 +1402,7 @@ static int free_context_frame(MpegEncContext *s)
 return 0;
 }
-int ff_MPV_common_frame_size_change(MpegEncContext *s)
+int ff_mpv_common_frame_size_change(MpegEncContext *s)
 {
 int i, err = 0;
@@ -1470,12 +1470,12 @@ int ff_MPV_common_frame_size_change(MpegEncContext *s)
 return 0;
 fail:
-ff_MPV_common_end(s);
+ff_mpv_common_end(s);
 return err;
 }
 /* init common structure for both encoder and decoder */
-void ff_MPV_common_end(MpegEncContext *s)
+void ff_mpv_common_end(MpegEncContext *s)
 {
 int i;
@@ -1674,7 +1674,7 @@ int ff_find_unused_picture(MpegEncContext *s, int shared)
 * generic function called after decoding
 * the header and before a frame is decoded.
 */
-int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
+int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 {
 int i, ret;
 Picture *pic;
@@ -1889,7 +1889,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
 }
 /* called after a frame has been decoded. */
-void ff_MPV_frame_end(MpegEncContext *s)
+void ff_mpv_frame_end(MpegEncContext *s)
 {
 #if FF_API_XVMC
 FF_DISABLE_DEPRECATION_WARNINGS
@@ -2010,7 +2010,7 @@ void ff_print_debug_info(MpegEncContext *s, Picture *p)
 /**
 * find the lowest MB row referenced in the MVs
 */
-int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir)
+int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir)
 {
 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
 int my, off, i, mvs;
@@ -2116,7 +2116,7 @@ void ff_clean_intra_table_entries(MpegEncContext *s)
 s->interlaced_dct : true if interlaced dct used (mpeg2)
 */
 static av_always_inline
-void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
+void mpv_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
 int is_mpeg12)
 {
 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
@@ -2206,12 +2206,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
 if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
 if (s->mv_dir & MV_DIR_FORWARD) {
 ff_thread_await_progress(&s->last_picture_ptr->tf,
-ff_MPV_lowest_referenced_row(s, 0),
+ff_mpv_lowest_referenced_row(s, 0),
 0);
 }
 if (s->mv_dir & MV_DIR_BACKWARD) {
 ff_thread_await_progress(&s->next_picture_ptr->tf,
-ff_MPV_lowest_referenced_row(s, 1),
+ff_mpv_lowest_referenced_row(s, 1),
 0);
 }
 }
@@ -2223,12 +2223,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
 op_pix = s->hdsp.put_no_rnd_pixels_tab;
 }
 if (s->mv_dir & MV_DIR_FORWARD) {
-ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
+ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
 op_pix = s->hdsp.avg_pixels_tab;
 op_qpix= s->me.qpel_avg;
 }
 if (s->mv_dir & MV_DIR_BACKWARD) {
-ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
+ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
 }
 }
@@ -2351,13 +2351,14 @@ skip_idct:
 }
 }
-void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]){
+void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64])
+{
 #if !CONFIG_SMALL
 if(s->out_format == FMT_MPEG1) {
-MPV_decode_mb_internal(s, block, 1);
+mpv_decode_mb_internal(s, block, 1);
 } else
 #endif
-MPV_decode_mb_internal(s, block, 0);
+mpv_decode_mb_internal(s, block, 0);
 }
 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
@@ -2472,7 +2473,7 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
 }
-void ff_MPV_report_decode_progress(MpegEncContext *s)
+void ff_mpv_report_decode_progress(MpegEncContext *s)
 {
 if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
 ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
...
@@ -689,24 +689,33 @@ static const AVClass name ## _class = {\
 * and decoding). The changed fields will not depend upon the prior
 * state of the MpegEncContext.
 */
-void ff_MPV_common_defaults(MpegEncContext *s);
-void ff_MPV_decode_defaults(MpegEncContext *s);
-int ff_MPV_common_init(MpegEncContext *s);
-int ff_MPV_common_frame_size_change(MpegEncContext *s);
-void ff_MPV_common_end(MpegEncContext *s);
-void ff_MPV_decode_mb(MpegEncContext *s, int16_t block[12][64]);
-int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
-void ff_MPV_frame_end(MpegEncContext *s);
-int ff_MPV_encode_init(AVCodecContext *avctx);
-int ff_MPV_encode_end(AVCodecContext *avctx);
-int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+void ff_mpv_common_defaults(MpegEncContext *s);
+int ff_mpv_common_init(MpegEncContext *s);
+void ff_mpv_common_init_arm(MpegEncContext *s);
+void ff_mpv_common_init_neon(MpegEncContext *s);
+void ff_mpv_common_init_ppc(MpegEncContext *s);
+void ff_mpv_common_init_x86(MpegEncContext *s);
+
+int ff_mpv_common_frame_size_change(MpegEncContext *s);
+void ff_mpv_common_end(MpegEncContext *s);
+
+void ff_mpv_decode_defaults(MpegEncContext *s);
+void ff_mpv_decode_mb(MpegEncContext *s, int16_t block[12][64]);
+void ff_mpv_report_decode_progress(MpegEncContext *s);
+int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx);
+void ff_mpv_frame_end(MpegEncContext *s);
+int ff_mpv_lowest_referenced_row(MpegEncContext *s, int dir);
+int ff_mpv_encode_init(AVCodecContext *avctx);
+void ff_mpv_encode_init_x86(MpegEncContext *s);
+int ff_mpv_encode_end(AVCodecContext *avctx);
+int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
 const AVFrame *frame, int *got_packet);
-void ff_MPV_encode_init_x86(MpegEncContext *s);
-void ff_MPV_common_init_x86(MpegEncContext *s);
-void ff_MPV_common_init_arm(MpegEncContext *s);
-void ff_MPV_common_init_neon(MpegEncContext *s);
-void ff_MPV_common_init_ppc(MpegEncContext *s);
 void ff_clean_intra_table_entries(MpegEncContext *s);
 void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h);
 void ff_mpeg_flush(AVCodecContext *avctx);
@@ -715,8 +724,6 @@ void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix);
 int ff_find_unused_picture(MpegEncContext *s, int shared);
 void ff_denoise_dct(MpegEncContext *s, int16_t *block);
 int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
-int ff_MPV_lowest_referenced_row(MpegEncContext *s, int dir);
-void ff_MPV_report_decode_progress(MpegEncContext *s);
 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src);
 void ff_set_qscale(MpegEncContext * s, int qscale);
@@ -727,7 +734,7 @@ int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int
 void ff_init_block_index(MpegEncContext *s);
-void ff_MPV_motion(MpegEncContext *s,
+void ff_mpv_motion(MpegEncContext *s,
 uint8_t *dest_y, uint8_t *dest_cb,
 uint8_t *dest_cr, int dir,
 uint8_t **ref_picture,
...
@@ -218,10 +218,10 @@ static void update_duplicate_context_after_me(MpegEncContext *dst,
 * Set the given MpegEncContext to defaults for encoding.
 * the changed fields will not depend upon the prior state of the MpegEncContext.
 */
-static void MPV_encode_defaults(MpegEncContext *s)
+static void mpv_encode_defaults(MpegEncContext *s)
 {
 int i;
-ff_MPV_common_defaults(s);
+ff_mpv_common_defaults(s);
 for (i = -16; i < 16; i++) {
 default_fcode_tab[i + MAX_MV] = 1;
@@ -234,12 +234,12 @@ static void MPV_encode_defaults(MpegEncContext *s)
 }
 /* init video encoder */
-av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
+av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
 {
 MpegEncContext *s = avctx->priv_data;
 int i, ret, format_supported;
-MPV_encode_defaults(s);
+mpv_encode_defaults(s);
 switch (avctx->codec_id) {
 case AV_CODEC_ID_MPEG2VIDEO:
@@ -701,11 +701,11 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
 /* init */
 ff_mpv_idct_init(s);
-if (ff_MPV_common_init(s) < 0)
+if (ff_mpv_common_init(s) < 0)
 return -1;
 if (ARCH_X86)
-ff_MPV_encode_init_x86(s);
+ff_mpv_encode_init_x86(s);
 ff_fdctdsp_init(&s->fdsp, avctx);
 ff_me_cmp_init(&s->mecc, avctx);
@@ -838,18 +838,18 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
 return 0;
 fail:
-ff_MPV_encode_end(avctx);
+ff_mpv_encode_end(avctx);
 return AVERROR_UNKNOWN;
 }
-av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
+av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
 {
 MpegEncContext *s = avctx->priv_data;
 int i;
 ff_rate_control_uninit(s);
-ff_MPV_common_end(s);
+ff_mpv_common_end(s);
 if (CONFIG_MJPEG_ENCODER &&
 s->out_format == FMT_MJPEG)
 ff_mjpeg_encode_close(s);
@@ -1530,7 +1530,7 @@ static int frame_start(MpegEncContext *s)
 return 0;
 }
-int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
 const AVFrame *pic_arg, int *got_packet)
 {
 MpegEncContext *s = avctx->priv_data;
@@ -1986,14 +1986,14 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
 }
 if (s->mv_dir & MV_DIR_FORWARD) {
-ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
+ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
 s->last_picture.f->data,
 op_pix, op_qpix);
 op_pix = s->hdsp.avg_pixels_tab;
 op_qpix = s->qdsp.avg_qpel_pixels_tab;
 }
 if (s->mv_dir & MV_DIR_BACKWARD) {
-ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
+ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
 s->next_picture.f->data,
 op_pix, op_qpix);
 }
@@ -2314,7 +2314,7 @@ static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
 }
 if(s->avctx->mb_decision == FF_MB_DECISION_RD){
-ff_MPV_decode_mb(s, s->block);
+ff_mpv_decode_mb(s, s->block);
 score *= s->lambda2;
 score += sse_mb(s) << FF_LAMBDA_SHIFT;
@@ -2959,7 +2959,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
 }
 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
-ff_MPV_decode_mb(s, s->block);
+ff_mpv_decode_mb(s, s->block);
 } else {
 int motion_x = 0, motion_y = 0;
 s->mv_type=MV_TYPE_16X16;
@@ -3078,7 +3078,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
 s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
 ff_h263_update_motion_val(s);
-ff_MPV_decode_mb(s, s->block);
+ff_mpv_decode_mb(s, s->block);
 }
 /* clean the MV table in IPS frames for direct mode in B frames */
@@ -4259,9 +4259,9 @@ AVCodec ff_h263_encoder = {
 .type = AVMEDIA_TYPE_VIDEO,
 .id = AV_CODEC_ID_H263,
 .priv_data_size = sizeof(MpegEncContext),
-.init = ff_MPV_encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.init = ff_mpv_encode_init,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
 .priv_class = &h263_class,
 };
@@ -4287,9 +4287,9 @@ AVCodec ff_h263p_encoder = {
 .type = AVMEDIA_TYPE_VIDEO,
 .id = AV_CODEC_ID_H263P,
 .priv_data_size = sizeof(MpegEncContext),
-.init = ff_MPV_encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.init = ff_mpv_encode_init,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .capabilities = CODEC_CAP_SLICE_THREADS,
 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
 .priv_class = &h263p_class,
@@ -4303,9 +4303,9 @@ AVCodec ff_msmpeg4v2_encoder = {
 .type = AVMEDIA_TYPE_VIDEO,
 .id = AV_CODEC_ID_MSMPEG4V2,
 .priv_data_size = sizeof(MpegEncContext),
-.init = ff_MPV_encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.init = ff_mpv_encode_init,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
 .priv_class = &msmpeg4v2_class,
 };
@@ -4318,9 +4318,9 @@ AVCodec ff_msmpeg4v3_encoder = {
 .type = AVMEDIA_TYPE_VIDEO,
 .id = AV_CODEC_ID_MSMPEG4V3,
 .priv_data_size = sizeof(MpegEncContext),
-.init = ff_MPV_encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.init = ff_mpv_encode_init,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
 .priv_class = &msmpeg4v3_class,
 };
@@ -4333,9 +4333,9 @@ AVCodec ff_wmv1_encoder = {
 .type = AVMEDIA_TYPE_VIDEO,
 .id = AV_CODEC_ID_WMV1,
 .priv_data_size = sizeof(MpegEncContext),
-.init = ff_MPV_encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.init = ff_mpv_encode_init,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
 .priv_class = &wmv1_class,
 };
@@ -826,7 +826,7 @@ static inline void apply_8x8(MpegEncContext *s,
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
-static av_always_inline void MPV_motion_internal(MpegEncContext *s,
+static av_always_inline void mpv_motion_internal(MpegEncContext *s,
 uint8_t *dest_y,
 uint8_t *dest_cb,
 uint8_t *dest_cr,
@@ -965,7 +965,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
 }
 }
-void ff_MPV_motion(MpegEncContext *s,
+void ff_mpv_motion(MpegEncContext *s,
 uint8_t *dest_y, uint8_t *dest_cb,
 uint8_t *dest_cr, int dir,
 uint8_t **ref_picture,
@@ -974,10 +974,10 @@ void ff_MPV_motion(MpegEncContext *s,
 {
 #if !CONFIG_SMALL
 if (s->out_format == FMT_MPEG1)
-MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
+mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
 ref_picture, pix_op, qpix_op, 1);
 else
 #endif
-MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
+mpv_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
 ref_picture, pix_op, qpix_op, 0);
 }
@@ -169,7 +169,7 @@ void ff_xvmc_decode_mb(MpegEncContext *s)
 return;
 }
-// from MPV_decode_mb(), update DC predictors for P macroblocks
+// from ff_mpv_decode_mb(), update DC predictors for P macroblocks
 if (!s->mb_intra) {
 s->last_dc[0] =
 s->last_dc[1] =
...
@@ -396,8 +396,8 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
 avctx->pix_fmt = AV_PIX_FMT_YUV420P;
-if ((ret = ff_MPV_frame_start(s, avctx)) < 0) {
-av_log(v->s.avctx, AV_LOG_ERROR, "ff_MPV_frame_start error\n");
+if ((ret = ff_mpv_frame_start(s, avctx)) < 0) {
+av_log(v->s.avctx, AV_LOG_ERROR, "ff_mpv_frame_start error\n");
 avctx->pix_fmt = AV_PIX_FMT_RGB24;
 return ret;
 }
@@ -417,7 +417,7 @@ static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
 ff_er_frame_end(&s->er);
-ff_MPV_frame_end(s);
+ff_mpv_frame_end(s);
 f = s->current_picture.f;
...
@@ -123,7 +123,7 @@ static void dct_unquantize_h263_intra_neon(MpegEncContext *s, int16_t *block,
 }
-av_cold void ff_MPV_common_init_neon(MpegEncContext *s)
+av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
 {
 int cpu_flags = av_get_cpu_flags();
...
@@ -115,7 +115,7 @@ static void dct_unquantize_h263_altivec(MpegEncContext *s,
 #endif /* HAVE_ALTIVEC */
-av_cold void ff_MPV_common_init_ppc(MpegEncContext *s)
+av_cold void ff_mpv_common_init_ppc(MpegEncContext *s)
 {
 #if HAVE_ALTIVEC
 if (!PPC_ALTIVEC(av_get_cpu_flags()))
...
@@ -372,7 +372,7 @@ static int rv20_decode_picture_header(RVDecContext *rv)
 if (new_w != s->width || new_h != s->height) {
 av_log(s->avctx, AV_LOG_DEBUG,
 "attempting to change resolution to %dx%d\n", new_w, new_h);
-ff_MPV_common_end(s);
+ff_mpv_common_end(s);
 ret = ff_set_dimensions(s->avctx, new_w, new_h);
 if (ret < 0)
@@ -380,7 +380,7 @@ static int rv20_decode_picture_header(RVDecContext *rv)
 s->width = new_w;
 s->height = new_h;
-if ((ret = ff_MPV_common_init(s)) < 0)
+if ((ret = ff_mpv_common_init(s)) < 0)
 return ret;
 }
@@ -455,7 +455,7 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx)
 avctx->coded_height, 0, avctx)) < 0)
 return ret;
-ff_MPV_decode_defaults(s);
+ff_mpv_decode_defaults(s);
 s->avctx = avctx;
 s->out_format = FMT_H263;
@@ -499,7 +499,7 @@ static av_cold int rv10_decode_init(AVCodecContext *avctx)
 avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 ff_mpv_idct_init(s);
-if ((ret = ff_MPV_common_init(s)) < 0)
+if ((ret = ff_mpv_common_init(s)) < 0)
 return ret;
 ff_h263dsp_init(&s->h263dsp);
@@ -523,7 +523,7 @@ static av_cold int rv10_decode_end(AVCodecContext *avctx)
 {
 MpegEncContext *s = avctx->priv_data;
-ff_MPV_common_end(s);
+ff_mpv_common_end(s);
 return 0;
 }
@@ -561,10 +561,10 @@ static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf,
 // FIXME write parser so we always have complete frames?
 if (s->current_picture_ptr) {
 ff_er_frame_end(&s->er);
-ff_MPV_frame_end(s);
+ff_mpv_frame_end(s);
 s->mb_x = s->mb_y = s->resync_mb_x = s->resync_mb_y = 0;
 }
-if ((ret = ff_MPV_frame_start(s, avctx)) < 0)
+if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
 return ret;
 ff_mpeg_er_frame_start(s);
 } else {
@@ -646,7 +646,7 @@ static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf,
 }
 if (s->pict_type != AV_PICTURE_TYPE_B)
 ff_h263_update_motion_val(s);
-ff_MPV_decode_mb(s, s->block);
+ff_mpv_decode_mb(s, s->block);
 if (s->loop_filter)
 ff_h263_loop_filter(s);
@@ -739,7 +739,7 @@ static int rv10_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 if (s->current_picture_ptr != NULL && s->mb_y >= s->mb_height) {
 ff_er_frame_end(&s->er);
-ff_MPV_frame_end(s);
+ff_mpv_frame_end(s);
 if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
 if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
...
@@ -64,9 +64,9 @@ AVCodec ff_rv10_encoder = {
 .type = AVMEDIA_TYPE_VIDEO,
 .id = AV_CODEC_ID_RV10,
 .priv_data_size = sizeof(MpegEncContext),
-.init = ff_MPV_encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.init = ff_mpv_encode_init,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
 .priv_class = &rv10_class,
 };
@@ -65,9 +65,9 @@ AVCodec ff_rv20_encoder = {
 .type = AVMEDIA_TYPE_VIDEO,
 .id = AV_CODEC_ID_RV20,
 .priv_data_size = sizeof(MpegEncContext),
-.init = ff_MPV_encode_init,
-.encode2 = ff_MPV_encode_picture,
-.close = ff_MPV_encode_end,
+.init = ff_mpv_encode_init,
+.encode2 = ff_mpv_encode_picture,
+.close = ff_mpv_encode_end,
 .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
 .priv_class = &rv20_class,
 };
@@ -1475,7 +1475,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
 MpegEncContext *s = &r->s;
 int ret;
-ff_MPV_decode_defaults(s);
+ff_mpv_decode_defaults(s);
 s->avctx = avctx;
 s->out_format = FMT_H263;
 s->codec_id = avctx->codec_id;
@@ -1489,7 +1489,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
 s->low_delay = 0;
 ff_mpv_idct_init(s);
-if ((ret = ff_MPV_common_init(s)) < 0)
+if ((ret = ff_mpv_common_init(s)) < 0)
 return ret;
 ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);
@@ -1504,7 +1504,7 @@ av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
 #endif
 if ((ret = rv34_decoder_alloc(r)) < 0) {
-ff_MPV_common_end(&r->s);
+ff_mpv_common_end(&r->s);
 return ret;
 }
@@ -1526,10 +1526,10 @@ int ff_rv34_decode_init_thread_copy(AVCodecContext *avctx)
 if (avctx->internal->is_copy) {
 r->tmp_b_block_base = NULL;
 ff_mpv_idct_init(&r->s);
-if ((err = ff_MPV_common_init(&r->s)) < 0)
+if ((err = ff_mpv_common_init(&r->s)) < 0)
 return err;
 if ((err = rv34_decoder_alloc(r)) < 0) {
-ff_MPV_common_end(&r->s);
+ff_mpv_common_end(&r->s);
 return err;
 }
 }
@@ -1549,7 +1549,7 @@ int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecConte
 if (s->height != s1->height || s->width != s1->width) {
 s->height = s1->height;
 s->width = s1->width;
-if ((err = ff_MPV_common_frame_size_change(s)) < 0)
+if ((err = ff_mpv_common_frame_size_change(s)) < 0)
 return err;
 if ((err = rv34_decoder_realloc(r)) < 0)
 return err;
@@ -1580,7 +1580,7 @@ static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
 int got_picture = 0, ret;
 ff_er_frame_end(&s->er);
-ff_MPV_frame_end(s);
+ff_mpv_frame_end(s);
 s->mb_num_left = 0;
 if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
@@ -1665,7 +1665,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
 av_log(avctx, AV_LOG_ERROR, "New frame but still %d MB left.",
 s->mb_num_left);
 ff_er_frame_end(&s->er);
-ff_MPV_frame_end(s);
+ff_mpv_frame_end(s);
 }
 if (s->width != si.width || s->height != si.height) {
@@ -1681,13 +1681,13 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
 if (err < 0)
 return err;
-if ((err = ff_MPV_common_frame_size_change(s)) < 0)
+if ((err = ff_mpv_common_frame_size_change(s)) < 0)
 return err;
 if ((err = rv34_decoder_realloc(r)) < 0)
 return err;
 }
 s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
-if (ff_MPV_frame_start(s, s->avctx) < 0)
+if (ff_mpv_frame_start(s, s->avctx) < 0)
 return -1;
 ff_mpeg_er_frame_start(s);
 if (!r->tmp_b_block_base) {
@@ -1792,7 +1792,7 @@ int ff_rv34_decode_frame(AVCodecContext *avctx,
 /* always mark the current frame as finished, frame-mt supports
 * only complete frames */
 ff_er_frame_end(&s->er);
-ff_MPV_frame_end(s);
+ff_mpv_frame_end(s);
 s->mb_num_left = 0;
 ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
 return AVERROR_INVALIDDATA;
@@ -1806,7 +1806,7 @@ av_cold int ff_rv34_decode_end(AVCodecContext *avctx)
 {
 RV34DecContext *r = avctx->priv_data;
-ff_MPV_common_end(&r->s);
+ff_mpv_common_end(&r->s);
 rv34_decoder_free(r);
 return 0;
...
@@ -483,7 +483,7 @@ static av_cold int svq1_encode_end(AVCodecContext *avctx)
avctx->frame_number));
s->m.mb_type = NULL;
- ff_MPV_common_end(&s->m);
+ ff_mpv_common_end(&s->m);
av_freep(&s->m.me.scratchpad);
av_freep(&s->m.me.map);
@@ -533,7 +533,7 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx)
s->avctx = avctx;
s->m.avctx = avctx;
- if ((ret = ff_MPV_common_init(&s->m)) < 0) {
+ if ((ret = ff_mpv_common_init(&s->m)) < 0) {
svq1_encode_end(avctx);
return ret;
}
...
@@ -5749,7 +5749,7 @@ av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
av_freep(&v->sr_rows[i >> 1][i & 1]);
av_freep(&v->hrd_rate);
av_freep(&v->hrd_buffer);
- ff_MPV_common_end(&v->s);
+ ff_mpv_common_end(&v->s);
av_freep(&v->mv_type_mb_plane);
av_freep(&v->direct_mb_plane);
av_freep(&v->forward_mb_plane);
@@ -5927,7 +5927,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
if (ff_msmpeg4_decode_init(avctx) < 0)
goto err;
if (ff_vc1_decode_init_alloc_tables(v) < 0) {
- ff_MPV_common_end(s);
+ ff_mpv_common_end(s);
goto err;
}
@@ -5980,7 +5980,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
s->next_p_frame_damaged = 0;
}
- if (ff_MPV_frame_start(s, avctx) < 0) {
+ if (ff_mpv_frame_start(s, avctx) < 0) {
goto err;
}
@@ -6093,7 +6093,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
ff_er_frame_end(&s->er);
}
- ff_MPV_frame_end(s);
+ ff_mpv_frame_end(s);
if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
image:
...
@@ -54,7 +54,7 @@ static int encode_ext_header(Wmv2Context *w){
static av_cold int wmv2_encode_init(AVCodecContext *avctx){
Wmv2Context * const w= avctx->priv_data;
- if(ff_MPV_encode_init(avctx) < 0)
+ if (ff_mpv_encode_init(avctx) < 0)
return -1;
ff_wmv2_common_init(w);
@@ -217,7 +217,7 @@ AVCodec ff_wmv2_encoder = {
.id = AV_CODEC_ID_WMV2,
.priv_data_size = sizeof(Wmv2Context),
.init = wmv2_encode_init,
- .encode2 = ff_MPV_encode_picture,
- .close = ff_MPV_encode_end,
+ .encode2 = ff_mpv_encode_picture,
+ .close = ff_mpv_encode_end,
.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
};
@@ -444,7 +444,7 @@ __asm__ volatile(
#endif /* HAVE_INLINE_ASM */
- av_cold void ff_MPV_common_init_x86(MpegEncContext *s)
+ av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
{
#if HAVE_INLINE_ASM
int cpu_flags = av_get_cpu_flags();
...
@@ -193,7 +193,7 @@ static void denoise_dct_sse2(MpegEncContext *s, int16_t *block){
}
#endif /* HAVE_INLINE_ASM */
- av_cold void ff_MPV_encode_init_x86(MpegEncContext *s)
+ av_cold void ff_mpv_encode_init_x86(MpegEncContext *s)
{
const int dct_algo = s->avctx->dct_algo;
int i;
...
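For reference, a minimal sketch (not part of this commit) of how an mpegvideo-based encoder wires up the renamed lowercase entry points shown in the hunks above; the codec name in the sketch is a placeholder and the id is reused from the diff purely for illustration:

#include "avcodec.h"
#include "mpegvideo.h"

/* Hypothetical encoder definition: only the ff_mpv_* callbacks and the
 * MpegEncContext private data mirror the real encoders touched above. */
AVCodec ff_example_encoder = {
    .name           = "example",              /* placeholder name */
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_RV10,       /* placeholder id from the diff */
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_mpv_encode_init,     /* was ff_MPV_encode_init */
    .encode2        = ff_mpv_encode_picture,  /* was ff_MPV_encode_picture */
    .close          = ff_mpv_encode_end,      /* was ff_MPV_encode_end */
    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P,
                                                    AV_PIX_FMT_NONE },
};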