Commit f073b150 authored by Anton Khirnov

lavc: remove disabled FF_API_OLD_ENCODE_AUDIO cruft

parent 5d606863
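For context, a minimal sketch of the replacement path that callers of the removed avcodec_encode_audio() are expected to use: avcodec_encode_audio2() with an AVFrame describing the input samples and an AVPacket receiving the output. It is modeled on the compatibility wrapper deleted further down in this diff; the helper name, the interleaved int16_t input, and the pass-through pts are illustrative assumptions, not part of the commit.

#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>

/* Encode one frame of interleaved samples with avcodec_encode_audio2().
 * 'samples' must hold avctx->frame_size * avctx->channels values.
 * Returns the packet size on success (0 if the encoder only buffered
 * the input), or a negative error code. */
static int encode_frame_sketch(AVCodecContext *avctx, AVPacket *pkt,
                               const int16_t *samples, int64_t pts)
{
    AVFrame frame;
    int got_packet = 0, ret, buf_size;

    avcodec_get_frame_defaults(&frame);
    frame.nb_samples = avctx->frame_size;
    frame.pts        = pts;     /* the caller supplies the pts directly */

    buf_size = av_samples_get_buffer_size(NULL, avctx->channels,
                                          frame.nb_samples,
                                          avctx->sample_fmt, 1);
    ret = avcodec_fill_audio_frame(&frame, avctx->channels, avctx->sample_fmt,
                                   (const uint8_t *)samples, buf_size, 1);
    if (ret < 0)
        return ret;

    av_init_packet(pkt);
    pkt->data = NULL;           /* let the encoder allocate the packet */
    pkt->size = 0;

    ret = avcodec_encode_audio2(avctx, pkt, &frame, &got_packet);
    if (ret < 0)
        return ret;
    return got_packet ? pkt->size : 0;
}

The caller consumes pkt and releases it with av_free_packet(). Because the pts travels on the AVFrame and AVPacket, the internal sample_count bookkeeping removed in this commit is no longer needed.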
@@ -683,9 +683,6 @@ static av_cold int aac_encode_end(AVCodecContext *avctx)
av_freep(&s->buffer.samples);
av_freep(&s->cpe);
ff_af_queue_close(&s->afq);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
return 0;
}
@@ -719,11 +716,6 @@ static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s)
for(ch = 0; ch < s->channels; ch++)
s->planar_samples[ch] = s->buffer.samples + 3 * 1024 * ch;
#if FF_API_OLD_ENCODE_AUDIO
if (!(avctx->coded_frame = avcodec_alloc_frame()))
goto alloc_fail;
#endif
return 0;
alloc_fail:
return AVERROR(ENOMEM);
@@ -2052,9 +2052,6 @@ av_cold int ff_ac3_encode_close(AVCodecContext *avctx)
s->mdct_end(s);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
return 0;
}
@@ -2484,14 +2481,6 @@ av_cold int ff_ac3_encode_init(AVCodecContext *avctx)
if (ret)
goto init_fail;
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame= avcodec_alloc_frame();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
goto init_fail;
}
#endif
ff_dsputil_init(&s->dsp, avctx);
avpriv_float_dsp_init(&s->fdsp, avctx->flags & CODEC_FLAG_BITEXACT);
ff_ac3dsp_init(&s->ac3dsp, avctx->flags & CODEC_FLAG_BITEXACT);
@@ -142,11 +142,6 @@ static av_cold int adpcm_encode_init(AVCodecContext *avctx)
goto error;
}
#if FF_API_OLD_ENCODE_AUDIO
if (!(avctx->coded_frame = avcodec_alloc_frame()))
goto error;
#endif
return 0;
error:
av_freep(&s->paths);
@@ -159,9 +154,6 @@ error:
static av_cold int adpcm_encode_close(AVCodecContext *avctx)
{
ADPCMEncodeContext *s = avctx->priv_data;
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
av_freep(&s->paths);
av_freep(&s->node_buf);
av_freep(&s->nodep_buf);
@@ -107,14 +107,6 @@ static int adx_encode_header(AVCodecContext *avctx, uint8_t *buf, int bufsize)
return HEADER_SIZE;
}
#if FF_API_OLD_ENCODE_AUDIO
static av_cold int adx_encode_close(AVCodecContext *avctx)
{
av_freep(&avctx->coded_frame);
return 0;
}
#endif
static av_cold int adx_encode_init(AVCodecContext *avctx)
{
ADXContext *c = avctx->priv_data;
@@ -125,11 +117,6 @@ static av_cold int adx_encode_init(AVCodecContext *avctx)
}
avctx->frame_size = BLOCK_SAMPLES;
#if FF_API_OLD_ENCODE_AUDIO
if (!(avctx->coded_frame = avcodec_alloc_frame()))
return AVERROR(ENOMEM);
#endif
/* the cutoff can be adjusted, but this seems to work pretty well */
c->cutoff = 500;
ff_adx_calculate_coeffs(c->cutoff, avctx->sample_rate, COEFF_BITS, c->coeff);
@@ -177,9 +164,6 @@ AVCodec ff_adpcm_adx_encoder = {
.id = AV_CODEC_ID_ADPCM_ADX,
.priv_data_size = sizeof(ADXContext),
.init = adx_encode_init,
#if FF_API_OLD_ENCODE_AUDIO
.close = adx_encode_close,
#endif
.encode2 = adx_encode_frame,
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
@@ -3685,36 +3685,6 @@ AVCodec *avcodec_find_encoder(enum AVCodecID id);
*/
AVCodec *avcodec_find_encoder_by_name(const char *name);
#if FF_API_OLD_ENCODE_AUDIO
/**
* Encode an audio frame from samples into buf.
*
* @deprecated Use avcodec_encode_audio2 instead.
*
* @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.
* However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user
* will know how much space is needed because it depends on the value passed
* in buf_size as described below. In that case a lower value can be used.
*
* @param avctx the codec context
* @param[out] buf the output buffer
* @param[in] buf_size the output buffer size
* @param[in] samples the input buffer containing the samples
* The number of samples read from this buffer is frame_size*channels,
* both of which are defined in avctx.
* For codecs which have avctx->frame_size equal to 0 (e.g. PCM) the number of
* samples read from samples is equal to:
* buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id))
* This also implies that av_get_bits_per_sample() must not return 0 for these
* codecs.
* @return On error a negative value is returned, on success zero or the number
* of bytes used to encode the data read from the input buffer.
*/
int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,
uint8_t *buf, int buf_size,
const short *samples);
#endif
/**
* Encode a frame of audio.
*
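A side note on the documentation removed above: for codecs with avctx->frame_size == 0, the old API derived the number of samples to read from the output buffer size. A small worked example of that relation, using an assumed signed 16-bit PCM codec, channel count, and buffer size:

#include <libavcodec/avcodec.h>

/* buf_size * 8 / (channels * bits_per_sample), as described in the removed
 * documentation; the codec, channel count and buffer size are assumptions. */
static int samples_per_buffer(void)
{
    int channels = 2;
    int bps      = av_get_bits_per_sample(AV_CODEC_ID_PCM_S16LE); /* 16 */
    int buf_size = 4096;                                          /* bytes */
    return buf_size * 8 / (channels * bps);                       /* 1024 */
}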
@@ -394,12 +394,6 @@ static av_cold int flac_encode_init(AVCodecContext *avctx)
s->frame_count = 0;
s->min_framesize = s->max_framesize;
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
#endif
ret = ff_lpc_init(&s->lpc_ctx, avctx->frame_size,
s->options.max_prediction_order, FF_LPC_TYPE_LEVINSON);
@@ -1285,9 +1279,6 @@ static av_cold int flac_encode_close(AVCodecContext *avctx)
}
av_freep(&avctx->extradata);
avctx->extradata_size = 0;
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
return 0;
}
@@ -52,9 +52,6 @@ static av_cold int g722_encode_close(AVCodecContext *avctx)
av_freep(&c->node_buf[i]);
av_freep(&c->nodep_buf[i]);
}
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
return 0;
}
@@ -122,14 +119,6 @@ static av_cold int g722_encode_init(AVCodecContext * avctx)
}
}
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
goto error;
}
#endif
return 0;
error:
g722_encode_close(avctx);
@@ -331,13 +331,6 @@ static av_cold int g726_encode_init(AVCodecContext *avctx)
g726_reset(c);
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame->key_frame = 1;
#endif
/* select a frame size that will end on a byte boundary and have a size of
approximately 1024 bytes */
avctx->frame_size = ((int[]){ 4096, 2736, 2048, 1640 })[c->code_size - 2];
@@ -345,14 +338,6 @@ static av_cold int g726_encode_init(AVCodecContext *avctx)
return 0;
}
#if FF_API_OLD_ENCODE_AUDIO
static av_cold int g726_encode_close(AVCodecContext *avctx)
{
av_freep(&avctx->coded_frame);
return 0;
}
#endif
static int g726_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr)
{
@@ -404,9 +389,6 @@ AVCodec ff_adpcm_g726_encoder = {
.priv_data_size = sizeof(G726Context),
.init = g726_encode_init,
.encode2 = g726_encode_frame,
#if FF_API_OLD_ENCODE_AUDIO
.close = g726_encode_close,
#endif
.capabilities = CODEC_CAP_SMALL_LAST_FRAME,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
@@ -76,14 +76,6 @@ typedef struct AVCodecInternal {
*/
int allocate_progress;
#if FF_API_OLD_ENCODE_AUDIO
/**
* Internal sample count used by avcodec_encode_audio() to fabricate pts.
* Can be removed along with avcodec_encode_audio().
*/
int sample_count;
#endif
/**
* An audio frame with less than required samples has been submitted and
* padded with silence. Reject all subsequent frames.
@@ -46,9 +46,6 @@ static av_cold int Faac_encode_close(AVCodecContext *avctx)
{
FaacAudioContext *s = avctx->priv_data;
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
av_freep(&avctx->extradata);
ff_af_queue_close(&s->afq);
@@ -133,14 +130,6 @@ static av_cold int Faac_encode_init(AVCodecContext *avctx)
avctx->frame_size = samples_input / avctx->channels;
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame= avcodec_alloc_frame();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
goto error;
}
#endif
/* Set decoder specific info */
avctx->extradata_size = 0;
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
@@ -97,9 +97,6 @@ static int aac_encode_close(AVCodecContext *avctx)
if (s->handle)
aacEncClose(&s->handle);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
av_freep(&avctx->extradata);
ff_af_queue_close(&s->afq);
@@ -275,13 +272,6 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
goto error;
}
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
goto error;
}
#endif
avctx->frame_size = info.frameLength;
avctx->delay = info.encoderDelay;
ff_af_queue_init(avctx, &s->afq);
@@ -77,21 +77,10 @@ static av_cold int libgsm_encode_init(AVCodecContext *avctx) {
}
}
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame= avcodec_alloc_frame();
if (!avctx->coded_frame) {
gsm_destroy(avctx->priv_data);
return AVERROR(ENOMEM);
}
#endif
return 0;
}
static av_cold int libgsm_encode_close(AVCodecContext *avctx) {
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
gsm_destroy(avctx->priv_data);
avctx->priv_data = NULL;
return 0;
@@ -153,23 +153,10 @@ static av_cold int ilbc_encode_init(AVCodecContext *avctx)
avctx->block_align = s->encoder.no_of_bytes;
avctx->frame_size = s->encoder.blockl;
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
#endif
return 0;
}
static av_cold int ilbc_encode_close(AVCodecContext *avctx)
{
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
return 0;
}
static int ilbc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr)
{
@@ -200,7 +187,6 @@ AVCodec ff_libilbc_encoder = {
.priv_data_size = sizeof(ILBCEncContext),
.init = ilbc_encode_init,
.encode2 = ilbc_encode_frame,
.close = ilbc_encode_close,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.long_name = NULL_IF_CONFIG_SMALL("iLBC (Internet Low Bitrate Codec)"),
@@ -78,9 +78,6 @@ static av_cold int mp3lame_encode_close(AVCodecContext *avctx)
{
LAMEContext *s = avctx->priv_data;
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
av_freep(&s->samples_flt[0]);
av_freep(&s->samples_flt[1]);
av_freep(&s->buffer);
@@ -142,14 +139,6 @@ static av_cold int mp3lame_encode_init(AVCodecContext *avctx)
avctx->frame_size = lame_get_framesize(s->gfp);
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
goto error;
}
#endif
/* allocate float sample buffers */
if (avctx->sample_fmt == AV_SAMPLE_FMT_FLTP) {
int ch;
@@ -202,11 +202,6 @@ static av_cold int amr_nb_encode_init(AVCodecContext *avctx)
avctx->frame_size = 160;
avctx->delay = 50;
ff_af_queue_init(avctx, &s->afq);
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
#endif
s->enc_state = Encoder_Interface_init(s->enc_dtx);
if (!s->enc_state) {
@@ -227,9 +222,6 @@ static av_cold int amr_nb_encode_close(AVCodecContext *avctx)
Encoder_Interface_exit(s->enc_state);
ff_af_queue_close(&s->afq);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
return 0;
}
@@ -251,16 +251,6 @@ static av_cold int encode_init(AVCodecContext *avctx)
av_log(avctx, AV_LOG_ERROR, "memory allocation error\n");
return AVERROR(ENOMEM);
}
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame) {
av_freep(&avctx->extradata);
speex_header_free(header_data);
speex_encoder_destroy(s->enc_state);
av_log(avctx, AV_LOG_ERROR, "memory allocation error\n");
return AVERROR(ENOMEM);
}
#endif
/* copy header packet to extradata */
memcpy(avctx->extradata, header_data, header_size);
@@ -329,9 +319,6 @@ static av_cold int encode_close(AVCodecContext *avctx)
speex_encoder_destroy(s->enc_state);
ff_af_queue_close(&s->afq);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
av_freep(&avctx->extradata);
return 0;
@@ -47,9 +47,6 @@ static int aac_encode_close(AVCodecContext *avctx)
AACContext *s = avctx->priv_data;
s->codec_api.Uninit(s->handle);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
av_freep(&avctx->extradata);
ff_af_queue_close(&s->afq);
av_freep(&s->end_buffer);
@@ -63,11 +60,6 @@ static av_cold int aac_encode_init(AVCodecContext *avctx)
AACENC_PARAM params = { 0 };
int index, ret;
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
#endif
avctx->frame_size = FRAME_SIZE;
avctx->delay = ENC_DELAY;
s->last_frame = 2;
@@ -94,11 +94,6 @@ static av_cold int amr_wb_encode_init(AVCodecContext *avctx)
avctx->frame_size = 320;
avctx->delay = 80;
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
#endif
s->state = E_IF_init();
@@ -110,7 +105,6 @@ static int amr_wb_encode_close(AVCodecContext *avctx)
AMRWBContext *s = avctx->priv_data;
E_IF_exit(s->state);
av_freep(&avctx->coded_frame);
return 0;
}
@@ -162,9 +162,6 @@ static av_cold int oggvorbis_encode_close(AVCodecContext *avctx)
av_fifo_free(s->pkt_fifo);
ff_af_queue_close(&s->afq);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
av_freep(&avctx->extradata);
return 0;
@@ -241,14 +238,6 @@ static av_cold int oggvorbis_encode_init(AVCodecContext *avctx)
goto error;
}
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
goto error;
}
#endif
return 0;
error:
oggvorbis_encode_close(avctx);
@@ -184,12 +184,6 @@ static av_cold int MPA_encode_init(AVCodecContext *avctx)
total_quant_bits[i] = 12 * v;
}
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame= avcodec_alloc_frame();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
#endif
return 0;
}
@@ -771,14 +765,6 @@ static int MPA_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
return 0;
}
static av_cold int MPA_encode_close(AVCodecContext *avctx)
{
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
return 0;
}
static const AVCodecDefault mp2_defaults[] = {
{ "b", "128k" },
{ NULL },
@@ -791,7 +777,6 @@ AVCodec ff_mp2_encoder = {
.priv_data_size = sizeof(MpegAudioContext),
.init = MPA_encode_init,
.encode2 = MPA_encode_frame,
.close = MPA_encode_close,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.supported_samplerates = (const int[]){
@@ -140,9 +140,6 @@ static av_cold int encode_end(AVCodecContext *avctx)
av_free(s->path);
}
ff_af_queue_close(&s->afq);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
return 0;
}
@@ -187,14 +184,6 @@ static av_cold int encode_init(AVCodecContext *avctx)
}
}
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
goto error;
}
#endif
return 0;
error:
encode_end(avctx);
@@ -40,9 +40,6 @@ static av_cold int ra144_encode_close(AVCodecContext *avctx)
RA144Context *ractx = avctx->priv_data;
ff_lpc_end(&ractx->lpc_ctx);
ff_af_queue_close(&ractx->afq);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
return 0;
}
@@ -71,14 +68,6 @@ static av_cold int ra144_encode_init(AVCodecContext * avctx)
ff_af_queue_init(avctx, &ractx->afq);
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
goto error;
}
#endif
return 0;
error:
ra144_encode_close(avctx);
@@ -46,9 +46,6 @@ static av_cold int roq_dpcm_encode_close(AVCodecContext *avctx)
{
ROQDPCMContext *context = avctx->priv_data;
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
av_freep(&context->frame_buffer);
return 0;
@@ -81,14 +78,6 @@ static av_cold int roq_dpcm_encode_init(AVCodecContext *avctx)
context->lastSample[0] = context->lastSample[1] = 0;
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame= avcodec_alloc_frame();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
goto error;
}
#endif
return 0;
error:
roq_dpcm_encode_close(avctx);
@@ -1245,87 +1245,6 @@ end:
return ret;
}
#if FF_API_OLD_ENCODE_AUDIO
int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx,
uint8_t *buf, int buf_size,
const short *samples)
{
AVPacket pkt;
AVFrame frame0 = { { 0 } };
AVFrame *frame;
int ret, samples_size, got_packet;
av_init_packet(&pkt);
pkt.data = buf;
pkt.size = buf_size;
if (samples) {
frame = &frame0;
avcodec_get_frame_defaults(frame);
if (avctx->frame_size) {
frame->nb_samples = avctx->frame_size;
} else {
/* if frame_size is not set, the number of samples must be
* calculated from the buffer size */
int64_t nb_samples;
if (!av_get_bits_per_sample(avctx->codec_id)) {
av_log(avctx, AV_LOG_ERROR, "avcodec_encode_audio() does not "
"support this codec\n");
return AVERROR(EINVAL);
}
nb_samples = (int64_t)buf_size * 8 /
(av_get_bits_per_sample(avctx->codec_id) *
avctx->channels);
if (nb_samples >= INT_MAX)
return AVERROR(EINVAL);
frame->nb_samples = nb_samples;
}
/* it is assumed that the samples buffer is large enough based on the
* relevant parameters */
samples_size = av_samples_get_buffer_size(NULL, avctx->channels,
frame->nb_samples,
avctx->sample_fmt, 1);
if ((ret = avcodec_fill_audio_frame(frame, avctx->channels,
avctx->sample_fmt,
(const uint8_t *)samples,
samples_size, 1)))
return ret;
/* fabricate frame pts from sample count.
* this is needed because the avcodec_encode_audio() API does not have
* a way for the user to provide pts */
frame->pts = ff_samples_to_time_base(avctx,
avctx->internal->sample_count);
avctx->internal->sample_count += frame->nb_samples;
} else {
frame = NULL;
}
got_packet = 0;
ret = avcodec_encode_audio2(avctx, &pkt, frame, &got_packet);
if (!ret && got_packet && avctx->coded_frame) {
avctx->coded_frame->pts = pkt.pts;
avctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
}
/* free any side data since we cannot return it */
if (pkt.side_data_elems > 0) {
int i;
for (i = 0; i < pkt.side_data_elems; i++)
av_free(pkt.side_data[i].data);
av_freep(&pkt.side_data);
pkt.side_data_elems = 0;
}
if (frame && frame->extended_data != frame->data)
av_free(frame->extended_data);
return ret ? ret : pkt.size;
}
#endif
#if FF_API_OLD_ENCODE_VIDEO
int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
const AVFrame *pict)
@@ -49,9 +49,6 @@
#ifndef FF_API_REQUEST_CHANNELS
#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 56)
#endif
#ifndef FF_API_OLD_ENCODE_AUDIO
#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
#ifndef FF_API_OLD_ENCODE_VIDEO
#define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 55)
#endif
@@ -1156,9 +1156,6 @@ static av_cold int vorbis_encode_close(AVCodecContext *avctx)
ff_mdct_end(&venc->mdct[0]);
ff_mdct_end(&venc->mdct[1]);
#if FF_API_OLD_ENCODE_AUDIO
av_freep(&avctx->coded_frame);
#endif
av_freep(&avctx->extradata);
return 0 ;
@@ -1190,14 +1187,6 @@ static av_cold int vorbis_encode_init(AVCodecContext *avctx)
avctx->frame_size = 1 << (venc->log2_blocksize[0] - 1);
#if FF_API_OLD_ENCODE_AUDIO
avctx->coded_frame = avcodec_alloc_frame();
if (!avctx->coded_frame) {
ret = AVERROR(ENOMEM);
goto error;
}
#endif
return 0;
error:
vorbis_encode_close(avctx);
@@ -386,11 +386,6 @@ int ff_wma_end(AVCodecContext *avctx)
av_free(s->int_table[i]);
}
#if FF_API_OLD_ENCODE_AUDIO
if (av_codec_is_encoder(avctx->codec))
av_freep(&avctx->coded_frame);
#endif
return 0;
}
@@ -52,11 +52,6 @@ static int encode_init(AVCodecContext * avctx){
return AVERROR(EINVAL);
}
#if FF_API_OLD_ENCODE_AUDIO
if (!(avctx->coded_frame = avcodec_alloc_frame()))
return AVERROR(ENOMEM);
#endif
/* extract flag infos */
flags1 = 0;
flags2 = 1;