Commit dca6fb08 authored by Michael Niedermayer

Merge commit 'ee6ca11b'

* commit 'ee6ca11b':
  vorbis: decode directly to the user-provided AVFrame
  vmdaudio: decode directly to the user-provided AVFrame
  twinvq: decode directly to the user-provided AVFrame
  tta: decode directly to the user-provided AVFrame
  truespeech: decode directly to the user-provided AVFrame

Conflicts:
	libavcodec/tta.c
	libavcodec/twinvq.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 2becf21d ee6ca11b
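
The change each of these commits applies, sketched in minimal form below (SketchContext, the sketch_decode_* names and the 240-sample count are illustrative placeholders, not actual FFmpeg code): the decoder stops keeping an AVFrame inside its private context and stops pointing avctx->coded_frame at it during init; instead it treats the decode callback's void *data argument as the caller's AVFrame, lets ff_get_buffer() allocate that frame's buffers, and writes samples straight into it, which removes the frame struct copy at the end of every call.

/* Minimal before/after sketch of the pattern (hypothetical decoder, not
 * taken from the tree; only the avcodec.h/internal.h calls that the diff
 * below also uses). */

#include <string.h>
#include "avcodec.h"
#include "internal.h"

typedef struct SketchContext {
    AVFrame frame;   /* removed by this kind of change */
} SketchContext;

/* Before: the frame lives in the private context, is registered as
 * avctx->coded_frame at init time, and the whole struct is copied into
 * the caller's frame on every decode call. */
static int sketch_decode_old(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    SketchContext *s = avctx->priv_data;
    int ret;

    s->frame.nb_samples = 240;                            /* arbitrary count */
    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0)
        return ret;
    memset(s->frame.data[0], 0, 240 * sizeof(int16_t));   /* "decode" here */

    *got_frame_ptr   = 1;
    *(AVFrame *)data = s->frame;                          /* extra struct copy */
    return avpkt->size;
}

/* After: the caller's AVFrame is used directly, so the context member, the
 * init-time avcodec_get_frame_defaults()/coded_frame setup and the final
 * copy all go away. */
static int sketch_decode_new(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;
    int ret;

    frame->nb_samples = 240;                              /* arbitrary count */
    if ((ret = ff_get_buffer(avctx, frame)) < 0)
        return ret;
    memset(frame->data[0], 0, 240 * sizeof(int16_t));     /* "decode" here */

    *got_frame_ptr = 1;
    return avpkt->size;
}

The diff below applies this transformation to each of the five decoders.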
libavcodec/truespeech.c

@@ -36,7 +36,6 @@
  * TrueSpeech decoder context
  */
 typedef struct {
-    AVFrame frame;
     DSPContext dsp;
     /* input data */
     DECLARE_ALIGNED(16, uint8_t, buffer)[32];
@@ -73,9 +72,6 @@ static av_cold int truespeech_decode_init(AVCodecContext * avctx)
 
     ff_dsputil_init(&c->dsp, avctx);
 
-    avcodec_get_frame_defaults(&c->frame);
-    avctx->coded_frame = &c->frame;
-
     return 0;
 }
 
@@ -310,6 +306,7 @@ static void truespeech_save_prevvec(TSContext *c)
 static int truespeech_decode_frame(AVCodecContext *avctx, void *data,
                                    int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     TSContext *c = avctx->priv_data;
@@ -327,12 +324,12 @@ static int truespeech_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    c->frame.nb_samples = iterations * 240;
-    if ((ret = ff_get_buffer(avctx, &c->frame)) < 0) {
+    frame->nb_samples = iterations * 240;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    samples = (int16_t *)c->frame.data[0];
+    samples = (int16_t *)frame->data[0];
 
     memset(samples, 0, iterations * 240 * sizeof(*samples));
@@ -354,8 +351,7 @@ static int truespeech_decode_frame(AVCodecContext *avctx, void *data,
         truespeech_save_prevvec(c);
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = c->frame;
 
     return buf_size;
 }
libavcodec/tta.c

@@ -61,7 +61,6 @@ typedef struct TTAChannel {
 
 typedef struct TTAContext {
     AVClass *class;
     AVCodecContext *avctx;
-    AVFrame frame;
     GetBitContext gb;
     const AVCRC *crc_table;
@@ -312,15 +311,13 @@ static av_cold int tta_decode_init(AVCodecContext * avctx)
         return AVERROR_INVALIDDATA;
     }
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
 static int tta_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     TTAContext *s = avctx->priv_data;
@@ -336,15 +333,15 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
     init_get_bits(&s->gb, buf, buf_size*8);
 
     /* get output buffer */
-    s->frame.nb_samples = framelen;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = framelen;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
     // decode directly to output buffer for 24-bit sample format
     if (s->bps == 3)
-        s->decode_buffer = (int32_t *)s->frame.data[0];
+        s->decode_buffer = (int32_t *)frame->data[0];
 
     // init per channel states
     for (i = 0; i < s->channels; i++) {
@@ -433,7 +430,7 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
             i++;
             // check for last frame
             if (i == s->last_frame_length && get_bits_left(&s->gb) / 8 == 4) {
-                s->frame.nb_samples = framelen = s->last_frame_length;
+                frame->nb_samples = framelen = s->last_frame_length;
                 break;
             }
         }
@@ -449,20 +446,20 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
     // convert to output buffer
     switch (s->bps) {
     case 1: {
-        uint8_t *samples = (uint8_t *)s->frame.data[0];
+        uint8_t *samples = (uint8_t *)frame->data[0];
         for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
             *samples++ = *p + 0x80;
         break;
     }
     case 2: {
-        int16_t *samples = (int16_t *)s->frame.data[0];
+        int16_t *samples = (int16_t *)frame->data[0];
         for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
             *samples++ = *p;
         break;
     }
     case 3: {
         // shift samples for 24-bit sample format
-        int32_t *samples = (int32_t *)s->frame.data[0];
+        int32_t *samples = (int32_t *)frame->data[0];
         for (p = s->decode_buffer; p < s->decode_buffer + (framelen * s->channels); p++)
             *samples++ <<= 8;
         // reset decode buffer
@@ -471,8 +468,7 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
     }
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return buf_size;
 error:
libavcodec/twinvq.c

@@ -177,7 +177,6 @@ static const ModeTab mode_44_48 = {
 
 typedef struct TwinContext {
     AVCodecContext *avctx;
-    AVFrame frame;
     AVFloatDSPContext fdsp;
     FFTContext mdct_ctx[3];
 
@@ -811,6 +810,7 @@ static void read_and_decode_spectrum(TwinContext *tctx, GetBitContext *gb,
 static int twin_decode_frame(AVCodecContext * avctx, void *data,
                              int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     TwinContext *tctx = avctx->priv_data;
@@ -832,12 +832,12 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
 
     /* get output buffer */
     if (tctx->discarded_packets >= 2) {
-        tctx->frame.nb_samples = mtab->size;
-        if ((ret = ff_get_buffer(avctx, &tctx->frame)) < 0) {
+        frame->nb_samples = mtab->size;
+        if ((ret = ff_get_buffer(avctx, frame)) < 0) {
             av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
             return ret;
         }
-        out = (float **)tctx->frame.extended_data;
+        out = (float **)frame->extended_data;
     }
 
     init_get_bits(&gb, buf, buf_size * 8);
@@ -863,8 +863,7 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
         return buf_size;
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = tctx->frame;
 
     return buf_size;
 }
@@ -1171,9 +1170,6 @@ static av_cold int twin_decode_init(AVCodecContext *avctx)
 
     memset_float(tctx->bark_hist[0][0], 0.1, FF_ARRAY_ELEMS(tctx->bark_hist));
 
-    avcodec_get_frame_defaults(&tctx->frame);
-    avctx->coded_frame = &tctx->frame;
-
     return 0;
 }
libavcodec/vmdav.c

@@ -469,7 +469,6 @@ static av_cold int vmdvideo_decode_end(AVCodecContext *avctx)
 #define BLOCK_TYPE_SILENCE 3
 
 typedef struct VmdAudioContext {
-    AVFrame frame;
     int out_bps;
     int chunk_size;
 } VmdAudioContext;
@@ -514,9 +513,6 @@ static av_cold int vmdaudio_decode_init(AVCodecContext *avctx)
 
     s->chunk_size = avctx->block_align + avctx->channels * (s->out_bps == 2);
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     av_log(avctx, AV_LOG_DEBUG, "%d channels, %d bits/sample, "
            "block align = %d, sample rate = %d\n",
            avctx->channels, avctx->bits_per_coded_sample, avctx->block_align,
@@ -557,6 +553,7 @@ static void decode_audio_s16(int16_t *out, const uint8_t *buf, int buf_size,
 static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
                                  int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     const uint8_t *buf_end;
     int buf_size = avpkt->size;
@@ -601,13 +598,14 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
     audio_chunks = buf_size / s->chunk_size;
 
     /* get output buffer */
-    s->frame.nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) / avctx->channels;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = ((silent_chunks + audio_chunks) * avctx->block_align) /
+                        avctx->channels;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    output_samples_u8  = s->frame.data[0];
-    output_samples_s16 = (int16_t *)s->frame.data[0];
+    output_samples_u8  = frame->data[0];
+    output_samples_s16 = (int16_t *)frame->data[0];
 
     /* decode silent chunks */
     if (silent_chunks > 0) {
@@ -637,8 +635,7 @@ static int vmdaudio_decode_frame(AVCodecContext *avctx, void *data,
         }
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return avpkt->size;
 }
libavcodec/vorbisdec.c

@@ -125,7 +125,6 @@ typedef struct {
 
 typedef struct vorbis_context_s {
     AVCodecContext *avccontext;
-    AVFrame frame;
     GetBitContext gb;
     VorbisDSPContext dsp;
     AVFloatDSPContext fdsp;
@@ -1040,9 +1039,6 @@ static av_cold int vorbis_decode_init(AVCodecContext *avccontext)
     avccontext->channels    = vc->audio_channels;
     avccontext->sample_rate = vc->audio_samplerate;
 
-    avcodec_get_frame_defaults(&vc->frame);
-    avccontext->coded_frame = &vc->frame;
-
     return 0;
 }
 
@@ -1653,6 +1649,7 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     vorbis_context *vc = avccontext->priv_data;
+    AVFrame *frame = data;
     GetBitContext *gb = &vc->gb;
     float *channel_ptrs[255];
     int i, len, ret;
@@ -1699,19 +1696,19 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
     }
 
     /* get output buffer */
-    vc->frame.nb_samples = vc->blocksize[1] / 2;
-    if ((ret = ff_get_buffer(avccontext, &vc->frame)) < 0) {
+    frame->nb_samples = vc->blocksize[1] / 2;
+    if ((ret = ff_get_buffer(avccontext, frame)) < 0) {
         av_log(avccontext, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
     if (vc->audio_channels > 8) {
         for (i = 0; i < vc->audio_channels; i++)
-            channel_ptrs[i] = (float *)vc->frame.extended_data[i];
+            channel_ptrs[i] = (float *)frame->extended_data[i];
     } else {
         for (i = 0; i < vc->audio_channels; i++) {
             int ch = ff_vorbis_channel_layout_offsets[vc->audio_channels - 1][i];
-            channel_ptrs[ch] = (float *)vc->frame.extended_data[i];
+            channel_ptrs[ch] = (float *)frame->extended_data[i];
         }
     }
 
@@ -1729,9 +1726,8 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
     av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n",
             get_bits_count(gb) / 8, get_bits_count(gb) % 8, len);
 
-    vc->frame.nb_samples = len;
+    frame->nb_samples = len;
     *got_frame_ptr = 1;
-    *(AVFrame *)data = vc->frame;
 
     return buf_size;
 }