Commit 5459cf41 authored by Michael Niedermayer

Merge commit 'a8ea936a'

* commit 'a8ea936a':
  libilbc: decode directly to the user-provided AVFrame
  dpcm: decode directly to the user-provided AVFrame
  imc/iac: decode directly to the user-provided AVFrame
  gsm: decode directly to the user-provided AVFrame

Conflicts:
	libavcodec/dpcm.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents d88e674a a8ea936a
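
All four decoders follow the same refactor: the AVFrame embedded in the codec's private context (together with the avcodec_get_frame_defaults()/avctx->coded_frame setup in init) goes away, and the decode callback instead treats its `data` argument as the caller's AVFrame, sets its size, requests a buffer for it with ff_get_buffer(), and writes the samples straight into it. A minimal sketch of the resulting shape, for a hypothetical mono AV_SAMPLE_FMT_S16 decoder with a made-up name foo_decode_frame (nothing here is copied verbatim from the merged files):

/* Hedged sketch only: foo_decode_frame is a made-up decoder, assumed to be
 * mono AV_SAMPLE_FMT_S16; it is not one of the decoders touched above. */
#include "avcodec.h"
#include "internal.h"   /* ff_get_buffer() */

static int foo_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame_ptr, AVPacket *avpkt)
{
    AVFrame *frame = data;          /* the caller's frame, not one we own */
    int16_t *samples;
    int i, ret;

    /* Set the frame size, then let ff_get_buffer() allocate frame->data[]
     * according to avctx->sample_fmt and avctx->channels. */
    frame->nb_samples = avctx->frame_size;
    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }

    /* Decode straight into the user-provided buffer; silence stands in for
     * the real bitstream work. */
    samples = (int16_t *)frame->data[0];
    for (i = 0; i < frame->nb_samples; i++)
        samples[i] = 0;

    *got_frame_ptr = 1;             /* no *(AVFrame *)data = s->frame copy */
    return avpkt->size;
}

With the caller owning the frame there is nothing left for the decoder to copy out at the end, which is why every "*(AVFrame *)data = s->frame;" line disappears in the hunks below.
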
@@ -44,7 +44,6 @@
 #include "mathops.h"
 
 typedef struct DPCMContext {
-    AVFrame frame;
     int16_t roq_square_array[256];
     int sample[2];                  ///< previous sample (for SOL_DPCM)
     const int8_t *sol_table;        ///< delta table for SOL_DPCM
@@ -163,9 +162,6 @@ static av_cold int dpcm_decode_init(AVCodecContext *avctx)
     else
         avctx->sample_fmt = AV_SAMPLE_FMT_S16;
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
@@ -175,6 +171,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
 {
     int buf_size = avpkt->size;
     DPCMContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     int out = 0, ret;
     int predictor[2];
     int ch = 0;
@@ -213,12 +210,12 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    s->frame.nb_samples = (out + avctx->channels - 1) / avctx->channels;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = (out + avctx->channels - 1) / avctx->channels;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
-    output_samples = (int16_t *)s->frame.data[0];
+    output_samples = (int16_t *)frame->data[0];
     samples_end = output_samples + out;
 
     switch(avctx->codec->id) {
@@ -298,7 +295,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
     }
     case AV_CODEC_ID_SOL_DPCM:
         if (avctx->codec_tag != 3) {
-            uint8_t *output_samples_u8 = s->frame.data[0],
+            uint8_t *output_samples_u8 = frame->data[0],
                     *samples_end_u8 = output_samples_u8 + out;
             while (output_samples_u8 < samples_end_u8) {
                 int n = bytestream2_get_byteu(&gb);
@@ -325,8 +322,7 @@ static int dpcm_decode_frame(AVCodecContext *avctx, void *data,
         break;
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return avpkt->size;
 }
...
@@ -34,8 +34,6 @@
 static av_cold int gsm_init(AVCodecContext *avctx)
 {
-    GSMContext *s = avctx->priv_data;
-
     avctx->channels = 1;
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
     if (!avctx->sample_rate)
@@ -52,16 +50,13 @@ static av_cold int gsm_init(AVCodecContext *avctx)
         avctx->block_align = GSM_MS_BLOCK_SIZE;
     }
 
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
-
     return 0;
 }
 
 static int gsm_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
 {
-    GSMContext *s = avctx->priv_data;
+    AVFrame *frame = data;
     int res;
     GetBitContext gb;
     const uint8_t *buf = avpkt->data;
@@ -74,12 +69,12 @@ static int gsm_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    s->frame.nb_samples = avctx->frame_size;
-    if ((res = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = avctx->frame_size;
+    if ((res = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return res;
     }
-    samples = (int16_t *)s->frame.data[0];
+    samples = (int16_t *)frame->data[0];
 
     switch (avctx->codec_id) {
     case AV_CODEC_ID_GSM:
@@ -96,8 +91,7 @@ static int gsm_decode_frame(AVCodecContext *avctx, void *data,
         return res;
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return avctx->block_align;
 }
...
@@ -26,7 +26,6 @@
 #include "avcodec.h"
 
 typedef struct GSMContext {
-    AVFrame frame;
     // Contains first 120 elements from the previous frame
     // (used by long_term_synth according to the "lag"),
     // then in the following 160 elements the current
...
@@ -81,8 +81,6 @@ typedef struct IMCChannel {
 } IMCChannel;
 
 typedef struct {
-    AVFrame frame;
-
     IMCChannel chctx[2];
 
     /** MDCT tables */
@@ -253,9 +251,6 @@ static av_cold int imc_decode_init(AVCodecContext *avctx)
     avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO
                                                  : AV_CH_LAYOUT_STEREO;
 
-    avcodec_get_frame_defaults(&q->frame);
-    avctx->coded_frame = &q->frame;
-
     return 0;
 }
@@ -937,6 +932,7 @@ static int imc_decode_block(AVCodecContext *avctx, IMCContext *q, int ch)
 static int imc_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame_ptr, AVPacket *avpkt)
 {
+    AVFrame *frame = data;
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     int ret, i;
@@ -951,14 +947,14 @@ static int imc_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     /* get output buffer */
-    q->frame.nb_samples = COEFFS;
-    if ((ret = ff_get_buffer(avctx, &q->frame)) < 0) {
+    frame->nb_samples = COEFFS;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
     for (i = 0; i < avctx->channels; i++) {
-        q->out_samples = (float *)q->frame.extended_data[i];
+        q->out_samples = (float *)frame->extended_data[i];
 
         q->dsp.bswap16_buf(buf16, (const uint16_t*)buf, IMC_BLOCK_SIZE / 2);
@@ -971,12 +967,11 @@ static int imc_decode_frame(AVCodecContext *avctx, void *data,
     }
 
     if (avctx->channels == 2) {
-        q->fdsp.butterflies_float((float *)q->frame.extended_data[0],
-                                  (float *)q->frame.extended_data[1], COEFFS);
+        q->fdsp.butterflies_float((float *)frame->extended_data[0],
+                                  (float *)frame->extended_data[1], COEFFS);
     }
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = q->frame;
 
     return IMC_BLOCK_SIZE * avctx->channels;
 }
...
@@ -41,7 +41,6 @@ static int get_mode(AVCodecContext *avctx)
 typedef struct ILBCDecContext {
     const AVClass *class;
-    AVFrame frame;
     iLBC_Dec_Inst_t decoder;
     int enhance;
 } ILBCDecContext;
@@ -69,8 +68,6 @@ static av_cold int ilbc_decode_init(AVCodecContext *avctx)
     }
 
     WebRtcIlbcfix_InitDecode(&s->decoder, mode, s->enhance);
-    avcodec_get_frame_defaults(&s->frame);
-    avctx->coded_frame = &s->frame;
 
     avctx->channels = 1;
     avctx->channel_layout = AV_CH_LAYOUT_MONO;
@@ -86,6 +83,7 @@ static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     ILBCDecContext *s = avctx->priv_data;
+    AVFrame *frame = data;
    int ret;
 
     if (s->decoder.no_of_bytes > buf_size) {
@@ -94,17 +92,16 @@ static int ilbc_decode_frame(AVCodecContext *avctx, void *data,
         return AVERROR_INVALIDDATA;
     }
 
-    s->frame.nb_samples = s->decoder.blockl;
-    if ((ret = ff_get_buffer(avctx, &s->frame)) < 0) {
+    frame->nb_samples = s->decoder.blockl;
+    if ((ret = ff_get_buffer(avctx, frame)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         return ret;
     }
 
-    WebRtcIlbcfix_DecodeImpl((WebRtc_Word16*) s->frame.data[0],
+    WebRtcIlbcfix_DecodeImpl((WebRtc_Word16*) frame->data[0],
                              (const WebRtc_UWord16*) buf, &s->decoder, 1);
 
     *got_frame_ptr = 1;
-    *(AVFrame *)data = s->frame;
 
     return s->decoder.no_of_bytes;
 }
...