Commit e88ca80d authored by Michael Niedermayer

Merge commit 'bfcd4b6a'

* commit 'bfcd4b6a':
  adpcmdec: set AVCodec.sample_fmts
  twinvq: use planar sample format
  ralf: use planar sample format
  mpc7/8: use planar sample format
  iac/imc: use planar sample format
  dcadec: use float planar sample format
  cook: use planar sample format
  atrac3: use float planar sample format
  apedec: output in planar sample format
  8svx: use planar sample format

Conflicts:
	libavcodec/8svx.c
	libavcodec/dcadec.c
	libavcodec/mpc7.c
	libavcodec/mpc8.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 82db8ee3 bfcd4b6a
...@@ -58,25 +58,6 @@ static const int8_t exponential[16] = { -128, -64, -32, -16, -8, -4, -2, -1, 0, ...@@ -58,25 +58,6 @@ static const int8_t exponential[16] = { -128, -64, -32, -16, -8, -4, -2, -1, 0,
#define MAX_FRAME_SIZE 2048 #define MAX_FRAME_SIZE 2048
/**
 * Interleave samples from a planar buffer containing all left channel
 * samples at the beginning and all right channel samples at the end.
 * Each sample is assumed to be in signed 8-bit format.
 *
 * @param dst  destination buffer receiving interleaved L/R sample pairs
 * @param src  source buffer: first half left samples, second half right
 * @param size the size in bytes of the dst and src buffers
 */
static void interleave_stereo(uint8_t *dst, const uint8_t *src, int size)
{
    /* Iterate over whole L/R pairs. The previous loop compared
     * dst < dst + size while advancing dst by 2 each iteration, which
     * wrote one byte past the end of dst for an odd size; counting
     * pairs writes exactly 2 * (size / 2) bytes. */
    int half = size >> 1;
    int i;

    for (i = 0; i < half; i++) {
        *dst++ = src[i];        /* left channel sample */
        *dst++ = src[i + half]; /* matching right channel sample */
    }
}
/** /**
* Delta decode the compressed values in src, and put the resulting * Delta decode the compressed values in src, and put the resulting
* decoded n samples in dst. * decoded n samples in dst.
...@@ -107,7 +88,8 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, ...@@ -107,7 +88,8 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt) int *got_frame_ptr, AVPacket *avpkt)
{ {
EightSvxContext *esc = avctx->priv_data; EightSvxContext *esc = avctx->priv_data;
int n, out_data_size, ret; int n, out_data_size;
int ch, ret;
uint8_t *src, *dst; uint8_t *src, *dst;
/* decode and interleave the first packet */ /* decode and interleave the first packet */
...@@ -152,10 +134,7 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, ...@@ -152,10 +134,7 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
deinterleaved_samples = avpkt->data; deinterleaved_samples = avpkt->data;
} }
if (avctx->channels == 2) memcpy(esc->samples, deinterleaved_samples, esc->samples_size);
interleave_stereo(esc->samples, deinterleaved_samples, esc->samples_size);
else
memcpy(esc->samples, deinterleaved_samples, esc->samples_size);
av_freep(&p); av_freep(&p);
} }
...@@ -170,11 +149,14 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data, ...@@ -170,11 +149,14 @@ static int eightsvx_decode_frame(AVCodecContext *avctx, void *data,
*got_frame_ptr = 1; *got_frame_ptr = 1;
*(AVFrame *)data = esc->frame; *(AVFrame *)data = esc->frame;
dst = esc->frame.data[0]; out_data_size = esc->frame.nb_samples;
src = esc->samples + esc->samples_idx; for (ch = 0; ch<avctx->channels; ch++) {
out_data_size = esc->frame.nb_samples * avctx->channels; dst = esc->frame.data[ch];
for (n = out_data_size; n > 0; n--) src = esc->samples + esc->samples_idx / avctx->channels + ch * esc->samples_size / avctx->channels;
*dst++ = *src++ + 128; for (n = out_data_size; n > 0; n--)
*dst++ = *src++ + 128;
}
out_data_size *= avctx->channels;
esc->samples_idx += out_data_size; esc->samples_idx += out_data_size;
return esc->table ? return esc->table ?
...@@ -200,7 +182,7 @@ static av_cold int eightsvx_decode_init(AVCodecContext *avctx) ...@@ -200,7 +182,7 @@ static av_cold int eightsvx_decode_init(AVCodecContext *avctx)
av_log(avctx, AV_LOG_ERROR, "Invalid codec id %d.\n", avctx->codec->id); av_log(avctx, AV_LOG_ERROR, "Invalid codec id %d.\n", avctx->codec->id);
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
avctx->sample_fmt = AV_SAMPLE_FMT_U8; avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
avcodec_get_frame_defaults(&esc->frame); avcodec_get_frame_defaults(&esc->frame);
avctx->coded_frame = &esc->frame; avctx->coded_frame = &esc->frame;
...@@ -230,6 +212,8 @@ AVCodec ff_eightsvx_fib_decoder = { ...@@ -230,6 +212,8 @@ AVCodec ff_eightsvx_fib_decoder = {
.close = eightsvx_decode_close, .close = eightsvx_decode_close,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("8SVX fibonacci"), .long_name = NULL_IF_CONFIG_SMALL("8SVX fibonacci"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_NONE },
}; };
#endif #endif
#if CONFIG_EIGHTSVX_EXP_DECODER #if CONFIG_EIGHTSVX_EXP_DECODER
...@@ -243,6 +227,8 @@ AVCodec ff_eightsvx_exp_decoder = { ...@@ -243,6 +227,8 @@ AVCodec ff_eightsvx_exp_decoder = {
.close = eightsvx_decode_close, .close = eightsvx_decode_close,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("8SVX exponential"), .long_name = NULL_IF_CONFIG_SMALL("8SVX exponential"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_NONE },
}; };
#endif #endif
#if CONFIG_PCM_S8_PLANAR_DECODER #if CONFIG_PCM_S8_PLANAR_DECODER
...@@ -256,5 +242,7 @@ AVCodec ff_pcm_s8_planar_decoder = { ...@@ -256,5 +242,7 @@ AVCodec ff_pcm_s8_planar_decoder = {
.decode = eightsvx_decode_frame, .decode = eightsvx_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"), .long_name = NULL_IF_CONFIG_SMALL("PCM signed 8-bit planar"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_NONE },
}; };
#endif #endif
...@@ -1268,7 +1268,10 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data, ...@@ -1268,7 +1268,10 @@ static int adpcm_decode_frame(AVCodecContext *avctx, void *data,
} }
#define ADPCM_DECODER(id_, name_, long_name_) \ static const enum AVSampleFormat sample_fmts_s16[] = { AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE };
#define ADPCM_DECODER(id_, sample_fmts_, name_, long_name_) \
AVCodec ff_ ## name_ ## _decoder = { \ AVCodec ff_ ## name_ ## _decoder = { \
.name = #name_, \ .name = #name_, \
.type = AVMEDIA_TYPE_AUDIO, \ .type = AVMEDIA_TYPE_AUDIO, \
...@@ -1278,33 +1281,34 @@ AVCodec ff_ ## name_ ## _decoder = { \ ...@@ -1278,33 +1281,34 @@ AVCodec ff_ ## name_ ## _decoder = { \
.decode = adpcm_decode_frame, \ .decode = adpcm_decode_frame, \
.capabilities = CODEC_CAP_DR1, \ .capabilities = CODEC_CAP_DR1, \
.long_name = NULL_IF_CONFIG_SMALL(long_name_), \ .long_name = NULL_IF_CONFIG_SMALL(long_name_), \
.sample_fmts = sample_fmts_, \
} }
/* Note: Do not forget to add new entries to the Makefile as well. */ /* Note: Do not forget to add new entries to the Makefile as well. */
ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, adpcm_4xm, "ADPCM 4X Movie"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_4XM, sample_fmts_s16, adpcm_4xm, "ADPCM 4X Movie");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, adpcm_ct, "ADPCM Creative Technology"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_CT, sample_fmts_s16, adpcm_ct, "ADPCM Creative Technology");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, adpcm_ea, "ADPCM Electronic Arts"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA, sample_fmts_s16, adpcm_ea, "ADPCM Electronic Arts");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_MAXIS_XA, sample_fmts_s16, adpcm_ea_maxis_xa, "ADPCM Electronic Arts Maxis CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, adpcm_ea_r1, "ADPCM Electronic Arts R1"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R1, sample_fmts_s16, adpcm_ea_r1, "ADPCM Electronic Arts R1");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, adpcm_ea_r2, "ADPCM Electronic Arts R2"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R2, sample_fmts_s16, adpcm_ea_r2, "ADPCM Electronic Arts R2");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, adpcm_ea_r3, "ADPCM Electronic Arts R3"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_R3, sample_fmts_s16, adpcm_ea_r3, "ADPCM Electronic Arts R3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, adpcm_ea_xas, "ADPCM Electronic Arts XAS"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_EA_XAS, sample_fmts_s16, adpcm_ea_xas, "ADPCM Electronic Arts XAS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, adpcm_ima_amv, "ADPCM IMA AMV"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_AMV, sample_fmts_s16, adpcm_ima_amv, "ADPCM IMA AMV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, adpcm_ima_apc, "ADPCM IMA CRYO APC"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_APC, sample_fmts_s16, adpcm_ima_apc, "ADPCM IMA CRYO APC");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, adpcm_ima_dk3, "ADPCM IMA Duck DK3"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK3, sample_fmts_s16, adpcm_ima_dk3, "ADPCM IMA Duck DK3");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, adpcm_ima_dk4, "ADPCM IMA Duck DK4"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_DK4, sample_fmts_s16, adpcm_ima_dk4, "ADPCM IMA Duck DK4");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_EACS, sample_fmts_s16, adpcm_ima_ea_eacs, "ADPCM IMA Electronic Arts EACS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_EA_SEAD, sample_fmts_s16, adpcm_ima_ea_sead, "ADPCM IMA Electronic Arts SEAD");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, adpcm_ima_iss, "ADPCM IMA Funcom ISS"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_ISS, sample_fmts_s16, adpcm_ima_iss, "ADPCM IMA Funcom ISS");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, adpcm_ima_qt, "ADPCM IMA QuickTime"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_QT, sample_fmts_s16, adpcm_ima_qt, "ADPCM IMA QuickTime");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_SMJPEG, sample_fmts_s16, adpcm_ima_smjpeg, "ADPCM IMA Loki SDL MJPEG");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, "ADPCM IMA WAV"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WAV, sample_fmts_s16, adpcm_ima_wav, "ADPCM IMA WAV");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, adpcm_ima_ws, "ADPCM IMA Westwood"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_IMA_WS, sample_fmts_s16, adpcm_ima_ws, "ADPCM IMA Westwood");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, adpcm_ms, "ADPCM Microsoft"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_MS, sample_fmts_s16, adpcm_ms, "ADPCM Microsoft");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_2, sample_fmts_s16, adpcm_sbpro_2, "ADPCM Sound Blaster Pro 2-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_3, sample_fmts_s16, adpcm_sbpro_3, "ADPCM Sound Blaster Pro 2.6-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_SBPRO_4, sample_fmts_s16, adpcm_sbpro_4, "ADPCM Sound Blaster Pro 4-bit");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, adpcm_swf, "ADPCM Shockwave Flash"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_SWF, sample_fmts_s16, adpcm_swf, "ADPCM Shockwave Flash");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, adpcm_thp, "ADPCM Nintendo Gamecube THP"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_THP, sample_fmts_s16, adpcm_thp, "ADPCM Nintendo Gamecube THP");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, adpcm_xa, "ADPCM CDROM XA"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_XA, sample_fmts_s16, adpcm_xa, "ADPCM CDROM XA");
ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, adpcm_yamaha, "ADPCM Yamaha"); ADPCM_DECODER(AV_CODEC_ID_ADPCM_YAMAHA, sample_fmts_s16, adpcm_yamaha, "ADPCM Yamaha");
...@@ -196,13 +196,13 @@ static av_cold int ape_decode_init(AVCodecContext *avctx) ...@@ -196,13 +196,13 @@ static av_cold int ape_decode_init(AVCodecContext *avctx)
s->bps = avctx->bits_per_coded_sample; s->bps = avctx->bits_per_coded_sample;
switch (s->bps) { switch (s->bps) {
case 8: case 8:
avctx->sample_fmt = AV_SAMPLE_FMT_U8; avctx->sample_fmt = AV_SAMPLE_FMT_U8P;
break; break;
case 16: case 16:
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
break; break;
case 24: case 24:
avctx->sample_fmt = AV_SAMPLE_FMT_S32; avctx->sample_fmt = AV_SAMPLE_FMT_S32P;
break; break;
default: default:
av_log_ask_for_sample(avctx, "Unsupported bits per coded sample %d\n", av_log_ask_for_sample(avctx, "Unsupported bits per coded sample %d\n",
...@@ -830,7 +830,7 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, ...@@ -830,7 +830,7 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
uint8_t *sample8; uint8_t *sample8;
int16_t *sample16; int16_t *sample16;
int32_t *sample24; int32_t *sample24;
int i, ret; int i, ch, ret;
int blockstodecode; int blockstodecode;
int bytes_used = 0; int bytes_used = 0;
...@@ -930,27 +930,24 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data, ...@@ -930,27 +930,24 @@ static int ape_decode_frame(AVCodecContext *avctx, void *data,
switch (s->bps) { switch (s->bps) {
case 8: case 8:
sample8 = (uint8_t *)s->frame.data[0]; for (ch = 0; ch < s->channels; ch++) {
for (i = 0; i < blockstodecode; i++) { sample8 = (uint8_t *)s->frame.data[ch];
*sample8++ = (s->decoded[0][i] + 0x80) & 0xff; for (i = 0; i < blockstodecode; i++)
if (s->channels == 2) *sample8++ = (s->decoded[ch][i] + 0x80) & 0xff;
*sample8++ = (s->decoded[1][i] + 0x80) & 0xff;
} }
break; break;
case 16: case 16:
sample16 = (int16_t *)s->frame.data[0]; for (ch = 0; ch < s->channels; ch++) {
for (i = 0; i < blockstodecode; i++) { sample16 = (int16_t *)s->frame.data[ch];
*sample16++ = s->decoded[0][i]; for (i = 0; i < blockstodecode; i++)
if (s->channels == 2) *sample16++ = s->decoded[ch][i];
*sample16++ = s->decoded[1][i];
} }
break; break;
case 24: case 24:
sample24 = (int32_t *)s->frame.data[0]; for (ch = 0; ch < s->channels; ch++) {
for (i = 0; i < blockstodecode; i++) { sample24 = (int32_t *)s->frame.data[ch];
*sample24++ = s->decoded[0][i] << 8; for (i = 0; i < blockstodecode; i++)
if (s->channels == 2) *sample24++ = s->decoded[ch][i] << 8;
*sample24++ = s->decoded[1][i] << 8;
} }
break; break;
} }
...@@ -995,5 +992,9 @@ AVCodec ff_ape_decoder = { ...@@ -995,5 +992,9 @@ AVCodec ff_ape_decoder = {
.capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1, .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DELAY | CODEC_CAP_DR1,
.flush = ape_flush, .flush = ape_flush,
.long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"), .long_name = NULL_IF_CONFIG_SMALL("Monkey's Audio"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_NONE },
.priv_class = &ape_decoder_class, .priv_class = &ape_decoder_class,
}; };
...@@ -112,7 +112,6 @@ typedef struct { ...@@ -112,7 +112,6 @@ typedef struct {
//@} //@}
//@{ //@{
/** data buffers */ /** data buffers */
float *outSamples[2];
uint8_t* decoded_bytes_buffer; uint8_t* decoded_bytes_buffer;
float tempBuf[1070]; float tempBuf[1070];
//@} //@}
...@@ -198,7 +197,7 @@ static int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){ ...@@ -198,7 +197,7 @@ static int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){
} }
static av_cold int init_atrac3_transforms(ATRAC3Context *q, int is_float) { static av_cold int init_atrac3_transforms(ATRAC3Context *q) {
float enc_window[256]; float enc_window[256];
int i; int i;
...@@ -214,7 +213,7 @@ static av_cold int init_atrac3_transforms(ATRAC3Context *q, int is_float) { ...@@ -214,7 +213,7 @@ static av_cold int init_atrac3_transforms(ATRAC3Context *q, int is_float) {
} }
/* Initialize the MDCT transform. */ /* Initialize the MDCT transform. */
return ff_mdct_init(&q->mdct_ctx, 9, 1, is_float ? 1.0 / 32768 : 1.0); return ff_mdct_init(&q->mdct_ctx, 9, 1, 1.0 / 32768);
} }
/** /**
...@@ -227,7 +226,6 @@ static av_cold int atrac3_decode_close(AVCodecContext *avctx) ...@@ -227,7 +226,6 @@ static av_cold int atrac3_decode_close(AVCodecContext *avctx)
av_free(q->pUnits); av_free(q->pUnits);
av_free(q->decoded_bytes_buffer); av_free(q->decoded_bytes_buffer);
av_freep(&q->outSamples[0]);
ff_mdct_end(&q->mdct_ctx); ff_mdct_end(&q->mdct_ctx);
...@@ -838,8 +836,6 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data, ...@@ -838,8 +836,6 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
ATRAC3Context *q = avctx->priv_data; ATRAC3Context *q = avctx->priv_data;
int result; int result;
const uint8_t* databuf; const uint8_t* databuf;
float *samples_flt;
int16_t *samples_s16;
if (buf_size < avctx->block_align) { if (buf_size < avctx->block_align) {
av_log(avctx, AV_LOG_ERROR, av_log(avctx, AV_LOG_ERROR,
...@@ -853,8 +849,6 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data, ...@@ -853,8 +849,6 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return result; return result;
} }
samples_flt = (float *)q->frame.data[0];
samples_s16 = (int16_t *)q->frame.data[0];
/* Check if we need to descramble and what buffer to pass on. */ /* Check if we need to descramble and what buffer to pass on. */
if (q->scrambled_stream) { if (q->scrambled_stream) {
...@@ -864,27 +858,13 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data, ...@@ -864,27 +858,13 @@ static int atrac3_decode_frame(AVCodecContext *avctx, void *data,
databuf = buf; databuf = buf;
} }
if (q->channels == 1 && avctx->sample_fmt == AV_SAMPLE_FMT_FLT) result = decodeFrame(q, databuf, (float **)q->frame.extended_data);
result = decodeFrame(q, databuf, &samples_flt);
else
result = decodeFrame(q, databuf, q->outSamples);
if (result != 0) { if (result != 0) {
av_log(NULL,AV_LOG_ERROR,"Frame decoding error!\n"); av_log(NULL,AV_LOG_ERROR,"Frame decoding error!\n");
return result; return result;
} }
/* interleave */
if (q->channels == 2 && avctx->sample_fmt == AV_SAMPLE_FMT_FLT) {
q->fmt_conv.float_interleave(samples_flt,
(const float **)q->outSamples,
SAMPLES_PER_FRAME, 2);
} else if (avctx->sample_fmt == AV_SAMPLE_FMT_S16) {
q->fmt_conv.float_to_int16_interleave(samples_s16,
(const float **)q->outSamples,
SAMPLES_PER_FRAME, q->channels);
}
*got_frame_ptr = 1; *got_frame_ptr = 1;
*(AVFrame *)data = q->frame; *(AVFrame *)data = q->frame;
...@@ -1006,12 +986,9 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx) ...@@ -1006,12 +986,9 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
vlcs_initialized = 1; vlcs_initialized = 1;
} }
if (avctx->request_sample_fmt == AV_SAMPLE_FMT_FLT) avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
avctx->sample_fmt = AV_SAMPLE_FMT_FLT;
else
avctx->sample_fmt = AV_SAMPLE_FMT_S16;
if ((ret = init_atrac3_transforms(q, avctx->sample_fmt == AV_SAMPLE_FMT_FLT))) { if ((ret = init_atrac3_transforms(q))) {
av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n"); av_log(avctx, AV_LOG_ERROR, "Error initializing MDCT\n");
av_freep(&q->decoded_bytes_buffer); av_freep(&q->decoded_bytes_buffer);
return ret; return ret;
...@@ -1049,15 +1026,6 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx) ...@@ -1049,15 +1026,6 @@ static av_cold int atrac3_decode_init(AVCodecContext *avctx)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} }
if (avctx->channels > 1 || avctx->sample_fmt == AV_SAMPLE_FMT_S16) {
q->outSamples[0] = av_mallocz(SAMPLES_PER_FRAME * avctx->channels * sizeof(*q->outSamples[0]));
q->outSamples[1] = q->outSamples[0] + SAMPLES_PER_FRAME;
if (!q->outSamples[0]) {
atrac3_decode_close(avctx);
return AVERROR(ENOMEM);
}
}
avcodec_get_frame_defaults(&q->frame); avcodec_get_frame_defaults(&q->frame);
avctx->coded_frame = &q->frame; avctx->coded_frame = &q->frame;
...@@ -1076,4 +1044,6 @@ AVCodec ff_atrac3_decoder = ...@@ -1076,4 +1044,6 @@ AVCodec ff_atrac3_decoder =
.decode = atrac3_decode_frame, .decode = atrac3_decode_frame,
.capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1, .capabilities = CODEC_CAP_SUBFRAMES | CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Atrac 3 (Adaptive TRansform Acoustic Coding 3)"), .long_name = NULL_IF_CONFIG_SMALL("Atrac 3 (Adaptive TRansform Acoustic Coding 3)"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
}; };
...@@ -119,9 +119,10 @@ typedef struct cook { ...@@ -119,9 +119,10 @@ typedef struct cook {
void (*interpolate)(struct cook *q, float *buffer, void (*interpolate)(struct cook *q, float *buffer,
int gain_index, int gain_index_next); int gain_index, int gain_index_next);
void (*saturate_output)(struct cook *q, int chan, float *out); void (*saturate_output)(struct cook *q, float *out);
AVCodecContext* avctx; AVCodecContext* avctx;
DSPContext dsp;
AVFrame frame; AVFrame frame;
GetBitContext gb; GetBitContext gb;
/* stream data */ /* stream data */
...@@ -887,18 +888,15 @@ static inline void decode_bytes_and_gain(COOKContext *q, COOKSubpacket *p, ...@@ -887,18 +888,15 @@ static inline void decode_bytes_and_gain(COOKContext *q, COOKSubpacket *p,
* Saturate the output signal and interleave. * Saturate the output signal and interleave.
* *
* @param q pointer to the COOKContext * @param q pointer to the COOKContext
* @param chan channel to saturate
* @param out pointer to the output vector * @param out pointer to the output vector
*/ */
static void saturate_output_float(COOKContext *q, int chan, float *out) static void saturate_output_float(COOKContext *q, float *out)
{ {
int j; q->dsp.vector_clipf(out, q->mono_mdct_output + q->samples_per_channel,
float *output = q->mono_mdct_output + q->samples_per_channel; -1.0f, 1.0f, FFALIGN(q->samples_per_channel, 8));
for (j = 0; j < q->samples_per_channel; j++) {
out[chan + q->nb_channels * j] = av_clipf(output[j], -1.0, 1.0);
}
} }
/** /**
* Final part of subpacket decoding: * Final part of subpacket decoding:
* Apply modulated lapped transform, gain compensation, * Apply modulated lapped transform, gain compensation,
...@@ -909,15 +907,14 @@ static void saturate_output_float(COOKContext *q, int chan, float *out) ...@@ -909,15 +907,14 @@ static void saturate_output_float(COOKContext *q, int chan, float *out)
* @param gains_ptr array of current/prev gain pointers * @param gains_ptr array of current/prev gain pointers
* @param previous_buffer pointer to the previous buffer to be used for overlapping * @param previous_buffer pointer to the previous buffer to be used for overlapping
* @param out pointer to the output buffer * @param out pointer to the output buffer
* @param chan 0: left or single channel, 1: right channel
*/ */
static inline void mlt_compensate_output(COOKContext *q, float *decode_buffer, static inline void mlt_compensate_output(COOKContext *q, float *decode_buffer,
cook_gains *gains_ptr, float *previous_buffer, cook_gains *gains_ptr, float *previous_buffer,
float *out, int chan) float *out)
{ {
imlt_gain(q, decode_buffer, gains_ptr, previous_buffer); imlt_gain(q, decode_buffer, gains_ptr, previous_buffer);
if (out) if (out)
q->saturate_output(q, chan, out); q->saturate_output(q, out);
} }
...@@ -930,7 +927,7 @@ static inline void mlt_compensate_output(COOKContext *q, float *decode_buffer, ...@@ -930,7 +927,7 @@ static inline void mlt_compensate_output(COOKContext *q, float *decode_buffer,
* @param outbuffer pointer to the outbuffer * @param outbuffer pointer to the outbuffer
*/ */
static int decode_subpacket(COOKContext *q, COOKSubpacket *p, static int decode_subpacket(COOKContext *q, COOKSubpacket *p,
const uint8_t *inbuffer, float *outbuffer) const uint8_t *inbuffer, float **outbuffer)
{ {
int sub_packet_size = p->size; int sub_packet_size = p->size;
int res; int res;
...@@ -953,15 +950,18 @@ static int decode_subpacket(COOKContext *q, COOKSubpacket *p, ...@@ -953,15 +950,18 @@ static int decode_subpacket(COOKContext *q, COOKSubpacket *p,
} }
mlt_compensate_output(q, q->decode_buffer_1, &p->gains1, mlt_compensate_output(q, q->decode_buffer_1, &p->gains1,
p->mono_previous_buffer1, outbuffer, p->ch_idx); p->mono_previous_buffer1,
outbuffer ? outbuffer[p->ch_idx] : NULL);
if (p->num_channels == 2) if (p->num_channels == 2)
if (p->joint_stereo) if (p->joint_stereo)
mlt_compensate_output(q, q->decode_buffer_2, &p->gains1, mlt_compensate_output(q, q->decode_buffer_2, &p->gains1,
p->mono_previous_buffer2, outbuffer, p->ch_idx + 1); p->mono_previous_buffer2,
outbuffer ? outbuffer[p->ch_idx + 1] : NULL);
else else
mlt_compensate_output(q, q->decode_buffer_2, &p->gains2, mlt_compensate_output(q, q->decode_buffer_2, &p->gains2,
p->mono_previous_buffer2, outbuffer, p->ch_idx + 1); p->mono_previous_buffer2,
outbuffer ? outbuffer[p->ch_idx + 1] : NULL);
return 0; return 0;
} }
...@@ -978,7 +978,7 @@ static int cook_decode_frame(AVCodecContext *avctx, void *data, ...@@ -978,7 +978,7 @@ static int cook_decode_frame(AVCodecContext *avctx, void *data,
const uint8_t *buf = avpkt->data; const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size; int buf_size = avpkt->size;
COOKContext *q = avctx->priv_data; COOKContext *q = avctx->priv_data;
float *samples = NULL; float **samples = NULL;
int i, ret; int i, ret;
int offset = 0; int offset = 0;
int chidx = 0; int chidx = 0;
...@@ -993,7 +993,7 @@ static int cook_decode_frame(AVCodecContext *avctx, void *data, ...@@ -993,7 +993,7 @@ static int cook_decode_frame(AVCodecContext *avctx, void *data,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }
samples = (float *) q->frame.data[0]; samples = (float **)q->frame.extended_data;
} }
/* estimate subpacket sizes */ /* estimate subpacket sizes */
...@@ -1110,6 +1110,8 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) ...@@ -1110,6 +1110,8 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
/* Initialize RNG. */ /* Initialize RNG. */
av_lfg_init(&q->random_state, 0); av_lfg_init(&q->random_state, 0);
ff_dsputil_init(&q->dsp, avctx);
while (edata_ptr < edata_ptr_end) { while (edata_ptr < edata_ptr_end) {
/* 8 for mono, 16 for stereo, ? for multichannel /* 8 for mono, 16 for stereo, ? for multichannel
Swap to right endianness so we don't need to care later on. */ Swap to right endianness so we don't need to care later on. */
...@@ -1290,7 +1292,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx) ...@@ -1290,7 +1292,7 @@ static av_cold int cook_decode_init(AVCodecContext *avctx)
return AVERROR_PATCHWELCOME; return AVERROR_PATCHWELCOME;
} }
avctx->sample_fmt = AV_SAMPLE_FMT_FLT; avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
if (channel_mask) if (channel_mask)
avctx->channel_layout = channel_mask; avctx->channel_layout = channel_mask;
else else
...@@ -1315,4 +1317,6 @@ AVCodec ff_cook_decoder = { ...@@ -1315,4 +1317,6 @@ AVCodec ff_cook_decoder = {
.decode = cook_decode_frame, .decode = cook_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Cook / Cooker / Gecko (RealAudio G2)"), .long_name = NULL_IF_CONFIG_SMALL("Cook / Cooker / Gecko (RealAudio G2)"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
}; };
This diff is collapsed.
...@@ -242,7 +242,7 @@ static av_cold int imc_decode_init(AVCodecContext *avctx) ...@@ -242,7 +242,7 @@ static av_cold int imc_decode_init(AVCodecContext *avctx)
return ret; return ret;
} }
ff_dsputil_init(&q->dsp, avctx); ff_dsputil_init(&q->dsp, avctx);
avctx->sample_fmt = AV_SAMPLE_FMT_FLT; avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO avctx->channel_layout = avctx->channels == 1 ? AV_CH_LAYOUT_MONO
: AV_CH_LAYOUT_STEREO; : AV_CH_LAYOUT_STEREO;
...@@ -662,7 +662,7 @@ static void imc_imdct256(IMCContext *q, IMCChannel *chctx, int channels) ...@@ -662,7 +662,7 @@ static void imc_imdct256(IMCContext *q, IMCChannel *chctx, int channels)
int i; int i;
float re, im; float re, im;
float *dst1 = q->out_samples; float *dst1 = q->out_samples;
float *dst2 = q->out_samples + (COEFFS - 1) * channels; float *dst2 = q->out_samples + (COEFFS - 1);
/* prerotation */ /* prerotation */
for (i = 0; i < COEFFS / 2; i++) { for (i = 0; i < COEFFS / 2; i++) {
...@@ -684,8 +684,8 @@ static void imc_imdct256(IMCContext *q, IMCChannel *chctx, int channels) ...@@ -684,8 +684,8 @@ static void imc_imdct256(IMCContext *q, IMCChannel *chctx, int channels)
+ (q->mdct_sine_window[i * 2] * re); + (q->mdct_sine_window[i * 2] * re);
*dst2 = (q->mdct_sine_window[i * 2] * chctx->last_fft_im[i]) *dst2 = (q->mdct_sine_window[i * 2] * chctx->last_fft_im[i])
- (q->mdct_sine_window[COEFFS - 1 - i * 2] * re); - (q->mdct_sine_window[COEFFS - 1 - i * 2] * re);
dst1 += channels * 2; dst1 += 2;
dst2 -= channels * 2; dst2 -= 2;
chctx->last_fft_im[i] = im; chctx->last_fft_im[i] = im;
} }
} }
...@@ -786,7 +786,6 @@ static int imc_decode_block(AVCodecContext *avctx, IMCContext *q, int ch) ...@@ -786,7 +786,6 @@ static int imc_decode_block(AVCodecContext *avctx, IMCContext *q, int ch)
chctx->decoder_reset = 1; chctx->decoder_reset = 1;
if (chctx->decoder_reset) { if (chctx->decoder_reset) {
memset(q->out_samples, 0, COEFFS * sizeof(*q->out_samples));
for (i = 0; i < BANDS; i++) for (i = 0; i < BANDS; i++)
chctx->old_floor[i] = 1.0; chctx->old_floor[i] = 1.0;
for (i = 0; i < COEFFS; i++) for (i = 0; i < COEFFS; i++)
...@@ -945,7 +944,7 @@ static int imc_decode_frame(AVCodecContext *avctx, void *data, ...@@ -945,7 +944,7 @@ static int imc_decode_frame(AVCodecContext *avctx, void *data,
} }
for (i = 0; i < avctx->channels; i++) { for (i = 0; i < avctx->channels; i++) {
q->out_samples = (float*)q->frame.data[0] + i; q->out_samples = (float *)q->frame.extended_data[i];
q->dsp.bswap16_buf(buf16, (const uint16_t*)buf, IMC_BLOCK_SIZE / 2); q->dsp.bswap16_buf(buf16, (const uint16_t*)buf, IMC_BLOCK_SIZE / 2);
...@@ -958,15 +957,8 @@ static int imc_decode_frame(AVCodecContext *avctx, void *data, ...@@ -958,15 +957,8 @@ static int imc_decode_frame(AVCodecContext *avctx, void *data,
} }
if (avctx->channels == 2) { if (avctx->channels == 2) {
float *src = (float*)q->frame.data[0], t1, t2; q->dsp.butterflies_float((float *)q->frame.extended_data[0],
(float *)q->frame.extended_data[1], COEFFS);
for (i = 0; i < COEFFS; i++) {
t1 = src[0];
t2 = src[1];
src[0] = t1 + t2;
src[1] = t1 - t2;
src += 2;
}
} }
*got_frame_ptr = 1; *got_frame_ptr = 1;
...@@ -996,6 +988,8 @@ AVCodec ff_imc_decoder = { ...@@ -996,6 +988,8 @@ AVCodec ff_imc_decoder = {
.decode = imc_decode_frame, .decode = imc_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("IMC (Intel Music Coder)"), .long_name = NULL_IF_CONFIG_SMALL("IMC (Intel Music Coder)"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
}; };
#endif #endif
#if CONFIG_IAC_DECODER #if CONFIG_IAC_DECODER
...@@ -1009,5 +1003,7 @@ AVCodec ff_iac_decoder = { ...@@ -1009,5 +1003,7 @@ AVCodec ff_iac_decoder = {
.decode = imc_decode_frame, .decode = imc_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("IAC (Indeo Audio Coder)"), .long_name = NULL_IF_CONFIG_SMALL("IAC (Indeo Audio Coder)"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
}; };
#endif #endif
...@@ -43,28 +43,24 @@ void ff_mpc_init(void) ...@@ -43,28 +43,24 @@ void ff_mpc_init(void)
/** /**
* Process decoded Musepack data and produce PCM * Process decoded Musepack data and produce PCM
*/ */
static void mpc_synth(MPCContext *c, int16_t *out, int channels) static void mpc_synth(MPCContext *c, int16_t **out, int channels)
{ {
int dither_state = 0; int dither_state = 0;
int i, ch; int i, ch;
OUT_INT samples[MPA_MAX_CHANNELS * MPA_FRAME_SIZE], *samples_ptr;
for(ch = 0; ch < channels; ch++){ for(ch = 0; ch < channels; ch++){
samples_ptr = samples + ch;
for(i = 0; i < SAMPLES_PER_BAND; i++) { for(i = 0; i < SAMPLES_PER_BAND; i++) {
ff_mpa_synth_filter_fixed(&c->mpadsp, ff_mpa_synth_filter_fixed(&c->mpadsp,
c->synth_buf[ch], &(c->synth_buf_offset[ch]), c->synth_buf[ch], &(c->synth_buf_offset[ch]),
ff_mpa_synth_window_fixed, &dither_state, ff_mpa_synth_window_fixed, &dither_state,
samples_ptr, channels, out[ch] + 32 * i, 1,
c->sb_samples[ch][i]); c->sb_samples[ch][i]);
samples_ptr += 32 * channels;
} }
} }
for(i = 0; i < MPC_FRAME_SIZE*channels; i++)
*out++=samples[i];
} }
void ff_mpc_dequantize_and_synth(MPCContext * c, int maxband, void *data, int channels) void ff_mpc_dequantize_and_synth(MPCContext * c, int maxband, int16_t **out,
int channels)
{ {
int i, j, ch; int i, j, ch;
Band *bands = c->bands; Band *bands = c->bands;
...@@ -100,5 +96,5 @@ void ff_mpc_dequantize_and_synth(MPCContext * c, int maxband, void *data, int ch ...@@ -100,5 +96,5 @@ void ff_mpc_dequantize_and_synth(MPCContext * c, int maxband, void *data, int ch
} }
} }
mpc_synth(c, data, channels); mpc_synth(c, out, channels);
} }
...@@ -73,6 +73,6 @@ typedef struct { ...@@ -73,6 +73,6 @@ typedef struct {
} MPCContext; } MPCContext;
void ff_mpc_init(void); void ff_mpc_init(void);
void ff_mpc_dequantize_and_synth(MPCContext *c, int maxband, void *dst, int channels); void ff_mpc_dequantize_and_synth(MPCContext *c, int maxband, int16_t **out, int channels);
#endif /* AVCODEC_MPC_H */ #endif /* AVCODEC_MPC_H */
...@@ -90,7 +90,7 @@ static av_cold int mpc7_decode_init(AVCodecContext * avctx) ...@@ -90,7 +90,7 @@ static av_cold int mpc7_decode_init(AVCodecContext * avctx)
c->IS, c->MSS, c->gapless, c->lastframelen, c->maxbands); c->IS, c->MSS, c->gapless, c->lastframelen, c->maxbands);
c->frames_to_skip = 0; c->frames_to_skip = 0;
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
avctx->channel_layout = AV_CH_LAYOUT_STEREO; avctx->channel_layout = AV_CH_LAYOUT_STEREO;
avcodec_get_frame_defaults(&c->frame); avcodec_get_frame_defaults(&c->frame);
...@@ -293,7 +293,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx, void *data, ...@@ -293,7 +293,7 @@ static int mpc7_decode_frame(AVCodecContext * avctx, void *data,
for(ch = 0; ch < 2; ch++) for(ch = 0; ch < 2; ch++)
idx_to_quant(c, &gb, bands[i].res[ch], c->Q[ch] + off); idx_to_quant(c, &gb, bands[i].res[ch], c->Q[ch] + off);
ff_mpc_dequantize_and_synth(c, mb, c->frame.data[0], 2); ff_mpc_dequantize_and_synth(c, mb, (int16_t **)c->frame.extended_data, 2);
if(last_frame) if(last_frame)
c->frame.nb_samples = c->lastframelen; c->frame.nb_samples = c->lastframelen;
...@@ -342,4 +342,6 @@ AVCodec ff_mpc7_decoder = { ...@@ -342,4 +342,6 @@ AVCodec ff_mpc7_decoder = {
.flush = mpc7_decode_flush, .flush = mpc7_decode_flush,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Musepack SV7"), .long_name = NULL_IF_CONFIG_SMALL("Musepack SV7"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_NONE },
}; };
...@@ -139,7 +139,7 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx) ...@@ -139,7 +139,7 @@ static av_cold int mpc8_decode_init(AVCodecContext * avctx)
c->MSS = get_bits1(&gb); c->MSS = get_bits1(&gb);
c->frames = 1 << (get_bits(&gb, 3) * 2); c->frames = 1 << (get_bits(&gb, 3) * 2);
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
avctx->channel_layout = (channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO; avctx->channel_layout = (channels==2) ? AV_CH_LAYOUT_STEREO : AV_CH_LAYOUT_MONO;
avctx->channels = channels; avctx->channels = channels;
...@@ -413,7 +413,8 @@ static int mpc8_decode_frame(AVCodecContext * avctx, void *data, ...@@ -413,7 +413,8 @@ static int mpc8_decode_frame(AVCodecContext * avctx, void *data,
} }
} }
ff_mpc_dequantize_and_synth(c, maxband - 1, c->frame.data[0], ff_mpc_dequantize_and_synth(c, maxband - 1,
(int16_t **)c->frame.extended_data,
avctx->channels); avctx->channels);
c->cur_frame++; c->cur_frame++;
...@@ -446,4 +447,6 @@ AVCodec ff_mpc8_decoder = { ...@@ -446,4 +447,6 @@ AVCodec ff_mpc8_decoder = {
.flush = mpc8_decode_flush, .flush = mpc8_decode_flush,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"), .long_name = NULL_IF_CONFIG_SMALL("Musepack SV8"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_NONE },
}; };
...@@ -149,7 +149,7 @@ static av_cold int decode_init(AVCodecContext *avctx) ...@@ -149,7 +149,7 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->sample_rate, avctx->channels); avctx->sample_rate, avctx->channels);
return AVERROR_INVALIDDATA; return AVERROR_INVALIDDATA;
} }
avctx->sample_fmt = AV_SAMPLE_FMT_S16; avctx->sample_fmt = AV_SAMPLE_FMT_S16P;
avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO avctx->channel_layout = (avctx->channels == 2) ? AV_CH_LAYOUT_STEREO
: AV_CH_LAYOUT_MONO; : AV_CH_LAYOUT_MONO;
...@@ -338,7 +338,8 @@ static void apply_lpc(RALFContext *ctx, int ch, int length, int bits) ...@@ -338,7 +338,8 @@ static void apply_lpc(RALFContext *ctx, int ch, int length, int bits)
} }
} }
static int decode_block(AVCodecContext *avctx, GetBitContext *gb, int16_t *dst) static int decode_block(AVCodecContext *avctx, GetBitContext *gb,
int16_t *dst0, int16_t *dst1)
{ {
RALFContext *ctx = avctx->priv_data; RALFContext *ctx = avctx->priv_data;
int len, ch, ret; int len, ch, ret;
...@@ -382,35 +383,35 @@ static int decode_block(AVCodecContext *avctx, GetBitContext *gb, int16_t *dst) ...@@ -382,35 +383,35 @@ static int decode_block(AVCodecContext *avctx, GetBitContext *gb, int16_t *dst)
switch (dmode) { switch (dmode) {
case 0: case 0:
for (i = 0; i < len; i++) for (i = 0; i < len; i++)
*dst++ = ch0[i] + ctx->bias[0]; dst0[i] = ch0[i] + ctx->bias[0];
break; break;
case 1: case 1:
for (i = 0; i < len; i++) { for (i = 0; i < len; i++) {
*dst++ = ch0[i] + ctx->bias[0]; dst0[i] = ch0[i] + ctx->bias[0];
*dst++ = ch1[i] + ctx->bias[1]; dst1[i] = ch1[i] + ctx->bias[1];
} }
break; break;
case 2: case 2:
for (i = 0; i < len; i++) { for (i = 0; i < len; i++) {
ch0[i] += ctx->bias[0]; ch0[i] += ctx->bias[0];
*dst++ = ch0[i]; dst0[i] = ch0[i];
*dst++ = ch0[i] - (ch1[i] + ctx->bias[1]); dst1[i] = ch0[i] - (ch1[i] + ctx->bias[1]);
} }
break; break;
case 3: case 3:
for (i = 0; i < len; i++) { for (i = 0; i < len; i++) {
t = ch0[i] + ctx->bias[0]; t = ch0[i] + ctx->bias[0];
t2 = ch1[i] + ctx->bias[1]; t2 = ch1[i] + ctx->bias[1];
*dst++ = t + t2; dst0[i] = t + t2;
*dst++ = t; dst1[i] = t;
} }
break; break;
case 4: case 4:
for (i = 0; i < len; i++) { for (i = 0; i < len; i++) {
t = ch1[i] + ctx->bias[1]; t = ch1[i] + ctx->bias[1];
t2 = ((ch0[i] + ctx->bias[0]) << 1) | (t & 1); t2 = ((ch0[i] + ctx->bias[0]) << 1) | (t & 1);
*dst++ = (t2 + t) / 2; dst0[i] = (t2 + t) / 2;
*dst++ = (t2 - t) / 2; dst1[i] = (t2 - t) / 2;
} }
break; break;
} }
...@@ -424,7 +425,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, ...@@ -424,7 +425,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
AVPacket *avpkt) AVPacket *avpkt)
{ {
RALFContext *ctx = avctx->priv_data; RALFContext *ctx = avctx->priv_data;
int16_t *samples; int16_t *samples0;
int16_t *samples1;
int ret; int ret;
GetBitContext gb; GetBitContext gb;
int table_size, table_bytes, i; int table_size, table_bytes, i;
...@@ -465,7 +467,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, ...@@ -465,7 +467,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
av_log(avctx, AV_LOG_ERROR, "Me fail get_buffer()? That's unpossible!\n"); av_log(avctx, AV_LOG_ERROR, "Me fail get_buffer()? That's unpossible!\n");
return ret; return ret;
} }
samples = (int16_t*)ctx->frame.data[0]; samples0 = (int16_t *)ctx->frame.data[0];
samples1 = (int16_t *)ctx->frame.data[1];
if (src_size < 5) { if (src_size < 5) {
av_log(avctx, AV_LOG_ERROR, "too short packets are too short!\n"); av_log(avctx, AV_LOG_ERROR, "too short packets are too short!\n");
...@@ -498,8 +501,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, ...@@ -498,8 +501,8 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr,
break; break;
} }
init_get_bits(&gb, block_pointer, ctx->block_size[i] * 8); init_get_bits(&gb, block_pointer, ctx->block_size[i] * 8);
if (decode_block(avctx, &gb, samples + ctx->sample_offset if (decode_block(avctx, &gb, samples0 + ctx->sample_offset,
* avctx->channels) < 0) { samples1 + ctx->sample_offset) < 0) {
av_log(avctx, AV_LOG_ERROR, "Sir, I got carsick in your office. Not decoding the rest of packet.\n"); av_log(avctx, AV_LOG_ERROR, "Sir, I got carsick in your office. Not decoding the rest of packet.\n");
break; break;
} }
...@@ -533,4 +536,6 @@ AVCodec ff_ralf_decoder = { ...@@ -533,4 +536,6 @@ AVCodec ff_ralf_decoder = {
.flush = decode_flush, .flush = decode_flush,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("RealAudio Lossless"), .long_name = NULL_IF_CONFIG_SMALL("RealAudio Lossless"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_NONE },
}; };
...@@ -666,7 +666,7 @@ static void imdct_and_window(TwinContext *tctx, enum FrameType ftype, int wtype, ...@@ -666,7 +666,7 @@ static void imdct_and_window(TwinContext *tctx, enum FrameType ftype, int wtype,
} }
static void imdct_output(TwinContext *tctx, enum FrameType ftype, int wtype, static void imdct_output(TwinContext *tctx, enum FrameType ftype, int wtype,
float *out) float **out)
{ {
const ModeTab *mtab = tctx->mtab; const ModeTab *mtab = tctx->mtab;
int size1, size2; int size1, size2;
...@@ -685,24 +685,15 @@ static void imdct_output(TwinContext *tctx, enum FrameType ftype, int wtype, ...@@ -685,24 +685,15 @@ static void imdct_output(TwinContext *tctx, enum FrameType ftype, int wtype,
size2 = tctx->last_block_pos[0]; size2 = tctx->last_block_pos[0];
size1 = mtab->size - size2; size1 = mtab->size - size2;
if (tctx->avctx->channels == 2) {
tctx->dsp.butterflies_float_interleave(out, prev_buf,
&prev_buf[2*mtab->size],
size1);
out += 2 * size1;
tctx->dsp.butterflies_float_interleave(out, tctx->curr_frame,
&tctx->curr_frame[2*mtab->size],
size2);
} else {
memcpy(out, prev_buf, size1 * sizeof(*out));
out += size1; memcpy(&out[0][0 ], prev_buf, size1 * sizeof(out[0][0]));
memcpy(&out[0][size1], tctx->curr_frame, size2 * sizeof(out[0][0]));
memcpy(out, tctx->curr_frame, size2 * sizeof(*out)); if (tctx->avctx->channels == 2) {
memcpy(&out[1][0], &prev_buf[2*mtab->size], size1 * sizeof(out[1][0]));
memcpy(&out[1][size1], &tctx->curr_frame[2*mtab->size], size2 * sizeof(out[1][0]));
tctx->dsp.butterflies_float(out[0], out[1], mtab->size);
} }
} }
static void dec_bark_env(TwinContext *tctx, const uint8_t *in, int use_hist, static void dec_bark_env(TwinContext *tctx, const uint8_t *in, int use_hist,
...@@ -825,7 +816,7 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data, ...@@ -825,7 +816,7 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
TwinContext *tctx = avctx->priv_data; TwinContext *tctx = avctx->priv_data;
GetBitContext gb; GetBitContext gb;
const ModeTab *mtab = tctx->mtab; const ModeTab *mtab = tctx->mtab;
float *out = NULL; float **out = NULL;
enum FrameType ftype; enum FrameType ftype;
int window_type, ret; int window_type, ret;
static const enum FrameType wtype_to_ftype_table[] = { static const enum FrameType wtype_to_ftype_table[] = {
...@@ -846,7 +837,7 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data, ...@@ -846,7 +837,7 @@ static int twin_decode_frame(AVCodecContext * avctx, void *data,
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n"); av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret; return ret;
} }
out = (float *)tctx->frame.data[0]; out = (float **)tctx->frame.extended_data;
} }
init_get_bits(&gb, buf, buf_size * 8); init_get_bits(&gb, buf, buf_size * 8);
...@@ -1119,7 +1110,7 @@ static av_cold int twin_decode_init(AVCodecContext *avctx) ...@@ -1119,7 +1110,7 @@ static av_cold int twin_decode_init(AVCodecContext *avctx)
int isampf, ibps; int isampf, ibps;
tctx->avctx = avctx; tctx->avctx = avctx;
avctx->sample_fmt = AV_SAMPLE_FMT_FLT; avctx->sample_fmt = AV_SAMPLE_FMT_FLTP;
if (!avctx->extradata || avctx->extradata_size < 12) { if (!avctx->extradata || avctx->extradata_size < 12) {
av_log(avctx, AV_LOG_ERROR, "Missing or incomplete extradata\n"); av_log(avctx, AV_LOG_ERROR, "Missing or incomplete extradata\n");
...@@ -1184,4 +1175,6 @@ AVCodec ff_twinvq_decoder = { ...@@ -1184,4 +1175,6 @@ AVCodec ff_twinvq_decoder = {
.decode = twin_decode_frame, .decode = twin_decode_frame,
.capabilities = CODEC_CAP_DR1, .capabilities = CODEC_CAP_DR1,
.long_name = NULL_IF_CONFIG_SMALL("VQF TwinVQ"), .long_name = NULL_IF_CONFIG_SMALL("VQF TwinVQ"),
.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
}; };
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment