Commit 70c0f13a authored by Michael Niedermayer

Merge commit '7c278d2a'

* commit '7c278d2a':
  alacenc: support 24-bit encoding
  pcmdec: use planar sample format for pcm_s16le_planar
  vorbisdec: use float planar sample format

Conflicts:
	libavcodec/pcm.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents da8242e2 7c278d2a
libavcodec/alacenc.c
@@ -27,7 +27,6 @@
 #include "mathops.h"
 
 #define DEFAULT_FRAME_SIZE        4096
-#define DEFAULT_SAMPLE_SIZE       16
 #define MAX_CHANNELS              8
 #define ALAC_EXTRADATA_SIZE       36
 #define ALAC_FRAME_HEADER_SIZE    55
@@ -66,6 +65,7 @@ typedef struct AlacEncodeContext {
     int max_prediction_order;
     int max_coded_frame_size;
     int write_sample_size;
+    int extra_bits;
     int32_t sample_buf[MAX_CHANNELS][DEFAULT_FRAME_SIZE];
     int32_t predictor_buf[DEFAULT_FRAME_SIZE];
     int interlacing_shift;
@@ -78,16 +78,26 @@ typedef struct AlacEncodeContext {
 } AlacEncodeContext;
 
-static void init_sample_buffers(AlacEncodeContext *s, int16_t **input_samples)
+static void init_sample_buffers(AlacEncodeContext *s,
+                                uint8_t * const *samples)
 {
     int ch, i;
+    int shift = av_get_bytes_per_sample(s->avctx->sample_fmt) * 8 -
+                s->avctx->bits_per_raw_sample;
 
-    for (ch = 0; ch < s->avctx->channels; ch++) {
-        int32_t *bptr = s->sample_buf[ch];
-        const int16_t *sptr = input_samples[ch];
-        for (i = 0; i < s->frame_size; i++)
-            bptr[i] = sptr[i];
-    }
+#define COPY_SAMPLES(type) do {                             \
+        for (ch = 0; ch < s->avctx->channels; ch++) {       \
+            int32_t *bptr = s->sample_buf[ch];              \
+            const type *sptr = (const type *)samples[ch];   \
+            for (i = 0; i < s->frame_size; i++)             \
+                bptr[i] = sptr[i] >> shift;                 \
+        }                                                   \
+    } while (0)
+
+    if (s->avctx->sample_fmt == AV_SAMPLE_FMT_S32P)
+        COPY_SAMPLES(int32_t);
+    else
+        COPY_SAMPLES(int16_t);
 }
 
 static void encode_scalar(AlacEncodeContext *s, int x,
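For reference, a minimal standalone sketch of the normalization the new init_sample_buffers performs: planar input samples sit in 16- or 32-bit containers and are shifted down so that only bits_per_raw_sample significant bits reach the int32 work buffers (for S32P with 24 significant bits the shift is 32 - 24 = 8). The helper name and sample values below are mine, not FFmpeg's, and right-shifting a negative value assumes arithmetic shift, as the encoder itself does.

#include <stdint.h>
#include <stdio.h>

/* Copy one channel into a 32-bit work buffer, dropping the unused low bits. */
static void copy_normalized_s32(int32_t *dst, const int32_t *src, int n, int shift)
{
    for (int i = 0; i < n; i++)
        dst[i] = src[i] >> shift;   /* arithmetic shift assumed for negatives */
}

int main(void)
{
    /* 24-bit values stored left-justified in 32-bit containers */
    int32_t in[2] = { 0x123456 << 8, -(0x000100 << 8) };
    int32_t out[2];

    copy_normalized_s32(out, in, 2, 32 - 24);
    printf("%d %d\n", out[0], out[1]);   /* prints 1193046 -256 */
    return 0;
}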
@@ -128,7 +138,7 @@ static void write_frame_header(AlacEncodeContext *s)
     put_bits(&s->pbctx, 3,  s->avctx->channels-1);  // No. of channels -1
     put_bits(&s->pbctx, 16, 0);                     // Seems to be zero
     put_bits(&s->pbctx, 1,  encode_fs);             // Sample count is in the header
-    put_bits(&s->pbctx, 2,  0);                     // FIXME: Wasted bytes field
+    put_bits(&s->pbctx, 2,  s->extra_bits >> 3);    // Extra bytes (for 24-bit)
     put_bits(&s->pbctx, 1,  s->verbatim);           // Audio block is verbatim
     if (encode_fs)
         put_bits32(&s->pbctx, s->frame_size);       // No. of samples in the frame
@@ -345,7 +355,8 @@ static void alac_entropy_coder(AlacEncodeContext *s)
     }
 }
 
-static int write_frame(AlacEncodeContext *s, AVPacket *avpkt, int16_t **samples)
+static int write_frame(AlacEncodeContext *s, AVPacket *avpkt,
+                       uint8_t * const *samples)
 {
     int i, j;
     int prediction_type = 0;
@@ -356,9 +367,20 @@ static int write_frame(AlacEncodeContext *s, AVPacket *avpkt, int16_t **samples)
     if (s->verbatim) {
         write_frame_header(s);
         /* samples are channel-interleaved in verbatim mode */
-        for (i = 0; i < s->frame_size; i++)
-            for (j = 0; j < s->avctx->channels; j++)
-                put_sbits(pb, 16, samples[j][i]);
+        if (s->avctx->sample_fmt == AV_SAMPLE_FMT_S32P) {
+            int shift = 32 - s->avctx->bits_per_raw_sample;
+            int32_t * const *samples_s32 = (int32_t * const *)samples;
+            for (i = 0; i < s->frame_size; i++)
+                for (j = 0; j < s->avctx->channels; j++)
+                    put_sbits(pb, s->avctx->bits_per_raw_sample,
+                              samples_s32[j][i] >> shift);
+        } else {
+            int16_t * const *samples_s16 = (int16_t * const *)samples;
+            for (i = 0; i < s->frame_size; i++)
+                for (j = 0; j < s->avctx->channels; j++)
+                    put_sbits(pb, s->avctx->bits_per_raw_sample,
+                              samples_s16[j][i]);
+        }
     } else {
         init_sample_buffers(s, samples);
         write_frame_header(s);
@@ -381,6 +403,17 @@ static int write_frame(AlacEncodeContext *s, AVPacket *avpkt, int16_t **samples)
                 put_sbits(pb, 16, s->lpc[i].lpc_coeff[j]);
         }
 
+        // write extra bits if needed
+        if (s->extra_bits) {
+            uint32_t mask = (1 << s->extra_bits) - 1;
+            for (i = 0; i < s->frame_size; i++) {
+                for (j = 0; j < s->avctx->channels; j++) {
+                    put_bits(pb, s->extra_bits, s->sample_buf[j][i] & mask);
+                    s->sample_buf[j][i] >>= s->extra_bits;
+                }
+            }
+        }
+
         // apply lpc and entropy coding to audio samples
 
         for (i = 0; i < s->avctx->channels; i++) {
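Together with the 2-bit "extra bytes" header field above (written as extra_bits >> 3, i.e. 1 for 24-bit input), this block implements ALAC's wasted-bits idea: the low extra_bits of every sample are sent verbatim, and only the upper 16 bits stay in sample_buf for prediction and entropy coding. A self-contained sketch of that split and of how the two parts recombine; the function names and test value are mine, and arithmetic right shift of negatives is assumed as in the encoder.

#include <assert.h>
#include <stdint.h>

/* Split a 24-bit sample into the part fed to prediction (top 16 bits)
 * and the part stored verbatim (low extra_bits). */
static void split_extra_bits(int32_t sample, int extra_bits,
                             int32_t *coded_part, uint32_t *extra_part)
{
    uint32_t mask = (1u << extra_bits) - 1;
    *extra_part = sample & mask;
    *coded_part = sample >> extra_bits;   /* arithmetic shift assumed */
}

/* What a decoder conceptually does after undoing the prediction. */
static int32_t join_extra_bits(int32_t coded_part, uint32_t extra_part,
                               int extra_bits)
{
    return coded_part * (1 << extra_bits) + (int32_t)extra_part;
}

int main(void)
{
    int32_t coded, sample = -1234567;   /* some 24-bit sample */
    uint32_t extra;

    split_extra_bits(sample, 8, &coded, &extra);
    assert(coded == -4823 && extra == 121);
    assert(join_extra_bits(coded, extra, 8) == sample);
    return 0;
}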
@@ -433,6 +466,15 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
         return AVERROR_PATCHWELCOME;
     }
 
+    if (avctx->sample_fmt == AV_SAMPLE_FMT_S32P) {
+        if (avctx->bits_per_raw_sample != 24)
+            av_log(avctx, AV_LOG_WARNING, "encoding as 24 bits-per-sample\n");
+        avctx->bits_per_raw_sample = 24;
+    } else {
+        avctx->bits_per_raw_sample = 16;
+        s->extra_bits = 0;
+    }
+
     // Set default compression level
     if (avctx->compression_level == FF_COMPRESSION_DEFAULT)
         s->compression_level = 2;
@@ -447,10 +489,7 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
 
     s->max_coded_frame_size = get_max_frame_size(avctx->frame_size,
                                                  avctx->channels,
-                                                 DEFAULT_SAMPLE_SIZE);
-
-    // FIXME: consider wasted_bytes
-    s->write_sample_size = DEFAULT_SAMPLE_SIZE + avctx->channels - 1;
+                                                 avctx->bits_per_raw_sample);
 
     avctx->extradata = av_mallocz(ALAC_EXTRADATA_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
     if (!avctx->extradata) {
@@ -463,11 +502,11 @@ static av_cold int alac_encode_init(AVCodecContext *avctx)
     AV_WB32(alac_extradata,    ALAC_EXTRADATA_SIZE);
     AV_WB32(alac_extradata+4,  MKBETAG('a','l','a','c'));
     AV_WB32(alac_extradata+12, avctx->frame_size);
-    AV_WB8 (alac_extradata+17, DEFAULT_SAMPLE_SIZE);
+    AV_WB8 (alac_extradata+17, avctx->bits_per_raw_sample);
     AV_WB8 (alac_extradata+21, avctx->channels);
     AV_WB32(alac_extradata+24, s->max_coded_frame_size);
     AV_WB32(alac_extradata+28,
-            avctx->sample_rate * avctx->channels * DEFAULT_SAMPLE_SIZE); // average bitrate
+            avctx->sample_rate * avctx->channels * avctx->bits_per_raw_sample); // average bitrate
     AV_WB32(alac_extradata+32, avctx->sample_rate);
 
     // Set relevant extradata fields
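The ALAC magic cookie written here is big-endian throughout; AV_WB32/AV_WB8 store values byte by byte so the extradata is independent of host endianness. A small sketch of that store outside of FFmpeg; wb32 and write_depth_fields are my names, and the offsets simply mirror the two fields this hunk switches from the fixed 16-bit constant to the configured bit depth.

#include <stdint.h>

/* Store a 32-bit value big-endian, like FFmpeg's AV_WB32(). */
static void wb32(uint8_t *p, uint32_t v)
{
    p[0] = (uint8_t)(v >> 24);
    p[1] = (uint8_t)(v >> 16);
    p[2] = (uint8_t)(v >>  8);
    p[3] = (uint8_t) v;
}

/* Bit depth at offset 17 and the nominal average bitrate at offset 28. */
static void write_depth_fields(uint8_t *cookie, int bits_per_raw_sample,
                               int channels, int sample_rate)
{
    cookie[17] = (uint8_t)bits_per_raw_sample;                       /* AV_WB8 equivalent */
    wb32(cookie + 28,
         (uint32_t)(sample_rate * channels * bits_per_raw_sample));  /* average bitrate   */
}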
@@ -536,13 +575,12 @@ static int alac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
 {
     AlacEncodeContext *s = avctx->priv_data;
     int out_bytes, max_frame_size, ret;
-    int16_t **samples = (int16_t **)frame->extended_data;
 
     s->frame_size = frame->nb_samples;
 
     if (frame->nb_samples < DEFAULT_FRAME_SIZE)
         max_frame_size = get_max_frame_size(s->frame_size, avctx->channels,
-                                            DEFAULT_SAMPLE_SIZE);
+                                            avctx->bits_per_raw_sample);
     else
         max_frame_size = s->max_coded_frame_size;
@@ -550,14 +588,24 @@ static int alac_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
         return ret;
 
     /* use verbatim mode for compression_level 0 */
-    s->verbatim = !s->compression_level;
+    if (s->compression_level) {
+        s->verbatim   = 0;
+        s->extra_bits = avctx->bits_per_raw_sample - 16;
+    } else {
+        s->verbatim   = 1;
+        s->extra_bits = 0;
+    }
+    s->write_sample_size = avctx->bits_per_raw_sample - s->extra_bits +
+                           avctx->channels - 1;
 
-    out_bytes = write_frame(s, avpkt, samples);
+    out_bytes = write_frame(s, avpkt, frame->extended_data);
 
     if (out_bytes > max_frame_size) {
         /* frame too large. use verbatim mode */
-        s->verbatim = 1;
-        out_bytes = write_frame(s, avpkt, samples);
+        s->verbatim   = 1;
+        s->extra_bits = 0;
+        s->write_sample_size = avctx->bits_per_raw_sample + avctx->channels - 1;
+        out_bytes = write_frame(s, avpkt, frame->extended_data);
     }
 
     avpkt->size = out_bytes;
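A quick worked check of the per-sample widths this hunk sets up, with values I picked for illustration: for 24-bit stereo at compression_level > 0, extra_bits is 8, so the entropy coder works on 24 - 8 + (2 - 1) = 17-bit values, while the verbatim fallback drops the split and writes 24 + 1 = 25 bits per sample.

#include <stdio.h>

int main(void)
{
    int bits_per_raw_sample = 24, channels = 2;

    int extra_bits        = bits_per_raw_sample - 16;                 /* 8  */
    int write_sample_size = bits_per_raw_sample - extra_bits +
                            channels - 1;                             /* 17 */
    int verbatim_size     = bits_per_raw_sample + channels - 1;       /* 25 */

    printf("%d %d %d\n", extra_bits, write_sample_size, verbatim_size);
    return 0;
}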
@@ -574,7 +622,8 @@ AVCodec ff_alac_encoder = {
     .encode2        = alac_encode_frame,
     .close          = alac_encode_close,
     .capabilities   = CODEC_CAP_SMALL_LAST_FRAME,
-    .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16P,
+    .sample_fmts    = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32P,
+                                                     AV_SAMPLE_FMT_S16P,
                                                      AV_SAMPLE_FMT_NONE },
     .long_name      = NULL_IF_CONFIG_SMALL("ALAC (Apple Lossless Audio Codec)"),
 };
libavcodec/pcm.c
@@ -358,7 +358,16 @@ static int pcm_decode_frame(AVCodecContext *avctx, void *data,
         DECODE_PLANAR(16, be16, src, samples, n, 0, 0);
         break;
     case AV_CODEC_ID_PCM_S16LE_PLANAR:
-        DECODE_PLANAR(16, le16, src, samples, n, 0, 0);
+        n /= avctx->channels;
+        for (c = 0; c < avctx->channels; c++) {
+            samples = s->frame.extended_data[c];
+#if HAVE_BIGENDIAN
+            DECODE(16, le16, src, samples, n, 0, 0)
+#else
+            memcpy(samples, src, n * 2);
+#endif
+            src += n * 2;
+        }
         break;
     case AV_CODEC_ID_PCM_S24LE_PLANAR:
         DECODE_PLANAR(32, le24, src, samples, n, 8, 0);
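On little-endian hosts the new pcm_s16le_planar path is a plain per-channel memcpy into the frame's planes; only big-endian hosts still byte-swap each sample through the DECODE macro. A standalone sketch of the same idea, assuming a GCC/Clang-style __BYTE_ORDER__ check instead of FFmpeg's HAVE_BIGENDIAN and hand-rolled byte assembly instead of DECODE; the function name is mine.

#include <stdint.h>
#include <string.h>

/* src holds all of channel 0's little-endian samples, then channel 1's, ...
 * dst[c] is the separate output plane for channel c (planar layout). */
static void decode_s16le_planar(uint8_t *const *dst, const uint8_t *src,
                                int channels, int samples_per_channel)
{
    for (int c = 0; c < channels; c++) {
        int16_t *out = (int16_t *)dst[c];
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
        /* native order already matches: plain copy, as in the hunk above */
        memcpy(out, src, (size_t)samples_per_channel * 2);
#else
        /* big-endian host: assemble each sample from its two bytes */
        for (int i = 0; i < samples_per_channel; i++)
            out[i] = (int16_t)(src[2 * i] | (src[2 * i + 1] << 8));
#endif
        src += (size_t)samples_per_channel * 2;
    }
}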
...
libavcodec/vorbisdec.c
@@ -153,9 +153,7 @@ typedef struct vorbis_context_s {
     uint8_t  mode_number;        // mode number for the current packet
     uint8_t  previous_window;
     float   *channel_residues;
-    float   *channel_floors;
     float   *saved;
-    float    scale_bias;         // for float->int conversion
 } vorbis_context;
 
 /* Helper functions */
@@ -194,7 +192,6 @@ static void vorbis_free(vorbis_context *vc)
     int i;
 
     av_freep(&vc->channel_residues);
-    av_freep(&vc->channel_floors);
     av_freep(&vc->saved);
 
     for (i = 0; i < vc->residue_count; i++)
@@ -953,12 +950,11 @@ static int vorbis_parse_id_hdr(vorbis_context *vc)
     }
 
     vc->channel_residues = av_malloc((vc->blocksize[1]  / 2) * vc->audio_channels * sizeof(*vc->channel_residues));
-    vc->channel_floors   = av_malloc((vc->blocksize[1]  / 2) * vc->audio_channels * sizeof(*vc->channel_floors));
     vc->saved            = av_mallocz((vc->blocksize[1] / 4) * vc->audio_channels * sizeof(*vc->saved));
 
     vc->previous_window = 0;
 
-    ff_mdct_init(&vc->mdct[0], bl0, 1, -vc->scale_bias);
-    ff_mdct_init(&vc->mdct[1], bl1, 1, -vc->scale_bias);
+    ff_mdct_init(&vc->mdct[0], bl0, 1, -1.0);
+    ff_mdct_init(&vc->mdct[1], bl1, 1, -1.0);
 
     av_dlog(NULL, " vorbis version %d \n audio_channels %d \n audio_samplerate %d \n bitrate_max %d \n bitrate_nom %d \n bitrate_min %d \n blk_0 %d blk_1 %d \n ",
             vc->version, vc->audio_channels, vc->audio_samplerate, vc->bitrate_maximum, vc->bitrate_nominal, vc->bitrate_minimum, vc->blocksize[0], vc->blocksize[1]);
@@ -990,13 +986,7 @@ static av_cold int vorbis_decode_init(AVCodecContext *avccontext)
     avpriv_float_dsp_init(&vc->fdsp, avccontext->flags & CODEC_FLAG_BITEXACT);
     ff_fmt_convert_init(&vc->fmt_conv, avccontext);
 
-    if (avccontext->request_sample_fmt == AV_SAMPLE_FMT_FLT) {
-        avccontext->sample_fmt = AV_SAMPLE_FMT_FLT;
-        vc->scale_bias = 1.0f;
-    } else {
-        avccontext->sample_fmt = AV_SAMPLE_FMT_S16;
-        vc->scale_bias = 32768.0f;
-    }
+    avccontext->sample_fmt = AV_SAMPLE_FMT_FLTP;
 
     if (!headers_len) {
         av_log(avccontext, AV_LOG_ERROR, "Extradata missing.\n");
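With AV_SAMPLE_FMT_FLTP the decoder always outputs one float plane per channel; the request_sample_fmt switch and the scale_bias used by the old int16 path are gone, so a caller that still wants packed output converts it itself (or hands the frame to libswresample). A minimal sketch of that interleave step in plain C; the function name is mine.

#include <stddef.h>

/* planes[c][i] is sample i of channel c (AV_SAMPLE_FMT_FLTP layout);
 * out receives the usual packed/interleaved float stream. */
static void interleave_fltp(float *out, float *const *planes,
                            int channels, int nb_samples)
{
    for (int i = 0; i < nb_samples; i++)
        for (int c = 0; c < channels; c++)
            out[(size_t)i * channels + c] = planes[c][i];
}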
@@ -1487,7 +1477,7 @@ void ff_vorbis_inverse_coupling(float *mag, float *ang, int blocksize)
 
 // Decode the audio packet using the functions above
 
-static int vorbis_parse_audio_packet(vorbis_context *vc)
+static int vorbis_parse_audio_packet(vorbis_context *vc, float **floor_ptr)
 {
     GetBitContext *gb = &vc->gb;
     FFTContext *mdct;
@@ -1498,7 +1488,6 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
     uint8_t do_not_decode[255];
     vorbis_mapping *mapping;
     float *ch_res_ptr   = vc->channel_residues;
-    float *ch_floor_ptr = vc->channel_floors;
     uint8_t res_chan[255];
     unsigned res_num = 0;
     int retlen = 0;
@@ -1530,7 +1519,8 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
     }
 
     memset(ch_res_ptr,   0, sizeof(float) * vc->audio_channels * vlen); //FIXME can this be removed ?
-    memset(ch_floor_ptr, 0, sizeof(float) * vc->audio_channels * vlen); //FIXME can this be removed ?
+    for (i = 0; i < vc->audio_channels; ++i)
+        memset(floor_ptr[i], 0, vlen * sizeof(floor_ptr[0][0])); //FIXME can this be removed ?
 
     // Decode floor
@@ -1543,14 +1533,13 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
             floor = &vc->floors[mapping->submap_floor[0]];
         }
 
-        ret = floor->decode(vc, &floor->data, ch_floor_ptr);
+        ret = floor->decode(vc, &floor->data, floor_ptr[i]);
 
         if (ret < 0) {
             av_log(vc->avccontext, AV_LOG_ERROR, "Invalid codebook in vorbis_floor_decode.\n");
             return AVERROR_INVALIDDATA;
         }
         no_residue[i] = ret;
-        ch_floor_ptr += vlen;
     }
 
     // Nonzero vector propagate
@@ -1614,10 +1603,9 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
     mdct = &vc->mdct[blockflag];
 
     for (j = vc->audio_channels-1;j >= 0; j--) {
-        ch_floor_ptr = vc->channel_floors   + j           * blocksize / 2;
         ch_res_ptr   = vc->channel_residues + res_chan[j] * blocksize / 2;
-        vc->fdsp.vector_fmul(ch_floor_ptr, ch_floor_ptr, ch_res_ptr, blocksize / 2);
-        mdct->imdct_half(mdct, ch_res_ptr, ch_floor_ptr);
+        vc->fdsp.vector_fmul(floor_ptr[j], floor_ptr[j], ch_res_ptr, blocksize / 2);
+        mdct->imdct_half(mdct, ch_res_ptr, floor_ptr[j]);
     }
 
     // Overlap/add, save data for next overlapping
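The per-channel synthesis is unchanged apart from its destination: the decoded floor curve is multiplied into place against the residue spectrum and the inverse MDCT then writes through the caller-supplied floor_ptr plane instead of channel_floors. vector_fmul is essentially an element-wise product; a plain-C reference version (the name vector_fmul_c is mine):

/* dst[i] = src0[i] * src1[i], the operation behind vc->fdsp.vector_fmul() */
static void vector_fmul_c(float *dst, const float *src0,
                          const float *src1, int len)
{
    for (int i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i];
}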
@@ -1628,7 +1616,7 @@ static int vorbis_parse_audio_packet(vorbis_context *vc)
         unsigned bs1 = vc->blocksize[1];
         float *residue = vc->channel_residues + res_chan[j] * blocksize / 2;
         float *saved   = vc->saved + j * bs1 / 4;
-        float *ret     = vc->channel_floors + j * retlen;
+        float *ret     = floor_ptr[j];
         float *buf     = residue;
         const float *win = vc->win[blockflag & previous_window];
@@ -1657,14 +1645,31 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
     int buf_size       = avpkt->size;
     vorbis_context *vc = avccontext->priv_data;
     GetBitContext *gb  = &vc->gb;
-    const float *channel_ptrs[255];
+    float *channel_ptrs[255];
    int i, len, ret;
 
     av_dlog(NULL, "packet length %d \n", buf_size);
 
+    /* get output buffer */
+    vc->frame.nb_samples = vc->blocksize[1] / 2;
+    if ((ret = avccontext->get_buffer(avccontext, &vc->frame)) < 0) {
+        av_log(avccontext, AV_LOG_ERROR, "get_buffer() failed\n");
+        return ret;
+    }
+
+    if (vc->audio_channels > 8) {
+        for (i = 0; i < vc->audio_channels; i++)
+            channel_ptrs[i] = (float *)vc->frame.extended_data[i];
+    } else {
+        for (i = 0; i < vc->audio_channels; i++) {
+            int ch = ff_vorbis_channel_layout_offsets[vc->audio_channels - 1][i];
+            channel_ptrs[ch] = (float *)vc->frame.extended_data[i];
+        }
+    }
+
     init_get_bits(gb, buf, buf_size*8);
 
-    if ((len = vorbis_parse_audio_packet(vc)) <= 0)
+    if ((len = vorbis_parse_audio_packet(vc, channel_ptrs)) <= 0)
         return len;
 
     if (!vc->first_frame) {
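Because the floor/IMDCT results are now written straight into the frame's planes, channel_ptrs has to be permuted up front so that Vorbis's codec channel order lands in the right output plane; for up to 8 channels ff_vorbis_channel_layout_offsets supplies that permutation. A generic sketch of the pointer remapping; the table values and names below are illustrative only, not the real Vorbis-to-native mapping.

#include <stdio.h>

/* remap[i] says which codec-order slot output plane i should serve.
 * The values below are made up for a 3-channel example. */
static void map_channel_ptrs(float **codec_order, float *const *planes,
                             const int *remap, int channels)
{
    for (int i = 0; i < channels; i++)
        codec_order[remap[i]] = planes[i];
}

int main(void)
{
    float l[4], r[4], c[4];
    float *planes[3] = { l, r, c };
    const int remap[3] = { 0, 2, 1 };   /* hypothetical permutation */
    float *codec_order[3];

    map_channel_ptrs(codec_order, planes, remap, 3);
    printf("%d\n", codec_order[2] == r); /* 1: codec channel 2 writes into plane r */
    return 0;
}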
@@ -1676,30 +1681,7 @@ static int vorbis_decode_frame(AVCodecContext *avccontext, void *data,
     av_dlog(NULL, "parsed %d bytes %d bits, returned %d samples (*ch*bits) \n",
             get_bits_count(gb) / 8, get_bits_count(gb) % 8, len);
 
-    /* get output buffer */
     vc->frame.nb_samples = len;
-    if ((ret = avccontext->get_buffer(avccontext, &vc->frame)) < 0) {
-        av_log(avccontext, AV_LOG_ERROR, "get_buffer() failed\n");
-        return ret;
-    }
-
-    if (vc->audio_channels > 8) {
-        for (i = 0; i < vc->audio_channels; i++)
-            channel_ptrs[i] = vc->channel_floors + i * len;
-    } else {
-        for (i = 0; i < vc->audio_channels; i++)
-            channel_ptrs[i] = vc->channel_floors +
-                              len * ff_vorbis_channel_layout_offsets[vc->audio_channels - 1][i];
-    }
-
-    if (avccontext->sample_fmt == AV_SAMPLE_FMT_FLT)
-        vc->fmt_conv.float_interleave((float *)vc->frame.data[0], channel_ptrs,
-                                      len, vc->audio_channels);
-    else
-        vc->fmt_conv.float_to_int16_interleave((int16_t *)vc->frame.data[0],
-                                               channel_ptrs, len,
-                                               vc->audio_channels);
-
     *got_frame_ptr = 1;
 
     *(AVFrame *)data = vc->frame;
@@ -1740,7 +1722,6 @@ AVCodec ff_vorbis_decoder = {
     .capabilities    = CODEC_CAP_DR1,
     .long_name       = NULL_IF_CONFIG_SMALL("Vorbis"),
     .channel_layouts = ff_vorbis_channel_layouts,
-    .sample_fmts     = (const enum AVSampleFormat[]) {
-        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
-    },
+    .sample_fmts     = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
+                                                       AV_SAMPLE_FMT_NONE },
 };