Commit 233783e2 authored by Justin Ruggles

vorbisenc: use float planar sample format

parent 11dcddb9
...@@ -963,10 +963,10 @@ static int residue_encode(vorbis_enc_context *venc, vorbis_enc_residue *rc, ...@@ -963,10 +963,10 @@ static int residue_encode(vorbis_enc_context *venc, vorbis_enc_residue *rc,
return 0; return 0;
} }
static int apply_window_and_mdct(vorbis_enc_context *venc, const signed short *audio, static int apply_window_and_mdct(vorbis_enc_context *venc,
int samples) float **audio, int samples)
{ {
int i, j, channel; int i, channel;
const float * win = venc->win[0]; const float * win = venc->win[0];
int window_len = 1 << (venc->log2_blocksize[0] - 1); int window_len = 1 << (venc->log2_blocksize[0] - 1);
float n = (float)(1 << venc->log2_blocksize[0]) / 4.; float n = (float)(1 << venc->log2_blocksize[0]) / 4.;
...@@ -988,9 +988,8 @@ static int apply_window_and_mdct(vorbis_enc_context *venc, const signed short *a ...@@ -988,9 +988,8 @@ static int apply_window_and_mdct(vorbis_enc_context *venc, const signed short *a
if (samples) { if (samples) {
for (channel = 0; channel < venc->channels; channel++) { for (channel = 0; channel < venc->channels; channel++) {
float * offset = venc->samples + channel*window_len*2 + window_len; float * offset = venc->samples + channel*window_len*2 + window_len;
j = channel; for (i = 0; i < samples; i++)
for (i = 0; i < samples; i++, j += venc->channels) offset[i] = audio[channel][i] / n * win[window_len - i - 1];
offset[i] = audio[j] / 32768. / n * win[window_len - i - 1];
} }
} else { } else {
for (channel = 0; channel < venc->channels; channel++) for (channel = 0; channel < venc->channels; channel++)
...@@ -1005,9 +1004,8 @@ static int apply_window_and_mdct(vorbis_enc_context *venc, const signed short *a ...@@ -1005,9 +1004,8 @@ static int apply_window_and_mdct(vorbis_enc_context *venc, const signed short *a
if (samples) { if (samples) {
for (channel = 0; channel < venc->channels; channel++) { for (channel = 0; channel < venc->channels; channel++) {
float *offset = venc->saved + channel * window_len; float *offset = venc->saved + channel * window_len;
j = channel; for (i = 0; i < samples; i++)
for (i = 0; i < samples; i++, j += venc->channels) offset[i] = audio[channel][i] / n * win[i];
offset[i] = audio[j] / 32768. / n * win[i];
} }
venc->have_saved = 1; venc->have_saved = 1;
} else { } else {
...@@ -1021,7 +1019,7 @@ static int vorbis_encode_frame(AVCodecContext *avccontext, AVPacket *avpkt, ...@@ -1021,7 +1019,7 @@ static int vorbis_encode_frame(AVCodecContext *avccontext, AVPacket *avpkt,
const AVFrame *frame, int *got_packet_ptr) const AVFrame *frame, int *got_packet_ptr)
{ {
vorbis_enc_context *venc = avccontext->priv_data; vorbis_enc_context *venc = avccontext->priv_data;
const int16_t *audio = frame ? (const int16_t *)frame->data[0] : NULL; float **audio = frame ? (float **)frame->extended_data : NULL;
int samples = frame ? frame->nb_samples : 0; int samples = frame ? frame->nb_samples : 0;
vorbis_enc_mode *mode; vorbis_enc_mode *mode;
vorbis_enc_mapping *mapping; vorbis_enc_mapping *mapping;
...@@ -1216,7 +1214,7 @@ AVCodec ff_vorbis_encoder = { ...@@ -1216,7 +1214,7 @@ AVCodec ff_vorbis_encoder = {
.encode2 = vorbis_encode_frame, .encode2 = vorbis_encode_frame,
.close = vorbis_encode_close, .close = vorbis_encode_close,
.capabilities = CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL, .capabilities = CODEC_CAP_DELAY | CODEC_CAP_EXPERIMENTAL,
.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, .sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE }, AV_SAMPLE_FMT_NONE },
.long_name = NULL_IF_CONFIG_SMALL("Vorbis"), .long_name = NULL_IF_CONFIG_SMALL("Vorbis"),
}; };
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment