Commit 9b8e2a87 authored by Nathan Caldwell, committed by Alex Converse

aacenc: Deinterleave input samples before processing.

Signed-off-by: Alex Converse <alex.converse@gmail.com>
parent 04af2efa
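For readers skimming the diff: the encoder previously kept its input interleaved (samples of all channels alternating) and walked it with a channel stride; this commit deinterleaves to planar buffers (one contiguous block per channel) before any processing, which turns strided loops into unit-stride loops and memcpys. A minimal illustration of the two layouts, not taken from the patch:

    /* Interleaved stereo: L0 R0 L1 R1 ...      sample i of channel c = in[i*2 + c]
     * Planar stereo:      L0 L1 ... | R0 R1 ... sample i of channel c = plane[c][i] */
    static void deinterleave_stereo(const float *in, float *left, float *right, int n)
    {
        for (int i = 0; i < n; i++) {
            left[i]  = in[2 * i];     /* channel 0 */
            right[i] = in[2 * i + 1]; /* channel 1 */
        }
    }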
libavcodec/aacenc.c
@@ -143,6 +143,18 @@ static const uint8_t aac_chan_configs[6][5] = {
     {4, TYPE_SCE, TYPE_CPE, TYPE_CPE, TYPE_LFE}, // 6 channels - front center + stereo + back stereo + LFE
 };
 
+/**
+ * Table to remap channels from Libav's default order to AAC order.
+ */
+static const uint8_t aac_chan_maps[AAC_MAX_CHANNELS][AAC_MAX_CHANNELS] = {
+    { 0 },
+    { 0, 1 },
+    { 2, 0, 1 },
+    { 2, 0, 1, 3 },
+    { 2, 0, 1, 3, 4 },
+    { 2, 0, 1, 4, 5, 3 },
+};
+
 /**
  * Make AAC audio config object.
  * @see 1.6.2.1 "Syntax - AudioSpecificConfig"
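Reading the new table above: row channels-1, entry out_ch gives the Libav-order source channel for AAC-order output channel out_ch. For 5.1, Libav's default order is FL FR FC LFE BL BR while AAC expects C L R Ls Rs LFE, which is where { 2, 0, 1, 4, 5, 3 } comes from. A small self-contained check of that row (illustrative names, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed Libav default 5.1 order; the map row is copied from above. */
    static const char   *libav_51[] = { "FL", "FR", "FC", "LFE", "BL", "BR" };
    static const uint8_t map_51[]   = { 2, 0, 1, 4, 5, 3 };

    int main(void)
    {
        for (int i = 0; i < 6; i++)
            printf("AAC channel %d reads from %s\n", i, libav_51[map_51[i]]);
        return 0; /* prints FC, FL, FR, BL, BR, LFE in that order */
    }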
@@ -172,34 +184,29 @@ static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce,
                                   float *audio)
 {
     int i, k;
-    const int chans = s->channels;
     const float * lwindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_long_1024 : ff_sine_1024;
     const float * swindow = sce->ics.use_kb_window[0] ? ff_aac_kbd_short_128 : ff_sine_128;
     const float * pwindow = sce->ics.use_kb_window[1] ? ff_aac_kbd_short_128 : ff_sine_128;
     float *output = sce->ret;
 
     if (sce->ics.window_sequence[0] != EIGHT_SHORT_SEQUENCE) {
-        memcpy(output, sce->saved, sizeof(float)*1024);
+        memcpy(output, sce->saved, sizeof(output[0])*1024);
         if (sce->ics.window_sequence[0] == LONG_STOP_SEQUENCE) {
             memset(output, 0, sizeof(output[0]) * 448);
             for (i = 448; i < 576; i++)
                 output[i] = sce->saved[i] * pwindow[i - 448];
-            for (i = 576; i < 704; i++)
-                output[i] = sce->saved[i];
         }
         if (sce->ics.window_sequence[0] != LONG_START_SEQUENCE) {
             for (i = 0; i < 1024; i++) {
-                output[i+1024] = audio[i * chans] * lwindow[1024 - i - 1];
-                sce->saved[i] = audio[i * chans] * lwindow[i];
+                output[i+1024] = audio[i] * lwindow[1024 - i - 1];
+                sce->saved[i] = audio[i] * lwindow[i];
             }
         } else {
-            for (i = 0; i < 448; i++)
-                output[i+1024] = audio[i * chans];
+            memcpy(output + 1024, audio, sizeof(output[0]) * 448);
             for (; i < 576; i++)
-                output[i+1024] = audio[i * chans] * swindow[576 - i - 1];
+                output[i+1024] = audio[i] * swindow[576 - i - 1];
             memset(output+1024+576, 0, sizeof(output[0]) * 448);
-            for (i = 0; i < 1024; i++)
-                sce->saved[i] = audio[i * chans];
+            memcpy(sce->saved, audio, sizeof(sce->saved[0]) * 1024);
         }
         s->mdct1024.mdct_calc(&s->mdct1024, sce->coeffs, output);
     } else {
@@ -207,13 +214,12 @@ static void apply_window_and_mdct(AACEncContext *s, SingleChannelElement *sce,
             for (i = 448 + k; i < 448 + k + 256; i++)
                 output[i - 448 - k] = (i < 1024)
                                          ? sce->saved[i]
-                                         : audio[(i-1024)*chans];
+                                         : audio[i-1024];
             s->dsp.vector_fmul        (output, output, k ? swindow : pwindow, 128);
             s->dsp.vector_fmul_reverse(output+128, output+128, swindow, 128);
             s->mdct128.mdct_calc(&s->mdct128, sce->coeffs + k, output);
         }
-        for (i = 0; i < 1024; i++)
-            sce->saved[i] = audio[i * chans];
+        memcpy(sce->saved, audio, sizeof(sce->saved[0]) * 1024);
     }
 }
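The payoff inside apply_window_and_mdct: every `audio[i * chans]` access above becomes `audio[i]`, so the plain-copy loops collapse into memcpy and the windowed loops read contiguous memory. The pattern, side by side (quoting the shapes from these hunks):

    /* before: interleaved input, stride = channel count */
    for (i = 0; i < 1024; i++)
        sce->saved[i] = audio[i * chans];

    /* after: planar input, unit stride */
    memcpy(sce->saved, audio, sizeof(sce->saved[0]) * 1024);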
@@ -432,11 +438,37 @@ static void put_bitstream_info(AVCodecContext *avctx, AACEncContext *s,
         put_bits(&s->pb, 12 - padbits, 0);
 }
 
+/*
+ * Deinterleave input samples.
+ * Channels are reordered from Libav's default order to AAC order.
+ */
+static void deinterleave_input_samples(AACEncContext *s,
+                                       const float *samples)
+{
+    int ch, i;
+    const int sinc = s->channels;
+    const uint8_t *channel_map = aac_chan_maps[sinc - 1];
+
+    /* deinterleave and remap input samples */
+    for (ch = 0; ch < sinc; ch++) {
+        const float *sptr = samples + channel_map[ch];
+
+        /* copy last 1024 samples of previous frame to the start of the current frame */
+        memcpy(&s->planar_samples[ch][0], &s->planar_samples[ch][1024], 1024 * sizeof(s->planar_samples[0][0]));
+
+        /* deinterleave */
+        for (i = 1024; i < 1024 * 2; i++) {
+            s->planar_samples[ch][i] = *sptr;
+            sptr += sinc;
+        }
+    }
+}
+
 static int aac_encode_frame(AVCodecContext *avctx,
                             uint8_t *frame, int buf_size, void *data)
 {
     AACEncContext *s = avctx->priv_data;
-    float *samples = s->samples, *samples2, *la;
+    float **samples = s->planar_samples, *samples2, *la;
     ChannelElement *cpe;
     int i, ch, w, g, chans, tag, start_ch;
     int chan_el_counter[4];
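Note the double-buffering deinterleave_input_samples maintains: each planar_samples[ch] plane is 2048 floats, samples 0..1023 holding the previous frame (shifted down from the top half) and 1024..2047 the newly deinterleaved frame. The encoder needs that one-frame history for MDCT overlap and for the psy model's lookahead (la below). A rough caller's-eye sketch under those assumptions; read_interleaved_frame is hypothetical:

    /* Hypothetical driver loop showing the intended buffer lifecycle. */
    while (read_interleaved_frame(input)) {       /* not a real Libav call */
        deinterleave_input_samples(s, input);     /* fills plane[1024..2047] */
        for (ch = 0; ch < s->channels; ch++) {
            float *prev = &s->planar_samples[ch][0];    /* previous frame */
            float *cur  = &s->planar_samples[ch][1024]; /* current frame  */
            /* windowing/MDCT uses prev for overlap, cur as fresh input */
        }
    }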
@@ -444,27 +476,15 @@ static int aac_encode_frame(AVCodecContext *avctx,
     if (s->last_frame)
         return 0;
 
     if (data) {
-        if (!s->psypp) {
-            memcpy(s->samples + 1024 * s->channels, data,
-                   1024 * s->channels * sizeof(s->samples[0]));
-        } else {
-            start_ch = 0;
-            samples2 = s->samples + 1024 * s->channels;
-            for (i = 0; i < s->chan_map[0]; i++) {
-                tag = s->chan_map[i+1];
-                chans = tag == TYPE_CPE ? 2 : 1;
-                ff_psy_preprocess(s->psypp, (float*)data + start_ch,
-                                  samples2 + start_ch, start_ch, chans);
-                start_ch += chans;
-            }
-        }
+        deinterleave_input_samples(s, data);
+        if (s->psypp)
+            ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);
     }
-    if (!avctx->frame_number) {
-        memcpy(s->samples, s->samples + 1024 * s->channels,
-               1024 * s->channels * sizeof(s->samples[0]));
+
+    if (!avctx->frame_number)
         return 0;
-    }
 
     start_ch = 0;
     for (i = 0; i < s->chan_map[0]; i++) {
@@ -475,8 +495,8 @@ static int aac_encode_frame(AVCodecContext *avctx,
         for (ch = 0; ch < chans; ch++) {
             IndividualChannelStream *ics = &cpe->ch[ch].ics;
             int cur_channel = start_ch + ch;
-            samples2 = samples + cur_channel;
-            la = samples2 + (448+64) * s->channels;
+            samples2 = &samples[cur_channel][0];
+            la = samples2 + (448+64);
             if (!data)
                 la = NULL;
             if (tag == TYPE_LFE) {
@@ -592,8 +612,7 @@ static int aac_encode_frame(AVCodecContext *avctx,
     if (!data)
         s->last_frame = 1;
-    memcpy(s->samples, s->samples + 1024 * s->channels,
-           1024 * s->channels * sizeof(s->samples[0]));
+
     return put_bits_count(&s->pb)>>3;
 }
@@ -606,7 +625,7 @@ static av_cold int aac_encode_end(AVCodecContext *avctx)
     ff_psy_end(&s->psy);
     if (s->psypp)
         ff_psy_preprocess_end(s->psypp);
-    av_freep(&s->samples);
+    av_freep(&s->buffer.samples);
     av_freep(&s->cpe);
     return 0;
 }
@@ -633,10 +652,13 @@ static av_cold int dsp_init(AVCodecContext *avctx, AACEncContext *s)
 static av_cold int alloc_buffers(AVCodecContext *avctx, AACEncContext *s)
 {
-    FF_ALLOC_OR_GOTO (avctx, s->samples, 2 * 1024 * s->channels * sizeof(s->samples[0]), alloc_fail);
+    FF_ALLOCZ_OR_GOTO(avctx, s->buffer.samples, 2 * 1024 * s->channels * sizeof(s->buffer.samples[0]), alloc_fail);
     FF_ALLOCZ_OR_GOTO(avctx, s->cpe, sizeof(ChannelElement) * s->chan_map[0], alloc_fail);
     FF_ALLOCZ_OR_GOTO(avctx, avctx->extradata, 5 + FF_INPUT_BUFFER_PADDING_SIZE, alloc_fail);
 
+    for(int ch = 0; ch < s->channels; ch++)
+        s->planar_samples[ch] = s->buffer.samples + 2 * 1024 * ch;
+
     return 0;
 alloc_fail:
     return AVERROR(ENOMEM);
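The allocation above backs all planes with one block of channels * 2 * 1024 floats; the loop then points planar_samples[ch] at offset 2 * 1024 * ch, so the single av_freep(&s->buffer.samples) in aac_encode_end releases everything. The layout, restated with the same arithmetic as the patch:

    /* buffer.samples: | ch0 prev | ch0 cur | ch1 prev | ch1 cur | ...
     * each "prev"/"cur" region is 1024 floats. */
    float *base = s->buffer.samples;
    for (int ch = 0; ch < s->channels; ch++)
        s->planar_samples[ch] = base + 2 * 1024 * ch;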
libavcodec/aacenc.h
@@ -58,7 +58,7 @@ typedef struct AACEncContext {
     FFTContext mdct1024;      ///< long (1024 samples) frame transform context
     FFTContext mdct128;       ///< short (128 samples) frame transform context
     DSPContext dsp;
-    float *samples;           ///< saved preprocessed input
+    float *planar_samples[6]; ///< saved preprocessed input
     int samplerate_index;     ///< MPEG-4 samplerate index
     int channels;             ///< channel count
@@ -73,6 +73,10 @@ typedef struct AACEncContext {
     float lambda;
     DECLARE_ALIGNED(16, int, qcoefs)[96];     ///< quantized coefficients
     DECLARE_ALIGNED(32, float, scoefs)[1024]; ///< scaled coefficients
+
+    struct {
+        float *samples;
+    } buffer;
 } AACEncContext;
 
 extern float ff_aac_pow34sf_tab[428];
libavcodec/aacpsy.c
@@ -400,7 +400,7 @@ static av_unused FFPsyWindowInfo psy_3gpp_window(FFPsyContext *ctx,
     int stay_short = 0;
     for (i = 0; i < 8; i++) {
         for (j = 0; j < 128; j++) {
-            v = iir_filter(la[(i*128+j)*ctx->avctx->channels], pch->iir_state);
+            v = iir_filter(la[i*128+j], pch->iir_state);
             sum += v*v;
         }
         s[i] = sum;
@@ -794,18 +794,17 @@ static FFPsyWindowInfo psy_lame_window(FFPsyContext *ctx, const float *audio,
     float attack_intensity[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS];
     float energy_subshort[(AAC_NUM_BLOCKS_SHORT + 1) * PSY_LAME_NUM_SUBBLOCKS];
     float energy_short[AAC_NUM_BLOCKS_SHORT + 1] = { 0 };
-    int chans = ctx->avctx->channels;
-    const float *firbuf = la + (AAC_BLOCK_SIZE_SHORT/4 - PSY_LAME_FIR_LEN) * chans;
+    const float *firbuf = la + (AAC_BLOCK_SIZE_SHORT/4 - PSY_LAME_FIR_LEN);
     int j, att_sum = 0;
 
     /* LAME comment: apply high pass filter of fs/4 */
     for (i = 0; i < AAC_BLOCK_SIZE_LONG; i++) {
         float sum1, sum2;
-        sum1 = firbuf[(i + ((PSY_LAME_FIR_LEN - 1) / 2)) * chans];
+        sum1 = firbuf[i + (PSY_LAME_FIR_LEN - 1) / 2];
         sum2 = 0.0;
         for (j = 0; j < ((PSY_LAME_FIR_LEN - 1) / 2) - 1; j += 2) {
-            sum1 += psy_fir_coeffs[j] * (firbuf[(i + j) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j) * chans]);
-            sum2 += psy_fir_coeffs[j + 1] * (firbuf[(i + j + 1) * chans] + firbuf[(i + PSY_LAME_FIR_LEN - j - 1) * chans]);
+            sum1 += psy_fir_coeffs[j] * (firbuf[i + j] + firbuf[i + PSY_LAME_FIR_LEN - j]);
+            sum2 += psy_fir_coeffs[j + 1] * (firbuf[i + j + 1] + firbuf[i + PSY_LAME_FIR_LEN - j - 1]);
         }
         /* NOTE: The LAME psymodel expects its input in the range -32768 to 32768. Tuning this for normalized floats would be difficult. */
         hpfsmpl[i] = (sum1 + sum2) * 32768.0f;
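With planar input, the LAME-derived fs/4 high-pass now reads firbuf at unit stride. The sum1/sum2 split only pairs even- and odd-indexed coefficients around a center tap with implicit gain 1. A compact restatement of the same per-sample arithmetic, with x standing for firbuf + i and coeffs/fir_len standing in for psy_fir_coeffs/PSY_LAME_FIR_LEN (a sketch, not code from the patch):

    /* Per-output-sample tap of the LAME high-pass, folded into one helper. */
    static float lame_hpf_tap(const float *x, const float *coeffs, int fir_len)
    {
        int   half = (fir_len - 1) / 2;
        float acc  = x[half];                   /* center tap, gain 1 */
        for (int j = 0; j < half - 1; j += 2) { /* same bound as the loop above */
            acc += coeffs[j]     * (x[j]     + x[fir_len - j]);
            acc += coeffs[j + 1] * (x[j + 1] + x[fir_len - j - 1]);
        }
        return acc; /* caller scales the result by 32768.0f */
    }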
libavcodec/psymodel.c
@@ -112,19 +112,15 @@ av_cold struct FFPsyPreprocessContext* ff_psy_preprocess_init(AVCodecContext *av
     return ctx;
 }
 
-void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx, const float *audio,
-                       float *dest, int tag, int channels)
+void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx, float **audio, int channels)
 {
-    int ch, i;
+    int ch;
+    int frame_size = ctx->avctx->frame_size;
+
     if (ctx->fstate) {
         for (ch = 0; ch < channels; ch++)
-            ff_iir_filter_flt(ctx->fcoeffs, ctx->fstate[tag+ch], ctx->avctx->frame_size,
-                              audio + ch, ctx->avctx->channels,
-                              dest + ch, ctx->avctx->channels);
-    } else {
-        for (ch = 0; ch < channels; ch++)
-            for (i = 0; i < ctx->avctx->frame_size; i++)
-                dest[i*ctx->avctx->channels + ch] = audio[i*ctx->avctx->channels + ch];
+            ff_iir_filter_flt(ctx->fcoeffs, ctx->fstate[ch], frame_size,
+                              &audio[ch][frame_size], 1, &audio[ch][frame_size], 1);
     }
 }
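ff_psy_preprocess is now per-channel and in place: for each plane it filters the current frame, the frame_size samples starting at audio[ch][frame_size], with source and destination both at unit stride (the two 1 arguments). The interleaved copy branch is gone because callers always hand in planar data. The call site from the encoder hunk above, for reference:

    /* Deinterleave first, then filter in place. */
    deinterleave_input_samples(s, data);
    if (s->psypp)
        ff_psy_preprocess(s->psypp, s->planar_samples, s->channels);
    /* afterwards s->planar_samples[ch][1024..2047] holds filtered samples */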
libavcodec/psymodel.h
@@ -174,13 +174,10 @@ av_cold struct FFPsyPreprocessContext* ff_psy_preprocess_init(AVCodecContext *av
  * Preprocess several channels in an audio frame in order to compress it better.
  *
  * @param ctx      preprocessing context
- * @param audio    samples to preprocess
- * @param dest     place to put filtered samples
- * @param tag      channel number
- * @param channels number of channels to preprocess (some additional work may be done on a stereo pair)
+ * @param audio    samples to be filtered (in place)
+ * @param channels number of channels to preprocess
  */
-void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx, const float *audio,
-                       float *dest, int tag, int channels);
+void ff_psy_preprocess(struct FFPsyPreprocessContext *ctx, float **audio, int channels);
 
 /**
  * Cleanup audio preprocessing module.