Commit 8f2f166c authored by James Almer

avcodec/atrac3p: use float_dsp in ff_atrac3p_power_compensation

Signed-off-by: James Almer <jamrial@gmail.com>
parent b664d1f3
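For context: AVFloatDSPContext.vector_fmac_scalar() computes dst[i] += src[i] * mul over len elements, with SIMD implementations selected at runtime, which is what this commit substitutes for the open-coded multiply-accumulate loop in the power compensation routine. Below is a minimal sketch of the before/after pattern, assuming an FFmpeg source tree for the libavutil headers and the usual float_dsp constraints (32-byte-aligned buffers, len a multiple of 16); it is an illustration, not code from this commit.

    /* Minimal demo of the float_dsp call that replaces the scalar loop. */
    #include <stdio.h>
    #include "libavutil/float_dsp.h"  /* AVFloatDSPContext, avpriv_float_dsp_alloc() */
    #include "libavutil/mem.h"        /* av_malloc(), av_free() */

    #define LEN 128  /* same size as one ATRAC3+ subband (ATRAC3P_SUBBAND_SAMPLES) */

    int main(void)
    {
        AVFloatDSPContext *fdsp = avpriv_float_dsp_alloc(0); /* 0: allow non-bitexact SIMD */
        float *dst   = av_malloc(LEN * sizeof(*dst));        /* av_malloc() returns SIMD-aligned memory */
        float *pwcsp = av_malloc(LEN * sizeof(*pwcsp));
        float qu_lev = 0.5f;
        int i;

        if (!fdsp || !dst || !pwcsp)
            return 1;

        for (i = 0; i < LEN; i++) {
            dst[i]   = i * 0.25f;  /* stand-in for the channel spectrum */
            pwcsp[i] = 1.0f;       /* stand-in for the power-compensation noise */
        }

        /* Before the commit:
         *     for (i = 0; i < nsp; i++)
         *         dst[i] += pwcsp[i] * qu_lev;
         * After the commit: */
        fdsp->vector_fmac_scalar(dst, pwcsp, qu_lev, LEN);

        printf("dst[0]=%f dst[%d]=%f\n", dst[0], LEN - 1, dst[LEN - 1]);

        av_free(dst);
        av_free(pwcsp);
        av_free(fdsp);
        return 0;
    }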
@@ -199,13 +199,14 @@ void ff_atrac3p_generate_tones(Atrac3pChanUnitCtx *ch_unit, AVFloatDSPContext *f
  *  Perform power compensation aka noise dithering.
  *
  *  @param[in]      ctx         ptr to the channel context
+ *  @param[in]      fdsp        pointer to float DSP context
  *  @param[in]      ch_index    which channel to process
  *  @param[in,out]  sp          ptr to channel spectrum to process
  *  @param[in]      rng_index   indicates which RNG table to use
  *  @param[in]      sb_num      which subband to process
  */
-void ff_atrac3p_power_compensation(Atrac3pChanUnitCtx *ctx, int ch_index,
-                                   float *sp, int rng_index, int sb_num);
+void ff_atrac3p_power_compensation(Atrac3pChanUnitCtx *ctx, AVFloatDSPContext *fdsp,
+                                   int ch_index, float *sp, int rng_index, int sb_num);
 
 /**
  *  Regular IMDCT and windowing without overlapping,
@@ -198,7 +198,7 @@ static av_cold int atrac3p_decode_init(AVCodecContext *avctx)
     return 0;
 }
 
-static void decode_residual_spectrum(Atrac3pChanUnitCtx *ctx,
+static void decode_residual_spectrum(ATRAC3PContext *ctx, Atrac3pChanUnitCtx *ch_unit,
                                      float out[2][ATRAC3P_FRAME_SAMPLES],
                                      int num_channels,
                                      AVCodecContext *avctx)
@@ -209,17 +209,17 @@ static void decode_residual_spectrum(Atrac3pChanUnitCtx *ctx,
     /* calculate RNG table index for each subband */
     int sb_RNG_index[ATRAC3P_SUBBANDS] = { 0 };
 
-    if (ctx->mute_flag) {
+    if (ch_unit->mute_flag) {
         for (ch = 0; ch < num_channels; ch++)
             memset(out[ch], 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out[ch]));
         return;
     }
 
-    for (qu = 0, RNG_index = 0; qu < ctx->used_quant_units; qu++)
-        RNG_index += ctx->channels[0].qu_sf_idx[qu] +
-                     ctx->channels[1].qu_sf_idx[qu];
+    for (qu = 0, RNG_index = 0; qu < ch_unit->used_quant_units; qu++)
+        RNG_index += ch_unit->channels[0].qu_sf_idx[qu] +
+                     ch_unit->channels[1].qu_sf_idx[qu];
 
-    for (sb = 0; sb < ctx->num_coded_subbands; sb++, RNG_index += 128)
+    for (sb = 0; sb < ch_unit->num_coded_subbands; sb++, RNG_index += 128)
         sb_RNG_index[sb] = RNG_index & 0x3FC;
 
     /* inverse quant and power compensation */
@@ -227,35 +227,35 @@ static void decode_residual_spectrum(Atrac3pChanUnitCtx *ctx,
         /* clear channel's residual spectrum */
         memset(out[ch], 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out[ch]));
 
-        for (qu = 0; qu < ctx->used_quant_units; qu++) {
-            src        = &ctx->channels[ch].spectrum[ff_atrac3p_qu_to_spec_pos[qu]];
+        for (qu = 0; qu < ch_unit->used_quant_units; qu++) {
+            src        = &ch_unit->channels[ch].spectrum[ff_atrac3p_qu_to_spec_pos[qu]];
             dst        = &out[ch][ff_atrac3p_qu_to_spec_pos[qu]];
             nspeclines = ff_atrac3p_qu_to_spec_pos[qu + 1] -
                          ff_atrac3p_qu_to_spec_pos[qu];
 
-            if (ctx->channels[ch].qu_wordlen[qu] > 0) {
-                q = ff_atrac3p_sf_tab[ctx->channels[ch].qu_sf_idx[qu]] *
-                    ff_atrac3p_mant_tab[ctx->channels[ch].qu_wordlen[qu]];
+            if (ch_unit->channels[ch].qu_wordlen[qu] > 0) {
+                q = ff_atrac3p_sf_tab[ch_unit->channels[ch].qu_sf_idx[qu]] *
+                    ff_atrac3p_mant_tab[ch_unit->channels[ch].qu_wordlen[qu]];
                 for (i = 0; i < nspeclines; i++)
                     dst[i] = src[i] * q;
             }
         }
 
-        for (sb = 0; sb < ctx->num_coded_subbands; sb++)
-            ff_atrac3p_power_compensation(ctx, ch, &out[ch][0],
+        for (sb = 0; sb < ch_unit->num_coded_subbands; sb++)
+            ff_atrac3p_power_compensation(ch_unit, ctx->fdsp, ch, &out[ch][0],
                                           sb_RNG_index[sb], sb);
     }
 
-    if (ctx->unit_type == CH_UNIT_STEREO) {
-        for (sb = 0; sb < ctx->num_coded_subbands; sb++) {
-            if (ctx->swap_channels[sb]) {
+    if (ch_unit->unit_type == CH_UNIT_STEREO) {
+        for (sb = 0; sb < ch_unit->num_coded_subbands; sb++) {
+            if (ch_unit->swap_channels[sb]) {
                 for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES; i++)
                     FFSWAP(float, out[0][sb * ATRAC3P_SUBBAND_SAMPLES + i],
                            out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i]);
             }
 
             /* flip coefficients' sign if requested */
-            if (ctx->negate_coeffs[sb])
+            if (ch_unit->negate_coeffs[sb])
                 for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES; i++)
                     out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i] = -(out[1][sb * ATRAC3P_SUBBAND_SAMPLES + i]);
         }
@@ -369,7 +369,7 @@ static int atrac3p_decode_frame(AVCodecContext *avctx, void *data,
                                                   avctx)) < 0)
             return ret;
 
-        decode_residual_spectrum(&ctx->ch_units[ch_block], ctx->samples,
+        decode_residual_spectrum(ctx, &ctx->ch_units[ch_block], ctx->samples,
                                  channels_to_process, avctx);
         reconstruct_frame(ctx, &ctx->ch_units[ch_block],
                           channels_to_process, avctx);
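decode_residual_spectrum() now takes the full ATRAC3PContext so it can forward ctx->fdsp into the power-compensation call. A hedged sketch of how such a float DSP context is typically created in a decoder init function follows; the names (MyDecContext, my_decode_init) are illustrative, not the literal ATRAC3+ decoder code.

    #include "libavutil/attributes.h"  /* av_cold */
    #include "libavutil/float_dsp.h"   /* AVFloatDSPContext, avpriv_float_dsp_alloc() */
    #include "libavcodec/avcodec.h"

    typedef struct MyDecContext {
        AVFloatDSPContext *fdsp;
        /* ... other decoder state ... */
    } MyDecContext;

    static av_cold int my_decode_init(AVCodecContext *avctx)
    {
        MyDecContext *ctx = avctx->priv_data;

        /* honor bit-exact mode so tests stay deterministic */
        ctx->fdsp = avpriv_float_dsp_alloc(avctx->flags & AV_CODEC_FLAG_BITEXACT);
        if (!ctx->fdsp)
            return AVERROR(ENOMEM);

        return 0;
    }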
@@ -415,11 +415,12 @@ static const int subband_to_qu[17] = {
     0, 8, 12, 16, 18, 20, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32
 };
 
-void ff_atrac3p_power_compensation(Atrac3pChanUnitCtx *ctx, int ch_index,
-                                   float *sp, int rng_index, int sb)
+void ff_atrac3p_power_compensation(Atrac3pChanUnitCtx *ctx, AVFloatDSPContext *fdsp,
+                                   int ch_index, float *sp, int rng_index, int sb)
 {
     AtracGainInfo *g1, *g2;
-    float pwcsp[ATRAC3P_SUBBAND_SAMPLES], *dst, grp_lev, qu_lev;
+    LOCAL_ALIGNED_32(float, pwcsp, [ATRAC3P_SUBBAND_SAMPLES]);
+    float *dst, grp_lev, qu_lev;
     int i, gain_lev, gcv = 0, qu, nsp;
     int swap_ch = (ctx->unit_type == CH_UNIT_STEREO && ctx->swap_channels[sb]) ? 1 : 0;
 
@@ -456,8 +457,7 @@ void ff_atrac3p_power_compensation(Atrac3pChanUnitCtx *ctx, int ch_index,
 
         dst = &sp[ff_atrac3p_qu_to_spec_pos[qu]];
         nsp = ff_atrac3p_qu_to_spec_pos[qu + 1] - ff_atrac3p_qu_to_spec_pos[qu];
 
-        for (i = 0; i < nsp; i++)
-            dst[i] += pwcsp[i] * qu_lev;
+        fdsp->vector_fmac_scalar(dst, pwcsp, qu_lev, nsp);
     }
 }
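The pwcsp buffer also changes from a plain stack array to LOCAL_ALIGNED_32 because the SIMD versions of vector_fmac_scalar() expect 32-byte-aligned pointers. A rough sketch of the difference, assuming FFmpeg's internal aligned-declaration helpers (their header location has moved between FFmpeg versions):

    #include "libavutil/mem_internal.h"  /* LOCAL_ALIGNED_32 (found in libavutil/internal.h in older trees) */

    static void alignment_example(void)
    {
        float plain[128];                         /* ABI alignment only, often less than 32 bytes */
        LOCAL_ALIGNED_32(float, aligned, [128]);  /* guaranteed 32-byte alignment, safe for SIMD routines */

        (void)plain;
        (void)aligned;
    }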