Commit a53a9f1c authored by Paul B Mahol

avfilter/af_afir: implement non-uniform partitioned convolution

Using multiple frequency delay lines.
parent 300dc45f
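For readers new to the technique: non-uniform partitioned convolution splits the impulse response into contiguous segments, runs each segment as its own uniform partitioned convolver (one frequency delay line per partition size), and sums the partial results. Small partitions cover the head of the IR so latency stays at the minimum partition size; larger partitions cover the tail with far fewer, bigger FFTs. The standalone sketch below is editorial illustration, not FFmpeg code, and does no FFTs at all; it only demonstrates that splitting the taps into segments and summing the partial convolutions reproduces the direct convolution.

/* Partitioned convolution, time-domain reference: splitting the IR into
 * contiguous tap ranges and summing the partial convolutions is exactly
 * equivalent to convolving with the whole IR. afir evaluates each segment
 * in the frequency domain instead; the decomposition is the same. */
#include <assert.h>
#include <math.h>
#include <stdio.h>

#define N_IN   64
#define N_TAPS 24

/* accumulate the contribution of taps [off, off + len) into y */
static void conv_taps(const float *x, int nx, const float *h,
                      int off, int len, float *y)
{
    for (int n = 0; n < nx; n++)
        for (int k = off; k < off + len && k <= n; k++)
            y[n] += h[k] * x[n - k];
}

int main(void)
{
    float x[N_IN], h[N_TAPS], y_ref[N_IN] = {0}, y_seg[N_IN] = {0};

    for (int i = 0; i < N_IN; i++)   x[i] = sinf(0.3f * i);
    for (int i = 0; i < N_TAPS; i++) h[i] = 1.0f / (1 + i);

    conv_taps(x, N_IN, h, 0, N_TAPS, y_ref);   /* whole IR at once */

    conv_taps(x, N_IN, h,  0, 4, y_seg);       /* two small partitions first */
    conv_taps(x, N_IN, h,  4, 4, y_seg);
    conv_taps(x, N_IN, h,  8, 8, y_seg);       /* then doubled partitions */
    conv_taps(x, N_IN, h, 16, 8, y_seg);

    for (int n = 0; n < N_IN; n++)
        assert(fabsf(y_ref[n] - y_seg[n]) < 1e-4f);
    printf("segmented convolution matches direct convolution\n");
    return 0;
}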
doc/filters.texi

@@ -1231,14 +1231,14 @@ Set video stream size. This option is used only when @var{response} is enabled.
 Set video stream frame rate. This option is used only when @var{response} is enabled.
 
 @item minp
-Set minimal partition size used for convolution. Default is @var{16}.
+Set minimal partition size used for convolution. Default is @var{8192}.
 Allowed range is from @var{16} to @var{32768}.
 Lower values decreases latency at cost of higher CPU usage.
 
 @item maxp
 Set maximal partition size used for convolution. Default is @var{8192}.
 Allowed range is from @var{16} to @var{32768}.
-Lower values decreases latency at cost of higher CPU usage.
+Lower values may increase CPU usage.
 @end table
 
 @subsection Examples

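Usage note (editorial, not part of this commit): the latency/CPU trade-off described above is driven entirely by the minp/maxp options. A hypothetical low-latency invocation, with placeholder file names, might look like:

  ffmpeg -i input.wav -i ir.wav -lavfi afir=minp=64:maxp=4096 output.wav

Latency is on the order of the minimum partition size, so minp=64 at 48 kHz is roughly 1.3 ms, while a larger maxp keeps the long tail of the impulse response cheap to compute.
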
libavfilter/af_afir.c

@@ -59,54 +59,84 @@ static void fcmul_add_c(float *sum, const float *t, const float *c, ptrdiff_t le
 static int fir_channel(AVFilterContext *ctx, void *arg, int ch, int nb_jobs)
 {
     AudioFIRContext *s = ctx->priv;
-    AudioFIRSegment *seg = &s->seg[0];
-    const float *src = (const float *)s->in[0]->extended_data[ch];
-    float *sum = (float *)seg->sum->extended_data[ch];
+    const float *in = (const float *)s->in[0]->extended_data[ch];
     AVFrame *out = arg;
-    float *block, *dst, *ptr;
+    float *block, *buf, *ptr = (float *)out->extended_data[ch];
     int n, i, j;
 
-    memset(sum, 0, sizeof(*sum) * seg->fft_length);
-    block = (float *)seg->block->extended_data[ch] + seg->part_index[ch] * seg->block_size;
-    memset(block, 0, sizeof(*block) * seg->fft_length);
-
-    s->fdsp->vector_fmul_scalar(block, src, s->dry_gain, FFALIGN(out->nb_samples, 4));
-    emms_c();
-
-    av_rdft_calc(seg->rdft[ch], block);
-    block[2 * seg->part_size] = block[1];
-    block[1] = 0;
-
-    j = seg->part_index[ch];
-
-    for (i = 0; i < seg->nb_partitions; i++) {
-        const int coffset = i * seg->coeff_size;
-        const float *block = (const float *)seg->block->extended_data[ch] + j * seg->block_size;
-        const FFTComplex *coeff = (const FFTComplex *)seg->coeff->extended_data[ch * !s->one2many] + coffset;
-
-        s->fcmul_add(sum, block, (const float *)coeff, seg->part_size);
-
-        if (j == 0)
-            j = seg->nb_partitions;
-        j--;
-    }
-
-    sum[1] = sum[2 * seg->part_size];
-    av_rdft_calc(seg->irdft[ch], sum);
-
-    dst = (float *)seg->buffer->extended_data[ch];
-    for (n = 0; n < seg->part_size; n++) {
-        dst[n] += sum[n];
-    }
-
-    ptr = (float *)out->extended_data[ch];
-    s->fdsp->vector_fmul_scalar(ptr, dst, s->wet_gain, FFALIGN(out->nb_samples, 4));
-    emms_c();
-
-    dst = (float *)seg->buffer->extended_data[ch];
-    memcpy(dst, sum + seg->part_size, seg->part_size * sizeof(*dst));
-
-    seg->part_index[ch] = (seg->part_index[ch] + 1) % seg->nb_partitions;
+    for (int segment = 0; segment < s->nb_segments; segment++) {
+        AudioFIRSegment *seg = &s->seg[segment];
+        float *src = (float *)seg->input->extended_data[ch];
+        float *dst = (float *)seg->output->extended_data[ch];
+        float *sum = (float *)seg->sum->extended_data[ch];
+
+        s->fdsp->vector_fmul_scalar(src + seg->input_offset, in, s->dry_gain, FFALIGN(out->nb_samples, 4));
+        emms_c();
+
+        seg->output_offset[ch] += s->min_part_size;
+        if (seg->output_offset[ch] == seg->part_size) {
+            seg->output_offset[ch] = 0;
+            memset(dst, 0, sizeof(*dst) * seg->part_size);
+        } else {
+            memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));
+
+            dst += seg->output_offset[ch];
+            for (n = 0; n < out->nb_samples; n++) {
+                ptr[n] += dst[n];
+            }
+            continue;
+        }
+
+        memset(sum, 0, sizeof(*sum) * seg->fft_length);
+        block = (float *)seg->block->extended_data[ch] + seg->part_index[ch] * seg->block_size;
+        memset(block + seg->part_size, 0, sizeof(*block) * (seg->fft_length - seg->part_size));
+
+        memcpy(block, src, sizeof(*src) * seg->part_size);
+
+        av_rdft_calc(seg->rdft[ch], block);
+        block[2 * seg->part_size] = block[1];
+        block[1] = 0;
+
+        j = seg->part_index[ch];
+
+        for (i = 0; i < seg->nb_partitions; i++) {
+            const int coffset = j * seg->coeff_size;
+            const float *block = (const float *)seg->block->extended_data[ch] + i * seg->block_size;
+            const FFTComplex *coeff = (const FFTComplex *)seg->coeff->extended_data[ch * !s->one2many] + coffset;
+
+            s->fcmul_add(sum, block, (const float *)coeff, seg->part_size);
+
+            if (j == 0)
+                j = seg->nb_partitions;
+            j--;
+        }
+
+        sum[1] = sum[2 * seg->part_size];
+        av_rdft_calc(seg->irdft[ch], sum);
+
+        buf = (float *)seg->buffer->extended_data[ch];
+        for (n = 0; n < seg->part_size; n++) {
+            buf[n] += sum[n];
+        }
+
+        for (n = 0; n < seg->part_size; n++) {
+            dst[n] += buf[n];
+        }
+
+        buf = (float *)seg->buffer->extended_data[ch];
+        memcpy(buf, sum + seg->part_size, seg->part_size * sizeof(*buf));
+
+        seg->part_index[ch] = (seg->part_index[ch] + 1) % seg->nb_partitions;
+
+        memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));
+
+        for (n = 0; n < out->nb_samples; n++) {
+            ptr[n] += dst[n];
+        }
+    }
+
+    s->fdsp->vector_fmul_scalar(ptr, ptr, s->wet_gain, FFALIGN(out->nb_samples, 4));
+    emms_c();
 
     return 0;
 }

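The scheduling idea in the new fir_channel() is that every segment receives input in min_part_size blocks, but a segment only runs its FFT / fcmul_add / inverse-FFT pass when its per-channel output_offset wraps around to the segment's own part_size; on the calls in between it just shifts its input FIFO and replays the next min_part_size slice of the output block it computed last time. A minimal standalone sketch of that gating, with illustrative sizes (editorial, not FFmpeg code):

/* One large-partition segment scheduled against small input blocks.
 * With part_size == 4 * min_part_size the expensive transform path runs
 * only on every 4th call; the other calls replay cached output. */
#include <stdio.h>

int main(void)
{
    const int min_part_size = 128;   /* size of each incoming block */
    const int part_size     = 512;   /* this segment's partition size */
    int output_offset = 0;           /* mirrors seg->output_offset[ch] */

    for (int call = 0; call < 8; call++) {
        output_offset += min_part_size;
        if (output_offset == part_size) {
            output_offset = 0;
            printf("call %d: FFT partition update, emit fresh samples [0,%d)\n",
                   call, min_part_size);
        } else {
            printf("call %d: replay cached output samples [%d,%d)\n",
                   call, output_offset, output_offset + min_part_size);
        }
    }
    return 0;
}

Because the planning loop doubles partition sizes from segment to segment, the larger segments run proportionally fewer, bigger transforms, which is where the saving over a uniformly small partition comes from.
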
@@ -275,19 +305,28 @@ end:
     av_free(mag);
 }
 
-static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg, int nb_partitions, int part_size)
+static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg,
+                        int offset, int nb_partitions, int part_size)
 {
+    AudioFIRContext *s = ctx->priv;
+
     seg->rdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->rdft));
     seg->irdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->irdft));
     if (!seg->rdft || !seg->irdft)
         return AVERROR(ENOMEM);
 
     seg->fft_length = part_size * 4 + 1;
     seg->part_size = part_size;
     seg->block_size = FFALIGN(seg->fft_length, 32);
     seg->coeff_size = FFALIGN(seg->part_size + 1, 32);
     seg->nb_partitions = nb_partitions;
-    seg->segment_size = part_size * nb_partitions;
+    seg->input_size = offset + s->min_part_size;
+    seg->input_offset = offset;
+
+    seg->part_index = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->part_index));
+    seg->output_offset = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->output_offset));
+    if (!seg->part_index || !seg->output_offset)
+        return AVERROR(ENOMEM);
 
     for (int ch = 0; ch < ctx->inputs[0]->channels; ch++) {
         seg->rdft[ch] = av_rdft_init(av_log2(2 * part_size), DFT_R2C);

@@ -296,15 +335,13 @@ static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg, int nb_parti
             return AVERROR(ENOMEM);
     }
 
-    seg->part_index = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->part_index));
-    if (!seg->part_index)
-        return AVERROR(ENOMEM);
-
     seg->sum = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
     seg->block = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->block_size);
     seg->buffer = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
     seg->coeff = ff_get_audio_buffer(ctx->inputs[1], seg->nb_partitions * seg->coeff_size * 2);
-    if (!seg->buffer || !seg->sum || !seg->block || !seg->coeff)
+    seg->input = ff_get_audio_buffer(ctx->inputs[0], seg->input_size);
+    seg->output = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
+    if (!seg->buffer || !seg->sum || !seg->block || !seg->coeff || !seg->input || !seg->output)
         return AVERROR(ENOMEM);
 
     return 0;

@@ -313,20 +350,37 @@ static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg, int nb_parti
 static int convert_coeffs(AVFilterContext *ctx)
 {
     AudioFIRContext *s = ctx->priv;
-    int ret, i, ch, n, N;
+    int left, offset = 0, part_size, max_part_size;
+    int ret, i, ch, n;
     float power = 0;
 
     s->nb_taps = ff_inlink_queued_samples(ctx->inputs[1]);
     if (s->nb_taps <= 0)
         return AVERROR(EINVAL);
 
-    for (n = av_log2(s->minp); (1 << n) < s->nb_taps; n++);
-    N = FFMIN(n, av_log2(s->maxp));
+    if (s->minp > s->maxp) {
+        s->maxp = s->minp;
+    }
 
-    s->nb_segments = 1;
-    ret = init_segment(ctx, &s->seg[0], (s->nb_taps + (1 << N) - 1) / (1 << N), 1 << N);
-    if (ret < 0)
-        return ret;
+    left = s->nb_taps;
+    part_size = 1 << av_log2(s->minp);
+    max_part_size = 1 << av_log2(s->maxp);
+
+    s->min_part_size = part_size;
+
+    for (i = 0; left > 0; i++) {
+        int step = part_size == max_part_size ? INT_MAX : 1 + (i == 0);
+        int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size);
+
+        s->nb_segments = i + 1;
+        ret = init_segment(ctx, &s->seg[i], offset, nb_partitions, part_size);
+        if (ret < 0)
+            return ret;
+        offset += nb_partitions * part_size;
+        left -= nb_partitions * part_size;
+
+        part_size *= 2;
+        part_size = FFMIN(part_size, max_part_size);
+    }
 
     ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_taps, s->nb_taps, &s->in[1]);
     if (ret < 0)

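To make the planning loop concrete, here is a worked trace (editorial illustration with assumed option values) for a 4096-tap impulse response with minp=128 and maxp=8192. step is 2 for the first segment and 1 afterwards, and part_size doubles each iteration until it would exceed maxp:

  segment  part_size  nb_partitions  offset  taps covered
     0         128          2             0          256
     1         256          1           256          256
     2         512          1           512          512
     3        1024          1          1024         1024
     4        2048          1          2048         2048

The five segments cover 256 + 256 + 512 + 1024 + 2048 = 4096 taps, and overall latency is min_part_size = 128 samples.
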
@@ -426,7 +480,11 @@ static int convert_coeffs(AVFilterContext *ctx)
         av_log(ctx, AV_LOG_DEBUG, "nb_partitions: %d\n", seg->nb_partitions);
         av_log(ctx, AV_LOG_DEBUG, "partition size: %d\n", seg->part_size);
+        av_log(ctx, AV_LOG_DEBUG, "block size: %d\n", seg->block_size);
         av_log(ctx, AV_LOG_DEBUG, "fft_length: %d\n", seg->fft_length);
+        av_log(ctx, AV_LOG_DEBUG, "coeff_size: %d\n", seg->coeff_size);
+        av_log(ctx, AV_LOG_DEBUG, "input_size: %d\n", seg->input_size);
+        av_log(ctx, AV_LOG_DEBUG, "input_offset: %d\n", seg->input_offset);
     }
 }

@@ -488,7 +546,7 @@ static int activate(AVFilterContext *ctx)
             return ret;
     }
 
-    ret = ff_inlink_consume_samples(ctx->inputs[0], s->seg[0].part_size, s->seg[0].part_size, &in);
+    ret = ff_inlink_consume_samples(ctx->inputs[0], s->min_part_size, s->min_part_size, &in);
     if (ret > 0)
         ret = fir_frame(s, in, outlink);

@@ -505,7 +563,7 @@ static int activate(AVFilterContext *ctx)
         }
     }
 
-    if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->seg[0].part_size) {
+    if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
         ff_filter_set_ready(ctx, 10);
         return 0;
     }

@@ -624,12 +682,16 @@ static void uninit_segment(AVFilterContext *ctx, AudioFIRSegment *seg)
     }
     av_freep(&seg->irdft);
 
+    av_freep(&seg->output_offset);
     av_freep(&seg->part_index);
 
     av_frame_free(&seg->block);
     av_frame_free(&seg->sum);
     av_frame_free(&seg->buffer);
     av_frame_free(&seg->coeff);
+    av_frame_free(&seg->input);
+    av_frame_free(&seg->output);
+
+    seg->input_size = 0;
 }
 
 static av_cold void uninit(AVFilterContext *ctx)

@@ -720,11 +782,11 @@ static av_cold int init(AVFilterContext *ctx)
 static const AVFilterPad afir_inputs[] = {
     {
         .name = "main",
         .type = AVMEDIA_TYPE_AUDIO,
     },{
         .name = "ir",
         .type = AVMEDIA_TYPE_AUDIO,
     },
     { NULL }
 };

@@ -751,7 +813,7 @@ static const AVOption afir_options[] = {
     { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
     { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
     { "rate", "set video rate", OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
-    { "minp", "set min partition size", OFFSET(minp), AV_OPT_TYPE_INT, {.i64=16}, 16, 32768, AF },
+    { "minp", "set min partition size", OFFSET(minp), AV_OPT_TYPE_INT, {.i64=8192}, 16, 32768, AF },
     { "maxp", "set max partition size", OFFSET(maxp), AV_OPT_TYPE_INT, {.i64=8192}, 16, 32768, AF },
     { NULL }
 };

libavfilter/af_afir.h

@@ -37,14 +37,18 @@ typedef struct AudioFIRSegment {
     int block_size;
     int fft_length;
     int coeff_size;
-    int segment_size;
+    int input_size;
+    int input_offset;
 
+    int *output_offset;
     int *part_index;
 
     AVFrame *sum;
     AVFrame *block;
     AVFrame *buffer;
     AVFrame *coeff;
+    AVFrame *input;
+    AVFrame *output;
 
     RDFTContext **rdft, **irdft;
 } AudioFIRSegment;

@@ -80,6 +84,7 @@ typedef struct AudioFIRContext {
     AVFrame *in[2];
     AVFrame *video;
 
+    int min_part_size;
     int64_t pts;
 
     AVFloatDSPContext *fdsp;

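As I read the new fields from how init_segment() and fir_channel() use them, the annotated excerpt below summarizes their roles; the comments are editorial, not part of the header:

    int input_size;      /* per-segment input FIFO length: the segment's offset
                            into the IR plus one fresh min_part_size block */
    int input_offset;    /* where each new block is written into that FIFO, so the
                            segment reads input delayed by its IR offset */
    int *output_offset;  /* per channel: read position inside the part_size output
                            block computed on the segment's last transform pass */
    int *part_index;     /* per channel: ring index into the segment's frequency
                            delay line (which stored spectrum slot is written next) */
    AVFrame *input;      /* the per-segment input FIFO described above */
    AVFrame *output;     /* the cached part_size output block that is replayed */

and, in AudioFIRContext:

    int min_part_size;   /* smallest partition size: per-call block size and,
                            in samples, the filter's latency */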