Commit 73438dbb authored by Paul B Mahol

avfilter/af_afir: draw IR frequency response

Signed-off-by: Paul B Mahol <onemda@gmail.com>
parent bdf1bbdb
@@ -969,6 +969,17 @@ Enable applying gain measured from power of IR.
@item maxir
Set max allowed Impulse Response filter duration in seconds. Default is 30 seconds.
Allowed range is 0.1 to 60 seconds.
@item response
Show IR frequency response, magnitude and phase, in an additional video stream.
By default it is disabled.
@item channel
Set which IR channel to display the frequency response for. By default the
first channel is displayed. This option is used only when @var{response} is enabled.
@item size
Set video stream size. This option is used only when @var{response} is enabled.
@end table

@subsection Examples
...
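For illustration, a possible invocation exercising the new @option{response}, @option{channel} and @option{size} options might look like the following. The file names are placeholders; the second input supplies the impulse response, and the labelled pads route the filtered audio and the response video to separate outputs:

@example
ffmpeg -i input.wav -i ir.wav -filter_complex \
  "[0:a][1:a]afir=response=1:channel=0:size=hd720[a][v]" \
  -map "[a]" filtered.wav -map "[v]" response.mp4
@end example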
@@ -23,10 +23,14 @@
 * An arbitrary audio FIR filter
 */
#include <float.h>
#include "libavutil/audio_fifo.h" #include "libavutil/audio_fifo.h"
#include "libavutil/common.h" #include "libavutil/common.h"
#include "libavutil/float_dsp.h" #include "libavutil/float_dsp.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h" #include "libavutil/opt.h"
#include "libavutil/xga_font_data.h"
#include "libavcodec/avfft.h" #include "libavcodec/avfft.h"
#include "audio.h" #include "audio.h"
@@ -162,6 +166,122 @@ static int fir_frame(AudioFIRContext *s, AVFilterLink *outlink)
    return ret;
}
/* Render a text string into a packed 32-bit RGB0 frame using the
 * built-in 8x8 CGA bitmap font. */
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
{
    const uint8_t *font;
    int font_height;
    int i;

    font = avpriv_cga_font, font_height = 8;

    for (i = 0; txt[i]; i++) {
        int char_y, mask;

        uint8_t *p = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
        for (char_y = 0; char_y < font_height; char_y++) {
            for (mask = 0x80; mask; mask >>= 1) {
                if (font[txt[i] * font_height + char_y] & mask)
                    AV_WL32(p, color);
                p += 4;
            }
            p += pic->linesize[0] - 8 * 4;
        }
    }
}
/* Draw a line from (x0,y0) to (x1,y1) with Bresenham's algorithm;
 * each pixel is a packed 32-bit RGB0 value. */
static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
{
    int dx = FFABS(x1-x0), sx = x0 < x1 ? 1 : -1;
    int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
    int err = (dx>dy ? dx : -dy) / 2, e2;

    for (;;) {
        AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);

        if (x0 == x1 && y0 == y1)
            break;

        e2 = err;

        if (e2 >-dx) {
            err -= dy;
            x0 += sx;
        }
        if (e2 < dy) {
            err += dx;
            y0 += sy;
        }
    }
}
static void draw_response(AVFilterContext *ctx, AVFrame *out)
{
    AudioFIRContext *s = ctx->priv;
    float *mag, *phase, min = FLT_MAX, max = FLT_MIN;
    int prev_ymag = -1, prev_yphase = -1;
    char text[32];
    int channel, i, x;

    memset(out->data[0], 0, s->h * out->linesize[0]);

    phase = av_malloc_array(s->w, sizeof(*phase));
    mag = av_malloc_array(s->w, sizeof(*mag));
    if (!mag || !phase)
        goto end;

    /* Clamp the requested channel to a valid index. */
    channel = av_clip(s->ir_channel, 0, s->in[1]->channels - 1);
    for (i = 0; i < s->w; i++) {
        const float *src = (const float *)s->in[1]->extended_data[channel];
        double w = i * M_PI / (s->w - 1);
        double real = 0.;
        double imag = 0.;

        /* Evaluate the DTFT of the IR at normalized frequency w,
         * sweeping from 0 (DC) to pi (Nyquist) across the picture width. */
        for (x = 0; x < s->nb_taps; x++) {
            real += cos(-x * w) * src[x];
            imag += sin(-x * w) * src[x];
        }
        mag[i] = hypot(real, imag);
        phase[i] = atan2(imag, real);
        min = fminf(min, mag[i]);
        max = fmaxf(max, mag[i]);
    }

    for (i = 0; i < s->w; i++) {
        int ymag = mag[i] / max * (s->h - 1);
        int yphase = (0.5 * (1. + phase[i] / M_PI)) * (s->h - 1);

        ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
        yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);

        if (prev_ymag < 0)
            prev_ymag = ymag;
        if (prev_yphase < 0)
            prev_yphase = yphase;

        /* Magnitude trace in magenta, phase trace in green. */
        draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
        draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);

        prev_ymag = ymag;
        prev_yphase = yphase;
    }

    if (s->w > 400 && s->h > 100) {
        drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", max);
        drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);

        drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", min);
        drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);
    }

end:
    av_free(phase);
    av_free(mag);
}
static int convert_coeffs(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
@@ -216,6 +336,9 @@ static int convert_coeffs(AVFilterContext *ctx)
    av_audio_fifo_read(s->fifo[1], (void **)s->in[1]->extended_data, s->nb_taps);
    if (s->response)
        draw_response(ctx, s->video);
    if (s->again) {
        float power = 0;
@@ -322,6 +445,13 @@ static int filter_frame(AVFilterLink *link, AVFrame *frame)
        return ret;
    }
    if (s->response && s->have_coeffs) {
        /* Send a copy of the rendered response picture on the video output. */
        s->video->pts = s->pts;
        ret = ff_filter_frame(ctx->outputs[1], av_frame_clone(s->video));
        if (ret < 0)
            return ret;
    }
    if (s->have_coeffs) {
        while (av_audio_fifo_size(s->fifo[0]) >= s->part_size) {
            ret = fir_frame(s, outlink);
@@ -373,14 +503,26 @@ static int request_frame(AVFilterLink *outlink)
static int query_formats(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGB0,
        AV_PIX_FMT_NONE
    };
    int ret, i;
    if (s->response) {
        AVFilterLink *videolink = ctx->outputs[1];

        /* The optional video output supports only RGB0. */
        formats = ff_make_format_list(pix_fmts);
        if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
            return ret;
    }
    layouts = ff_all_channel_counts();

    if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;
@@ -488,11 +630,59 @@ static av_cold void uninit(AVFilterContext *ctx)
    av_audio_fifo_free(s->fifo[1]);
    av_freep(&s->fdsp);
    av_freep(&ctx->output_pads[0].name);
    if (s->response)
        av_freep(&ctx->output_pads[1].name);
    av_frame_free(&s->video);
}

static int config_video(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioFIRContext *s = ctx->priv;

    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->w = s->w;
    outlink->h = s->h;

    av_frame_free(&s->video);
    s->video = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!s->video)
        return AVERROR(ENOMEM);

    return 0;
}
static av_cold int init(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
    AVFilterPad pad, vpad;

    /* Output pads are created dynamically: the audio pad always,
     * the video pad only when the response option is enabled. */
    pad = (AVFilterPad) {
        .name          = av_strdup("default"),
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    };
    if (!pad.name)
        return AVERROR(ENOMEM);

    if (s->response) {
        vpad = (AVFilterPad) {
            .name         = av_strdup("filter_response"),
            .type         = AVMEDIA_TYPE_VIDEO,
            .config_props = config_video,
        };
        if (!vpad.name)
            return AVERROR(ENOMEM);
    }

    ff_insert_outpad(ctx, 0, &pad);
    if (s->response)
        ff_insert_outpad(ctx, 1, &vpad);
    s->fcmul_add = fcmul_add_c;
@@ -519,17 +709,8 @@ static const AVFilterPad afir_inputs[] = {
    { NULL }
};
static const AVFilterPad afir_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(AudioFIRContext, x)

static const AVOption afir_options[] = {
@@ -537,7 +718,10 @@ static const AVOption afir_options[] = {
{ "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 10, AF }, { "wet", "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 10, AF },
{ "length", "set IR length", OFFSET(length), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, AF }, { "length", "set IR length", OFFSET(length), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 1, AF },
{ "again", "enable auto gain", OFFSET(again), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF }, { "again", "enable auto gain", OFFSET(again), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, AF },
{ "maxir", "set max ir length", OFFSET(max_ir_len), AV_OPT_TYPE_FLOAT, {.dbl=30}, 0.1, 60, AF }, { "maxir", "set max IR length", OFFSET(max_ir_len), AV_OPT_TYPE_FLOAT, {.dbl=30}, 0.1, 60, AF },
{ "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
{ "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
{ "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
    { NULL }
};
@@ -552,6 +736,6 @@ AVFilter ff_af_afir = {
    .init          = init,
    .uninit        = uninit,
    .inputs        = afir_inputs,
    .flags         = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
                     AVFILTER_FLAG_SLICE_THREADS,
};
@@ -40,6 +40,9 @@ typedef struct AudioFIRContext {
    float length;
    int again;
    float max_ir_len;
    int response;
    int w, h;
    int ir_channel;
    float gain;
@@ -69,6 +72,7 @@ typedef struct AudioFIRContext {
    AVAudioFifo *fifo[2];
    AVFrame *in[2];
    AVFrame *buffer;
    AVFrame *video;
    int64_t pts;
    int index;
...