Commit 8c9a91ac authored by Paul B Mahol

avfilter: add deconvolve filter

Signed-off-by: Paul B Mahol <onemda@gmail.com>
parent 87f148d5
Changelog
@@ -30,6 +30,7 @@ version <next>:
- AVX-512/ZMM support added
- Dropped support for building for Windows XP. The minimum supported Windows
version is Windows Vista.
- deconvolve video filter
version 3.4:
doc/filters.texi
@@ -6937,6 +6937,29 @@ Set whether or not chroma is considered in the metric calculations. Default is
@code{1}.
@end table
@section deconvolve
Apply 2D deconvolution of a video stream in the frequency domain, using the
second stream as the impulse.
The filter accepts the following options:
@table @option
@item planes
Set which planes to process.
@item impulse
Set which impulse video frames will be processed: @var{first}
or @var{all}. Default is @var{all}.
@item noise
Set the noise level used when doing divisions. Default is @var{0.0000001}.
Useful when the width and height are not the same and not a power of 2, or if
the stream prior to convolving had noise.
@end table
The @code{deconvolve} filter also supports the @ref{framesync} options.
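For example, a hypothetical invocation (file names and the noise value are
purely illustrative, not part of this commit) that deconvolves a blurred clip
with a second clip holding its point spread function:
@example
ffmpeg -i blurred.mkv -i psf.mkv -filter_complex "[0:v][1:v]deconvolve=noise=0.00001" restored.mkv
@end example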
@section deflate
Apply deflate effect to the video.
libavfilter/Makefile
@@ -165,6 +165,7 @@ OBJS-$(CONFIG_DATASCOPE_FILTER) += vf_datascope.o
OBJS-$(CONFIG_DCTDNOIZ_FILTER) += vf_dctdnoiz.o
OBJS-$(CONFIG_DEBAND_FILTER) += vf_deband.o
OBJS-$(CONFIG_DECIMATE_FILTER) += vf_decimate.o
OBJS-$(CONFIG_DECONVOLVE_FILTER) += vf_convolve.o framesync.o
OBJS-$(CONFIG_DEFLATE_FILTER) += vf_neighbor.o
OBJS-$(CONFIG_DEFLICKER_FILTER) += vf_deflicker.o
OBJS-$(CONFIG_DEINTERLACE_QSV_FILTER) += vf_deinterlace_qsv.o
libavfilter/allfilters.c
@@ -175,6 +175,7 @@ static void register_all(void)
REGISTER_FILTER(DCTDNOIZ, dctdnoiz, vf);
REGISTER_FILTER(DEBAND, deband, vf);
REGISTER_FILTER(DECIMATE, decimate, vf);
REGISTER_FILTER(DECONVOLVE, deconvolve, vf);
REGISTER_FILTER(DEFLATE, deflate, vf);
REGISTER_FILTER(DEFLICKER, deflicker, vf);
REGISTER_FILTER(DEINTERLACE_QSV,deinterlace_qsv,vf);
libavfilter/version.h
@@ -30,7 +30,7 @@
#include "libavutil/version.h"
#define LIBAVFILTER_VERSION_MAJOR 7
-#define LIBAVFILTER_VERSION_MINOR 7
+#define LIBAVFILTER_VERSION_MINOR 8
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
libavfilter/vf_convolve.c
@@ -18,6 +18,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <float.h>
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
@@ -51,8 +53,11 @@ typedef struct ConvolveContext {
int depth;
int planes;
int impulse;
float noise;
int nb_planes;
int got_impulse[4];
int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} ConvolveContext;
#define OFFSET(x) offsetof(ConvolveContext, x)
@@ -63,11 +68,10 @@ static const AVOption convolve_options[] = {
{ "impulse", "when to process impulses", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
{ "first", "process only first impulse, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
{ "all", "process all impulses", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
{ "noise", "set noise", OFFSET(noise), AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
{ NULL },
};
-FRAMESYNC_DEFINE_CLASS(convolve, ConvolveContext, fs);
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pixel_fmts_fftfilt[] = {
@@ -322,11 +326,9 @@ static int ifft_horizontal(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
return 0;
}
-static void get_output(ConvolveContext *s, AVFrame *out,
-int w, int h, int n, int plane)
+static void get_output(ConvolveContext *s, FFTComplex *input, AVFrame *out,
+int w, int h, int n, int plane, float scale)
{
-FFTComplex *input = s->fft_hdata[plane];
-const float scale = 1.f / (n * n);
const int max = (1 << s->depth) - 1;
const int hh = h / 2;
const int hw = w / 2;
@@ -379,9 +381,11 @@ static void get_output(ConvolveContext *s, AVFrame *out,
static int complex_multiply(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
ConvolveContext *s = ctx->priv;
ThreadData *td = arg;
FFTComplex *input = td->hdata;
FFTComplex *filter = td->vdata;
const float noise = s->noise;
const int n = td->n;
int start = (n * jobnr) / nb_jobs;
int end = (n * (jobnr+1)) / nb_jobs;
@@ -395,7 +399,7 @@ static int complex_multiply(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
re = input[yn + x].re;
im = input[yn + x].im;
-ire = filter[yn + x].re;
+ire = filter[yn + x].re + noise;
iim = filter[yn + x].im;
input[yn + x].re = ire * re - iim * im;
@@ -406,6 +410,38 @@ static int complex_multiply(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
return 0;
}
static int complex_divide(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
ConvolveContext *s = ctx->priv;
ThreadData *td = arg;
FFTComplex *input = td->hdata;
FFTComplex *filter = td->vdata;
const float noise = s->noise;
const int n = td->n;
int start = (n * jobnr) / nb_jobs;
int end = (n * (jobnr+1)) / nb_jobs;
int y, x;
for (y = start; y < end; y++) {
int yn = y * n;
for (x = 0; x < n; x++) {
FFTSample re, im, ire, iim, div;
re = input[yn + x].re;
im = input[yn + x].im;
ire = filter[yn + x].re;
iim = filter[yn + x].im;
div = ire * ire + iim * iim + noise;
input[yn + x].re = (ire * re + iim * im) / div;
input[yn + x].im = (ire * im - iim * re) / div;
}
}
return 0;
}
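The loop above is a regularized (Wiener-style) division: each bin of the main
spectrum is multiplied by the complex conjugate of the impulse spectrum and
divided by its squared magnitude plus the noise term, so the division stays
defined where the impulse spectrum is close to zero. A minimal standalone C
sketch, with made-up sample values and helper names that are not part of this
commit, showing that this divide undoes the multiply done by complex_multiply:

#include <stdio.h>

typedef struct { float re, im; } cplx;

/* one bin of a spectrum multiply, x * h (complex_multiply does the same,
 * with the noise option added to h.re) */
static cplx cmul(cplx x, cplx h)
{
    return (cplx){ x.re * h.re - x.im * h.im,
                   x.re * h.im + x.im * h.re };
}

/* one bin of the regularized divide used by complex_divide:
 * y * conj(h) / (|h|^2 + noise) */
static cplx cdiv(cplx y, cplx h, float noise)
{
    float d = h.re * h.re + h.im * h.im + noise;
    return (cplx){ (h.re * y.re + h.im * y.im) / d,
                   (h.re * y.im - h.im * y.re) / d };
}

int main(void)
{
    cplx x = { 0.8f, -0.3f };   /* a bin of the main input's spectrum   */
    cplx h = { 0.5f,  0.2f };   /* the same bin of the impulse spectrum */
    float noise = 0.0000001f;   /* default value of the noise option    */

    cplx y = cmul(x, h);            /* convolve in the frequency domain */
    cplx z = cdiv(y, h, noise);     /* deconvolve: z is approximately x */

    printf("x=(%f,%f) z=(%f,%f)\n", x.re, x.im, z.re, z.im);
    return 0;
}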
static int do_convolve(FFFrameSync *fs)
{
AVFilterContext *ctx = fs->parent;
@@ -460,7 +496,7 @@ static int do_convolve(FFFrameSync *fs)
}
total = FFMAX(1, total);
-get_input(s, s->fft_hdata_impulse[plane], impulsepic, w, h, n, plane, 1 / total);
+get_input(s, s->fft_hdata_impulse[plane], impulsepic, w, h, n, plane, 1.f / total);
td.hdata = s->fft_hdata_impulse[plane];
td.vdata = s->fft_vdata_impulse[plane];
@@ -474,14 +510,15 @@ static int do_convolve(FFFrameSync *fs)
td.hdata = input;
td.vdata = filter;
-ctx->internal->execute(ctx, complex_multiply, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
+ctx->internal->execute(ctx, s->filter, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
td.hdata = s->fft_hdata[plane];
td.vdata = s->fft_vdata[plane];
ctx->internal->execute(ctx, ifft_vertical, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
ctx->internal->execute(ctx, ifft_horizontal, &td, NULL, FFMIN3(MAX_THREADS, n, ff_filter_get_nb_threads(ctx)));
-get_output(s, mainpic, w, h, n, plane);
+get_output(s, s->fft_hdata[plane], mainpic, w, h, n, plane, 1.f / (n * n));
}
return ff_filter_frame(outlink, mainpic);
@@ -525,6 +562,21 @@ static int activate(AVFilterContext *ctx)
return ff_framesync_activate(&s->fs);
}
static av_cold int init(AVFilterContext *ctx)
{
ConvolveContext *s = ctx->priv;
if (!strcmp(ctx->filter->name, "convolve")) {
s->filter = complex_multiply;
} else if (!strcmp(ctx->filter->name, "deconvolve")) {
s->filter = complex_divide;
} else {
return AVERROR_BUG;
}
return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
ConvolveContext *s = ctx->priv;
@@ -567,10 +619,15 @@ static const AVFilterPad convolve_outputs[] = {
{ NULL }
};
#if CONFIG_CONVOLVE_FILTER
FRAMESYNC_DEFINE_CLASS(convolve, ConvolveContext, fs);
AVFilter ff_vf_convolve = {
.name = "convolve",
.description = NULL_IF_CONFIG_SMALL("Convolve first video stream with second video stream."),
.preinit = convolve_framesync_preinit,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.activate = activate,
@@ -580,3 +637,35 @@ AVFilter ff_vf_convolve = {
.outputs = convolve_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};
#endif /* CONFIG_CONVOLVE_FILTER */
#if CONFIG_DECONVOLVE_FILTER
static const AVOption deconvolve_options[] = {
{ "planes", "set planes to deconvolve", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=7}, 0, 15, FLAGS },
{ "impulse", "when to process impulses", OFFSET(impulse), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, FLAGS, "impulse" },
{ "first", "process only first impulse, ignore rest", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "impulse" },
{ "all", "process all impulses", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "impulse" },
{ "noise", "set noise", OFFSET(noise), AV_OPT_TYPE_FLOAT, {.dbl=0.0000001}, 0, 1, FLAGS },
{ NULL },
};
FRAMESYNC_DEFINE_CLASS(deconvolve, ConvolveContext, fs);
AVFilter ff_vf_deconvolve = {
.name = "deconvolve",
.description = NULL_IF_CONFIG_SMALL("Deconvolve first video stream with second video stream."),
.preinit = deconvolve_framesync_preinit,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.activate = activate,
.priv_size = sizeof(ConvolveContext),
.priv_class = &deconvolve_class,
.inputs = convolve_inputs,
.outputs = convolve_outputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL | AVFILTER_FLAG_SLICE_THREADS,
};
#endif /* CONFIG_DECONVOLVE_FILTER */