Commit 23000c3d authored by Nicolas George

lavfi/vf_paletteuse: convert to framesync2.

parent eacb3ec9
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -249,7 +249,7 @@ OBJS-$(CONFIG_OVERLAY_FILTER)                += vf_overlay.o framesync2.o
 OBJS-$(CONFIG_OWDENOISE_FILTER)              += vf_owdenoise.o
 OBJS-$(CONFIG_PAD_FILTER)                    += vf_pad.o
 OBJS-$(CONFIG_PALETTEGEN_FILTER)             += vf_palettegen.o
-OBJS-$(CONFIG_PALETTEUSE_FILTER)             += vf_paletteuse.o dualinput.o framesync.o
+OBJS-$(CONFIG_PALETTEUSE_FILTER)             += vf_paletteuse.o framesync2.o
 OBJS-$(CONFIG_PERMS_FILTER)                  += f_perms.o
 OBJS-$(CONFIG_PERSPECTIVE_FILTER)            += vf_perspective.o
 OBJS-$(CONFIG_PHASE_FILTER)                  += vf_phase.o
--- a/libavfilter/vf_paletteuse.c
+++ b/libavfilter/vf_paletteuse.c
@@ -27,8 +27,10 @@
 #include "libavutil/internal.h"
 #include "libavutil/opt.h"
 #include "libavutil/qsort.h"
-#include "dualinput.h"
 #include "avfilter.h"
+#include "filters.h"
+#include "framesync2.h"
+#include "internal.h"
 
 enum dithering_mode {
     DITHERING_NONE,
@@ -80,7 +82,7 @@ typedef int (*set_frame_func)(struct PaletteUseContext *s, AVFrame *out, AVFrame
 
 typedef struct PaletteUseContext {
     const AVClass *class;
-    FFDualInputContext dinput;
+    FFFrameSync fs;
     struct cache_node cache[CACHE_SIZE];    /* lookup cache */
     struct color_node map[AVPALETTE_COUNT]; /* 3D-Tree (KD-Tree with K=3) for reverse colormap */
     uint32_t palette[AVPALETTE_COUNT];
@@ -129,6 +131,8 @@ static const AVOption paletteuse_options[] = {
 
 AVFILTER_DEFINE_CLASS(paletteuse);
 
+static int load_apply_palette(FFFrameSync *fs);
+
 static int query_formats(AVFilterContext *ctx)
 {
     static const enum AVPixelFormat in_fmts[] = {AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE};
@@ -900,11 +904,18 @@ static int config_output(AVFilterLink *outlink)
     AVFilterContext *ctx = outlink->src;
     PaletteUseContext *s = ctx->priv;
 
+    ret = ff_framesync2_init_dualinput(&s->fs, ctx);
+    if (ret < 0)
+        return ret;
+    s->fs.opt_repeatlast = 1; // only 1 frame in the palette
+    s->fs.in[1].before = s->fs.in[1].after = EXT_INFINITY;
+    s->fs.on_event = load_apply_palette;
+
     outlink->w = ctx->inputs[0]->w;
     outlink->h = ctx->inputs[0]->h;
     outlink->time_base = ctx->inputs[0]->time_base;
 
-    if ((ret = ff_dualinput_init(ctx, &s->dinput)) < 0)
+    if ((ret = ff_framesync2_configure(&s->fs)) < 0)
         return ret;
     return 0;
 }
@@ -951,21 +962,32 @@ static void load_palette(PaletteUseContext *s, const AVFrame *palette_frame)
     s->palette_loaded = 1;
 }
 
-static AVFrame *load_apply_palette(AVFilterContext *ctx, AVFrame *main,
-                                   const AVFrame *second)
+static int load_apply_palette(FFFrameSync *fs)
 {
+    AVFilterContext *ctx = fs->parent;
     AVFilterLink *inlink = ctx->inputs[0];
     PaletteUseContext *s = ctx->priv;
+    AVFrame *main, *second, *out;
+    int ret;
+
+    // writable for error diffusal dithering
+    ret = ff_framesync2_dualinput_get_writable(fs, &main, &second);
+    if (ret < 0)
+        return ret;
+    if (!main || !second) {
+        ret = AVERROR_BUG;
+        goto error;
+    }
     if (!s->palette_loaded) {
         load_palette(s, second);
     }
-    return apply_palette(inlink, main);
-}
-
-static int filter_frame(AVFilterLink *inlink, AVFrame *in)
-{
-    PaletteUseContext *s = inlink->dst->priv;
-    return ff_dualinput_filter_frame(&s->dinput, inlink, in);
+    out = apply_palette(inlink, main);
+    return ff_filter_frame(ctx->outputs[0], out);
+
+error:
+    av_frame_free(&main);
+    av_frame_free(&second);
+    return ret;
 }
 
 #define DEFINE_SET_FRAME(color_search, name, value) \
@@ -1013,9 +1035,6 @@ static int dither_value(int p)
 static av_cold int init(AVFilterContext *ctx)
 {
     PaletteUseContext *s = ctx->priv;
-    s->dinput.repeatlast = 1; // only 1 frame in the palette
-    s->dinput.skip_initial_unpaired = 1;
-    s->dinput.process = load_apply_palette;
     s->set_frame = set_frame_lut[s->color_search_method][s->dither];
@@ -1030,10 +1049,10 @@ static av_cold int init(AVFilterContext *ctx)
     return 0;
 }
 
-static int request_frame(AVFilterLink *outlink)
+static int activate(AVFilterContext *ctx)
 {
-    PaletteUseContext *s = outlink->src->priv;
-    return ff_dualinput_request_frame(&s->dinput, outlink);
+    PaletteUseContext *s = ctx->priv;
+    return ff_framesync2_activate(&s->fs);
 }
 
 static av_cold void uninit(AVFilterContext *ctx)
@@ -1041,7 +1060,7 @@ static av_cold void uninit(AVFilterContext *ctx)
     int i;
     PaletteUseContext *s = ctx->priv;
 
-    ff_dualinput_uninit(&s->dinput);
+    ff_framesync2_uninit(&s->fs);
     for (i = 0; i < CACHE_SIZE; i++)
         av_freep(&s->cache[i].entries);
     av_frame_free(&s->last_in);
@@ -1052,13 +1071,10 @@ static const AVFilterPad paletteuse_inputs[] = {
     {
         .name           = "default",
         .type           = AVMEDIA_TYPE_VIDEO,
-        .filter_frame   = filter_frame,
-        .needs_writable = 1, // for error diffusal dithering
     },{
         .name           = "palette",
         .type           = AVMEDIA_TYPE_VIDEO,
         .config_props   = config_input_palette,
-        .filter_frame   = filter_frame,
     },
     { NULL }
 };
@@ -1068,7 +1084,6 @@ static const AVFilterPad paletteuse_outputs[] = {
         .name          = "default",
         .type          = AVMEDIA_TYPE_VIDEO,
         .config_props  = config_output,
-        .request_frame = request_frame,
     },
     { NULL }
 };
@@ -1080,6 +1095,7 @@ AVFilter ff_vf_paletteuse = {
     .query_formats = query_formats,
     .init          = init,
     .uninit        = uninit,
+    .activate      = activate,
     .inputs        = paletteuse_inputs,
     .outputs       = paletteuse_outputs,
     .priv_class    = &paletteuse_class,
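For reference, the wiring pattern introduced above condenses to the sketch below. It assumes the in-tree libavfilter headers of this revision; the SketchContext type and the process_frames() callback are illustrative placeholders rather than code from this commit, and option/pad boilerplate is omitted.

#include "libavutil/internal.h"
#include "avfilter.h"
#include "filters.h"
#include "framesync2.h"
#include "internal.h"

typedef struct SketchContext {
    const AVClass *class;
    FFFrameSync fs;      /* replaces the old FFDualInputContext member */
} SketchContext;

/* Invoked by the frame-sync engine each time a pair of input frames lines up. */
static int process_frames(FFFrameSync *fs)
{
    AVFilterContext *ctx = fs->parent;
    AVFrame *main, *second;
    int ret;

    /* Fetch a writable main frame plus the matching secondary frame. */
    ret = ff_framesync2_dualinput_get_writable(fs, &main, &second);
    if (ret < 0)
        return ret;
    if (!main || !second)
        return AVERROR_BUG;

    /* ... modify `main` in place using `second` here ... */
    return ff_filter_frame(ctx->outputs[0], main);
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SketchContext *s = ctx->priv;
    int ret;

    ret = ff_framesync2_init_dualinput(&s->fs, ctx);
    if (ret < 0)
        return ret;
    s->fs.opt_repeatlast = 1;                               /* keep reusing the last secondary frame */
    s->fs.in[1].before = s->fs.in[1].after = EXT_INFINITY;  /* extend the secondary stream forever */
    s->fs.on_event = process_frames;
    return ff_framesync2_configure(&s->fs);
}

/* Replaces the old filter_frame/request_frame pair on the pads. */
static int activate(AVFilterContext *ctx)
{
    SketchContext *s = ctx->priv;
    return ff_framesync2_activate(&s->fs);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SketchContext *s = ctx->priv;
    ff_framesync2_uninit(&s->fs);
}

With this pattern, scheduling is driven entirely by the activate callback via ff_framesync2_activate(), which is why the per-pad filter_frame and the output pad's request_frame callbacks are dropped, and why the writability requirement moves from .needs_writable on the input pad to ff_framesync2_dualinput_get_writable().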