Commit 21566b21 authored by Michael Niedermayer's avatar Michael Niedermayer

avfilter: add scale2ref filter

This filter can be used to scale one stream to match another, or based on
another; it is useful for scaling subtitles or other streams to be overlaid
Signed-off-by: Michael Niedermayer <michael@niedermayer.cc>
parent 84170d4b
......@@ -2780,6 +2780,7 @@ repeatfields_filter_deps="gpl"
resample_filter_deps="avresample"
sab_filter_deps="gpl swscale"
scale_filter_deps="swscale"
scale2ref_filter_deps="swscale"
select_filter_select="pixelutils"
smartblur_filter_deps="gpl swscale"
showcqt_filter_deps="avcodec"
......@@ -5831,6 +5832,7 @@ enabled removelogo_filter && prepend avfilter_deps "avformat avcodec swscale"
enabled resample_filter && prepend avfilter_deps "avresample"
enabled sab_filter && prepend avfilter_deps "swscale"
enabled scale_filter && prepend avfilter_deps "swscale"
enabled scale2ref_filter && prepend avfilter_deps "swscale"
enabled showspectrum_filter && prepend avfilter_deps "avcodec"
enabled smartblur_filter && prepend avfilter_deps "swscale"
enabled subtitles_filter && prepend avfilter_deps "avformat avcodec"
......
......@@ -9122,6 +9122,23 @@ If the specified expression is not valid, it is kept at its current
value.
@end table
@section scale2ref
Scale (resize) the input video, based on a reference video.
See the scale filter for available options; scale2ref supports the same
options, but uses the reference video instead of the main input as basis.
@subsection Examples
@itemize
@item
Scale a subtitle stream to match the main video in size before overlaying
@example
'scale2ref[b][a];[a][b]overlay'
@end example
@end itemize
@section separatefields
The @code{separatefields} takes a frame-based video input and splits
......
......@@ -197,6 +197,7 @@ OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o
OBJS-$(CONFIG_SEPARATEFIELDS_FILTER) += vf_separatefields.o
OBJS-$(CONFIG_SAB_FILTER) += vf_sab.o
OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o
OBJS-$(CONFIG_SCALE2REF_FILTER) += vf_scale.o
OBJS-$(CONFIG_SELECT_FILTER) += f_select.o
OBJS-$(CONFIG_SENDCMD_FILTER) += f_sendcmd.o
OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o
......
......@@ -211,6 +211,7 @@ void avfilter_register_all(void)
REGISTER_FILTER(ROTATE, rotate, vf);
REGISTER_FILTER(SAB, sab, vf);
REGISTER_FILTER(SCALE, scale, vf);
REGISTER_FILTER(SCALE2REF, scale2ref, vf);
REGISTER_FILTER(SELECT, select, vf);
REGISTER_FILTER(SENDCMD, sendcmd, vf);
REGISTER_FILTER(SEPARATEFIELDS, separatefields, vf);
......
......@@ -30,7 +30,7 @@
#include "libavutil/version.h"
#define LIBAVFILTER_VERSION_MAJOR 5
#define LIBAVFILTER_VERSION_MINOR 33
#define LIBAVFILTER_VERSION_MINOR 34
#define LIBAVFILTER_VERSION_MICRO 100
#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
......
......@@ -111,6 +111,8 @@ typedef struct ScaleContext {
int force_original_aspect_ratio;
} ScaleContext;
AVFilter ff_vf_scale2ref;
static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
ScaleContext *scale = ctx->priv;
......@@ -234,7 +236,10 @@ static const int *parse_yuv_type(const char *s, enum AVColorSpace colorspace)
static int config_props(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AVFilterLink *inlink = outlink->src->inputs[0];
AVFilterLink *inlink0 = outlink->src->inputs[0];
AVFilterLink *inlink = ctx->filter == &ff_vf_scale2ref ?
outlink->src->inputs[1] :
outlink->src->inputs[0];
enum AVPixelFormat outfmt = outlink->format;
ScaleContext *scale = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
......@@ -343,8 +348,9 @@ static int config_props(AVFilterLink *outlink)
if (scale->isws[1])
sws_freeContext(scale->isws[1]);
scale->isws[0] = scale->isws[1] = scale->sws = NULL;
if (inlink->w == outlink->w && inlink->h == outlink->h &&
inlink->format == outlink->format)
if (inlink0->w == outlink->w &&
inlink0->h == outlink->h &&
inlink0->format == outlink->format)
;
else {
struct SwsContext **swscs[3] = {&scale->sws, &scale->isws[0], &scale->isws[1]};
......@@ -356,9 +362,9 @@ static int config_props(AVFilterLink *outlink)
if (!*s)
return AVERROR(ENOMEM);
av_opt_set_int(*s, "srcw", inlink ->w, 0);
av_opt_set_int(*s, "srch", inlink ->h >> !!i, 0);
av_opt_set_int(*s, "src_format", inlink->format, 0);
av_opt_set_int(*s, "srcw", inlink0 ->w, 0);
av_opt_set_int(*s, "srch", inlink0 ->h >> !!i, 0);
av_opt_set_int(*s, "src_format", inlink0->format, 0);
av_opt_set_int(*s, "dstw", outlink->w, 0);
av_opt_set_int(*s, "dsth", outlink->h >> !!i, 0);
av_opt_set_int(*s, "dst_format", outfmt, 0);
......@@ -374,7 +380,7 @@ static int config_props(AVFilterLink *outlink)
/* Override YUV420P settings to have the correct (MPEG-2) chroma positions
* MPEG-2 chroma positions are used by convention
* XXX: support other 4:2:0 pixel formats */
if (inlink->format == AV_PIX_FMT_YUV420P) {
if (inlink0->format == AV_PIX_FMT_YUV420P) {
scale->in_v_chr_pos = (i == 0) ? 128 : (i == 1) ? 64 : 192;
}
......@@ -415,6 +421,17 @@ fail:
return ret;
}
/**
 * Configure the second ("ref") output pad: the reference stream is passed
 * through untouched, so the output simply mirrors the geometry of the
 * second input (the reference video).
 *
 * @param outlink the "ref" output link being configured
 * @return 0 (cannot fail)
 */
static int config_props_ref(AVFilterLink *outlink)
{
    const AVFilterLink *ref = outlink->src->inputs[1];

    outlink->w                   = ref->w;
    outlink->h                   = ref->h;
    outlink->sample_aspect_ratio = ref->sample_aspect_ratio;

    return 0;
}
static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, struct SwsContext *sws, int y, int h, int mul, int field)
{
ScaleContext *scale = link->dst->priv;
......@@ -542,6 +559,13 @@ static int filter_frame(AVFilterLink *link, AVFrame *in)
return ff_filter_frame(outlink, out);
}
/**
 * Frame callback for the reference input: forward the frame unchanged to
 * the second ("ref") output pad. Ownership of @p in is passed on to
 * ff_filter_frame().
 */
static int filter_frame_ref(AVFilterLink *link, AVFrame *in)
{
    return ff_filter_frame(link->dst->outputs[1], in);
}
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
char *res, int res_len, int flags)
{
......@@ -643,3 +667,53 @@ AVFilter ff_vf_scale = {
.outputs = avfilter_vf_scale_outputs,
.process_command = process_command,
};
/* AVClass for the scale2ref filter. It deliberately reuses scale_options
 * (and child_class_next) from the scale filter so that scale2ref accepts
 * exactly the same options as scale. */
static const AVClass scale2ref_class = {
.class_name = "scale2ref",
.item_name = av_default_item_name,
.option = scale_options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_FILTER,
.child_class_next = child_class_next,
};
/* Input pads: pad 0 is the video to be scaled (handled by the shared
 * filter_frame), pad 1 is the reference video that only provides the
 * target geometry and is passed through via filter_frame_ref. */
static const AVFilterPad avfilter_vf_scale2ref_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
},
{
.name = "ref",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame_ref,
},
{ NULL }
};
/* Output pads: pad 0 carries the scaled video (configured by the shared
 * config_props, which detects scale2ref and sizes against input 1),
 * pad 1 carries the unchanged reference video (config_props_ref). */
static const AVFilterPad avfilter_vf_scale2ref_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props,
},
{
.name = "ref",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_props_ref,
},
{ NULL }
};
/* The scale2ref filter: identical machinery to ff_vf_scale (same init,
 * uninit, format negotiation and command handling), but with a second
 * "ref" input/output pair whose dimensions drive the scaling. */
AVFilter ff_vf_scale2ref = {
.name = "scale2ref",
.description = NULL_IF_CONFIG_SMALL("Scale the input video size and/or convert the image format to the given reference."),
.init_dict = init_dict,
.uninit = uninit,
.query_formats = query_formats,
.priv_size = sizeof(ScaleContext),
.priv_class = &scale2ref_class,
.inputs = avfilter_vf_scale2ref_inputs,
.outputs = avfilter_vf_scale2ref_outputs,
.process_command = process_command,
};
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment