Commit 68e23c08 authored by Stefano Sabatini, committed by Anton Khirnov

scale: make the filter parametric

Make the filter accept parametric expressions for the output video
size.
Signed-off-by: Stefano Sabatini <stefano.sabatini-lala@poste.it>
Signed-off-by: Anton Khirnov <anton@khirnov.net>
parent b137bf7d
doc/filters.texi
@@ -772,13 +772,33 @@ can be used to test the monowhite pixel format descriptor definition.
 
 Scale the input video to @var{width}:@var{height} and/or convert the image format.
 
-For example the command:
+The parameters @var{width} and @var{height} are expressions containing
+the following constants:
 
-@example
-./ffmpeg -i in.avi -vf "scale=200:100" out.avi
-@end example
+@table @option
+@item E, PI, PHI
+the corresponding mathematical approximated values of e (Euler's
+number), pi, and phi (the golden ratio)
 
-will scale the input video to a size of 200x100.
+@item in_w, in_h
+the input width and height
+
+@item iw, ih
+same as @var{in_w} and @var{in_h}
+
+@item out_w, out_h
+the output (scaled) width and height
+
+@item ow, oh
+same as @var{out_w} and @var{out_h}
+
+@item a
+input display aspect ratio, same as @var{iw} / @var{ih}
+
+@item hsub, vsub
+horizontal and vertical chroma subsample values. For example, for the
+pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
+@end table
 
 If the input image format is different from the format requested by
 the next filter, the scale filter will convert the input to the
@@ -793,6 +813,36 @@ ratio of the input image.
 
 The default value of @var{width} and @var{height} is 0.
 
+Some examples follow:
+@example
+# scale the input video to a size of 200x100
+scale=200:100
+
+# scale the input to 2x
+scale=2*iw:2*ih
+
+# the above is the same as
+scale=2*in_w:2*in_h
+
+# scale the input to half its size
+scale=iw/2:ih/2
+
+# increase the width, and set the height to the same value as the output width
+scale=3/2*iw:ow
+
+# seek Greek harmony
+scale=iw:1/PHI*iw
+scale=ih*PHI:ih
+
+# set the height to 3/5 of the input height, and the width to 3/2 of the output height
+scale=3/2*oh:3/5*ih
+
+# increase the size, making it a multiple of the chroma subsample values
+scale="trunc(3/2*iw/hsub)*hsub:trunc(3/2*ih/vsub)*vsub"
+
+# increase the width to a maximum of 500 pixels, keeping the same input aspect ratio
+scale='min(500\, iw*3/2):-1'
+@end example
+
 @anchor{setdar}
 @section setdar
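A note on the hsub/vsub constants documented above: they come straight from libavutil's pixel format descriptors, computed as 1 << log2_chroma_w and 1 << log2_chroma_h (the same computation config_props() performs in the C changes below). The following is a minimal standalone sketch, not part of the commit: the main() wrapper and the hard-coded "yuv422p" are only illustrative, and it assumes a libavutil of this vintage where av_get_pix_fmt() and the av_pix_fmt_descriptors[] table are available.

#include <stdio.h>
#include "libavutil/pixdesc.h"

int main(void)
{
    /* Look up the descriptor of the pixel format named in the docs above. */
    enum PixelFormat fmt = av_get_pix_fmt("yuv422p");
    if (fmt == PIX_FMT_NONE)
        return 1;

    /* Chroma subsampling factors, as exposed to the width/height expressions. */
    int hsub = 1 << av_pix_fmt_descriptors[fmt].log2_chroma_w;
    int vsub = 1 << av_pix_fmt_descriptors[fmt].log2_chroma_h;

    printf("hsub=%d vsub=%d\n", hsub, vsub); /* yuv422p -> hsub=2 vsub=1 */
    return 0;
}

For yuv420p both values would be 2, which is why the chroma-safe example above truncates each dimension to a multiple of hsub and vsub.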
libavfilter/vf_scale.c
@@ -24,9 +24,39 @@
  */
 
 #include "avfilter.h"
+#include "libavutil/avstring.h"
+#include "libavutil/eval.h"
 #include "libavutil/pixdesc.h"
 #include "libswscale/swscale.h"
 
+static const char *var_names[] = {
+    "PI",
+    "PHI",
+    "E",
+    "in_w",   "iw",
+    "in_h",   "ih",
+    "out_w",  "ow",
+    "out_h",  "oh",
+    "a",
+    "hsub",
+    "vsub",
+    NULL
+};
+
+enum var_name {
+    VAR_PI,
+    VAR_PHI,
+    VAR_E,
+    VAR_IN_W,   VAR_IW,
+    VAR_IN_H,   VAR_IH,
+    VAR_OUT_W,  VAR_OW,
+    VAR_OUT_H,  VAR_OH,
+    VAR_A,
+    VAR_HSUB,
+    VAR_VSUB,
+    VARS_NB
+};
+
 typedef struct {
     struct SwsContext *sws;     ///< software scaler context
@@ -41,6 +71,9 @@ typedef struct {
     int hsub, vsub;             ///< chroma subsampling
     int slice_y;                ///< top of current output slice
     int input_is_pal;           ///< set to 1 if the input format is paletted
+
+    char w_expr[256];           ///< width expression string
+    char h_expr[256];           ///< height expression string
 } ScaleContext;
 
 static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
@@ -48,21 +81,16 @@ static av_cold int init(AVFilterContext *ctx, const char *args, void *opaque)
     ScaleContext *scale = ctx->priv;
     const char *p;
 
+    av_strlcpy(scale->w_expr, "iw", sizeof(scale->w_expr));
+    av_strlcpy(scale->h_expr, "ih", sizeof(scale->h_expr));
+
     scale->flags = SWS_BILINEAR;
     if (args) {
-        sscanf(args, "%d:%d", &scale->w, &scale->h);
+        sscanf(args, "%255[^:]:%255[^:]", scale->w_expr, scale->h_expr);
         p = strstr(args,"flags=");
         if (p) scale->flags = strtoul(p+6, NULL, 0);
     }
 
-    /* sanity check params */
-    if (scale->w < -1 || scale->h < -1) {
-        av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
-        return AVERROR(EINVAL);
-    }
-    if (scale->w == -1 && scale->h == -1)
-        scale->w = scale->h = 0;
-
     return 0;
 }
@@ -109,6 +137,48 @@ static int config_props(AVFilterLink *outlink)
     AVFilterLink *inlink = outlink->src->inputs[0];
     ScaleContext *scale = ctx->priv;
     int64_t w, h;
+    double var_values[VARS_NB], res;
+    char *expr;
+    int ret;
+
+    var_values[VAR_PI]    = M_PI;
+    var_values[VAR_PHI]   = M_PHI;
+    var_values[VAR_E]     = M_E;
+    var_values[VAR_IN_W]  = var_values[VAR_IW] = inlink->w;
+    var_values[VAR_IN_H]  = var_values[VAR_IH] = inlink->h;
+    var_values[VAR_OUT_W] = var_values[VAR_OW] = NAN;
+    var_values[VAR_OUT_H] = var_values[VAR_OH] = NAN;
+    var_values[VAR_A]     = (float) inlink->w / inlink->h;
+    var_values[VAR_HSUB]  = 1<<av_pix_fmt_descriptors[inlink->format].log2_chroma_w;
+    var_values[VAR_VSUB]  = 1<<av_pix_fmt_descriptors[inlink->format].log2_chroma_h;
+
+    /* evaluate width and height */
+    av_expr_parse_and_eval(&res, (expr = scale->w_expr),
+                           var_names, var_values,
+                           NULL, NULL, NULL, NULL, NULL, 0, ctx);
+    scale->w = var_values[VAR_OUT_W] = var_values[VAR_OW] = res;
+    if ((ret = av_expr_parse_and_eval(&res, (expr = scale->h_expr),
+                                      var_names, var_values,
+                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+        goto fail;
+    scale->h = var_values[VAR_OUT_H] = var_values[VAR_OH] = res;
+    /* evaluate again the width, as it may depend on the output height */
+    if ((ret = av_expr_parse_and_eval(&res, (expr = scale->w_expr),
+                                      var_names, var_values,
+                                      NULL, NULL, NULL, NULL, NULL, 0, ctx)) < 0)
+        goto fail;
+    scale->w = res;
+
+    w = scale->w;
+    h = scale->h;
+
+    /* sanity check params */
+    if (w < -1 || h < -1) {
+        av_log(ctx, AV_LOG_ERROR, "Size values less than -1 are not acceptable.\n");
+        return AVERROR(EINVAL);
+    }
+    if (w == -1 && h == -1)
+        scale->w = scale->h = 0;
+
     if (!(w = scale->w))
         w = inlink->w;
@@ -142,6 +212,11 @@ static int config_props(AVFilterLink *outlink)
         return AVERROR(EINVAL);
 
     return 0;
+
+fail:
+    av_log(NULL, AV_LOG_ERROR,
+           "Error when evaluating the expression '%s'\n", expr);
+    return ret;
 }
 
 static void start_frame(AVFilterLink *link, AVFilterBufferRef *picref)
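To summarize the new config_props() flow: the known quantities are loaded into var_values[], the width expression is evaluated, then the height expression (which may now refer to ow/out_w), and finally the width expression once more so that it can refer to oh/out_h; out_w/ow and out_h/oh start out as NAN, so a width such as 3/2*oh only becomes meaningful on that second pass. The evaluation itself is av_expr_parse_and_eval() from libavutil/eval.h, fed with parallel name/value arrays. Below is a minimal standalone sketch of that pattern, not part of the commit; the expression string and the 320x240 input size are made up for illustration.

#include <stdio.h>
#include "libavutil/eval.h"

int main(void)
{
    /* Parallel arrays: names are NULL-terminated, values match by index. */
    static const char *names[] = { "in_w", "iw", "in_h", "ih", NULL };
    double values[]            = {  320,    320,  240,    240 };
    double res;

    /* Same call shape as in config_props(): no custom functions,
     * no opaque pointer, no logging context. */
    int ret = av_expr_parse_and_eval(&res, "3/2*iw",
                                     names, values,
                                     NULL, NULL, NULL, NULL,
                                     NULL, 0, NULL);
    if (ret < 0)
        return 1;

    printf("width = %d\n", (int)res); /* prints: width = 480 */
    return 0;
}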