Commit 3e40b856 authored by Stefano Sabatini

lavfi: add interleave filters

parent df766673
Changelog
@@ -29,6 +29,7 @@ version <next>:
  become the default at the next libavformat major bump.
- decent native animated GIF encoding
- asetrate filter
- interleave filter

version 1.2:
doc/filters.texi
@@ -7475,6 +7475,54 @@ do not have exactly the same duration in the first file.
@end itemize

@section interleave, ainterleave
Temporally interleave frames from several inputs.
@code{interleave} works with video inputs, @code{ainterleave} with audio.
These filters read frames from several inputs and send the oldest
queued frame to the output.

Input streams must have well-defined, monotonically increasing frame
timestamp values.

In order to submit one frame to the output, these filters need to queue
at least one frame for each input, so they cannot work in case one
input has not yet terminated but will never receive new incoming
frames.

For example, consider the case when one input is a @code{select} filter
which always drops its input frames: the @code{interleave} filter will
keep reading from that input, but it will never be able to send new
frames to the output until that input sends an end-of-stream signal.

Also, depending on input synchronization, the filters will drop frames
if one input receives more frames than the other ones and its queue is
already full.

These filters accept the following options:
@table @option
@item nb_inputs, n
Set the number of different inputs. It is 2 by default.
@end table
@subsection Examples
@itemize
@item
Interleave frames belonging to different streams using @command{ffmpeg}:
@example
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
@end example
@item
Add a flickering blur effect:
@example
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
@end example
@end itemize
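
The @code{ainterleave} filter is used in the same way. For example, the
following command interleaves the frames of two audio streams (an
illustrative sketch; @file{in1.wav}, @file{in2.wav} and @file{out.wav}
are placeholder file names):
@example
ffmpeg -i in1.wav -i in2.wav -filter_complex "[0:a][1:a] ainterleave" out.wav
@end example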

@section showspectrum

Convert input audio to a video output, representing the audio frequency
libavfilter/Makefile
@@ -52,6 +52,7 @@ OBJS-$(CONFIG_SWSCALE) += lswsutils.o
OBJS-$(CONFIG_ACONVERT_FILTER) += af_aconvert.o
OBJS-$(CONFIG_AFADE_FILTER) += af_afade.o
OBJS-$(CONFIG_AFORMAT_FILTER) += af_aformat.o
OBJS-$(CONFIG_AINTERLEAVE_FILTER) += f_interleave.o
OBJS-$(CONFIG_ALLPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_AMERGE_FILTER) += af_amerge.o
OBJS-$(CONFIG_AMIX_FILTER) += af_amix.o
@@ -136,6 +137,7 @@ OBJS-$(CONFIG_HUE_FILTER) += vf_hue.o
OBJS-$(CONFIG_IDET_FILTER) += vf_idet.o
OBJS-$(CONFIG_IL_FILTER) += vf_il.o
OBJS-$(CONFIG_INTERLACE_FILTER) += vf_interlace.o
OBJS-$(CONFIG_INTERLEAVE_FILTER) += f_interleave.o
OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
libavfilter/allfilters.c
@@ -50,6 +50,7 @@ void avfilter_register_all(void)
#endif
    REGISTER_FILTER(AFADE, afade, af);
    REGISTER_FILTER(AFORMAT, aformat, af);
    REGISTER_FILTER(AINTERLEAVE, ainterleave, af);
    REGISTER_FILTER(ALLPASS, allpass, af);
    REGISTER_FILTER(AMERGE, amerge, af);
    REGISTER_FILTER(AMIX, amix, af);
@@ -134,6 +135,7 @@ void avfilter_register_all(void)
    REGISTER_FILTER(IDET, idet, vf);
    REGISTER_FILTER(IL, il, vf);
    REGISTER_FILTER(INTERLACE, interlace, vf);
    REGISTER_FILTER(INTERLEAVE, interleave, vf);
    REGISTER_FILTER(KERNDEINT, kerndeint, vf);
    REGISTER_FILTER(LUT, lut, vf);
    REGISTER_FILTER(LUTRGB, lutrgb, vf);
libavfilter/f_interleave.c (new file)
/*
* Copyright (c) 2013 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* audio and video interleaver
*/
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "bufferqueue.h"
#include "formats.h"
#include "internal.h"
#include "audio.h"
#include "video.h"

typedef struct {
    const AVClass *class;
    int nb_inputs;               ///< number of input pads, set by the nb_inputs/n option
    struct FFBufQueue *queues;   ///< one frame queue per input pad
} InterleaveContext;

#define OFFSET(x) offsetof(InterleaveContext, x)

#define DEFINE_OPTIONS(filt_name, flags_)                                   \
static const AVOption filt_name##_options[] = {                            \
    { "nb_inputs", "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
    { "n",         "set number of inputs", OFFSET(nb_inputs), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, .flags = flags_ }, \
    { NULL },                                                               \
}
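
/* Send the oldest queued frame (the one with the smallest rescaled pts) to
 * the output. Does nothing and returns 0 if some non-closed input still has
 * an empty queue; returns AVERROR_EOF once all inputs are closed and drained. */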
inline static int push_frame(AVFilterContext *ctx)
{
    InterleaveContext *interleave = ctx->priv;
    AVFrame *frame;
    int i, queue_idx = -1;
    int64_t pts_min = INT64_MAX;

    /* look for oldest frame */
    for (i = 0; i < ctx->nb_inputs; i++) {
        struct FFBufQueue *q = &interleave->queues[i];

        if (!q->available && !ctx->inputs[i]->closed)
            return 0;
        if (q->available) {
            frame = ff_bufqueue_peek(q, 0);
            if (frame->pts < pts_min) {
                pts_min = frame->pts;
                queue_idx = i;
            }
        }
    }

    /* all inputs are closed */
    if (queue_idx < 0)
        return AVERROR_EOF;

    frame = ff_bufqueue_get(&interleave->queues[queue_idx]);
    av_log(ctx, AV_LOG_DEBUG, "queue:%d -> frame time:%f\n",
           queue_idx, frame->pts * av_q2d(AV_TIME_BASE_Q));
    return ff_filter_frame(ctx->outputs[0], frame);
}
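
/* filter_frame callback shared by all input pads: rescale the frame pts to
 * AV_TIME_BASE_Q, queue the frame on the input it arrived on, then try to
 * push the oldest queued frame downstream. */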
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    InterleaveContext *interleave = ctx->priv;
    unsigned in_no = FF_INLINK_IDX(inlink);

    if (frame->pts == AV_NOPTS_VALUE) {
        av_log(ctx, AV_LOG_WARNING,
               "NOPTS value for input frame cannot be accepted, frame discarded\n");
        av_frame_free(&frame);
        return AVERROR_INVALIDDATA;
    }

    /* queue frame */
    frame->pts = av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q);
    av_log(ctx, AV_LOG_DEBUG, "frame pts:%f -> queue idx:%d available:%d\n",
           frame->pts * av_q2d(AV_TIME_BASE_Q), in_no, interleave->queues[in_no].available);
    ff_bufqueue_add(ctx, &interleave->queues[in_no], frame);

    return push_frame(ctx);
}
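
/* Allocate one frame queue per input and dynamically create the nb_inputs
 * input pads; each pad gets the same media type as the filter's statically
 * defined output pad. */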
static int init(AVFilterContext *ctx)
{
    InterleaveContext *interleave = ctx->priv;
    const AVFilterPad *outpad = &ctx->filter->outputs[0];
    int i;

    interleave->queues = av_calloc(interleave->nb_inputs, sizeof(interleave->queues[0]));
    if (!interleave->queues)
        return AVERROR(ENOMEM);

    for (i = 0; i < interleave->nb_inputs; i++) {
        AVFilterPad inpad = { 0 };

        inpad.name = av_asprintf("input%d", i);
        if (!inpad.name)
            return AVERROR(ENOMEM);
        inpad.type         = outpad->type;
        inpad.filter_frame = filter_frame;

        switch (outpad->type) {
        case AVMEDIA_TYPE_VIDEO:
            inpad.get_video_buffer = ff_null_get_video_buffer; break;
        case AVMEDIA_TYPE_AUDIO:
            inpad.get_audio_buffer = ff_null_get_audio_buffer; break;
        default:
            av_assert0(0);
        }
        ff_insert_inpad(ctx, i, &inpad);
    }

    return 0;
}
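
/* Discard any frames still queued, then free the queue array and the
 * dynamically allocated input pad names. */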
static void uninit(AVFilterContext *ctx)
{
    InterleaveContext *interleave = ctx->priv;
    int i;

    for (i = 0; i < ctx->nb_inputs; i++) {
        ff_bufqueue_discard_all(&interleave->queues[i]);
        av_freep(&ctx->input_pads[i].name);
    }
    av_freep(&interleave->queues);
}
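
/* The output link takes its parameters from the first input. Since video
 * frames are passed through unscaled, all video inputs must agree on size
 * and sample aspect ratio. */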
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink0 = ctx->inputs[0];
    int i;

    /* frame pts values are rescaled to AV_TIME_BASE_Q in filter_frame(),
     * so use that time base on the output link for both variants */
    outlink->time_base = AV_TIME_BASE_Q;

    if (outlink->type == AVMEDIA_TYPE_VIDEO) {
        outlink->w = inlink0->w;
        outlink->h = inlink0->h;
        outlink->sample_aspect_ratio = inlink0->sample_aspect_ratio;
        outlink->format = inlink0->format;
        outlink->frame_rate = (AVRational) {1, 0}; /* unknown or variable frame rate */
        for (i = 1; i < ctx->nb_inputs; i++) {
            AVFilterLink *inlink = ctx->inputs[i];

            if (outlink->w != inlink->w ||
                outlink->h != inlink->h ||
                outlink->sample_aspect_ratio.num != inlink->sample_aspect_ratio.num ||
                outlink->sample_aspect_ratio.den != inlink->sample_aspect_ratio.den) {
                av_log(ctx, AV_LOG_ERROR, "Parameters for input link %s "
                       "(size %dx%d, SAR %d:%d) do not match the corresponding "
                       "output link parameters (%dx%d, SAR %d:%d)\n",
                       ctx->input_pads[i].name, inlink->w, inlink->h,
                       inlink->sample_aspect_ratio.num,
                       inlink->sample_aspect_ratio.den,
                       outlink->w, outlink->h,
                       outlink->sample_aspect_ratio.num,
                       outlink->sample_aspect_ratio.den);
                return AVERROR(EINVAL);
            }
        }
    }

    outlink->flags |= FF_LINK_FLAG_REQUEST_LOOP;
    return 0;
}
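
/* Request a frame for each input whose queue is empty and which is not
 * closed yet; when no more input frames can be expected, push the oldest
 * queued frame to the output. */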
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    InterleaveContext *interleave = ctx->priv;
    int i, ret;

    for (i = 0; i < ctx->nb_inputs; i++) {
        if (!interleave->queues[i].available && !ctx->inputs[i]->closed) {
            ret = ff_request_frame(ctx->inputs[i]);
            if (ret != AVERROR_EOF)
                return ret;
        }
    }

    return push_frame(ctx);
}

#if CONFIG_INTERLEAVE_FILTER

DEFINE_OPTIONS(interleave, AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(interleave);

static const AVFilterPad interleave_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter avfilter_vf_interleave = {
    .name        = "interleave",
    .description = NULL_IF_CONFIG_SMALL("Temporally interleave video inputs."),
    .priv_size   = sizeof(InterleaveContext),
    .init        = init,
    .uninit      = uninit,
    .outputs     = interleave_outputs,
    .priv_class  = &interleave_class,
    .flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

#endif

#if CONFIG_AINTERLEAVE_FILTER

DEFINE_OPTIONS(ainterleave, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
AVFILTER_DEFINE_CLASS(ainterleave);

static const AVFilterPad ainterleave_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter avfilter_af_ainterleave = {
    .name        = "ainterleave",
    .description = NULL_IF_CONFIG_SMALL("Temporally interleave audio inputs."),
    .priv_size   = sizeof(InterleaveContext),
    .init        = init,
    .uninit      = uninit,
    .outputs     = ainterleave_outputs,
    .priv_class  = &ainterleave_class,
    .flags       = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

#endif
libavfilter/version.h
@@ -29,7 +29,7 @@
#include "libavutil/avutil.h"

#define LIBAVFILTER_VERSION_MAJOR 3
-#define LIBAVFILTER_VERSION_MINOR 59
+#define LIBAVFILTER_VERSION_MINOR 60
#define LIBAVFILTER_VERSION_MICRO 100

#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \