Commit d92e0848 authored by Nicolas George's avatar Nicolas George

lavfi: remove astreamsync.

It was only useful for very specific testing purposes
and appears to be currently partially broken.
parent 47ea04ff
...@@ -347,7 +347,6 @@ Filters: ...@@ -347,7 +347,6 @@ Filters:
af_aphaser.c Paul B Mahol
af_aresample.c Michael Niedermayer
af_astats.c Paul B Mahol
af_astreamsync.c Nicolas George
af_atempo.c Pavel Koshevoy
af_biquads.c Paul B Mahol
af_chorus.c Paul B Mahol
......
...@@ -1170,42 +1170,6 @@ Number of occasions (not the number of samples) that the signal attained either ...@@ -1170,42 +1170,6 @@ Number of occasions (not the number of samples) that the signal attained either
Overall bit depth of audio. Number of bits used for each sample.
@end table
@section astreamsync
Forward two audio streams and control the order the buffers are forwarded.
The filter accepts the following options:
@table @option
@item expr, e
Set the expression deciding which stream should be
forwarded next: if the result is negative, the first stream is forwarded; if
the result is positive or zero, the second stream is forwarded. It can use
the following variables:
@table @var
@item b1 b2
number of buffers forwarded so far on each stream
@item s1 s2
number of samples forwarded so far on each stream
@item t1 t2
current timestamp of each stream
@end table
The default value is @code{t1-t2}, which means to always forward the stream
that has a smaller timestamp.
@end table
@subsection Examples
Stress-test @code{amerge} by randomly sending buffers on the wrong
input, while avoiding too much of a desynchronization:
@example
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
[a2] [b2] amerge
@end example
@section asyncts
Synchronize audio data with timestamps by squeezing/stretching it and/or
......
...@@ -51,7 +51,6 @@ OBJS-$(CONFIG_ASETTB_FILTER) += settb.o ...@@ -51,7 +51,6 @@ OBJS-$(CONFIG_ASETTB_FILTER) += settb.o
OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o
OBJS-$(CONFIG_ASPLIT_FILTER) += split.o
OBJS-$(CONFIG_ASTATS_FILTER) += af_astats.o
OBJS-$(CONFIG_ASTREAMSYNC_FILTER) += af_astreamsync.o
OBJS-$(CONFIG_ASYNCTS_FILTER) += af_asyncts.o
OBJS-$(CONFIG_ATEMPO_FILTER) += af_atempo.o
OBJS-$(CONFIG_ATRIM_FILTER) += trim.o
......
/*
* Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Stream (de)synchronization filter
*/
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
#define QUEUE_SIZE 16
static const char * const var_names[] = {
"b1", "b2",
"s1", "s2",
"t1", "t2",
NULL
};
enum var_name {
VAR_B1, VAR_B2,
VAR_S1, VAR_S2,
VAR_T1, VAR_T2,
VAR_NB
};
typedef struct {
const AVClass *class;
AVExpr *expr;
char *expr_str;
double var_values[VAR_NB];
struct buf_queue {
AVFrame *buf[QUEUE_SIZE];
unsigned tail, nb;
/* buf[tail] is the oldest,
buf[(tail + nb) % QUEUE_SIZE] is where the next is added */
} queue[2];
int req[2];
int next_out;
int eof; /* bitmask, one bit for each stream */
} AStreamSyncContext;
#define OFFSET(x) offsetof(AStreamSyncContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption astreamsync_options[] = {
{ "expr", "set stream selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "t1-t2" }, .flags = FLAGS },
{ "e", "set stream selection expression", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "t1-t2" }, .flags = FLAGS },
{ NULL }
};
AVFILTER_DEFINE_CLASS(astreamsync);
static av_cold int init(AVFilterContext *ctx)
{
AStreamSyncContext *as = ctx->priv;
int r, i;
r = av_expr_parse(&as->expr, as->expr_str, var_names,
NULL, NULL, NULL, NULL, 0, ctx);
if (r < 0) {
av_log(ctx, AV_LOG_ERROR, "Error in expression \"%s\"\n", as->expr_str);
return r;
}
for (i = 0; i < 42; i++)
av_expr_eval(as->expr, as->var_values, NULL); /* exercize prng */
return 0;
}
static int query_formats(AVFilterContext *ctx)
{
int i, ret;
AVFilterFormats *formats, *rates;
AVFilterChannelLayouts *layouts;
for (i = 0; i < 2; i++) {
formats = ctx->inputs[i]->in_formats;
if ((ret = ff_formats_ref(formats, &ctx->inputs[i]->out_formats)) < 0 ||
(ret = ff_formats_ref(formats, &ctx->outputs[i]->in_formats)) < 0)
return ret;
rates = ff_all_samplerates();
if ((ret = ff_formats_ref(rates, &ctx->inputs[i]->out_samplerates)) < 0 ||
(ret = ff_formats_ref(rates, &ctx->outputs[i]->in_samplerates)) < 0)
return ret;
layouts = ctx->inputs[i]->in_channel_layouts;
if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0 ||
(ret = ff_channel_layouts_ref(layouts, &ctx->outputs[i]->in_channel_layouts)) < 0)
return ret;
}
return 0;
}
static int config_output(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
int id = outlink == ctx->outputs[1];
outlink->sample_rate = ctx->inputs[id]->sample_rate;
outlink->time_base = ctx->inputs[id]->time_base;
return 0;
}
static int send_out(AVFilterContext *ctx, int out_id)
{
AStreamSyncContext *as = ctx->priv;
struct buf_queue *queue = &as->queue[out_id];
AVFrame *buf = queue->buf[queue->tail];
int ret;
queue->buf[queue->tail] = NULL;
as->var_values[VAR_B1 + out_id]++;
as->var_values[VAR_S1 + out_id] += buf->nb_samples;
if (buf->pts != AV_NOPTS_VALUE)
as->var_values[VAR_T1 + out_id] =
av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
as->var_values[VAR_T1 + out_id] += buf->nb_samples /
(double)ctx->inputs[out_id]->sample_rate;
ret = ff_filter_frame(ctx->outputs[out_id], buf);
queue->nb--;
queue->tail = (queue->tail + 1) % QUEUE_SIZE;
if (as->req[out_id])
as->req[out_id]--;
return ret;
}
static void send_next(AVFilterContext *ctx)
{
AStreamSyncContext *as = ctx->priv;
int i;
while (1) {
if (!as->queue[as->next_out].nb)
break;
send_out(ctx, as->next_out);
if (!as->eof)
as->next_out = av_expr_eval(as->expr, as->var_values, NULL) >= 0;
}
for (i = 0; i < 2; i++)
if (as->queue[i].nb == QUEUE_SIZE)
send_out(ctx, i);
}
static int request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
AStreamSyncContext *as = ctx->priv;
int id = outlink == ctx->outputs[1];
as->req[id]++;
while (as->req[id] && !(as->eof & (1 << id))) {
if (as->queue[as->next_out].nb) {
send_next(ctx);
} else {
as->eof |= 1 << as->next_out;
ff_request_frame(ctx->inputs[as->next_out]);
if (as->eof & (1 << as->next_out))
as->next_out = !as->next_out;
}
}
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
AStreamSyncContext *as = ctx->priv;
int id = inlink == ctx->inputs[1];
as->queue[id].buf[(as->queue[id].tail + as->queue[id].nb++) % QUEUE_SIZE] =
insamples;
as->eof &= ~(1 << id);
send_next(ctx);
return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
AStreamSyncContext *as = ctx->priv;
av_expr_free(as->expr);
as->expr = NULL;
}
static const AVFilterPad astreamsync_inputs[] = {
{
.name = "in1",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},{
.name = "in2",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
static const AVFilterPad astreamsync_outputs[] = {
{
.name = "out1",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
.request_frame = request_frame,
},{
.name = "out2",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_output,
.request_frame = request_frame,
},
{ NULL }
};
AVFilter ff_af_astreamsync = {
.name = "astreamsync",
.description = NULL_IF_CONFIG_SMALL("Copy two streams of audio data "
"in a configurable order."),
.priv_size = sizeof(AStreamSyncContext),
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = astreamsync_inputs,
.outputs = astreamsync_outputs,
.priv_class = &astreamsync_class,
};
...@@ -73,7 +73,6 @@ void avfilter_register_all(void) ...@@ -73,7 +73,6 @@ void avfilter_register_all(void)
REGISTER_FILTER(ASHOWINFO, ashowinfo, af);
REGISTER_FILTER(ASPLIT, asplit, af);
REGISTER_FILTER(ASTATS, astats, af);
REGISTER_FILTER(ASTREAMSYNC, astreamsync, af);
REGISTER_FILTER(ASYNCTS, asyncts, af);
REGISTER_FILTER(ATEMPO, atempo, af);
REGISTER_FILTER(ATRIM, atrim, af);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment