Commit 90622f9e authored by Paul B Mahol

avfilter/af_aecho: switch to activate

parent b741a84a
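
Background on the change: the activate API replaces the per-pad .filter_frame and .request_frame callbacks with a single per-filter activate() callback that the framework invokes whenever the filter may have work to do. Inside it, the filter pulls queued input with ff_inlink_consume_frame(), propagates EOF/status between its links, and signals demand for more data. Below is a rough, generic skeleton of that pattern, assuming the helpers declared in libavfilter's internal filters.h and a filter-specific filter_frame() routine; it is a sketch for orientation only, not the exact function added in this commit (aecho's version, shown in the diff, additionally keeps emitting fade-out samples after input EOF).

/*
 * Generic skeleton of an activate() callback for a filter with one input
 * and one output.  Sketch only: filter_frame() is assumed to be the
 * filter's own per-frame processing routine defined elsewhere in the file.
 */
#include "avfilter.h"
#include "filters.h"

static int filter_frame(AVFilterLink *inlink, AVFrame *in);

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *in;
    int ret, status;
    int64_t pts;

    /* If the output side was already closed, push that status back to the input. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Take one queued input frame, if any, and process it. */
    ret = ff_inlink_consume_frame(inlink, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);

    /* No frame queued: if the input reached EOF, forward that to the output. */
    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        ff_outlink_set_status(outlink, status, pts);
        return 0;
    }

    /* Otherwise relay the output's demand for data to the input. */
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

The diff below wires this up by dropping the .filter_frame and .request_frame pad callbacks and setting .activate on the AVFilter instead.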
@@ -24,6 +24,7 @@
 #include "libavutil/samplefmt.h"
 #include "avfilter.h"
 #include "audio.h"
+#include "filters.h"
 #include "internal.h"
 
 typedef struct AudioEchoContext {
@@ -36,6 +37,7 @@ typedef struct AudioEchoContext {
     uint8_t **delayptrs;
     int max_samples, fade_out;
     int *samples;
+    int eof;
     int64_t next_pts;
 
     void (*echo_samples)(struct AudioEchoContext *ctx, uint8_t **delayptrs,
@@ -302,15 +304,9 @@ static int request_frame(AVFilterLink *outlink)
 {
     AVFilterContext *ctx = outlink->src;
     AudioEchoContext *s = ctx->priv;
-    int ret;
-
-    ret = ff_request_frame(ctx->inputs[0]);
-
-    if (ret == AVERROR_EOF && !ctx->is_disabled && s->fade_out) {
-        int nb_samples = FFMIN(s->fade_out, 2048);
-        AVFrame *frame;
-
-        frame = ff_get_audio_buffer(outlink, nb_samples);
-        if (!frame)
-            return AVERROR(ENOMEM);
-        s->fade_out -= nb_samples;
+    int nb_samples = FFMIN(s->fade_out, 2048);
+    AVFrame *frame = ff_get_audio_buffer(outlink, nb_samples);
+
+    if (!frame)
+        return AVERROR(ENOMEM);
+    s->fade_out -= nb_samples;
@@ -328,16 +324,45 @@ static int request_frame(AVFilterLink *outlink)
     s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
 
-        return ff_filter_frame(outlink, frame);
-    }
-
-    return ret;
+    return ff_filter_frame(outlink, frame);
+}
+
+static int activate(AVFilterContext *ctx)
+{
+    AVFilterLink *inlink = ctx->inputs[0];
+    AVFilterLink *outlink = ctx->outputs[0];
+    AudioEchoContext *s = ctx->priv;
+    AVFrame *in;
+    int ret, status;
+    int64_t pts;
+
+    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
+
+    ret = ff_inlink_consume_frame(inlink, &in);
+    if (ret < 0)
+        return ret;
+    if (ret > 0)
+        return filter_frame(inlink, in);
+
+    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
+        if (status == AVERROR_EOF)
+            s->eof = 1;
+    }
+
+    if (s->eof && s->fade_out <= 0) {
+        ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
+        return 0;
+    }
+
+    if (!s->eof)
+        FF_FILTER_FORWARD_WANTED(outlink, inlink);
+
+    return request_frame(outlink);
 }
 
 static const AVFilterPad aecho_inputs[] = {
     {
         .name = "default",
         .type = AVMEDIA_TYPE_AUDIO,
-        .filter_frame = filter_frame,
     },
     { NULL }
 };
@@ -345,7 +370,6 @@ static const AVFilterPad aecho_inputs[] = {
 static const AVFilterPad aecho_outputs[] = {
     {
         .name = "default",
-        .request_frame = request_frame,
         .config_props = config_output,
         .type = AVMEDIA_TYPE_AUDIO,
     },
@@ -359,6 +383,7 @@ AVFilter ff_af_aecho = {
     .priv_size = sizeof(AudioEchoContext),
     .priv_class = &aecho_class,
     .init = init,
+    .activate = activate,
     .uninit = uninit,
     .inputs = aecho_inputs,
     .outputs = aecho_outputs,