Commit b6e7041f authored by Michael Niedermayer

Merge commit 'b35e5d98'

* commit 'b35e5d98':
  doc: improve documentation for the asyncts filter first_pts option
  asyncts: fix the asyncts behavior when using the first_pts option

Conflicts:
	libavfilter/af_asyncts.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 593f5c0f b35e5d98
doc/filters.texi
@@ -782,11 +782,12 @@ Maximum compensation in samples per second. Relevant only with compensate=1.
 Default value 500.
 
 @item first_pts
-Assume the first pts should be this value.
+Assume the first pts should be this value. The time base is 1 / sample rate.
 This allows for padding/trimming at the start of stream. By default, no
 assumption is made about the first frame's expected pts, so no padding or
 trimming is done. For example, this could be set to 0 to pad the beginning with
-silence if an audio stream starts after the video stream.
+silence if an audio stream starts after the video stream or to trim any samples
+with a negative pts due to encoder delay.
 
 @end table
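As a worked illustration of the option documented above (not part of the commit): with the time base of 1 / sample rate, first_pts=0 at 48 kHz (e.g. something like -af asyncts=first_pts=0) means the output is expected to start at sample 0, so a first frame arriving at pts 960 is preceded by 960 samples (20 ms) of silence, while a first frame at pts -1024 (encoder delay) has its first 1024 samples trimmed. A minimal standalone C sketch of that arithmetic, using a hypothetical helper name:

#include <stdio.h>
#include <inttypes.h>

/* Hypothetical helper mirroring the documented first_pts behaviour:
 * timestamps are counted in samples (time base = 1 / sample rate).
 * Result > 0 means prepend that many samples of silence,
 * result < 0 means trim that many samples from the start. */
static int64_t start_adjustment(int64_t first_pts, int64_t stream_start_pts)
{
    return stream_start_pts - first_pts;
}

int main(void)
{
    /* audio starts 20 ms after the video at 48 kHz: pad 960 samples */
    printf("pad  %" PRId64 " samples\n", start_adjustment(0, 960));
    /* encoder delay puts the first packet at pts -1024: trim 1024 samples */
    printf("trim %" PRId64 " samples\n", -start_adjustment(0, -1024));
    return 0;
}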
libavfilter/af_asyncts.c
@@ -33,6 +33,8 @@ typedef struct ASyncContext {
     AVAudioResampleContext *avr;
     int64_t pts;            ///< timestamp in samples of the first sample in fifo
     int min_delta;          ///< pad/trim min threshold in samples
+    int first_frame;        ///< 1 until filter_frame() has processed at least 1 frame with a pts != AV_NOPTS_VALUE
+    int64_t first_pts;      ///< user-specified first expected pts, in samples
 
     /* options */
     int resample;
@@ -51,7 +53,7 @@ static const AVOption asyncts_options[] = {
     { "min_delta",  "Minimum difference between timestamps and audio data "
                     "(in seconds) to trigger padding/trimmin the data.", OFFSET(min_delta_sec), AV_OPT_TYPE_FLOAT, { .dbl = 0.1 }, 0, INT_MAX, A|F },
     { "max_comp",   "Maximum compensation in samples per second.", OFFSET(max_comp), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, A|F },
-    { "first_pts",  "Assume the first pts should be this value.", OFFSET(pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A|F },
+    { "first_pts",  "Assume the first pts should be this value.", OFFSET(first_pts), AV_OPT_TYPE_INT64, { .i64 = AV_NOPTS_VALUE }, INT64_MIN, INT64_MAX, A|F },
     { NULL },
 };
@@ -69,6 +71,9 @@ static int init(AVFilterContext *ctx, const char *args)
         return ret;
     av_opt_free(s);
 
+    s->pts = AV_NOPTS_VALUE;
+    s->first_frame = 1;
+
     return 0;
 }
@@ -116,6 +121,20 @@ static int64_t get_delay(ASyncContext *s)
     return avresample_available(s->avr) + avresample_get_delay(s->avr);
 }
 
+static void handle_trimming(AVFilterContext *ctx)
+{
+    ASyncContext *s = ctx->priv;
+
+    if (s->pts < s->first_pts) {
+        int delta = FFMIN(s->first_pts - s->pts, avresample_available(s->avr));
+        av_log(ctx, AV_LOG_VERBOSE, "Trimming %d samples from start\n",
+               delta);
+        avresample_read(s->avr, NULL, delta);
+        s->pts += delta;
+    } else if (s->first_frame)
+        s->pts = s->first_pts;
+}
+
 static int request_frame(AVFilterLink *link)
 {
     AVFilterContext *ctx = link->src;
@@ -128,7 +147,11 @@ static int request_frame(AVFilterLink *link)
     ret = ff_request_frame(ctx->inputs[0]);
 
     /* flush the fifo */
-    if (ret == AVERROR_EOF && (nb_samples = get_delay(s))) {
+    if (ret == AVERROR_EOF) {
+        if (s->first_pts != AV_NOPTS_VALUE)
+            handle_trimming(ctx);
+
+        if (nb_samples = get_delay(s)) {
         AVFilterBufferRef *buf = ff_get_audio_buffer(link, AV_PERM_WRITE,
                                                      nb_samples);
         if (!buf)
@@ -142,6 +165,7 @@ static int request_frame(AVFilterLink *link)
 
         buf->pts = s->pts;
         return ff_filter_frame(link, buf);
+        }
     }
 
     return ret;
@@ -179,12 +203,18 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
         return write_to_fifo(s, buf);
     }
 
+    if (s->first_pts != AV_NOPTS_VALUE) {
+        handle_trimming(ctx);
+        if (!avresample_available(s->avr))
+            return write_to_fifo(s, buf);
+    }
+
     /* when we have two timestamps, compute how many samples would we have
      * to add/remove to get proper sync between data and timestamps */
     delta    = pts - s->pts - get_delay(s);
     out_size = avresample_available(s->avr);
 
-    if (labs(delta) > s->min_delta) {
+    if (labs(delta) > s->min_delta || (s->first_frame && delta)) {
         av_log(ctx, AV_LOG_VERBOSE, "Discontinuity - %"PRId64" samples.\n", delta);
         out_size = av_clipl_int32((int64_t)out_size + delta);
     } else {
@@ -204,18 +234,33 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
             goto fail;
         }
 
-        avresample_read(s->avr, buf_out->extended_data, out_size);
-        buf_out->pts = s->pts;
-
-        if (delta > 0) {
-            av_samples_set_silence(buf_out->extended_data, out_size - delta,
-                                   delta, nb_channels, buf->format);
+        if (s->first_frame && delta > 0) {
+            int ch;
+
+            av_samples_set_silence(buf_out->extended_data, 0, delta,
+                                   nb_channels, buf->format);
+
+            for (ch = 0; ch < nb_channels; ch++)
+                buf_out->extended_data[ch] += delta;
+
+            avresample_read(s->avr, buf_out->extended_data, out_size);
+
+            for (ch = 0; ch < nb_channels; ch++)
+                buf_out->extended_data[ch] -= delta;
+        } else {
+            avresample_read(s->avr, buf_out->extended_data, out_size);
+
+            if (delta > 0) {
+                av_samples_set_silence(buf_out->extended_data, out_size - delta,
+                                       delta, nb_channels, buf->format);
+            }
         }
+        buf_out->pts = s->pts;
 
         ret = ff_filter_frame(outlink, buf_out);
         if (ret < 0)
             goto fail;
         s->got_output = 1;
-    } else {
+    } else if (avresample_available(s->avr)) {
         av_log(ctx, AV_LOG_WARNING, "Non-monotonous timestamps, dropping "
                "whole buffer.\n");
     }
@@ -227,6 +272,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
 
     ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
                              buf->linesize[0], buf->audio->nb_samples);
+    s->first_frame = 0;
 
 fail:
     avfilter_unref_buffer(buf);
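A side note on the new first_frame path above (the sketch below is not the filter's code): to prepend delta samples of silence, the commit silences the head of the output buffer, temporarily advances each plane pointer past that region so the FIFO read lands after the silence, then restores the pointers before the buffer is sent downstream. A minimal standalone version of that pointer-offset idea, operating in samples on planar float buffers with a dummy stand-in for the resampler read:

#include <stdio.h>
#include <string.h>

#define CHANNELS 2

/* Stand-in for reading nb samples per channel out of a FIFO/resampler. */
static void read_samples(float **dst, int nb)
{
    for (int ch = 0; ch < CHANNELS; ch++)
        for (int i = 0; i < nb; i++)
            dst[ch][i] = 0.5f;            /* dummy audio payload */
}

int main(void)
{
    int delta    = 4;                     /* samples of leading silence  */
    int nb_in    = 8;                     /* samples read from the fifo  */
    int out_size = delta + nb_in;         /* total samples in the output */
    float bufs[CHANNELS][12];
    float *planes[CHANNELS] = { bufs[0], bufs[1] };

    /* 1. silence the head of every plane */
    for (int ch = 0; ch < CHANNELS; ch++)
        memset(planes[ch], 0, delta * sizeof(float));

    /* 2. advance the plane pointers so the payload lands after the silence */
    for (int ch = 0; ch < CHANNELS; ch++)
        planes[ch] += delta;
    read_samples(planes, nb_in);

    /* 3. restore the pointers before handing the buffer downstream */
    for (int ch = 0; ch < CHANNELS; ch++)
        planes[ch] -= delta;

    for (int i = 0; i < out_size; i++)
        printf("%4.1f%c", planes[0][i], i == out_size - 1 ? '\n' : ' ');
    return 0;
}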