Commit a05a44e2 authored by Michael Niedermayer

Merge commit '7e350379'

* commit '7e350379':
  lavfi: switch to AVFrame.

Conflicts:
	doc/filters.texi
	libavfilter/af_ashowinfo.c
	libavfilter/audio.c
	libavfilter/avfilter.c
	libavfilter/avfilter.h
	libavfilter/buffersink.c
	libavfilter/buffersrc.c
	libavfilter/buffersrc.h
	libavfilter/f_select.c
	libavfilter/f_setpts.c
	libavfilter/fifo.c
	libavfilter/split.c
	libavfilter/src_movie.c
	libavfilter/version.h
	libavfilter/vf_aspect.c
	libavfilter/vf_bbox.c
	libavfilter/vf_blackframe.c
	libavfilter/vf_delogo.c
	libavfilter/vf_drawbox.c
	libavfilter/vf_drawtext.c
	libavfilter/vf_fade.c
	libavfilter/vf_fieldorder.c
	libavfilter/vf_fps.c
	libavfilter/vf_frei0r.c
	libavfilter/vf_gradfun.c
	libavfilter/vf_hqdn3d.c
	libavfilter/vf_lut.c
	libavfilter/vf_overlay.c
	libavfilter/vf_pad.c
	libavfilter/vf_scale.c
	libavfilter/vf_showinfo.c
	libavfilter/vf_transpose.c
	libavfilter/vf_vflip.c
	libavfilter/vf_yadif.c
	libavfilter/video.c
	libavfilter/vsrc_testsrc.c
	libavfilter/yadif.h

Following are notes about the merge authorship and various technical details.

Michael Niedermayer:
  * Main merge operation, notably avfilter.c and video.c
  * Switch to AVFrame:
    - afade
    - anullsrc
    - apad
    - aresample
    - blackframe
    - deshake
    - idet
    - il
    - mandelbrot
    - mptestsrc
    - noise
    - setfield
    - smartblur
    - tinterlace
  * various merge changes and fixes in:
    - ashowinfo
    - blackdetect
    - field
    - fps
    - select
    - testsrc
    - yadif

Nicolas George:
  * Switch to AVFrame:
    - make rawdec work with refcounted frames. Adapted from commit
      759001c5 by Anton Khirnov.
      Also, fix the use of || instead of | in a flags check.
    - make buffer sink and src, audio and video work all together

Clément Bœsch:
  * Switch to AVFrame:
    - aevalsrc
    - alphaextract
    - blend
    - cellauto
    - colormatrix
    - concat
    - earwax
    - ebur128
    - edgedetect
    - geq
    - histeq
    - histogram
    - hue
    - kerndeint
    - life
    - movie
    - mp (with the help of Michael)
    - overlay
    - pad
    - pan
    - pp
    - removelogo
    - sendcmd
    - showspectrum
    - showwaves
    - silencedetect
    - stereo3d
    - subtitles
    - super2xsai
    - swapuv
    - thumbnail
    - tile

Hendrik Leppkes:
  * Switch to AVFrame:
    - aconvert
    - amerge
    - asetnsamples
    - atempo
    - biquads

Matthieu Bouron:
  * Switch to AVFrame:
    - alphamerge
    - decimate
    - volumedetect

Stefano Sabatini:
  * Switch to AVFrame:
    - astreamsync
    - flite
    - framestep
Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
Signed-off-by: Nicolas George <nicolas.george@normalesup.org>
Signed-off-by: Clément Bœsch <ubitux@gmail.com>
Signed-off-by: Hendrik Leppkes <h.leppkes@gmail.com>
Signed-off-by: Matthieu Bouron <matthieu.bouron@gmail.com>
Signed-off-by: Stefano Sabatini <stefasab@gmail.com>
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 586ae70b 7e350379
......@@ -2086,9 +2086,6 @@ pixel format "yuv422p" @var{hsub} is 2 and @var{vsub} is 1.
@item n
the number of input frame, starting from 0
@item pos
the position in the file of the input frame, NAN if unknown
@item t
timestamp expressed in seconds, NAN if the input timestamp is unknown
......
......@@ -1627,8 +1627,8 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
(AVRational){1, ist->st->codec->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
(AVRational){1, ist->st->codec->sample_rate});
for (i = 0; i < ist->nb_filters; i++)
av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame,
AV_BUFFERSRC_FLAG_PUSH);
av_buffersrc_write_frame(ist->filters[i]->filter, decoded_frame);
/* TODO re-add AV_BUFFERSRC_FLAG_PUSH */
decoded_frame->pts = AV_NOPTS_VALUE;
......@@ -1737,7 +1737,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
AV_BUFFERSRC_FLAG_NO_COPY |
AV_BUFFERSRC_FLAG_PUSH);
} else
if(av_buffersrc_add_frame(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH)<0) {
if(av_buffersrc_add_frame_flags(ist->filters[i]->filter, decoded_frame, AV_BUFFERSRC_FLAG_PUSH)<0) {
av_log(NULL, AV_LOG_FATAL, "Failed to inject frame into filter network\n");
exit(1);
}
......
......@@ -33,7 +33,6 @@ OBJS = allfilters.o \
avfilter.o \
avfiltergraph.o \
buffer.o \
buffersink.o \
buffersrc.o \
drawutils.o \
fifo.o \
......@@ -41,7 +40,6 @@ OBJS = allfilters.o \
graphdump.o \
graphparser.o \
sink_buffer.o \
src_buffer.o \
transform.o \
video.o \
......
......@@ -135,23 +135,23 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
{
AConvertContext *aconvert = inlink->dst->priv;
const int n = insamplesref->audio->nb_samples;
const int n = insamplesref->nb_samples;
AVFilterLink *const outlink = inlink->dst->outputs[0];
AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);
AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n);
int ret;
swr_convert(aconvert->swr, outsamplesref->data, n,
(void *)insamplesref->data, n);
swr_convert(aconvert->swr, outsamplesref->extended_data, n,
(void *)insamplesref->extended_data, n);
avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
outsamplesref->audio->channels = outlink->channels;
outsamplesref->audio->channel_layout = outlink->channel_layout;
av_frame_copy_props(outsamplesref, insamplesref);
outsamplesref->channels = outlink->channels;
outsamplesref->channel_layout = outlink->channel_layout;
ret = ff_filter_frame(outlink, outsamplesref);
avfilter_unref_buffer(insamplesref);
av_frame_free(&insamplesref);
return ret;
}
......@@ -160,7 +160,6 @@ static const AVFilterPad aconvert_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL }
};
......
......@@ -232,22 +232,22 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AudioFadeContext *afade = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int nb_samples = buf->audio->nb_samples;
AVFilterBufferRef *out_buf;
int nb_samples = buf->nb_samples;
AVFrame *out_buf;
int64_t cur_sample = av_rescale_q(buf->pts, (AVRational){1, outlink->sample_rate}, outlink->time_base);
if ((!afade->type && (afade->start_sample + afade->nb_samples < cur_sample)) ||
( afade->type && (cur_sample + afade->nb_samples < afade->start_sample)))
return ff_filter_frame(outlink, buf);
if (buf->perms & AV_PERM_WRITE) {
if (av_frame_is_writable(buf)) {
out_buf = buf;
} else {
out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples);
out_buf = ff_get_audio_buffer(inlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
out_buf->pts = buf->pts;
......@@ -256,7 +256,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
if ((!afade->type && (cur_sample + nb_samples < afade->start_sample)) ||
( afade->type && (afade->start_sample + afade->nb_samples < cur_sample))) {
av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
out_buf->audio->channels, out_buf->format);
out_buf->channels, out_buf->format);
} else {
int64_t start;
......@@ -266,13 +266,13 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
start = afade->start_sample + afade->nb_samples - cur_sample;
afade->fade_samples(out_buf->extended_data, buf->extended_data,
nb_samples, buf->audio->channels,
nb_samples, buf->channels,
afade->type ? -1 : 1, start,
afade->nb_samples, afade->curve);
}
if (buf != out_buf)
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return ff_filter_frame(outlink, out_buf);
}
......
......@@ -219,14 +219,14 @@ static inline void copy_samples(int nb_inputs, struct amerge_input in[],
}
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
AMergeContext *am = ctx->priv;
AVFilterLink *const outlink = ctx->outputs[0];
int input_number;
int nb_samples, ns, i;
AVFilterBufferRef *outbuf, *inbuf[SWR_CH_MAX];
AVFrame *outbuf, *inbuf[SWR_CH_MAX];
uint8_t *ins[SWR_CH_MAX], *outs;
for (input_number = 0; input_number < am->nb_inputs; input_number++)
......@@ -235,39 +235,40 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
av_assert1(input_number < am->nb_inputs);
if (ff_bufqueue_is_full(&am->in[input_number].queue)) {
av_log(ctx, AV_LOG_ERROR, "Buffer queue overflow\n");
avfilter_unref_buffer(insamples);
av_frame_free(&insamples);
return AVERROR(ENOMEM);
}
ff_bufqueue_add(ctx, &am->in[input_number].queue, insamples);
am->in[input_number].nb_samples += insamples->audio->nb_samples;
ff_bufqueue_add(ctx, &am->in[input_number].queue, av_frame_clone(insamples));
am->in[input_number].nb_samples += insamples->nb_samples;
av_frame_free(&insamples);
nb_samples = am->in[0].nb_samples;
for (i = 1; i < am->nb_inputs; i++)
nb_samples = FFMIN(nb_samples, am->in[i].nb_samples);
if (!nb_samples)
return 0;
outbuf = ff_get_audio_buffer(ctx->outputs[0], AV_PERM_WRITE, nb_samples);
outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
outs = outbuf->data[0];
for (i = 0; i < am->nb_inputs; i++) {
inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
ins[i] = inbuf[i]->data[0] +
am->in[i].pos * am->in[i].nb_ch * am->bps;
}
avfilter_copy_buffer_ref_props(outbuf, inbuf[0]);
av_frame_copy_props(outbuf, inbuf[0]);
outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
inbuf[0]->pts +
av_rescale_q(am->in[0].pos,
(AVRational){ 1, ctx->inputs[0]->sample_rate },
ctx->outputs[0]->time_base);
outbuf->audio->nb_samples = nb_samples;
outbuf->audio->channel_layout = outlink->channel_layout;
outbuf->audio->channels = outlink->channels;
outbuf->nb_samples = nb_samples;
outbuf->channel_layout = outlink->channel_layout;
outbuf->channels = outlink->channels;
while (nb_samples) {
ns = nb_samples;
for (i = 0; i < am->nb_inputs; i++)
ns = FFMIN(ns, inbuf[i]->audio->nb_samples - am->in[i].pos);
ns = FFMIN(ns, inbuf[i]->nb_samples - am->in[i].pos);
/* Unroll the most common sample formats: speed +~350% for the loop,
+~13% overall (including two common decoders) */
switch (am->bps) {
......@@ -289,9 +290,9 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
for (i = 0; i < am->nb_inputs; i++) {
am->in[i].nb_samples -= ns;
am->in[i].pos += ns;
if (am->in[i].pos == inbuf[i]->audio->nb_samples) {
if (am->in[i].pos == inbuf[i]->nb_samples) {
am->in[i].pos = 0;
avfilter_unref_buffer(inbuf[i]);
av_frame_free(&inbuf[i]);
ff_bufqueue_get(&am->in[i].queue);
inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
ins[i] = inbuf[i] ? inbuf[i]->data[0] : NULL;
......@@ -322,7 +323,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
.name = name,
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_PRESERVE,
};
if (!name)
return AVERROR(ENOMEM);
......
......@@ -270,18 +270,18 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
{
AVFilterContext *ctx = outlink->src;
MixContext *s = ctx->priv;
AVFilterBufferRef *out_buf, *in_buf;
AVFrame *out_buf, *in_buf;
int i;
calculate_scales(s, nb_samples);
out_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
out_buf = ff_get_audio_buffer(outlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
in_buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
in_buf = ff_get_audio_buffer(outlink, nb_samples);
if (!in_buf) {
avfilter_unref_buffer(out_buf);
av_frame_free(&out_buf);
return AVERROR(ENOMEM);
}
......@@ -303,7 +303,7 @@ static int output_frame(AVFilterLink *outlink, int nb_samples)
}
}
}
avfilter_unref_buffer(in_buf);
av_frame_free(&in_buf);
out_buf->pts = s->next_pts;
if (s->next_pts != AV_NOPTS_VALUE)
......@@ -450,7 +450,7 @@ static int request_frame(AVFilterLink *outlink)
return output_frame(outlink, available_samples);
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
MixContext *s = ctx->priv;
......@@ -469,16 +469,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
if (i == 0) {
int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
outlink->time_base);
ret = frame_list_add_frame(s->frame_list, buf->audio->nb_samples, pts);
ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts);
if (ret < 0)
goto fail;
}
ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
buf->audio->nb_samples);
buf->nb_samples);
fail:
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return ret;
}
......
......@@ -77,15 +77,15 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
APadContext *apad = ctx->priv;
if (apad->whole_len)
apad->whole_len -= frame->audio->nb_samples;
apad->whole_len -= frame->nb_samples;
apad->next_pts = frame->pts + av_rescale_q(frame->audio->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
apad->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
return ff_filter_frame(ctx->outputs[0], frame);
}
......@@ -99,7 +99,7 @@ static int request_frame(AVFilterLink *outlink)
if (ret == AVERROR_EOF) {
int n_out = apad->packet_size;
AVFilterBufferRef *outsamplesref;
AVFrame *outsamplesref;
if (apad->whole_len > 0) {
apad->pad_len = apad->whole_len;
......@@ -113,16 +113,16 @@ static int request_frame(AVFilterLink *outlink)
if(!n_out)
return AVERROR_EOF;
outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out);
outsamplesref = ff_get_audio_buffer(outlink, n_out);
if (!outsamplesref)
return AVERROR(ENOMEM);
av_assert0(outsamplesref->audio->sample_rate == outlink->sample_rate);
av_assert0(outsamplesref->audio->nb_samples == n_out);
av_assert0(outsamplesref->sample_rate == outlink->sample_rate);
av_assert0(outsamplesref->nb_samples == n_out);
av_samples_set_silence(outsamplesref->extended_data, 0,
n_out,
outsamplesref->audio->channels,
outsamplesref->channels,
outsamplesref->format);
outsamplesref->pts = apad->next_pts;
......
......@@ -174,23 +174,23 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
{
AResampleContext *aresample = inlink->dst->priv;
const int n_in = insamplesref->audio->nb_samples;
const int n_in = insamplesref->nb_samples;
int n_out = n_in * aresample->ratio * 2 + 256;
AVFilterLink *const outlink = inlink->dst->outputs[0];
AVFilterBufferRef *outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out);
AVFrame *outsamplesref = ff_get_audio_buffer(outlink, n_out);
int ret;
if(!outsamplesref)
return AVERROR(ENOMEM);
avfilter_copy_buffer_ref_props(outsamplesref, insamplesref);
av_frame_copy_props(outsamplesref, insamplesref);
outsamplesref->format = outlink->format;
outsamplesref->audio->channels = outlink->channels;
outsamplesref->audio->channel_layout = outlink->channel_layout;
outsamplesref->audio->sample_rate = outlink->sample_rate;
outsamplesref->channels = outlink->channels;
outsamplesref->channel_layout = outlink->channel_layout;
outsamplesref->sample_rate = outlink->sample_rate;
if(insamplesref->pts != AV_NOPTS_VALUE) {
int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den);
......@@ -203,16 +203,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamplesref)
n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out,
(void *)insamplesref->extended_data, n_in);
if (n_out <= 0) {
avfilter_unref_buffer(outsamplesref);
avfilter_unref_buffer(insamplesref);
av_frame_free(&outsamplesref);
av_frame_free(&insamplesref);
return 0;
}
outsamplesref->audio->nb_samples = n_out;
outsamplesref->nb_samples = n_out;
ret = ff_filter_frame(outlink, outsamplesref);
aresample->req_fullfilled= 1;
avfilter_unref_buffer(insamplesref);
av_frame_free(&insamplesref);
return ret;
}
......@@ -229,20 +229,20 @@ static int request_frame(AVFilterLink *outlink)
}while(!aresample->req_fullfilled && ret>=0);
if (ret == AVERROR_EOF) {
AVFilterBufferRef *outsamplesref;
AVFrame *outsamplesref;
int n_out = 4096;
outsamplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n_out);
outsamplesref = ff_get_audio_buffer(outlink, n_out);
if (!outsamplesref)
return AVERROR(ENOMEM);
n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, 0, 0);
if (n_out <= 0) {
avfilter_unref_buffer(outsamplesref);
av_frame_free(&outsamplesref);
return (n_out == 0) ? AVERROR_EOF : n_out;
}
outsamplesref->audio->sample_rate = outlink->sample_rate;
outsamplesref->audio->nb_samples = n_out;
outsamplesref->sample_rate = outlink->sample_rate;
outsamplesref->nb_samples = n_out;
#if 0
outsamplesref->pts = aresample->next_pts;
if(aresample->next_pts != AV_NOPTS_VALUE)
......@@ -263,7 +263,6 @@ static const AVFilterPad aresample_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL },
};
......
......@@ -93,7 +93,7 @@ static int config_props_output(AVFilterLink *outlink)
static int push_samples(AVFilterLink *outlink)
{
ASNSContext *asns = outlink->src->priv;
AVFilterBufferRef *outsamples = NULL;
AVFrame *outsamples = NULL;
int nb_out_samples, nb_pad_samples;
if (asns->pad) {
......@@ -107,7 +107,7 @@ static int push_samples(AVFilterLink *outlink)
if (!nb_out_samples)
return 0;
outsamples = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_out_samples);
outsamples = ff_get_audio_buffer(outlink, nb_out_samples);
av_assert0(outsamples);
av_audio_fifo_read(asns->fifo,
......@@ -117,9 +117,9 @@ static int push_samples(AVFilterLink *outlink)
av_samples_set_silence(outsamples->extended_data, nb_out_samples - nb_pad_samples,
nb_pad_samples, av_get_channel_layout_nb_channels(outlink->channel_layout),
outlink->format);
outsamples->audio->nb_samples = nb_out_samples;
outsamples->audio->channel_layout = outlink->channel_layout;
outsamples->audio->sample_rate = outlink->sample_rate;
outsamples->nb_samples = nb_out_samples;
outsamples->channel_layout = outlink->channel_layout;
outsamples->sample_rate = outlink->sample_rate;
outsamples->pts = asns->next_out_pts;
if (asns->next_out_pts != AV_NOPTS_VALUE)
......@@ -130,13 +130,13 @@ static int push_samples(AVFilterLink *outlink)
return nb_out_samples;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
ASNSContext *asns = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int ret;
int nb_samples = insamples->audio->nb_samples;
int nb_samples = insamples->nb_samples;
if (av_audio_fifo_space(asns->fifo) < nb_samples) {
av_log(ctx, AV_LOG_DEBUG, "No space for %d samples, stretching audio fifo\n", nb_samples);
......@@ -150,7 +150,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
av_audio_fifo_write(asns->fifo, (void **)insamples->extended_data, nb_samples);
if (asns->next_out_pts == AV_NOPTS_VALUE)
asns->next_out_pts = insamples->pts;
avfilter_unref_buffer(insamples);
av_frame_free(&insamples);
while (av_audio_fifo_size(asns->fifo) >= asns->nb_out_samples)
push_samples(outlink);
......@@ -177,10 +177,10 @@ static int request_frame(AVFilterLink *outlink)
static const AVFilterPad asetnsamples_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_WRITE,
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.needs_writable = 1,
},
{ NULL }
};
......
......@@ -55,16 +55,16 @@ static void uninit(AVFilterContext *ctx)
av_freep(&s->plane_checksums);
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
AShowInfoContext *s = ctx->priv;
char chlayout_str[128];
uint32_t checksum = 0;
int channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
int channels = av_get_channel_layout_nb_channels(buf->channel_layout);
int planar = av_sample_fmt_is_planar(buf->format);
int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
int data_size = buf->audio->nb_samples * block_align;
int data_size = buf->nb_samples * block_align;
int planes = planar ? channels : 1;
int i;
void *tmp_ptr = av_realloc(s->plane_checksums, channels * sizeof(*s->plane_checksums));
......@@ -82,7 +82,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
}
av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), -1,
buf->audio->channel_layout);
buf->channel_layout);
av_log(ctx, AV_LOG_INFO,
"n:%"PRIu64" pts:%s pts_time:%s pos:%"PRId64" "
......@@ -90,9 +90,9 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
"checksum:%08X ",
s->frame,
av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
buf->pos,
av_get_sample_fmt_name(buf->format), buf->audio->channels, chlayout_str,
buf->audio->sample_rate, buf->audio->nb_samples,
av_frame_get_pkt_pos(buf),
av_get_sample_fmt_name(buf->format), av_frame_get_channels(buf), chlayout_str,
buf->sample_rate, buf->nb_samples,
checksum);
av_log(ctx, AV_LOG_INFO, "plane_checksums: [ ");
......@@ -110,7 +110,6 @@ static const AVFilterPad inputs[] = {
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL },
};
......
......@@ -48,7 +48,7 @@ typedef struct {
AVExpr *expr;
double var_values[VAR_NB];
struct buf_queue {
AVFilterBufferRef *buf[QUEUE_SIZE];
AVFrame *buf[QUEUE_SIZE];
unsigned tail, nb;
/* buf[tail] is the oldest,
buf[(tail + nb) % QUEUE_SIZE] is where the next is added */
......@@ -111,16 +111,16 @@ static int send_out(AVFilterContext *ctx, int out_id)
{
AStreamSyncContext *as = ctx->priv;
struct buf_queue *queue = &as->queue[out_id];
AVFilterBufferRef *buf = queue->buf[queue->tail];
AVFrame *buf = queue->buf[queue->tail];
int ret;
queue->buf[queue->tail] = NULL;
as->var_values[VAR_B1 + out_id]++;
as->var_values[VAR_S1 + out_id] += buf->audio->nb_samples;
as->var_values[VAR_S1 + out_id] += buf->nb_samples;
if (buf->pts != AV_NOPTS_VALUE)
as->var_values[VAR_T1 + out_id] =
av_q2d(ctx->outputs[out_id]->time_base) * buf->pts;
as->var_values[VAR_T1 + out_id] += buf->audio->nb_samples /
as->var_values[VAR_T1 + out_id] += buf->nb_samples /
(double)ctx->inputs[out_id]->sample_rate;
ret = ff_filter_frame(ctx->outputs[out_id], buf);
queue->nb--;
......@@ -167,7 +167,7 @@ static int request_frame(AVFilterLink *outlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
AStreamSyncContext *as = ctx->priv;
......@@ -185,12 +185,10 @@ static const AVFilterPad astreamsync_inputs[] = {
.name = "in1",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_PRESERVE,
},{
.name = "in2",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_PRESERVE,
},
{ NULL }
};
......
......@@ -152,14 +152,13 @@ static int request_frame(AVFilterLink *link)
handle_trimming(ctx);
if (nb_samples = get_delay(s)) {
AVFilterBufferRef *buf = ff_get_audio_buffer(link, AV_PERM_WRITE,
nb_samples);
AVFrame *buf = ff_get_audio_buffer(link, nb_samples);
if (!buf)
return AVERROR(ENOMEM);
ret = avresample_convert(s->avr, buf->extended_data,
buf->linesize[0], nb_samples, NULL, 0, 0);
if (ret <= 0) {
avfilter_unref_bufferp(&buf);
av_frame_free(&buf);
return (ret < 0) ? ret : AVERROR_EOF;
}
......@@ -171,20 +170,20 @@ static int request_frame(AVFilterLink *link)
return ret;
}
static int write_to_fifo(ASyncContext *s, AVFilterBufferRef *buf)
static int write_to_fifo(ASyncContext *s, AVFrame *buf)
{
int ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
buf->linesize[0], buf->audio->nb_samples);
avfilter_unref_buffer(buf);
buf->linesize[0], buf->nb_samples);
av_frame_free(&buf);
return ret;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
ASyncContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int nb_channels = av_get_channel_layout_nb_channels(buf->audio->channel_layout);
int nb_channels = av_get_channel_layout_nb_channels(buf->channel_layout);
int64_t pts = (buf->pts == AV_NOPTS_VALUE) ? buf->pts :
av_rescale_q(buf->pts, inlink->time_base, outlink->time_base);
int out_size, ret;
......@@ -223,8 +222,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
}
if (out_size > 0) {
AVFilterBufferRef *buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
out_size);
AVFrame *buf_out = ff_get_audio_buffer(outlink, out_size);
if (!buf_out) {
ret = AVERROR(ENOMEM);
goto fail;
......@@ -266,11 +264,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
s->pts = pts - avresample_get_delay(s->avr);
ret = avresample_convert(s->avr, NULL, 0, 0, buf->extended_data,
buf->linesize[0], buf->audio->nb_samples);
buf->linesize[0], buf->nb_samples);
s->first_frame = 0;
fail:
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return ret;
}
......
......@@ -140,7 +140,7 @@ typedef struct {
// for managing AVFilterPad.request_frame and AVFilterPad.filter_frame
int request_fulfilled;
AVFilterBufferRef *dst_buffer;
AVFrame *dst_buffer;
uint8_t *dst;
uint8_t *dst_end;
uint64_t nsamples_in;
......@@ -177,7 +177,7 @@ static void yae_clear(ATempoContext *atempo)
atempo->frag[0].position[0] = -(int64_t)(atempo->window / 2);
atempo->frag[0].position[1] = -(int64_t)(atempo->window / 2);
avfilter_unref_bufferp(&atempo->dst_buffer);
av_frame_free(&atempo->dst_buffer);
atempo->dst = NULL;
atempo->dst_end = NULL;
......@@ -1024,8 +1024,8 @@ static void push_samples(ATempoContext *atempo,
AVFilterLink *outlink,
int n_out)
{
atempo->dst_buffer->audio->sample_rate = outlink->sample_rate;
atempo->dst_buffer->audio->nb_samples = n_out;
atempo->dst_buffer->sample_rate = outlink->sample_rate;
atempo->dst_buffer->nb_samples = n_out;
// adjust the PTS:
atempo->dst_buffer->pts =
......@@ -1041,14 +1041,13 @@ static void push_samples(ATempoContext *atempo,
atempo->nsamples_out += n_out;
}
static int filter_frame(AVFilterLink *inlink,
AVFilterBufferRef *src_buffer)
static int filter_frame(AVFilterLink *inlink, AVFrame *src_buffer)
{
AVFilterContext *ctx = inlink->dst;
ATempoContext *atempo = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
int n_in = src_buffer->audio->nb_samples;
int n_in = src_buffer->nb_samples;
int n_out = (int)(0.5 + ((double)n_in) / atempo->tempo);
const uint8_t *src = src_buffer->data[0];
......@@ -1056,10 +1055,8 @@ static int filter_frame(AVFilterLink *inlink,
while (src < src_end) {
if (!atempo->dst_buffer) {
atempo->dst_buffer = ff_get_audio_buffer(outlink,
AV_PERM_WRITE,
n_out);
avfilter_copy_buffer_ref_props(atempo->dst_buffer, src_buffer);
atempo->dst_buffer = ff_get_audio_buffer(outlink, n_out);
av_frame_copy_props(atempo->dst_buffer, src_buffer);
atempo->dst = atempo->dst_buffer->data[0];
atempo->dst_end = atempo->dst + n_out * atempo->stride;
......@@ -1074,7 +1071,7 @@ static int filter_frame(AVFilterLink *inlink,
}
atempo->nsamples_in += n_in;
avfilter_unref_bufferp(&src_buffer);
av_frame_free(&src_buffer);
return 0;
}
......@@ -1098,9 +1095,7 @@ static int request_frame(AVFilterLink *outlink)
while (err == AVERROR(EAGAIN)) {
if (!atempo->dst_buffer) {
atempo->dst_buffer = ff_get_audio_buffer(outlink,
AV_PERM_WRITE,
n_max);
atempo->dst_buffer = ff_get_audio_buffer(outlink, n_max);
atempo->dst = atempo->dst_buffer->data[0];
atempo->dst_end = atempo->dst + n_max * atempo->stride;
......@@ -1116,7 +1111,7 @@ static int request_frame(AVFilterLink *outlink)
}
}
avfilter_unref_bufferp(&atempo->dst_buffer);
av_frame_free(&atempo->dst_buffer);
atempo->dst = NULL;
atempo->dst_end = NULL;
......@@ -1142,7 +1137,6 @@ static const AVFilterPad atempo_inputs[] = {
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.config_props = config_props,
.min_perms = AV_PERM_READ,
},
{ NULL }
};
......
......@@ -392,24 +392,24 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
BiquadsContext *p = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *out_buf;
int nb_samples = buf->audio->nb_samples;
AVFrame *out_buf;
int nb_samples = buf->nb_samples;
int ch;
if (buf->perms & AV_PERM_WRITE) {
if (av_frame_is_writable(buf)) {
out_buf = buf;
} else {
out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples);
out_buf = ff_get_audio_buffer(inlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
out_buf->pts = buf->pts;
}
for (ch = 0; ch < buf->audio->channels; ch++)
for (ch = 0; ch < buf->channels; ch++)
p->filter(buf->extended_data[ch],
out_buf->extended_data[ch], nb_samples,
&p->cache[ch].i1, &p->cache[ch].i2,
......@@ -417,7 +417,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
p->b0, p->b1, p->b2, p->a1, p->a2);
if (buf != out_buf)
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return ff_filter_frame(outlink, out_buf);
}
......
......@@ -312,7 +312,7 @@ static int channelmap_query_formats(AVFilterContext *ctx)
return 0;
}
static int channelmap_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
......@@ -330,7 +330,7 @@ static int channelmap_filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
uint8_t **new_extended_data =
av_mallocz(nch_out * sizeof(*buf->extended_data));
if (!new_extended_data) {
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return AVERROR(ENOMEM);
}
if (buf->extended_data == buf->data) {
......
......@@ -105,13 +105,13 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
int i, ret = 0;
for (i = 0; i < ctx->nb_outputs; i++) {
AVFilterBufferRef *buf_out = avfilter_ref_buffer(buf, ~AV_PERM_WRITE);
AVFrame *buf_out = av_frame_clone(buf);
if (!buf_out) {
ret = AVERROR(ENOMEM);
......@@ -119,14 +119,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
}
buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[i];
buf_out->audio->channel_layout =
av_channel_layout_extract_channel(buf->audio->channel_layout, i);
buf_out->channel_layout =
av_channel_layout_extract_channel(buf->channel_layout, i);
ret = ff_filter_frame(ctx->outputs[i], buf_out);
if (ret < 0)
break;
}
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return ret;
}
......
......@@ -109,18 +109,18 @@ static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, in
return out;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterLink *outlink = inlink->dst->outputs[0];
int16_t *taps, *endin, *in, *out;
AVFilterBufferRef *outsamples =
ff_get_audio_buffer(inlink, AV_PERM_WRITE,
insamples->audio->nb_samples);
AVFrame *outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples);
int ret;
if (!outsamples)
if (!outsamples) {
av_frame_free(&insamples);
return AVERROR(ENOMEM);
avfilter_copy_buffer_ref_props(outsamples, insamples);
}
av_frame_copy_props(outsamples, insamples);
taps = ((EarwaxContext *)inlink->dst->priv)->taps;
out = (int16_t *)outsamples->data[0];
......@@ -131,14 +131,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
out = scalarproduct(taps, taps + NUMTAPS, out);
// process current input
endin = in + insamples->audio->nb_samples * 2 - NUMTAPS;
endin = in + insamples->nb_samples * 2 - NUMTAPS;
scalarproduct(in, endin, out);
// save part of input for next round
memcpy(taps, endin, NUMTAPS * sizeof(*taps));
ret = ff_filter_frame(outlink, outsamples);
avfilter_unref_buffer(insamples);
av_frame_free(&insamples);
return ret;
}
......@@ -147,7 +147,6 @@ static const AVFilterPad earwax_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL }
};
......
......@@ -56,24 +56,14 @@ typedef struct JoinContext {
/**
* Temporary storage for input frames, until we get one on each input.
*/
AVFilterBufferRef **input_frames;
AVFrame **input_frames;
/**
* Temporary storage for data pointers, for assembling the output buffer.
* Temporary storage for buffer references, for assembling the output frame.
*/
uint8_t **data;
AVBufferRef **buffers;
} JoinContext;
/**
* To avoid copying the data from input buffers, this filter creates
* a custom output buffer that stores references to all inputs and
* unrefs them on free.
*/
typedef struct JoinBufferPriv {
AVFilterBufferRef **in_buffers;
int nb_in_buffers;
} JoinBufferPriv;
#define OFFSET(x) offsetof(JoinContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
......@@ -94,7 +84,7 @@ static const AVClass join_class = {
.version = LIBAVUTIL_VERSION_INT,
};
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
JoinContext *s = ctx->priv;
......@@ -105,7 +95,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf)
break;
av_assert0(i < ctx->nb_inputs);
av_assert0(!s->input_frames[i]);
s->input_frames[i] = buf;
s->input_frames[i] = frame;
return 0;
}
......@@ -207,9 +197,9 @@ static int join_init(AVFilterContext *ctx, const char *args)
s->nb_channels = av_get_channel_layout_nb_channels(s->channel_layout);
s->channels = av_mallocz(sizeof(*s->channels) * s->nb_channels);
s->data = av_mallocz(sizeof(*s->data) * s->nb_channels);
s->buffers = av_mallocz(sizeof(*s->buffers) * s->nb_channels);
s->input_frames = av_mallocz(sizeof(*s->input_frames) * s->inputs);
if (!s->channels || !s->data || !s->input_frames) {
if (!s->channels || !s->buffers|| !s->input_frames) {
ret = AVERROR(ENOMEM);
goto fail;
}
......@@ -248,11 +238,11 @@ static void join_uninit(AVFilterContext *ctx)
for (i = 0; i < ctx->nb_inputs; i++) {
av_freep(&ctx->input_pads[i].name);
avfilter_unref_bufferp(&s->input_frames[i]);
av_frame_free(&s->input_frames[i]);
}
av_freep(&s->channels);
av_freep(&s->data);
av_freep(&s->buffers);
av_freep(&s->input_frames);
}
......@@ -394,34 +384,14 @@ fail:
return ret;
}
static void join_free_buffer(AVFilterBuffer *buf)
{
JoinBufferPriv *priv = buf->priv;
if (priv) {
int i;
for (i = 0; i < priv->nb_in_buffers; i++)
avfilter_unref_bufferp(&priv->in_buffers[i]);
av_freep(&priv->in_buffers);
av_freep(&buf->priv);
}
if (buf->extended_data != buf->data)
av_freep(&buf->extended_data);
av_freep(&buf);
}
static int join_request_frame(AVFilterLink *outlink)
{
AVFilterContext *ctx = outlink->src;
JoinContext *s = ctx->priv;
AVFilterBufferRef *buf;
JoinBufferPriv *priv;
AVFrame *frame;
int linesize = INT_MAX;
int perms = ~0;
int nb_samples = 0;
int nb_buffers = 0;
int i, j, ret;
/* get a frame on each input */
......@@ -434,54 +404,95 @@ static int join_request_frame(AVFilterLink *outlink)
/* request the same number of samples on all inputs */
if (i == 0) {
nb_samples = s->input_frames[0]->audio->nb_samples;
nb_samples = s->input_frames[0]->nb_samples;
for (j = 1; !i && j < ctx->nb_inputs; j++)
ctx->inputs[j]->request_samples = nb_samples;
}
}
/* setup the output frame */
frame = av_frame_alloc();
if (!frame)
return AVERROR(ENOMEM);
if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
frame->extended_data = av_mallocz(s->nb_channels *
sizeof(*frame->extended_data));
if (!frame->extended_data) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
/* copy the data pointers */
for (i = 0; i < s->nb_channels; i++) {
ChannelMap *ch = &s->channels[i];
AVFilterBufferRef *cur_buf = s->input_frames[ch->input];
s->data[i] = cur_buf->extended_data[ch->in_channel_idx];
linesize = FFMIN(linesize, cur_buf->linesize[0]);
perms &= cur_buf->perms;
}
AVFrame *cur = s->input_frames[ch->input];
AVBufferRef *buf;
av_assert0(nb_samples > 0);
buf = avfilter_get_audio_buffer_ref_from_arrays(s->data, linesize, perms,
nb_samples, outlink->format,
outlink->channel_layout);
if (!buf)
return AVERROR(ENOMEM);
frame->extended_data[i] = cur->extended_data[ch->in_channel_idx];
linesize = FFMIN(linesize, cur->linesize[0]);
buf->buf->free = join_free_buffer;
buf->pts = s->input_frames[0]->pts;
/* add the buffer where this plan is stored to the list if it's
* not already there */
buf = av_frame_get_plane_buffer(cur, ch->in_channel_idx);
if (!buf) {
ret = AVERROR(EINVAL);
goto fail;
}
for (j = 0; j < nb_buffers; j++)
if (s->buffers[j]->buffer == buf->buffer)
break;
if (j == i)
s->buffers[nb_buffers++] = buf;
}
if (!(priv = av_mallocz(sizeof(*priv))))
goto fail;
if (!(priv->in_buffers = av_mallocz(sizeof(*priv->in_buffers) * ctx->nb_inputs)))
goto fail;
/* create references to the buffers we copied to output */
if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
frame->extended_buf = av_mallocz(sizeof(*frame->extended_buf) *
frame->nb_extended_buf);
if (!frame->extended_buf) {
frame->nb_extended_buf = 0;
ret = AVERROR(ENOMEM);
goto fail;
}
}
for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) {
frame->buf[i] = av_buffer_ref(s->buffers[i]);
if (!frame->buf[i]) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
for (i = 0; i < frame->nb_extended_buf; i++) {
frame->extended_buf[i] = av_buffer_ref(s->buffers[i +
FF_ARRAY_ELEMS(frame->buf)]);
if (!frame->extended_buf[i]) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
for (i = 0; i < ctx->nb_inputs; i++)
priv->in_buffers[i] = s->input_frames[i];
priv->nb_in_buffers = ctx->nb_inputs;
buf->buf->priv = priv;
frame->nb_samples = nb_samples;
frame->channel_layout = outlink->channel_layout;
frame->sample_rate = outlink->sample_rate;
frame->pts = s->input_frames[0]->pts;
frame->linesize[0] = linesize;
if (frame->data != frame->extended_data) {
memcpy(frame->data, frame->extended_data, sizeof(*frame->data) *
FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels));
}
ret = ff_filter_frame(outlink, buf);
ret = ff_filter_frame(outlink, frame);
memset(s->input_frames, 0, sizeof(*s->input_frames) * ctx->nb_inputs);
return ret;
fail:
avfilter_unref_buffer(buf);
if (priv)
av_freep(&priv->in_buffers);
av_freep(&priv);
return AVERROR(ENOMEM);
av_frame_free(&frame);
return ret;
}
static const AVFilterPad avfilter_af_join_outputs[] = {
......
......@@ -353,21 +353,21 @@ static int config_props(AVFilterLink *link)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
int ret;
int n = insamples->audio->nb_samples;
int n = insamples->nb_samples;
AVFilterLink *const outlink = inlink->dst->outputs[0];
AVFilterBufferRef *outsamples = ff_get_audio_buffer(outlink, AV_PERM_WRITE, n);
AVFrame *outsamples = ff_get_audio_buffer(outlink, n);
PanContext *pan = inlink->dst->priv;
swr_convert(pan->swr, outsamples->data, n, (void *)insamples->data, n);
avfilter_copy_buffer_ref_props(outsamples, insamples);
outsamples->audio->channel_layout = outlink->channel_layout;
outsamples->audio->channels = outlink->channels;
av_frame_copy_props(outsamples, insamples);
outsamples->channel_layout = outlink->channel_layout;
outsamples->channels = outlink->channels;
ret = ff_filter_frame(outlink, outsamples);
avfilter_unref_buffer(insamples);
av_frame_free(&insamples);
return ret;
}
......@@ -383,7 +383,6 @@ static const AVFilterPad pan_inputs[] = {
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_props,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL }
};
......
......@@ -174,7 +174,7 @@ static int request_frame(AVFilterLink *outlink)
/* flush the lavr delay buffer */
if (ret == AVERROR_EOF && s->avr) {
AVFilterBufferRef *buf;
AVFrame *frame;
int nb_samples = av_rescale_rnd(avresample_get_delay(s->avr),
outlink->sample_rate,
ctx->inputs[0]->sample_rate,
......@@ -183,25 +183,25 @@ static int request_frame(AVFilterLink *outlink)
if (!nb_samples)
return ret;
buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
if (!buf)
frame = ff_get_audio_buffer(outlink, nb_samples);
if (!frame)
return AVERROR(ENOMEM);
ret = avresample_convert(s->avr, buf->extended_data,
buf->linesize[0], nb_samples,
ret = avresample_convert(s->avr, frame->extended_data,
frame->linesize[0], nb_samples,
NULL, 0, 0);
if (ret <= 0) {
avfilter_unref_buffer(buf);
av_frame_free(&frame);
return (ret == 0) ? AVERROR_EOF : ret;
}
buf->pts = s->next_pts;
return ff_filter_frame(outlink, buf);
frame->pts = s->next_pts;
return ff_filter_frame(outlink, frame);
}
return ret;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
ResampleContext *s = ctx->priv;
......@@ -209,27 +209,26 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
int ret;
if (s->avr) {
AVFilterBufferRef *buf_out;
AVFrame *out;
int delay, nb_samples;
/* maximum possible samples lavr can output */
delay = avresample_get_delay(s->avr);
nb_samples = av_rescale_rnd(buf->audio->nb_samples + delay,
nb_samples = av_rescale_rnd(in->nb_samples + delay,
outlink->sample_rate, inlink->sample_rate,
AV_ROUND_UP);
buf_out = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
if (!buf_out) {
out = ff_get_audio_buffer(outlink, nb_samples);
if (!out) {
ret = AVERROR(ENOMEM);
goto fail;
}
ret = avresample_convert(s->avr, buf_out->extended_data,
buf_out->linesize[0], nb_samples,
buf->extended_data, buf->linesize[0],
buf->audio->nb_samples);
ret = avresample_convert(s->avr, out->extended_data, out->linesize[0],
nb_samples, in->extended_data, in->linesize[0],
in->nb_samples);
if (ret <= 0) {
avfilter_unref_buffer(buf_out);
av_frame_free(&out);
if (ret < 0)
goto fail;
}
......@@ -237,36 +236,36 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
av_assert0(!avresample_available(s->avr));
if (s->next_pts == AV_NOPTS_VALUE) {
if (buf->pts == AV_NOPTS_VALUE) {
if (in->pts == AV_NOPTS_VALUE) {
av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
"assuming 0.\n");
s->next_pts = 0;
} else
s->next_pts = av_rescale_q(buf->pts, inlink->time_base,
s->next_pts = av_rescale_q(in->pts, inlink->time_base,
outlink->time_base);
}
if (ret > 0) {
buf_out->audio->nb_samples = ret;
if (buf->pts != AV_NOPTS_VALUE) {
buf_out->pts = av_rescale_q(buf->pts, inlink->time_base,
out->nb_samples = ret;
if (in->pts != AV_NOPTS_VALUE) {
out->pts = av_rescale_q(in->pts, inlink->time_base,
outlink->time_base) -
av_rescale(delay, outlink->sample_rate,
inlink->sample_rate);
} else
buf_out->pts = s->next_pts;
out->pts = s->next_pts;
s->next_pts = buf_out->pts + buf_out->audio->nb_samples;
s->next_pts = out->pts + out->nb_samples;
ret = ff_filter_frame(outlink, buf_out);
ret = ff_filter_frame(outlink, out);
s->got_output = 1;
}
fail:
avfilter_unref_buffer(buf);
av_frame_free(&in);
} else {
buf->format = outlink->format;
ret = ff_filter_frame(outlink, buf);
in->format = outlink->format;
ret = ff_filter_frame(outlink, in);
s->got_output = 1;
}
......@@ -278,7 +277,6 @@ static const AVFilterPad avfilter_af_resample_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ
},
{ NULL }
};
......
......@@ -70,20 +70,20 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return 0;
}
static char *get_metadata_val(AVFilterBufferRef *insamples, const char *key)
static char *get_metadata_val(AVFrame *insamples, const char *key)
{
AVDictionaryEntry *e = av_dict_get(insamples->metadata, key, NULL, 0);
return e && e->value ? e->value : NULL;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
int i;
SilenceDetectContext *silence = inlink->dst->priv;
const int nb_channels = av_get_channel_layout_nb_channels(inlink->channel_layout);
const int srate = inlink->sample_rate;
const int nb_samples = insamples->audio->nb_samples * nb_channels;
const int64_t nb_samples_notify = srate * silence->duration * nb_channels;
const int nb_samples = insamples->nb_samples * nb_channels;
const int64_t nb_samples_notify = srate * silence->duration * nb_channels;
// scale number of null samples to the new sample rate
if (silence->last_sample_rate && silence->last_sample_rate != srate)
......
......@@ -226,21 +226,21 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
VolumeContext *vol = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
int nb_samples = buf->audio->nb_samples;
AVFilterBufferRef *out_buf;
int nb_samples = buf->nb_samples;
AVFrame *out_buf;
if (vol->volume == 1.0 || vol->volume_i == 256)
return ff_filter_frame(outlink, buf);
/* do volume scaling in-place if input buffer is writable */
if (buf->perms & AV_PERM_WRITE) {
if (av_frame_is_writable(buf)) {
out_buf = buf;
} else {
out_buf = ff_get_audio_buffer(inlink, AV_PERM_WRITE, nb_samples);
out_buf = ff_get_audio_buffer(inlink, nb_samples);
if (!out_buf)
return AVERROR(ENOMEM);
out_buf->pts = buf->pts;
......@@ -276,7 +276,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
}
if (buf != out_buf)
avfilter_unref_buffer(buf);
av_frame_free(&buf);
return ff_filter_frame(outlink, out_buf);
}
......
......@@ -49,12 +49,12 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *samples)
static int filter_frame(AVFilterLink *inlink, AVFrame *samples)
{
AVFilterContext *ctx = inlink->dst;
VolDetectContext *vd = ctx->priv;
int64_t layout = samples->audio->channel_layout;
int nb_samples = samples->audio->nb_samples;
int64_t layout = samples->channel_layout;
int nb_samples = samples->nb_samples;
int nb_channels = av_get_channel_layout_nb_channels(layout);
int nb_planes = nb_channels;
int plane, i;
......@@ -137,7 +137,6 @@ static const AVFilterPad volumedetect_inputs[] = {
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL }
};
......
......@@ -193,8 +193,8 @@ void avfilter_register_all(void)
* unconditionally */
REGISTER_FILTER_UNCONDITIONAL(asrc_abuffer);
REGISTER_FILTER_UNCONDITIONAL(vsrc_buffer);
REGISTER_FILTER_UNCONDITIONAL(asink_abuffer);
REGISTER_FILTER_UNCONDITIONAL(vsink_buffer);
//REGISTER_FILTER_UNCONDITIONAL(asink_abuffer);
//REGISTER_FILTER_UNCONDITIONAL(vsink_buffer);
REGISTER_FILTER_UNCONDITIONAL(af_afifo);
REGISTER_FILTER_UNCONDITIONAL(vf_fifo);
}
......@@ -22,9 +22,9 @@
#include "avfilter.h"
#include "internal.h"
static int null_filter_frame(AVFilterLink *link, AVFilterBufferRef *samplesref)
static int null_filter_frame(AVFilterLink *link, AVFrame *frame)
{
avfilter_unref_bufferp(&samplesref);
av_frame_free(&frame);
return 0;
}
......
......@@ -212,14 +212,14 @@ static int query_formats(AVFilterContext *ctx)
static int request_frame(AVFilterLink *outlink)
{
EvalContext *eval = outlink->src->priv;
AVFilterBufferRef *samplesref;
AVFrame *samplesref;
int i, j;
double t = eval->n * (double)1/eval->sample_rate;
if (eval->duration >= 0 && t >= eval->duration)
return AVERROR_EOF;
samplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, eval->nb_samples);
samplesref = ff_get_audio_buffer(outlink, eval->nb_samples);
/* evaluate expression for each single sample and for each channel */
for (i = 0; i < eval->nb_samples; i++, eval->n++) {
......@@ -233,8 +233,7 @@ static int request_frame(AVFilterLink *outlink)
}
samplesref->pts = eval->pts;
samplesref->pos = -1;
samplesref->audio->sample_rate = eval->sample_rate;
samplesref->sample_rate = eval->sample_rate;
eval->pts += eval->nb_samples;
ff_filter_frame(outlink, samplesref);
......
......@@ -102,17 +102,15 @@ static int config_props(AVFilterLink *outlink)
static int request_frame(AVFilterLink *outlink)
{
ANullContext *null = outlink->src->priv;
AVFilterBufferRef *samplesref;
AVFrame *samplesref;
samplesref =
ff_get_audio_buffer(outlink, AV_PERM_WRITE, null->nb_samples);
samplesref = ff_get_audio_buffer(outlink, null->nb_samples);
samplesref->pts = null->pts;
samplesref->pos = -1;
samplesref->audio->channel_layout = null->channel_layout;
samplesref->audio->sample_rate = outlink->sample_rate;
samplesref->channel_layout = null->channel_layout;
samplesref->sample_rate = outlink->sample_rate;
ff_filter_frame(outlink, avfilter_ref_buffer(samplesref, ~0));
avfilter_unref_buffer(samplesref);
ff_filter_frame(outlink, av_frame_clone(samplesref));
av_frame_free(&samplesref);
null->pts += null->nb_samples;
return 0;
......
......@@ -245,22 +245,22 @@ static int config_props(AVFilterLink *outlink)
static int request_frame(AVFilterLink *outlink)
{
AVFilterBufferRef *samplesref;
AVFrame *samplesref;
FliteContext *flite = outlink->src->priv;
int nb_samples = FFMIN(flite->wave_nb_samples, flite->frame_nb_samples);
if (!nb_samples)
return AVERROR_EOF;
samplesref = ff_get_audio_buffer(outlink, AV_PERM_WRITE, nb_samples);
samplesref = ff_get_audio_buffer(outlink, nb_samples);
if (!samplesref)
return AVERROR(ENOMEM);
memcpy(samplesref->data[0], flite->wave_samples,
nb_samples * flite->wave->num_channels * 2);
samplesref->pts = flite->pts;
samplesref->pos = -1;
samplesref->audio->sample_rate = flite->wave->sample_rate;
av_frame_set_pkt_pos(samplesref, -1);
av_frame_set_sample_rate(samplesref, flite->wave->sample_rate);
flite->pts += nb_samples;
flite->wave_samples += nb_samples * flite->wave->num_channels;
flite->wave_nb_samples -= nb_samples;
......
......@@ -22,6 +22,7 @@
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavcodec/avcodec.h"
#include "audio.h"
#include "avfilter.h"
......@@ -32,69 +33,70 @@ int avfilter_ref_get_channels(AVFilterBufferRef *ref)
return ref->audio ? ref->audio->channels : 0;
}
AVFilterBufferRef *ff_null_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples)
AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
return ff_get_audio_buffer(link->dst->outputs[0], perms, nb_samples);
return ff_get_audio_buffer(link->dst->outputs[0], nb_samples);
}
AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples)
AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
AVFilterBufferRef *samplesref = NULL;
uint8_t **data;
int planar = av_sample_fmt_is_planar(link->format);
int nb_channels = link->channels;
int planes = planar ? nb_channels : 1;
int linesize;
int full_perms = AV_PERM_READ | AV_PERM_WRITE | AV_PERM_PRESERVE |
AV_PERM_REUSE | AV_PERM_REUSE2 | AV_PERM_ALIGN;
av_assert1(!(perms & ~(full_perms | AV_PERM_NEG_LINESIZES)));
if (!(data = av_mallocz(sizeof(*data) * planes)))
AVFrame *frame = av_frame_alloc();
int channels = link->channels;
int buf_size, ret;
av_assert0(channels == av_get_channel_layout_nb_channels(link->channel_layout) || !av_get_channel_layout_nb_channels(link->channel_layout));
if (!frame)
return NULL;
buf_size = av_samples_get_buffer_size(NULL, channels, nb_samples,
link->format, 0);
if (buf_size < 0)
goto fail;
if (av_samples_alloc(data, &linesize, nb_channels, nb_samples, link->format, 0) < 0)
frame->buf[0] = av_buffer_alloc(buf_size);
if (!frame->buf[0])
goto fail;
samplesref = avfilter_get_audio_buffer_ref_from_arrays_channels(
data, linesize, full_perms, nb_samples, link->format,
link->channels, link->channel_layout);
if (!samplesref)
frame->nb_samples = nb_samples;
ret = avcodec_fill_audio_frame(frame, channels, link->format,
frame->buf[0]->data, buf_size, 0);
if (ret < 0)
goto fail;
samplesref->audio->sample_rate = link->sample_rate;
av_samples_set_silence(frame->extended_data, 0, nb_samples, channels,
link->format);
frame->nb_samples = nb_samples;
frame->format = link->format;
frame->channels = link->channels;
frame->channel_layout = link->channel_layout;
frame->sample_rate = link->sample_rate;
av_freep(&data);
return frame;
fail:
if (data)
av_freep(&data[0]);
av_freep(&data);
return samplesref;
av_buffer_unref(&frame->buf[0]);
av_frame_free(&frame);
return NULL;
}
AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples)
AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
AVFilterBufferRef *ret = NULL;
AVFrame *ret = NULL;
if (link->dstpad->get_audio_buffer)
ret = link->dstpad->get_audio_buffer(link, perms, nb_samples);
ret = link->dstpad->get_audio_buffer(link, nb_samples);
if (!ret)
ret = ff_default_get_audio_buffer(link, perms, nb_samples);
if (ret)
ret->type = AVMEDIA_TYPE_AUDIO;
ret = ff_default_get_audio_buffer(link, nb_samples);
return ret;
}
#if FF_API_AVFILTERBUFFER
AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data,
int linesize,
int perms,
int linesize,int perms,
int nb_samples,
enum AVSampleFormat sample_fmt,
int channels,
......@@ -179,3 +181,4 @@ AVFilterBufferRef* avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
nb_samples, sample_fmt,
channels, channel_layout);
}
#endif
......@@ -44,25 +44,21 @@ static const enum AVSampleFormat ff_planar_sample_fmts_array[] = {
};
/** default handler for get_audio_buffer() for audio inputs */
AVFilterBufferRef *ff_default_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples);
AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples);
/** get_audio_buffer() handler for filters which simply pass audio along */
AVFilterBufferRef *ff_null_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples);
AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples);
/**
* Request an audio samples buffer with a specific set of permissions.
*
* @param link the output link to the filter from which the buffer will
* be requested
* @param perms the required access permissions
* @param nb_samples the number of samples per channel
* @return A reference to the samples. This must be unreferenced with
* avfilter_unref_buffer when you are finished with it.
*/
AVFilterBufferRef *ff_get_audio_buffer(AVFilterLink *link, int perms,
int nb_samples);
AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples);
/**
* Send a buffer of audio samples to the next filter.
......
......@@ -27,52 +27,6 @@
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
{
dst->pts = src->pts;
dst->pos = av_frame_get_pkt_pos(src);
dst->format = src->format;
av_dict_free(&dst->metadata);
av_dict_copy(&dst->metadata, av_frame_get_metadata(src), 0);
switch (dst->type) {
case AVMEDIA_TYPE_VIDEO:
dst->video->w = src->width;
dst->video->h = src->height;
dst->video->sample_aspect_ratio = src->sample_aspect_ratio;
dst->video->interlaced = src->interlaced_frame;
dst->video->top_field_first = src->top_field_first;
dst->video->key_frame = src->key_frame;
dst->video->pict_type = src->pict_type;
av_freep(&dst->video->qp_table);
dst->video->qp_table_linesize = 0;
if (src->qscale_table) {
int qsize = src->qstride ? src->qstride * ((src->height+15)/16) : (src->width+15)/16;
dst->video->qp_table = av_malloc(qsize);
if (!dst->video->qp_table)
return AVERROR(ENOMEM);
dst->video->qp_table_linesize = src->qstride;
dst->video->qp_table_size = qsize;
memcpy(dst->video->qp_table, src->qscale_table, qsize);
}
break;
case AVMEDIA_TYPE_AUDIO:
dst->audio->sample_rate = src->sample_rate;
dst->audio->channel_layout = src->channel_layout;
dst->audio->channels = src->channels;
if(src->channels < av_get_channel_layout_nb_channels(src->channel_layout)) {
av_log(NULL, AV_LOG_ERROR, "libavfilter does not support this channel layout\n");
return AVERROR(EINVAL);
}
break;
default:
return AVERROR(EINVAL);
}
return 0;
}
AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame,
int perms)
{
......
......@@ -31,22 +31,6 @@
#include "libavcodec/avcodec.h" // AVFrame
#include "avfilter.h"
/**
* Copy the frame properties of src to dst, without copying the actual
* image data.
*
* @return 0 on success, a negative number on error.
*/
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
/**
* Copy the frame properties and data pointers of src to dst, without copying
* the actual data.
*
* @return 0 on success, a negative number on error.
*/
int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src);
/**
* Create and return a picref reference from the data and properties
* contained in frame.
......@@ -116,16 +100,4 @@ int avfilter_fill_frame_from_buffer_ref(AVFrame *frame,
const AVFilterBufferRef *ref);
#endif
/**
* Add frame data to buffer_src.
*
* @param buffer_src pointer to a buffer source context
* @param frame a frame, or NULL to mark EOF
* @param flags a combination of AV_BUFFERSRC_FLAG_*
* @return >= 0 in case of success, a negative AVERROR code
* in case of failure
*/
int av_buffersrc_add_frame(AVFilterContext *buffer_src,
const AVFrame *frame, int flags);
#endif /* AVFILTER_AVCODEC_H */
......@@ -157,7 +157,7 @@ static int config_output(AVFilterLink *outlink)
}
static void push_frame(AVFilterContext *ctx, unsigned in_no,
AVFilterBufferRef *buf)
AVFrame *buf)
{
ConcatContext *cat = ctx->priv;
unsigned out_no = in_no % ctx->nb_outputs;
......@@ -171,7 +171,7 @@ static void push_frame(AVFilterContext *ctx, unsigned in_no,
/* add duration to input PTS */
if (inlink->sample_rate)
/* use number of audio samples */
in->pts += av_rescale_q(buf->audio->nb_samples,
in->pts += av_rescale_q(buf->nb_samples,
(AVRational){ 1, inlink->sample_rate },
outlink->time_base);
else if (in->nb_frames >= 2)
......@@ -182,7 +182,7 @@ static void push_frame(AVFilterContext *ctx, unsigned in_no,
ff_filter_frame(outlink, buf);
}
static void process_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static void process_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
ConcatContext *cat = ctx->priv;
......@@ -191,7 +191,7 @@ static void process_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
if (in_no < cat->cur_idx) {
av_log(ctx, AV_LOG_ERROR, "Frame after EOF on input %s\n",
ctx->input_pads[in_no].name);
avfilter_unref_buffer(buf);
av_frame_free(&buf);
} else if (in_no >= cat->cur_idx + ctx->nb_outputs) {
ff_bufqueue_add(ctx, &cat->in[in_no].queue, buf);
} else {
......@@ -199,27 +199,25 @@ static void process_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
}
}
static AVFilterBufferRef *get_video_buffer(AVFilterLink *inlink, int perms,
int w, int h)
static AVFrame *get_video_buffer(AVFilterLink *inlink, int w, int h)
{
AVFilterContext *ctx = inlink->dst;
unsigned in_no = FF_INLINK_IDX(inlink);
AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];
return ff_get_video_buffer(outlink, perms, w, h);
return ff_get_video_buffer(outlink, w, h);
}
static AVFilterBufferRef *get_audio_buffer(AVFilterLink *inlink, int perms,
int nb_samples)
static AVFrame *get_audio_buffer(AVFilterLink *inlink, int nb_samples)
{
AVFilterContext *ctx = inlink->dst;
unsigned in_no = FF_INLINK_IDX(inlink);
AVFilterLink *outlink = ctx->outputs[in_no % ctx->nb_outputs];
return ff_get_audio_buffer(outlink, perms, nb_samples);
return ff_get_audio_buffer(outlink, nb_samples);
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
process_frame(inlink, buf);
return 0; /* enhancement: handle error return */
......@@ -256,7 +254,7 @@ static void send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no)
int64_t nb_samples, sent = 0;
int frame_nb_samples;
AVRational rate_tb = { 1, ctx->inputs[in_no]->sample_rate };
AVFilterBufferRef *buf;
AVFrame *buf;
int nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
if (!rate_tb.den)
......@@ -266,7 +264,7 @@ static void send_silence(AVFilterContext *ctx, unsigned in_no, unsigned out_no)
frame_nb_samples = FFMAX(9600, rate_tb.den / 5); /* arbitrary */
while (nb_samples) {
frame_nb_samples = FFMIN(frame_nb_samples, nb_samples);
buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE, frame_nb_samples);
buf = ff_get_audio_buffer(outlink, frame_nb_samples);
if (!buf)
return;
av_samples_set_silence(buf->extended_data, 0, frame_nb_samples,
......@@ -360,7 +358,6 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
for (str = 0; str < cat->nb_streams[type]; str++) {
AVFilterPad pad = {
.type = type,
.min_perms = AV_PERM_READ | AV_PERM_PRESERVE,
.get_video_buffer = get_video_buffer,
.get_audio_buffer = get_audio_buffer,
.filter_frame = filter_frame,
......
......@@ -41,7 +41,7 @@ enum ColorMode { CHANNEL, INTENSITY, NB_CLMODES };
typedef struct {
const AVClass *class;
int w, h;
AVFilterBufferRef *outpicref;
AVFrame *outpicref;
int req_fullfilled;
int nb_display_channels;
int channel_height;
......@@ -122,7 +122,7 @@ static av_cold void uninit(AVFilterContext *ctx)
av_freep(&showspectrum->rdft_data[i]);
av_freep(&showspectrum->rdft_data);
av_freep(&showspectrum->window_func_lut);
avfilter_unref_bufferp(&showspectrum->outpicref);
av_frame_free(&showspectrum->outpicref);
}
static int query_formats(AVFilterContext *ctx)
......@@ -179,7 +179,7 @@ static int config_output(AVFilterLink *outlink)
/* (re-)configuration if the video output changed (or first init) */
if (rdft_bits != showspectrum->rdft_bits) {
size_t rdft_size, rdft_listsize;
AVFilterBufferRef *outpicref;
AVFrame *outpicref;
av_rdft_end(showspectrum->rdft);
showspectrum->rdft = av_rdft_init(rdft_bits, DFT_R2C);
......@@ -219,10 +219,9 @@ static int config_output(AVFilterLink *outlink)
showspectrum->window_func_lut[i] = .5f * (1 - cos(2*M_PI*i / (win_size-1)));
/* prepare the initial picref buffer (black frame) */
avfilter_unref_bufferp(&showspectrum->outpicref);
av_frame_free(&showspectrum->outpicref);
showspectrum->outpicref = outpicref =
ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_PRESERVE|AV_PERM_REUSE2,
outlink->w, outlink->h);
ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpicref)
return AVERROR(ENOMEM);
outlink->sample_aspect_ratio = (AVRational){1,1};
......@@ -253,7 +252,7 @@ inline static void push_frame(AVFilterLink *outlink)
showspectrum->filled = 0;
showspectrum->req_fullfilled = 1;
ff_filter_frame(outlink, avfilter_ref_buffer(showspectrum->outpicref, ~AV_PERM_WRITE));
ff_filter_frame(outlink, av_frame_clone(showspectrum->outpicref));
}
static int request_frame(AVFilterLink *outlink)
......@@ -272,12 +271,12 @@ static int request_frame(AVFilterLink *outlink)
return ret;
}
static int plot_spectrum_column(AVFilterLink *inlink, AVFilterBufferRef *insamples, int nb_samples)
static int plot_spectrum_column(AVFilterLink *inlink, AVFrame *insamples, int nb_samples)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
ShowSpectrumContext *showspectrum = ctx->priv;
AVFilterBufferRef *outpicref = showspectrum->outpicref;
AVFrame *outpicref = showspectrum->outpicref;
/* nb_freq contains the power of two superior or equal to the output image
* height (or half the RDFT window size) */
......@@ -462,11 +461,11 @@ static int plot_spectrum_column(AVFilterLink *inlink, AVFilterBufferRef *insampl
return add_samples;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
ShowSpectrumContext *showspectrum = ctx->priv;
int left_samples = insamples->audio->nb_samples;
int left_samples = insamples->nb_samples;
showspectrum->consumed = 0;
while (left_samples) {
......@@ -475,7 +474,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
left_samples -= added_samples;
}
avfilter_unref_buffer(insamples);
av_frame_free(&insamples);
return 0;
}
......@@ -484,7 +483,6 @@ static const AVFilterPad showspectrum_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL }
};
......
......@@ -44,7 +44,7 @@ typedef struct {
char *rate_str;
AVRational rate;
int buf_idx;
AVFilterBufferRef *outpicref;
AVFrame *outpicref;
int req_fullfilled;
int n;
int sample_count_mod;
......@@ -89,7 +89,7 @@ static av_cold void uninit(AVFilterContext *ctx)
ShowWavesContext *showwaves = ctx->priv;
av_freep(&showwaves->rate_str);
avfilter_unref_bufferp(&showwaves->outpicref);
av_frame_free(&showwaves->outpicref);
}
static int query_formats(AVFilterContext *ctx)
......@@ -190,16 +190,16 @@ static int request_frame(AVFilterLink *outlink)
#define MAX_INT16 ((1<<15) -1)
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
ShowWavesContext *showwaves = ctx->priv;
const int nb_samples = insamples->audio->nb_samples;
AVFilterBufferRef *outpicref = showwaves->outpicref;
const int nb_samples = insamples->nb_samples;
AVFrame *outpicref = showwaves->outpicref;
int linesize = outpicref ? outpicref->linesize[0] : 0;
int16_t *p = (int16_t *)insamples->data[0];
int nb_channels = av_get_channel_layout_nb_channels(insamples->audio->channel_layout);
int nb_channels = av_get_channel_layout_nb_channels(insamples->channel_layout);
int i, j, k, h, ret = 0;
const int n = showwaves->n;
const int x = 255 / (nb_channels * n); /* multiplication factor, pre-computed to avoid in-loop divisions */
......@@ -208,12 +208,11 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
for (i = 0; i < nb_samples; i++) {
if (!showwaves->outpicref) {
showwaves->outpicref = outpicref =
ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_ALIGN,
outlink->w, outlink->h);
ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpicref)
return AVERROR(ENOMEM);
outpicref->video->w = outlink->w;
outpicref->video->h = outlink->h;
outpicref->width = outlink->w;
outpicref->height = outlink->h;
outpicref->pts = insamples->pts +
av_rescale_q((p - (int16_t *)insamples->data[0]) / nb_channels,
(AVRational){ 1, inlink->sample_rate },
......@@ -251,7 +250,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
outpicref = showwaves->outpicref;
}
avfilter_unref_buffer(insamples);
av_frame_free(&insamples);
return ret;
}
......@@ -260,7 +259,6 @@ static const AVFilterPad showwaves_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL }
};
......
This diff is collapsed.
......@@ -37,6 +37,7 @@
#include "libavutil/avutil.h"
#include "libavutil/dict.h"
#include "libavutil/frame.h"
#include "libavutil/log.h"
#include "libavutil/samplefmt.h"
#include "libavutil/pixfmt.h"
......@@ -69,6 +70,7 @@ typedef struct AVFilterLink AVFilterLink;
typedef struct AVFilterPad AVFilterPad;
typedef struct AVFilterFormats AVFilterFormats;
#if FF_API_AVFILTERBUFFER
/**
* A reference-counted buffer data type used by the filter system. Filters
* should not store pointers to this structure directly, but instead use the
......@@ -200,6 +202,7 @@ typedef struct AVFilterBufferRef {
/**
* Copy properties of src to dst, without copying the actual data
*/
attribute_deprecated
void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src);
/**
......@@ -211,6 +214,7 @@ void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *s
* @return a new reference to the buffer with the same properties as the
* old, excluding any permissions denied by pmask
*/
attribute_deprecated
AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask);
/**
......@@ -222,6 +226,7 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask);
* @note it is recommended to use avfilter_unref_bufferp() instead of this
* function
*/
attribute_deprecated
void avfilter_unref_buffer(AVFilterBufferRef *ref);
/**
......@@ -231,11 +236,14 @@ void avfilter_unref_buffer(AVFilterBufferRef *ref);
*
* @param ref pointer to the buffer reference
*/
attribute_deprecated
void avfilter_unref_bufferp(AVFilterBufferRef **ref);
#endif
/**
* Get the number of channels of a buffer reference.
*/
attribute_deprecated
int avfilter_ref_get_channels(AVFilterBufferRef *ref);
#if FF_API_AVFILTERPAD_PUBLIC
......@@ -273,7 +281,7 @@ struct AVFilterPad {
* link must have at least these permissions; this fact is checked by
* asserts. It can be used to optimize buffer allocation.
*/
int min_perms;
attribute_deprecated int min_perms;
/**
* Input pads:
......@@ -287,7 +295,7 @@ struct AVFilterPad {
* Permissions which are automatically removed on outgoing buffers. It
* can be used to optimize buffer allocation.
*/
int rej_perms;
attribute_deprecated int rej_perms;
/**
* @deprecated unused
......@@ -300,7 +308,7 @@ struct AVFilterPad {
*
* Input video pads only.
*/
AVFilterBufferRef *(*get_video_buffer)(AVFilterLink *link, int perms, int w, int h);
AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);
/**
* Callback function to get an audio buffer. If NULL, the filter system will
......@@ -308,8 +316,7 @@ struct AVFilterPad {
*
* Input audio pads only.
*/
AVFilterBufferRef *(*get_audio_buffer)(AVFilterLink *link, int perms,
int nb_samples);
AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);
/**
* @deprecated unused
......@@ -331,7 +338,7 @@ struct AVFilterPad {
* must ensure that frame is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/
int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *frame);
int (*filter_frame)(AVFilterLink *link, AVFrame *frame);
/**
* Frame poll callback. This returns the number of immediately available
......@@ -381,6 +388,8 @@ struct AVFilterPad {
* input pads only.
*/
int needs_fifo;
int needs_writable;
};
#endif
......@@ -616,7 +625,7 @@ struct AVFilterLink {
/**
* Buffer partially filled with samples to achieve a fixed/minimum size.
*/
AVFilterBufferRef *partial_buf;
AVFrame *partial_buf;
/**
* Size of the partial buffer to allocate.
......@@ -701,6 +710,7 @@ void avfilter_link_set_closed(AVFilterLink *link, int closed);
*/
int avfilter_config_links(AVFilterContext *filter);
#if FF_API_AVFILTERBUFFER
/**
* Create a buffer reference wrapped around an already allocated image
* buffer.
......@@ -712,6 +722,7 @@ int avfilter_config_links(AVFilterContext *filter);
* @param h the height of the image specified by the data and linesize arrays
* @param format the pixel format of the image specified by the data and linesize arrays
*/
attribute_deprecated
AVFilterBufferRef *
avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,
int w, int h, enum AVPixelFormat format);
......@@ -730,6 +741,7 @@ avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int lin
* @param sample_fmt the format of each sample in the buffer to allocate
* @param channel_layout the channel layout of the buffer
*/
attribute_deprecated
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
int linesize,
int perms,
......@@ -749,6 +761,7 @@ AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
* @param channel_layout the channel layout of the buffer,
* must be either 0 or consistent with channels
*/
attribute_deprecated
AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data,
int linesize,
int perms,
......@@ -757,6 +770,7 @@ AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **
int channels,
uint64_t channel_layout);
#endif
#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically
......@@ -845,6 +859,26 @@ void avfilter_free(AVFilterContext *filter);
int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
unsigned filt_srcpad_idx, unsigned filt_dstpad_idx);
#if FF_API_AVFILTERBUFFER
/**
* Copy the frame properties of src to dst, without copying the actual
* image data.
*
* @return 0 on success, a negative number on error.
*/
attribute_deprecated
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
/**
* Copy the frame properties and data pointers of src to dst, without copying
* the actual data.
*
* @return 0 on success, a negative number on error.
*/
attribute_deprecated
int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src);
#endif
/**
* @}
*/
......
......@@ -92,84 +92,13 @@ AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask)
return ret;
}
void ff_free_pool(AVFilterPool *pool)
{
int i;
av_assert0(pool->refcount > 0);
for (i = 0; i < POOL_SIZE; i++) {
if (pool->pic[i]) {
AVFilterBufferRef *picref = pool->pic[i];
/* free buffer: picrefs stored in the pool are not
* supposed to contain a free callback */
av_assert0(!picref->buf->refcount);
av_freep(&picref->buf->data[0]);
av_freep(&picref->buf);
av_freep(&picref->audio);
av_assert0(!picref->video || !picref->video->qp_table);
av_freep(&picref->video);
av_freep(&pool->pic[i]);
pool->count--;
}
}
pool->draining = 1;
if (!--pool->refcount) {
av_assert0(!pool->count);
av_free(pool);
}
}
static void store_in_pool(AVFilterBufferRef *ref)
{
int i;
AVFilterPool *pool= ref->buf->priv;
av_assert0(ref->buf->data[0]);
av_assert0(pool->refcount>0);
if (ref->video)
av_freep(&ref->video->qp_table);
if (pool->count == POOL_SIZE) {
AVFilterBufferRef *ref1 = pool->pic[0];
av_freep(&ref1->video);
av_freep(&ref1->audio);
av_freep(&ref1->buf->data[0]);
av_freep(&ref1->buf);
av_free(ref1);
memmove(&pool->pic[0], &pool->pic[1], sizeof(void*)*(POOL_SIZE-1));
pool->count--;
pool->pic[POOL_SIZE-1] = NULL;
}
for (i = 0; i < POOL_SIZE; i++) {
if (!pool->pic[i]) {
pool->pic[i] = ref;
pool->count++;
break;
}
}
if (pool->draining) {
ff_free_pool(pool);
} else
--pool->refcount;
}
void avfilter_unref_buffer(AVFilterBufferRef *ref)
{
if (!ref)
return;
av_assert0(ref->buf->refcount > 0);
if (!(--ref->buf->refcount)) {
if (!ref->buf->free) {
store_in_pool(ref);
return;
}
if (!(--ref->buf->refcount))
ref->buf->free(ref->buf);
}
if (ref->extended_data != ref->data)
av_freep(&ref->extended_data);
if (ref->video)
......@@ -186,6 +115,36 @@ void avfilter_unref_bufferp(AVFilterBufferRef **ref)
*ref = NULL;
}
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
{
dst->pts = src->pts;
dst->pos = av_frame_get_pkt_pos(src);
dst->format = src->format;
av_dict_free(&dst->metadata);
av_dict_copy(&dst->metadata, av_frame_get_metadata(src), 0);
switch (dst->type) {
case AVMEDIA_TYPE_VIDEO:
dst->video->w = src->width;
dst->video->h = src->height;
dst->video->sample_aspect_ratio = src->sample_aspect_ratio;
dst->video->interlaced = src->interlaced_frame;
dst->video->top_field_first = src->top_field_first;
dst->video->key_frame = src->key_frame;
dst->video->pict_type = src->pict_type;
break;
case AVMEDIA_TYPE_AUDIO:
dst->audio->sample_rate = src->sample_rate;
dst->audio->channel_layout = src->channel_layout;
break;
default:
return AVERROR(EINVAL);
}
return 0;
}
void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src)
{
// copy common properties
......@@ -206,40 +165,3 @@ void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *s
av_dict_free(&dst->metadata);
av_dict_copy(&dst->metadata, src->metadata, 0);
}
AVFilterBufferRef *ff_copy_buffer_ref(AVFilterLink *outlink,
AVFilterBufferRef *ref)
{
AVFilterBufferRef *buf;
int channels;
switch (outlink->type) {
case AVMEDIA_TYPE_VIDEO:
buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
ref->video->w, ref->video->h);
if(!buf)
return NULL;
av_image_copy(buf->data, buf->linesize,
(void*)ref->data, ref->linesize,
ref->format, ref->video->w, ref->video->h);
break;
case AVMEDIA_TYPE_AUDIO:
buf = ff_get_audio_buffer(outlink, AV_PERM_WRITE,
ref->audio->nb_samples);
if(!buf)
return NULL;
channels = ref->audio->channels;
av_samples_copy(buf->extended_data, ref->buf->extended_data,
0, 0, ref->audio->nb_samples,
channels,
ref->format);
break;
default:
return NULL;
}
avfilter_copy_buffer_ref_props(buf, ref);
return buf;
}
......@@ -23,7 +23,7 @@
#define AVFILTER_BUFFERQUEUE_H
/**
* FFBufQueue: simple AVFilterBufferRef queue API
* FFBufQueue: simple AVFrame queue API
*
* Note: this API is not thread-safe. Concurrent access to the same queue
* must be protected by a mutex or any synchronization mechanism.
......@@ -47,7 +47,7 @@
* Structure holding the queue
*/
struct FFBufQueue {
AVFilterBufferRef *queue[FF_BUFQUEUE_SIZE];
AVFrame *queue[FF_BUFQUEUE_SIZE];
unsigned short head;
unsigned short available; /**< number of available buffers */
};
......@@ -69,11 +69,11 @@ static inline int ff_bufqueue_is_full(struct FFBufQueue *queue)
* (and unrefed) with a warning before adding the new buffer.
*/
static inline void ff_bufqueue_add(void *log, struct FFBufQueue *queue,
AVFilterBufferRef *buf)
AVFrame *buf)
{
if (ff_bufqueue_is_full(queue)) {
av_log(log, AV_LOG_WARNING, "Buffer queue overflow, dropping.\n");
avfilter_unref_buffer(BUCKET(--queue->available));
av_frame_free(&BUCKET(--queue->available));
}
BUCKET(queue->available++) = buf;
}
......@@ -84,8 +84,8 @@ static inline void ff_bufqueue_add(void *log, struct FFBufQueue *queue,
* Buffer with index 0 is the first buffer in the queue.
* Return NULL if the queue has not enough buffers.
*/
static inline AVFilterBufferRef *ff_bufqueue_peek(struct FFBufQueue *queue,
unsigned index)
static inline AVFrame *ff_bufqueue_peek(struct FFBufQueue *queue,
unsigned index)
{
return index < queue->available ? BUCKET(index) : NULL;
}
......@@ -95,9 +95,9 @@ static inline AVFilterBufferRef *ff_bufqueue_peek(struct FFBufQueue *queue,
*
* Do not use on an empty queue.
*/
static inline AVFilterBufferRef *ff_bufqueue_get(struct FFBufQueue *queue)
static inline AVFrame *ff_bufqueue_get(struct FFBufQueue *queue)
{
AVFilterBufferRef *ret = queue->queue[queue->head];
AVFrame *ret = queue->queue[queue->head];
av_assert0(queue->available);
queue->available--;
queue->queue[queue->head] = NULL;
......@@ -110,8 +110,10 @@ static inline AVFilterBufferRef *ff_bufqueue_get(struct FFBufQueue *queue)
*/
static inline void ff_bufqueue_discard_all(struct FFBufQueue *queue)
{
while (queue->available)
avfilter_unref_buffer(ff_bufqueue_get(queue));
while (queue->available) {
AVFrame *buf = ff_bufqueue_get(queue);
av_frame_free(&buf);
}
}
#undef BUCKET
......
......@@ -35,7 +35,7 @@
#include "internal.h"
typedef struct {
AVFilterBufferRef *cur_buf; ///< last buffer delivered on the sink
AVFrame *cur_frame; ///< last frame delivered on the sink
AVAudioFifo *audio_fifo; ///< FIFO for audio samples
int64_t next_pts; ///< interpolating audio pts
} BufferSinkContext;
......@@ -48,59 +48,71 @@ static av_cold void uninit(AVFilterContext *ctx)
av_audio_fifo_free(sink->audio_fifo);
}
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
BufferSinkContext *s = link->dst->priv;
// av_assert0(!s->cur_buf);
s->cur_buf = buf;
// av_assert0(!s->cur_frame);
s->cur_frame = frame;
return 0;
}
<<<<<<< HEAD
int ff_buffersink_read_compat(AVFilterContext *ctx, AVFilterBufferRef **buf)
||||||| merged common ancestors
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
=======
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
>>>>>>> 7e350379f87e7f74420b4813170fe808e2313911
{
BufferSinkContext *s = ctx->priv;
AVFilterLink *link = ctx->inputs[0];
int ret;
if (!buf)
return ff_poll_frame(ctx->inputs[0]);
if ((ret = ff_request_frame(link)) < 0)
return ret;
if (!s->cur_buf)
if (!s->cur_frame)
return AVERROR(EINVAL);
*buf = s->cur_buf;
s->cur_buf = NULL;
av_frame_move_ref(frame, s->cur_frame);
av_frame_free(&s->cur_frame);
return 0;
}
static int read_from_fifo(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
static int read_from_fifo(AVFilterContext *ctx, AVFrame *frame,
int nb_samples)
{
BufferSinkContext *s = ctx->priv;
AVFilterLink *link = ctx->inputs[0];
AVFilterBufferRef *buf;
AVFrame *tmp;
if (!(buf = ff_get_audio_buffer(link, AV_PERM_WRITE, nb_samples)))
if (!(tmp = ff_get_audio_buffer(link, nb_samples)))
return AVERROR(ENOMEM);
av_audio_fifo_read(s->audio_fifo, (void**)buf->extended_data, nb_samples);
av_audio_fifo_read(s->audio_fifo, (void**)tmp->extended_data, nb_samples);
buf->pts = s->next_pts;
tmp->pts = s->next_pts;
s->next_pts += av_rescale_q(nb_samples, (AVRational){1, link->sample_rate},
link->time_base);
*pbuf = buf;
av_frame_move_ref(frame, tmp);
av_frame_free(&tmp);
return 0;
}
<<<<<<< HEAD
int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
int nb_samples)
||||||| merged common ancestors
int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **pbuf,
int nb_samples)
=======
int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples)
>>>>>>> 7e350379f87e7f74420b4813170fe808e2313911
{
BufferSinkContext *s = ctx->priv;
AVFilterLink *link = ctx->inputs[0];
......@@ -113,38 +125,107 @@ int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef **
}
while (ret >= 0) {
AVFilterBufferRef *buf;
if (av_audio_fifo_size(s->audio_fifo) >= nb_samples)
return read_from_fifo(ctx, pbuf, nb_samples);
return read_from_fifo(ctx, frame, nb_samples);
ret = av_buffersink_read(ctx, &buf);
ret = ff_request_frame(link);
if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo))
return read_from_fifo(ctx, pbuf, av_audio_fifo_size(s->audio_fifo));
return read_from_fifo(ctx, frame, av_audio_fifo_size(s->audio_fifo));
else if (ret < 0)
return ret;
if (buf->pts != AV_NOPTS_VALUE) {
s->next_pts = buf->pts -
if (s->cur_frame->pts != AV_NOPTS_VALUE) {
s->next_pts = s->cur_frame->pts -
av_rescale_q(av_audio_fifo_size(s->audio_fifo),
(AVRational){ 1, link->sample_rate },
link->time_base);
}
ret = av_audio_fifo_write(s->audio_fifo, (void**)buf->extended_data,
buf->audio->nb_samples);
avfilter_unref_buffer(buf);
ret = av_audio_fifo_write(s->audio_fifo, (void**)s->cur_frame->extended_data,
s->cur_frame->nb_samples);
av_frame_free(&s->cur_frame);
}
return ret;
}
#if FF_API_AVFILTERBUFFER
static void compat_free_buffer(AVFilterBuffer *buf)
{
AVFrame *frame = buf->priv;
av_frame_free(&frame);
av_free(buf);
}
static int compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples)
{
AVFilterBufferRef *buf;
AVFrame *frame;
int ret;
if (!pbuf)
return ff_poll_frame(ctx->inputs[0]);
frame = av_frame_alloc();
if (!frame)
return AVERROR(ENOMEM);
if (!nb_samples)
ret = av_buffersink_get_frame(ctx, frame);
else
ret = av_buffersink_get_samples(ctx, frame, nb_samples);
if (ret < 0)
goto fail;
if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
AV_PERM_READ,
frame->width, frame->height,
frame->format);
} else {
buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data,
frame->linesize[0], AV_PERM_READ,
frame->nb_samples,
frame->format,
frame->channel_layout);
}
if (!buf) {
ret = AVERROR(ENOMEM);
goto fail;
}
avfilter_copy_frame_props(buf, frame);
buf->buf->priv = frame;
buf->buf->free = compat_free_buffer;
*pbuf = buf;
return 0;
fail:
av_frame_free(&frame);
return ret;
}
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
{
return compat_read(ctx, buf, 0);
}
int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
int nb_samples)
{
return compat_read(ctx, buf, nb_samples);
}
#endif
static const AVFilterPad avfilter_vsink_buffer_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
.needs_fifo = 1
},
{ NULL }
......@@ -169,7 +250,6 @@ static const AVFilterPad avfilter_asink_abuffer_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
.needs_fifo = 1
},
{ NULL }
......
......@@ -26,6 +26,7 @@
#include "avfilter.h"
#if FF_API_AVFILTERBUFFER
/**
* Struct to use for initializing a buffersink context.
*/
......@@ -94,6 +95,8 @@ void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size);
int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink,
AVFilterBufferRef **bufref, int flags);
/* TODO */
int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags);
/**
* Get the number of immediately available frames.
......@@ -122,6 +125,7 @@ AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx);
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure.
*/
attribute_deprecated
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);
/**
......@@ -140,8 +144,38 @@ int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);
* @warning do not mix this function with av_buffersink_read(). Use only one or
* the other with a single sink, not both.
*/
attribute_deprecated
int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
int nb_samples);
#endif
/**
* Get a frame with filtered data from sink and put it in frame.
*
* @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
* @param frame pointer to an allocated frame that will be filled with data.
* The data must be freed using av_frame_unref() / av_frame_free()
*
* @return >= 0 in case of success, a negative AVERROR code in case of
* failure.
*/
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);
/**
* Same as av_buffersink_get_frame(), but with the ability to specify the number
* of samples read. This function is less efficient than
* av_buffersink_get_frame(), because it copies the data around.
*
* @param ctx pointer to a context of the abuffersink AVFilter.
* @param frame pointer to an allocated frame that will be filled with data.
* The data must be freed using av_frame_unref() / av_frame_free()
* frame will contain exactly nb_samples audio samples, except at
* the end of stream, when it can contain less than nb_samples.
*
* @warning do not mix this function with av_buffersink_get_frame(). Use only one or
* the other with a single sink, not both.
*/
int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples);
/**
* @}
......
This diff is collapsed.
......@@ -68,14 +68,15 @@ int av_buffersrc_add_ref(AVFilterContext *buffer_src,
*/
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src);
#ifdef FF_API_BUFFERSRC_BUFFER
#if FF_API_AVFILTERBUFFER
/**
* Add a buffer to the filtergraph s.
*
* @param buf buffer containing frame data to be passed down the filtergraph.
* This function will take ownership of buf, the user must not free it.
* A NULL buf signals EOF -- i.e. no more frames will be sent to this filter.
* @deprecated Use av_buffersrc_add_ref(s, picref, AV_BUFFERSRC_FLAG_NO_COPY) instead.
*
* @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame()
*/
attribute_deprecated
int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf);
......@@ -85,11 +86,42 @@ int av_buffersrc_buffer(AVFilterContext *s, AVFilterBufferRef *buf);
* Add a frame to the buffer source.
*
* @param s an instance of the buffersrc filter.
* @param frame frame to be added.
* @param frame frame to be added. If the frame is reference counted, this
* function will make a new reference to it. Otherwise the frame data will be
* copied.
*
* @warning frame data will be memcpy()ed, which may be a big performance
* hit. Use av_buffersrc_buffer() to avoid copying the data.
* @return 0 on success, a negative AVERROR on error
*/
int av_buffersrc_write_frame(AVFilterContext *s, const AVFrame *frame);
/**
* Add a frame to the buffer source.
*
* @param s an instance of the buffersrc filter.
* @param frame frame to be added. If the frame is reference counted, this
* function will take ownership of the reference(s) and reset the frame.
* Otherwise the frame data will be copied. If this function returns an error,
* the input frame is not touched.
*
* @return 0 on success, a negative AVERROR on error.
*
* @note the difference between this function and av_buffersrc_write_frame() is
* that av_buffersrc_write_frame() creates a new reference to the input frame,
* while this function takes ownership of the reference passed to it.
*/
int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
/**
* Add frame data to buffer_src. XXX
*
* @param buffer_src pointer to a buffer source context
* @param frame a frame, or NULL to mark EOF
* @param flags a combination of AV_BUFFERSRC_FLAG_*
* @return >= 0 in case of success, a negative AVERROR code
* in case of failure
*/
int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src,
AVFrame *frame, int flags);
#endif /* AVFILTER_BUFFERSRC_H */
......@@ -97,7 +97,7 @@ typedef struct {
struct rect text; ///< rectangle for the LU legend on the left
struct rect graph; ///< rectangle for the main graph in the center
struct rect gauge; ///< rectangle for the gauge on the right
AVFilterBufferRef *outpicref; ///< output picture reference, updated regularly
AVFrame *outpicref; ///< output picture reference, updated regularly
int meter; ///< select a EBU mode between +9 and +18
int scale_range; ///< the range of LU values according to the meter
int y_zero_lu; ///< the y value (pixel position) for 0 LU
......@@ -174,7 +174,7 @@ static const uint8_t font_colors[] = {
0x00, 0x96, 0x96,
};
static void drawtext(AVFilterBufferRef *pic, int x, int y, int ftid, const uint8_t *color, const char *fmt, ...)
static void drawtext(AVFrame *pic, int x, int y, int ftid, const uint8_t *color, const char *fmt, ...)
{
int i;
char buf[128] = {0};
......@@ -207,7 +207,7 @@ static void drawtext(AVFilterBufferRef *pic, int x, int y, int ftid, const uint8
}
}
static void drawline(AVFilterBufferRef *pic, int x, int y, int len, int step)
static void drawline(AVFrame *pic, int x, int y, int len, int step)
{
int i;
uint8_t *p = pic->data[0] + y*pic->linesize[0] + x*3;
......@@ -224,7 +224,7 @@ static int config_video_output(AVFilterLink *outlink)
uint8_t *p;
AVFilterContext *ctx = outlink->src;
EBUR128Context *ebur128 = ctx->priv;
AVFilterBufferRef *outpicref;
AVFrame *outpicref;
/* check if there is enough space to represent everything decently */
if (ebur128->w < 640 || ebur128->h < 480) {
......@@ -259,10 +259,9 @@ static int config_video_output(AVFilterLink *outlink)
av_assert0(ebur128->graph.h == ebur128->gauge.h);
/* prepare the initial picref buffer */
avfilter_unref_bufferp(&ebur128->outpicref);
av_frame_free(&ebur128->outpicref);
ebur128->outpicref = outpicref =
ff_get_video_buffer(outlink, AV_PERM_WRITE|AV_PERM_PRESERVE|AV_PERM_REUSE2,
outlink->w, outlink->h);
ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!outpicref)
return AVERROR(ENOMEM);
outlink->sample_aspect_ratio = (AVRational){1,1};
......@@ -450,15 +449,15 @@ static int gate_update(struct integrator *integ, double power,
return gate_hist_pos;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
int i, ch, idx_insample;
AVFilterContext *ctx = inlink->dst;
EBUR128Context *ebur128 = ctx->priv;
const int nb_channels = ebur128->nb_channels;
const int nb_samples = insamples->audio->nb_samples;
const int nb_samples = insamples->nb_samples;
const double *samples = (double *)insamples->data[0];
AVFilterBufferRef *pic = ebur128->outpicref;
AVFrame *pic = ebur128->outpicref;
for (idx_insample = 0; idx_insample < nb_samples; idx_insample++) {
const int bin_id_400 = ebur128->i400.cache_pos;
......@@ -639,7 +638,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *insamples)
/* set pts and push frame */
pic->pts = pts;
ret = ff_filter_frame(outlink, avfilter_ref_buffer(pic, ~AV_PERM_WRITE));
ret = ff_filter_frame(outlink, av_frame_clone(pic));
if (ret < 0)
return ret;
}
......@@ -738,7 +737,7 @@ static av_cold void uninit(AVFilterContext *ctx)
}
for (i = 0; i < ctx->nb_outputs; i++)
av_freep(&ctx->output_pads[i].name);
avfilter_unref_bufferp(&ebur128->outpicref);
av_frame_free(&ebur128->outpicref);
}
static const AVFilterPad ebur128_inputs[] = {
......
......@@ -134,7 +134,7 @@ typedef struct {
DSPContext c; ///< context providing optimized SAD methods (scene detect only)
double prev_mafd; ///< previous MAFD (scene detect only)
#endif
AVFilterBufferRef *prev_picref; ///< previous frame (scene detect only)
AVFrame *prev_picref; ///< previous frame (scene detect only)
double select;
} SelectContext;
......@@ -219,25 +219,25 @@ static int config_input(AVFilterLink *inlink)
}
#if CONFIG_AVCODEC
static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref)
static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
{
double ret = 0;
SelectContext *select = ctx->priv;
AVFilterBufferRef *prev_picref = select->prev_picref;
AVFrame *prev_picref = select->prev_picref;
if (prev_picref &&
picref->video->h == prev_picref->video->h &&
picref->video->w == prev_picref->video->w &&
picref->linesize[0] == prev_picref->linesize[0]) {
frame->height == prev_picref->height &&
frame->width == prev_picref->width &&
frame->linesize[0] == prev_picref->linesize[0]) {
int x, y, nb_sad = 0;
int64_t sad = 0;
double mafd, diff;
uint8_t *p1 = picref->data[0];
uint8_t *p1 = frame->data[0];
uint8_t *p2 = prev_picref->data[0];
const int linesize = picref->linesize[0];
const int linesize = frame->linesize[0];
for (y = 0; y < picref->video->h - 8; y += 8) {
for (x = 0; x < picref->video->w*3 - 8; x += 8) {
for (y = 0; y < frame->height - 8; y += 8) {
for (x = 0; x < frame->width*3 - 8; x += 8) {
sad += select->c.sad[1](select, p1 + x, p2 + x,
linesize, 8);
nb_sad += 8 * 8;
......@@ -250,9 +250,9 @@ static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref)
diff = fabs(mafd - select->prev_mafd);
ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
select->prev_mafd = mafd;
avfilter_unref_buffer(prev_picref);
av_frame_free(&prev_picref);
}
select->prev_picref = avfilter_ref_buffer(picref, ~0);
select->prev_picref = av_frame_clone(frame);
return ret;
}
#endif
......@@ -260,38 +260,38 @@ static double get_scene_score(AVFilterContext *ctx, AVFilterBufferRef *picref)
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref)
static int select_frame(AVFilterContext *ctx, AVFrame *frame)
{
SelectContext *select = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
double res;
if (isnan(select->var_values[VAR_START_PTS]))
select->var_values[VAR_START_PTS] = TS2D(ref->pts);
select->var_values[VAR_START_PTS] = TS2D(frame->pts);
if (isnan(select->var_values[VAR_START_T]))
select->var_values[VAR_START_T] = TS2D(ref->pts) * av_q2d(inlink->time_base);
select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
select->var_values[VAR_PTS] = TS2D(ref->pts);
select->var_values[VAR_T ] = TS2D(ref->pts) * av_q2d(inlink->time_base);
select->var_values[VAR_POS] = ref->pos == -1 ? NAN : ref->pos;
select->var_values[VAR_PTS] = TS2D(frame->pts);
select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
select->var_values[VAR_POS] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
switch (inlink->type) {
case AVMEDIA_TYPE_AUDIO:
select->var_values[VAR_SAMPLES_N] = ref->audio->nb_samples;
select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
break;
case AVMEDIA_TYPE_VIDEO:
select->var_values[VAR_INTERLACE_TYPE] =
!ref->video->interlaced ? INTERLACE_TYPE_P :
ref->video->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
select->var_values[VAR_PICT_TYPE] = ref->video->pict_type;
!frame->interlaced_frame ? INTERLACE_TYPE_P :
frame->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
select->var_values[VAR_PICT_TYPE] = frame->pict_type;
#if CONFIG_AVCODEC
if (select->do_scene_detect) {
char buf[32];
select->var_values[VAR_SCENE] = get_scene_score(ctx, ref);
select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
// TODO: document metadata
snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
av_dict_set(&ref->metadata, "lavfi.scene_score", buf, 0);
av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0);
}
#endif
break;
......@@ -299,11 +299,10 @@ static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref)
res = av_expr_eval(select->expr, select->var_values, NULL);
av_log(inlink->dst, AV_LOG_DEBUG,
"n:%f pts:%f t:%f pos:%f key:%d",
"n:%f pts:%f t:%f key:%d",
select->var_values[VAR_N],
select->var_values[VAR_PTS],
select->var_values[VAR_T],
select->var_values[VAR_POS],
(int)select->var_values[VAR_KEY]);
switch (inlink->type) {
......@@ -330,7 +329,7 @@ static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref)
select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
select->var_values[VAR_SELECTED_N] += 1.0;
if (inlink->type == AVMEDIA_TYPE_AUDIO)
select->var_values[VAR_CONSUMED_SAMPLES_N] += ref->audio->nb_samples;
select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
}
select->var_values[VAR_N] += 1.0;
......@@ -340,7 +339,7 @@ static int select_frame(AVFilterContext *ctx, AVFilterBufferRef *ref)
return res;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
SelectContext *select = inlink->dst->priv;
......@@ -348,7 +347,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
if (select->select)
return ff_filter_frame(inlink->dst->outputs[0], frame);
avfilter_unref_bufferp(&frame);
av_frame_free(&frame);
return 0;
}
......@@ -378,7 +377,7 @@ static av_cold void uninit(AVFilterContext *ctx)
#if CONFIG_AVCODEC
if (select->do_scene_detect) {
avfilter_unref_bufferp(&select->prev_picref);
av_frame_free(&select->prev_picref);
if (select->avctx) {
avcodec_close(select->avctx);
av_freep(&select->avctx);
......
......@@ -448,7 +448,7 @@ static void av_cold uninit(AVFilterContext *ctx)
av_freep(&sendcmd->intervals);
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref)
static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
{
AVFilterContext *ctx = inlink->dst;
SendCmdContext *sendcmd = ctx->priv;
......
......@@ -138,7 +138,7 @@ static inline char *double2int64str(char *buf, double v)
#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
SetPTSContext *setpts = inlink->dst->priv;
int64_t in_pts = frame->pts;
......@@ -150,16 +150,16 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
}
setpts->var_values[VAR_PTS ] = TS2D(frame->pts);
setpts->var_values[VAR_T ] = TS2T(frame->pts, inlink->time_base);
setpts->var_values[VAR_POS ] = frame->pos == -1 ? NAN : frame->pos;
setpts->var_values[VAR_POS ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
setpts->var_values[VAR_RTCTIME ] = av_gettime();
switch (inlink->type) {
case AVMEDIA_TYPE_VIDEO:
setpts->var_values[VAR_INTERLACED] = frame->video->interlaced;
setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
break;
case AVMEDIA_TYPE_AUDIO:
setpts->var_values[VAR_NB_SAMPLES] = frame->audio->nb_samples;
setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
break;
}
......@@ -192,7 +192,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
setpts->var_values[VAR_PREV_OUTT] = TS2T(frame->pts, inlink->time_base);
setpts->var_values[VAR_N] += 1.0;
if (setpts->type == AVMEDIA_TYPE_AUDIO) {
setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->audio->nb_samples;
setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
}
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
......
......@@ -103,7 +103,7 @@ static int config_output_props(AVFilterLink *outlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
......
......@@ -35,7 +35,7 @@
#include "video.h"
typedef struct Buf {
AVFilterBufferRef *buf;
AVFrame *frame;
struct Buf *next;
} Buf;
......@@ -47,8 +47,8 @@ typedef struct {
* When a specific number of output samples is requested, the partial
* buffer is stored here
*/
AVFilterBufferRef *buf_out;
int allocated_samples; ///< number of samples buf_out was allocated for
AVFrame *out;
int allocated_samples; ///< number of samples out was allocated for
} FifoContext;
static av_cold int init(AVFilterContext *ctx, const char *args)
......@@ -66,25 +66,25 @@ static av_cold void uninit(AVFilterContext *ctx)
for (buf = fifo->root.next; buf; buf = tmp) {
tmp = buf->next;
avfilter_unref_bufferp(&buf->buf);
av_frame_free(&buf->frame);
av_free(buf);
}
avfilter_unref_bufferp(&fifo->buf_out);
av_frame_free(&fifo->out);
}
static int add_to_queue(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int add_to_queue(AVFilterLink *inlink, AVFrame *frame)
{
FifoContext *fifo = inlink->dst->priv;
fifo->last->next = av_mallocz(sizeof(Buf));
if (!fifo->last->next) {
avfilter_unref_buffer(buf);
av_frame_free(&frame);
return AVERROR(ENOMEM);
}
fifo->last = fifo->last->next;
fifo->last->buf = buf;
fifo->last->frame = frame;
return 0;
}
......@@ -101,7 +101,7 @@ static void queue_pop(FifoContext *s)
/**
* Move data pointers and pts offset samples forward.
*/
static void buffer_offset(AVFilterLink *link, AVFilterBufferRef *buf,
static void buffer_offset(AVFilterLink *link, AVFrame *frame,
int offset)
{
int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
......@@ -110,32 +110,32 @@ static void buffer_offset(AVFilterLink *link, AVFilterBufferRef *buf,
int block_align = av_get_bytes_per_sample(link->format) * (planar ? 1 : nb_channels);
int i;
av_assert0(buf->audio->nb_samples > offset);
av_assert0(frame->nb_samples > offset);
for (i = 0; i < planes; i++)
buf->extended_data[i] += block_align*offset;
if (buf->data != buf->extended_data)
memcpy(buf->data, buf->extended_data,
FFMIN(planes, FF_ARRAY_ELEMS(buf->data)) * sizeof(*buf->data));
buf->linesize[0] -= block_align*offset;
buf->audio->nb_samples -= offset;
if (buf->pts != AV_NOPTS_VALUE) {
buf->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate},
link->time_base);
frame->extended_data[i] += block_align * offset;
if (frame->data != frame->extended_data)
memcpy(frame->data, frame->extended_data,
FFMIN(planes, FF_ARRAY_ELEMS(frame->data)) * sizeof(*frame->data));
frame->linesize[0] -= block_align*offset;
frame->nb_samples -= offset;
if (frame->pts != AV_NOPTS_VALUE) {
frame->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate},
link->time_base);
}
}
static int calc_ptr_alignment(AVFilterBufferRef *buf)
static int calc_ptr_alignment(AVFrame *frame)
{
int planes = av_sample_fmt_is_planar(buf->format) ?
av_get_channel_layout_nb_channels(buf->audio->channel_layout) : 1;
int planes = av_sample_fmt_is_planar(frame->format) ?
av_get_channel_layout_nb_channels(frame->channel_layout) : 1;
int min_align = 128;
int p;
for (p = 0; p < planes; p++) {
int cur_align = 128;
while ((intptr_t)buf->extended_data[p] % cur_align)
while ((intptr_t)frame->extended_data[p] % cur_align)
cur_align >>= 1;
if (cur_align < min_align)
min_align = cur_align;
......@@ -147,35 +147,34 @@ static int return_audio_frame(AVFilterContext *ctx)
{
AVFilterLink *link = ctx->outputs[0];
FifoContext *s = ctx->priv;
AVFilterBufferRef *head = s->root.next->buf;
AVFilterBufferRef *buf_out;
AVFrame *head = s->root.next->frame;
AVFrame *out;
int ret;
if (!s->buf_out &&
head->audio->nb_samples >= link->request_samples &&
if (!s->out &&
head->nb_samples >= link->request_samples &&
calc_ptr_alignment(head) >= 32) {
if (head->audio->nb_samples == link->request_samples) {
buf_out = head;
if (head->nb_samples == link->request_samples) {
out = head;
queue_pop(s);
} else {
buf_out = avfilter_ref_buffer(head, AV_PERM_READ);
if (!buf_out)
out = av_frame_clone(head);
if (!out)
return AVERROR(ENOMEM);
buf_out->audio->nb_samples = link->request_samples;
out->nb_samples = link->request_samples;
buffer_offset(link, head, link->request_samples);
}
} else {
int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
if (!s->buf_out) {
s->buf_out = ff_get_audio_buffer(link, AV_PERM_WRITE,
link->request_samples);
if (!s->buf_out)
if (!s->out) {
s->out = ff_get_audio_buffer(link, link->request_samples);
if (!s->out)
return AVERROR(ENOMEM);
s->buf_out->audio->nb_samples = 0;
s->buf_out->pts = head->pts;
s->out->nb_samples = 0;
s->out->pts = head->pts;
s->allocated_samples = link->request_samples;
} else if (link->request_samples != s->allocated_samples) {
av_log(ctx, AV_LOG_ERROR, "request_samples changed before the "
......@@ -183,41 +182,41 @@ static int return_audio_frame(AVFilterContext *ctx)
return AVERROR(EINVAL);
}
while (s->buf_out->audio->nb_samples < s->allocated_samples) {
int len = FFMIN(s->allocated_samples - s->buf_out->audio->nb_samples,
head->audio->nb_samples);
while (s->out->nb_samples < s->allocated_samples) {
int len = FFMIN(s->allocated_samples - s->out->nb_samples,
head->nb_samples);
av_samples_copy(s->buf_out->extended_data, head->extended_data,
s->buf_out->audio->nb_samples, 0, len, nb_channels,
av_samples_copy(s->out->extended_data, head->extended_data,
s->out->nb_samples, 0, len, nb_channels,
link->format);
s->buf_out->audio->nb_samples += len;
s->out->nb_samples += len;
if (len == head->audio->nb_samples) {
avfilter_unref_buffer(head);
if (len == head->nb_samples) {
av_frame_free(&head);
queue_pop(s);
if (!s->root.next &&
(ret = ff_request_frame(ctx->inputs[0])) < 0) {
if (ret == AVERROR_EOF) {
av_samples_set_silence(s->buf_out->extended_data,
s->buf_out->audio->nb_samples,
av_samples_set_silence(s->out->extended_data,
s->out->nb_samples,
s->allocated_samples -
s->buf_out->audio->nb_samples,
s->out->nb_samples,
nb_channels, link->format);
s->buf_out->audio->nb_samples = s->allocated_samples;
s->out->nb_samples = s->allocated_samples;
break;
}
return ret;
}
head = s->root.next->buf;
head = s->root.next->frame;
} else {
buffer_offset(link, head, len);
}
}
buf_out = s->buf_out;
s->buf_out = NULL;
out = s->out;
s->out = NULL;
}
return ff_filter_frame(link, buf_out);
return ff_filter_frame(link, out);
}
static int request_frame(AVFilterLink *outlink)
......@@ -234,7 +233,7 @@ static int request_frame(AVFilterLink *outlink)
if (outlink->request_samples) {
return return_audio_frame(outlink->src);
} else {
ret = ff_filter_frame(outlink, fifo->root.next->buf);
ret = ff_filter_frame(outlink, fifo->root.next->frame);
queue_pop(fifo);
}
......@@ -247,7 +246,6 @@ static const AVFilterPad avfilter_vf_fifo_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = ff_null_get_video_buffer,
.filter_frame = add_to_queue,
.min_perms = AV_PERM_PRESERVE,
},
{ NULL }
};
......@@ -280,7 +278,6 @@ static const AVFilterPad avfilter_af_afifo_inputs[] = {
.type = AVMEDIA_TYPE_AUDIO,
.get_audio_buffer = ff_null_get_audio_buffer,
.filter_frame = add_to_queue,
.min_perms = AV_PERM_PRESERVE,
},
{ NULL }
};
......
......@@ -67,33 +67,13 @@ struct AVFilterPad {
*/
enum AVMediaType type;
/**
* Minimum required permissions on incoming buffers. Any buffer with
* insufficient permissions will be automatically copied by the filter
* system to a new buffer which provides the needed access permissions.
*
* Input pads only.
*/
int min_perms;
/**
* Permissions which are not accepted on incoming buffers. Any buffer
* which has any of these permissions set will be automatically copied
* by the filter system to a new buffer which does not have those
* permissions. This can be used to easily disallow buffers with
* AV_PERM_REUSE.
*
* Input pads only.
*/
int rej_perms;
/**
* Callback function to get a video buffer. If NULL, the filter system will
* use ff_default_get_video_buffer().
*
* Input video pads only.
*/
AVFilterBufferRef *(*get_video_buffer)(AVFilterLink *link, int perms, int w, int h);
AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);
/**
* Callback function to get an audio buffer. If NULL, the filter system will
......@@ -101,8 +81,7 @@ struct AVFilterPad {
*
* Input audio pads only.
*/
AVFilterBufferRef *(*get_audio_buffer)(AVFilterLink *link, int perms,
int nb_samples);
AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);
/**
* Filtering callback. This is where a filter receives a frame with
......@@ -114,7 +93,7 @@ struct AVFilterPad {
* must ensure that samplesref is properly unreferenced on error if it
* hasn't been passed on to another filter.
*/
int (*filter_frame)(AVFilterLink *link, AVFilterBufferRef *frame);
int (*filter_frame)(AVFilterLink *link, AVFrame *frame);
/**
* Frame poll callback. This returns the number of immediately available
......@@ -234,8 +213,6 @@ int ff_parse_channel_layout(int64_t *ret, const char *arg, void *log_ctx);
void ff_update_link_current_pts(AVFilterLink *link, int64_t pts);
void ff_free_pool(AVFilterPool *pool);
void ff_command_queue_pop(AVFilterContext *filter);
/* misc trace functions */
......@@ -252,7 +229,7 @@ void ff_command_queue_pop(AVFilterContext *filter);
char *ff_get_ref_perms_string(char *buf, size_t buf_size, int perms);
void ff_tlog_ref(void *ctx, AVFilterBufferRef *ref, int end);
void ff_tlog_ref(void *ctx, AVFrame *ref, int end);
void ff_tlog_link(void *ctx, AVFilterLink *link, int end);
......@@ -346,6 +323,6 @@ int ff_buffersink_read_samples_compat(AVFilterContext *ctx, AVFilterBufferRef **
* @return >= 0 on success, a negative AVERROR on error. The receiving filter
* is responsible for unreferencing frame in case of error.
*/
int ff_filter_frame(AVFilterLink *link, AVFilterBufferRef *frame);
int ff_filter_frame(AVFilterLink *link, AVFrame *frame);
#endif /* AVFILTER_INTERNAL_H */
......@@ -31,6 +31,8 @@
#include "audio.h"
#include "internal.h"
#include "libavutil/audio_fifo.h"
AVBufferSinkParams *av_buffersink_params_alloc(void)
{
static const int pixel_fmts[] = { AV_PIX_FMT_NONE };
......@@ -88,14 +90,14 @@ static av_cold void common_uninit(AVFilterContext *ctx)
if (buf->fifo) {
while (av_fifo_size(buf->fifo) >= sizeof(AVFilterBufferRef *)) {
av_fifo_generic_read(buf->fifo, &picref, sizeof(picref), NULL);
avfilter_unref_buffer(picref);
av_frame_unref(picref);
}
av_fifo_free(buf->fifo);
buf->fifo = NULL;
}
}
static int add_buffer_ref(AVFilterContext *ctx, AVFilterBufferRef *ref)
static int add_buffer_ref(AVFilterContext *ctx, AVFrame *ref)
{
BufferSinkContext *buf = ctx->priv;
......@@ -114,7 +116,7 @@ static int add_buffer_ref(AVFilterContext *ctx, AVFilterBufferRef *ref)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *ref)
static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
{
AVFilterContext *ctx = inlink->dst;
BufferSinkContext *buf = inlink->dst->priv;
......@@ -141,18 +143,12 @@ void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
inlink->partial_buf_size = frame_size;
}
int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
AVFilterBufferRef **bufref, int flags)
int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
{
BufferSinkContext *buf = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
int ret;
*bufref = NULL;
av_assert0( !strcmp(ctx->filter->name, "buffersink")
|| !strcmp(ctx->filter->name, "abuffersink")
|| !strcmp(ctx->filter->name, "ffbuffersink")
|| !strcmp(ctx->filter->name, "ffabuffersink"));
AVFrame *cur_frame;
/* no picref available, fetch it from the filterchain */
if (!av_fifo_size(buf->fifo)) {
......@@ -165,13 +161,114 @@ int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
if (!av_fifo_size(buf->fifo))
return AVERROR(EINVAL);
if (flags & AV_BUFFERSINK_FLAG_PEEK)
*bufref = *((AVFilterBufferRef **)av_fifo_peek2(buf->fifo, 0));
if (flags & AV_BUFFERSINK_FLAG_PEEK) {
cur_frame = *((AVFrame **)av_fifo_peek2(buf->fifo, 0));
av_frame_ref(frame, cur_frame); /* TODO check failure */
} else {
av_fifo_generic_read(buf->fifo, &cur_frame, sizeof(cur_frame), NULL);
av_frame_move_ref(frame, cur_frame);
av_frame_free(&cur_frame);
}
return 0;
}
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
{
return av_buffersink_get_frame_flags(ctx, frame, 0);
}
int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples)
{
av_assert0(!"TODO");
}
#if FF_API_AVFILTERBUFFER
static void compat_free_buffer(AVFilterBuffer *buf)
{
AVFrame *frame = buf->priv;
av_frame_free(&frame);
av_free(buf);
}
static int compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples, int flags)
{
AVFilterBufferRef *buf;
AVFrame *frame;
int ret;
if (!pbuf)
return ff_poll_frame(ctx->inputs[0]);
frame = av_frame_alloc();
if (!frame)
return AVERROR(ENOMEM);
if (!nb_samples)
ret = av_buffersink_get_frame_flags(ctx, frame, flags);
else
av_fifo_generic_read(buf->fifo, bufref, sizeof(*bufref), NULL);
ret = av_buffersink_get_samples(ctx, frame, nb_samples);
if (ret < 0)
goto fail;
if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
AV_PERM_READ,
frame->width, frame->height,
frame->format);
} else {
buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data,
frame->linesize[0], AV_PERM_READ,
frame->nb_samples,
frame->format,
frame->channel_layout);
}
if (!buf) {
ret = AVERROR(ENOMEM);
goto fail;
}
avfilter_copy_frame_props(buf, frame);
buf->buf->priv = frame;
buf->buf->free = compat_free_buffer;
*pbuf = buf;
return 0;
fail:
av_frame_free(&frame);
return ret;
}
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
{
return compat_read(ctx, buf, 0, 0);
}
int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
int nb_samples)
{
return compat_read(ctx, buf, nb_samples, 0);
}
int av_buffersink_get_buffer_ref(AVFilterContext *ctx,
AVFilterBufferRef **bufref, int flags)
{
BufferSinkContext *buf = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
int ret;
*bufref = NULL;
av_assert0( !strcmp(ctx->filter->name, "buffersink")
|| !strcmp(ctx->filter->name, "abuffersink")
|| !strcmp(ctx->filter->name, "ffbuffersink")
|| !strcmp(ctx->filter->name, "ffabuffersink"));
return compat_read(ctx, bufref, 0, flags);
}
#endif
AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx)
{
......@@ -406,94 +503,3 @@ AVFilter avfilter_asink_abuffersink = {
.inputs = abuffersink_inputs,
.outputs = NULL,
};
/* Libav compatibility API */
extern AVFilter avfilter_vsink_buffer;
extern AVFilter avfilter_asink_abuffer;
int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf)
{
AVFilterBufferRef *tbuf;
int ret;
if (ctx->filter-> inputs[0].start_frame ==
avfilter_vsink_buffer. inputs[0].start_frame ||
ctx->filter-> inputs[0].filter_frame ==
avfilter_asink_abuffer.inputs[0].filter_frame)
return ff_buffersink_read_compat(ctx, buf);
av_assert0(ctx->filter-> inputs[0].end_frame ==
avfilter_vsink_ffbuffersink. inputs[0].end_frame ||
ctx->filter-> inputs[0].filter_frame ==
avfilter_asink_ffabuffersink.inputs[0].filter_frame);
ret = av_buffersink_get_buffer_ref(ctx, &tbuf,
buf ? 0 : AV_BUFFERSINK_FLAG_PEEK);
if (!buf)
return ret >= 0;
if (ret < 0)
return ret;
*buf = tbuf;
return 0;
}
int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
int nb_samples)
{
BufferSinkContext *sink = ctx->priv;
int ret = 0, have_samples = 0, need_samples;
AVFilterBufferRef *tbuf, *in_buf;
AVFilterLink *link = ctx->inputs[0];
int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
if (ctx->filter-> inputs[0].filter_frame ==
avfilter_asink_abuffer.inputs[0].filter_frame)
return ff_buffersink_read_samples_compat(ctx, buf, nb_samples);
av_assert0(ctx->filter-> inputs[0].filter_frame ==
avfilter_asink_ffabuffersink.inputs[0].filter_frame);
tbuf = ff_get_audio_buffer(link, AV_PERM_WRITE, nb_samples);
if (!tbuf)
return AVERROR(ENOMEM);
while (have_samples < nb_samples) {
ret = av_buffersink_get_buffer_ref(ctx, &in_buf,
AV_BUFFERSINK_FLAG_PEEK);
if (ret < 0) {
if (ret == AVERROR_EOF && have_samples) {
nb_samples = have_samples;
ret = 0;
}
break;
}
need_samples = FFMIN(in_buf->audio->nb_samples,
nb_samples - have_samples);
av_samples_copy(tbuf->extended_data, in_buf->extended_data,
have_samples, 0, need_samples,
nb_channels, in_buf->format);
have_samples += need_samples;
if (need_samples < in_buf->audio->nb_samples) {
in_buf->audio->nb_samples -= need_samples;
av_samples_copy(in_buf->extended_data, in_buf->extended_data,
0, need_samples, in_buf->audio->nb_samples,
nb_channels, in_buf->format);
} else {
av_buffersink_get_buffer_ref(ctx, &in_buf, 0);
avfilter_unref_buffer(in_buf);
}
}
tbuf->audio->nb_samples = have_samples;
if (ret < 0) {
av_assert0(!av_fifo_size(sink->fifo));
if (have_samples)
add_buffer_ref(ctx, tbuf);
else
avfilter_unref_buffer(tbuf);
return ret;
}
*buf = tbuf;
return 0;
}
......@@ -68,17 +68,17 @@ static void split_uninit(AVFilterContext *ctx)
av_freep(&ctx->output_pads[i].name);
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
int i, ret = AVERROR_EOF;
for (i = 0; i < ctx->nb_outputs; i++) {
AVFilterBufferRef *buf_out;
AVFrame *buf_out;
if (ctx->outputs[i]->closed)
continue;
buf_out = avfilter_ref_buffer(frame, ~AV_PERM_WRITE);
buf_out = av_frame_clone(frame);
if (!buf_out) {
ret = AVERROR(ENOMEM);
break;
......@@ -88,7 +88,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
if (ret < 0)
break;
}
avfilter_unref_bufferp(&frame);
av_frame_free(&frame);
return ret;
}
......
/*
* Copyright (c) 2008 Vitor Sessak
* Copyright (c) 2010 S.N. Hemanth Meenakshisundaram
* Copyright (c) 2011 Mina Nagy Zaki
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* memory buffer source filter
*/
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
#include "avcodec.h"
#include "buffersrc.h"
#include "asrc_abuffer.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/fifo.h"
#include "libavutil/imgutils.h"
typedef struct {
AVFifoBuffer *fifo;
AVRational time_base; ///< time_base to set in the output link
int eof;
unsigned nb_failed_requests;
/* Video only */
AVFilterContext *scale;
int h, w;
enum AVPixelFormat pix_fmt;
AVRational sample_aspect_ratio;
char sws_param[256];
/* Audio only */
// Audio format of incoming buffers
int sample_rate;
unsigned int sample_format;
int64_t channel_layout;
// Normalization filters
AVFilterContext *aconvert;
AVFilterContext *aresample;
} BufferSourceContext;
static void buf_free(AVFilterBuffer *ptr)
{
av_free(ptr);
return;
}
int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *ctx,
AVFilterBufferRef *samplesref,
int av_unused flags)
{
return av_buffersrc_add_ref(ctx, samplesref, AV_BUFFERSRC_FLAG_NO_COPY);
}
int av_asrc_buffer_add_samples(AVFilterContext *ctx,
uint8_t *data[8], int linesize[8],
int nb_samples, int sample_rate,
int sample_fmt, int64_t channel_layout, int planar,
int64_t pts, int av_unused flags)
{
AVFilterBufferRef *samplesref;
if (!channel_layout)
return AVERROR(EINVAL);
samplesref = avfilter_get_audio_buffer_ref_from_arrays(
data, linesize[0], AV_PERM_WRITE,
nb_samples,
sample_fmt, channel_layout);
if (!samplesref)
return AVERROR(ENOMEM);
samplesref->buf->free = buf_free;
samplesref->pts = pts;
samplesref->audio->sample_rate = sample_rate;
AV_NOWARN_DEPRECATED(
return av_asrc_buffer_add_audio_buffer_ref(ctx, samplesref, 0);
)
}
int av_asrc_buffer_add_buffer(AVFilterContext *ctx,
uint8_t *buf, int buf_size, int sample_rate,
int sample_fmt, int64_t channel_layout, int planar,
int64_t pts, int av_unused flags)
{
uint8_t *data[8] = {0};
int linesize[8];
int nb_channels = av_get_channel_layout_nb_channels(channel_layout),
nb_samples = buf_size / nb_channels / av_get_bytes_per_sample(sample_fmt);
av_samples_fill_arrays(data, linesize,
buf, nb_channels, nb_samples,
sample_fmt, 16);
AV_NOWARN_DEPRECATED(
return av_asrc_buffer_add_samples(ctx,
data, linesize, nb_samples,
sample_rate,
sample_fmt, channel_layout, planar,
pts, flags);
)
}
......@@ -313,11 +313,6 @@ static av_cold int movie_common_init(AVFilterContext *ctx, const char *args, con
}
}
if (!(movie->frame = avcodec_alloc_frame()) ) {
av_log(log, AV_LOG_ERROR, "Failed to alloc frame\n");
return AVERROR(ENOMEM);
}
av_log(ctx, AV_LOG_VERBOSE, "seek_point:%"PRIi64" format_name:%s file_name:%s stream_index:%d\n",
movie->seek_point, movie->format_name, movie->file_name,
movie->stream_index);
......@@ -339,7 +334,7 @@ static av_cold void movie_uninit(AVFilterContext *ctx)
av_freep(&movie->file_name);
av_freep(&movie->st);
av_freep(&movie->out_index);
avcodec_free_frame(&movie->frame);
av_frame_free(&movie->frame);
if (movie->format_ctx)
avformat_close_input(&movie->format_ctx);
}
......@@ -399,54 +394,34 @@ static int movie_config_output_props(AVFilterLink *outlink)
return 0;
}
static AVFilterBufferRef *frame_to_buf(enum AVMediaType type, AVFrame *frame,
AVFilterLink *outlink)
{
AVFilterBufferRef *buf, *copy;
buf = avfilter_get_buffer_ref_from_frame(type, frame,
AV_PERM_WRITE |
AV_PERM_PRESERVE |
AV_PERM_REUSE2);
if (!buf)
return NULL;
buf->pts = av_frame_get_best_effort_timestamp(frame);
copy = ff_copy_buffer_ref(outlink, buf);
if (!copy)
return NULL;
buf->buf->data[0] = NULL; /* it belongs to the frame */
avfilter_unref_buffer(buf);
return copy;
}
static char *describe_bufref_to_str(char *dst, size_t dst_size,
AVFilterBufferRef *buf,
static char *describe_frame_to_str(char *dst, size_t dst_size,
AVFrame *frame,
AVFilterLink *link)
{
switch (buf->type) {
switch (frame->type) {
case AVMEDIA_TYPE_VIDEO:
snprintf(dst, dst_size,
"video pts:%s time:%s pos:%"PRId64" size:%dx%d aspect:%d/%d",
av_ts2str(buf->pts), av_ts2timestr(buf->pts, &link->time_base),
buf->pos, buf->video->w, buf->video->h,
buf->video->sample_aspect_ratio.num,
buf->video->sample_aspect_ratio.den);
"video pts:%s time:%s size:%dx%d aspect:%d/%d",
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &link->time_base),
frame->width, frame->height,
frame->sample_aspect_ratio.num,
frame->sample_aspect_ratio.den);
break;
case AVMEDIA_TYPE_AUDIO:
snprintf(dst, dst_size,
"audio pts:%s time:%s pos:%"PRId64" samples:%d",
av_ts2str(buf->pts), av_ts2timestr(buf->pts, &link->time_base),
buf->pos, buf->audio->nb_samples);
"audio pts:%s time:%s samples:%d",
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &link->time_base),
frame->nb_samples);
break;
default:
snprintf(dst, dst_size, "%s BUG", av_get_media_type_string(buf->type));
snprintf(dst, dst_size, "%s BUG", av_get_media_type_string(frame->type));
break;
}
return dst;
}
#define describe_bufref(buf, link) \
describe_bufref_to_str((char[1024]){0}, 1024, buf, link)
#define describe_frameref(f, link) \
describe_frame_to_str((char[1024]){0}, 1024, f, link)
static int rewind_file(AVFilterContext *ctx)
{
......@@ -489,7 +464,6 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
MovieStream *st;
int ret, got_frame = 0, pkt_out_id;
AVFilterLink *outlink;
AVFilterBufferRef *buf;
if (!pkt->size) {
if (movie->eof) {
......@@ -532,6 +506,10 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
st = &movie->st[pkt_out_id];
outlink = ctx->outputs[pkt_out_id];
movie->frame = av_frame_alloc();
if (!movie->frame)
return AVERROR(ENOMEM);
switch (st->st->codec->codec_type) {
case AVMEDIA_TYPE_VIDEO:
ret = avcodec_decode_video2(st->st->codec, movie->frame, &got_frame, pkt);
......@@ -545,6 +523,7 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
}
if (ret < 0) {
av_log(ctx, AV_LOG_WARNING, "Decode error: %s\n", av_err2str(ret));
av_frame_free(&movie->frame);
return 0;
}
if (!ret)
......@@ -560,23 +539,16 @@ static int movie_push_frame(AVFilterContext *ctx, unsigned out_id)
if (!got_frame) {
if (!ret)
st->done = 1;
av_frame_free(&movie->frame);
return 0;
}
buf = frame_to_buf(st->st->codec->codec_type, movie->frame, outlink);
if (!buf)
return AVERROR(ENOMEM);
av_dlog(ctx, "movie_push_frame(): file:'%s' %s\n", movie->file_name,
describe_bufref(buf, outlink));
switch (st->st->codec->codec_type) {
case AVMEDIA_TYPE_VIDEO:
if (!movie->frame->sample_aspect_ratio.num)
buf->video->sample_aspect_ratio = st->st->sample_aspect_ratio;
/* Fall through */
case AVMEDIA_TYPE_AUDIO:
ff_filter_frame(outlink, buf);
break;
}
describe_frameref(movie->frame, outlink));
movie->frame->pts = av_frame_get_best_effort_timestamp(movie->frame);
ff_filter_frame(outlink, movie->frame); // FIXME: raise error properly
movie->frame = NULL;
return pkt_out_id == out_id;
}
......
......@@ -60,5 +60,8 @@
#ifndef FF_API_BUFFERSRC_BUFFER
#define FF_API_BUFFERSRC_BUFFER (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#ifndef FF_API_AVFILTERBUFFER
#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 4)
#endif
#endif /* AVFILTER_VERSION_H */
......@@ -60,19 +60,18 @@ static int config_input(AVFilterLink *inlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur_buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *cur_buf)
{
AlphaExtractContext *extract = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *out_buf =
ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
AVFrame *out_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
int ret;
if (!out_buf) {
ret = AVERROR(ENOMEM);
goto end;
}
avfilter_copy_buffer_ref_props(out_buf, cur_buf);
av_frame_copy_props(out_buf, cur_buf);
if (extract->is_packed_rgb) {
int x, y;
......@@ -99,7 +98,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *cur_buf)
ret = ff_filter_frame(outlink, out_buf);
end:
avfilter_unref_buffer(cur_buf);
av_frame_unref(cur_buf);
return ret;
}
......@@ -109,7 +108,6 @@ static const AVFilterPad alphaextract_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ,
},
{ NULL }
};
......
......@@ -96,11 +96,11 @@ static int config_output(AVFilterLink *outlink)
}
static void draw_frame(AVFilterContext *ctx,
AVFilterBufferRef *main_buf,
AVFilterBufferRef *alpha_buf)
AVFrame *main_buf,
AVFrame *alpha_buf)
{
AlphaMergeContext *merge = ctx->priv;
int h = main_buf->video->h;
int h = main_buf->height;
if (merge->is_packed_rgb) {
int x, y;
......@@ -108,7 +108,7 @@ static void draw_frame(AVFilterContext *ctx,
for (y = 0; y < h; y++) {
pin = alpha_buf->data[0] + y * alpha_buf->linesize[0];
pout = main_buf->data[0] + y * main_buf->linesize[0] + merge->rgba_map[A];
for (x = 0; x < main_buf->video->w; x++) {
for (x = 0; x < main_buf->width; x++) {
*pout = *pin;
pin += 1;
pout += 4;
......@@ -118,7 +118,7 @@ static void draw_frame(AVFilterContext *ctx,
int y;
const int main_linesize = main_buf->linesize[A];
const int alpha_linesize = alpha_buf->linesize[Y];
for (y = 0; y < h && y < alpha_buf->video->h; y++) {
for (y = 0; y < h && y < alpha_buf->height; y++) {
memcpy(main_buf->data[A] + y * main_linesize,
alpha_buf->data[Y] + y * alpha_linesize,
FFMIN(main_linesize, alpha_linesize));
......@@ -126,7 +126,7 @@ static void draw_frame(AVFilterContext *ctx,
}
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
AlphaMergeContext *merge = ctx->priv;
......@@ -137,7 +137,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
ff_bufqueue_add(ctx, queue, buf);
while (1) {
AVFilterBufferRef *main_buf, *alpha_buf;
AVFrame *main_buf, *alpha_buf;
if (!ff_bufqueue_peek(&merge->queue_main, 0) ||
!ff_bufqueue_peek(&merge->queue_alpha, 0)) break;
......@@ -148,7 +148,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
merge->frame_requested = 0;
draw_frame(ctx, main_buf, alpha_buf);
ff_filter_frame(ctx->outputs[0], main_buf);
avfilter_unref_buffer(alpha_buf);
av_frame_free(&alpha_buf);
}
return 0;
}
......
......@@ -80,11 +80,11 @@ static av_cold int init(AVFilterContext *ctx, const char *args, const AVClass *c
return 0;
}
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AspectContext *aspect = link->dst->priv;
frame->video->sample_aspect_ratio = aspect->ratio;
frame->sample_aspect_ratio = aspect->ratio;
return ff_filter_frame(link->dst->outputs[0], frame);
}
......
......@@ -56,7 +56,7 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
BBoxContext *bbox = ctx->priv;
......@@ -65,14 +65,14 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
has_bbox =
ff_calculate_bounding_box(&box,
picref->data[0], picref->linesize[0],
frame->data[0], frame->linesize[0],
inlink->w, inlink->h, 16);
w = box.x2 - box.x1 + 1;
h = box.y2 - box.y1 + 1;
av_log(ctx, AV_LOG_INFO,
"n:%d pts:%s pts_time:%s", bbox->frame,
av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base));
av_ts2str(frame->pts), av_ts2timestr(frame->pts, &inlink->time_base));
if (has_bbox) {
av_log(ctx, AV_LOG_INFO,
......@@ -85,7 +85,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
av_log(ctx, AV_LOG_INFO, "\n");
bbox->frame++;
return ff_filter_frame(inlink->dst->outputs[0], picref);
return ff_filter_frame(inlink->dst->outputs[0], frame);
}
static const AVFilterPad bbox_inputs[] = {
......
......@@ -146,7 +146,7 @@ static int request_frame(AVFilterLink *outlink)
return ret;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
AVFilterContext *ctx = inlink->dst;
BlackDetectContext *blackdetect = ctx->priv;
......@@ -163,10 +163,10 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *picref)
picture_black_ratio = (double)blackdetect->nb_black_pixels / (inlink->w * inlink->h);
av_log(ctx, AV_LOG_DEBUG,
"frame:%u picture_black_ratio:%f pos:%"PRId64" pts:%s t:%s type:%c\n",
"frame:%u picture_black_ratio:%f pts:%s t:%s type:%c\n",
blackdetect->frame_count, picture_black_ratio,
picref->pos, av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base),
av_get_picture_type_char(picref->video->pict_type));
av_ts2str(picref->pts), av_ts2timestr(picref->pts, &inlink->time_base),
av_get_picture_type_char(picref->pict_type));
if (picture_black_ratio >= blackdetect->picture_black_ratio_th) {
if (!blackdetect->black_started) {
......
......@@ -81,7 +81,7 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
BlackFrameContext *blackframe = ctx->priv;
......@@ -89,22 +89,22 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
int pblack = 0;
uint8_t *p = frame->data[0];
for (i = 0; i < frame->video->h; i++) {
for (i = 0; i < frame->height; i++) {
for (x = 0; x < inlink->w; x++)
blackframe->nblack += p[x] < blackframe->bthresh;
p += frame->linesize[0];
}
if (frame->video->key_frame)
if (frame->key_frame)
blackframe->last_keyframe = blackframe->frame;
pblack = blackframe->nblack * 100 / (inlink->w * inlink->h);
if (pblack >= blackframe->bamount)
av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pos:%"PRId64" pts:%"PRId64" t:%f "
av_log(ctx, AV_LOG_INFO, "frame:%u pblack:%u pts:%"PRId64" t:%f "
"type:%c last_keyframe:%d\n",
blackframe->frame, pblack, frame->pos, frame->pts,
blackframe->frame, pblack, frame->pts,
frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
av_get_picture_type_char(frame->video->pict_type), blackframe->last_keyframe);
av_get_picture_type_char(frame->pict_type), blackframe->last_keyframe);
blackframe->frame++;
blackframe->nblack = 0;
......
......@@ -368,9 +368,9 @@ static int request_frame(AVFilterLink *outlink)
}
static void blend_frame(AVFilterContext *ctx,
AVFilterBufferRef *top_buf,
AVFilterBufferRef *bottom_buf,
AVFilterBufferRef *dst_buf)
AVFrame *top_buf,
AVFrame *bottom_buf,
AVFrame *dst_buf)
{
BlendContext *b = ctx->priv;
AVFilterLink *inlink = ctx->inputs[0];
......@@ -380,8 +380,8 @@ static void blend_frame(AVFilterContext *ctx,
for (plane = 0; dst_buf->data[plane]; plane++) {
int hsub = plane == 1 || plane == 2 ? b->hsub : 0;
int vsub = plane == 1 || plane == 2 ? b->vsub : 0;
int outw = dst_buf->video->w >> hsub;
int outh = dst_buf->video->h >> vsub;
int outw = dst_buf->width >> hsub;
int outh = dst_buf->height >> vsub;
uint8_t *dst = dst_buf->data[plane];
uint8_t *top = top_buf->data[plane];
uint8_t *bottom = bottom_buf->data[plane];
......@@ -390,15 +390,15 @@ static void blend_frame(AVFilterContext *ctx,
param->values[VAR_T] = dst_buf->pts == AV_NOPTS_VALUE ? NAN : dst_buf->pts * av_q2d(inlink->time_base);
param->values[VAR_W] = outw;
param->values[VAR_H] = outh;
param->values[VAR_SW] = outw / dst_buf->video->w;
param->values[VAR_SH] = outh / dst_buf->video->h;
param->values[VAR_SW] = outw / dst_buf->width;
param->values[VAR_SH] = outh / dst_buf->height;
param->blend(top, top_buf->linesize[plane],
bottom, bottom_buf->linesize[plane],
dst, dst_buf->linesize[plane], outw, outh, param);
}
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
......@@ -411,7 +411,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
ff_bufqueue_add(ctx, queue, buf);
while (1) {
AVFilterBufferRef *top_buf, *bottom_buf, *out_buf;
AVFrame *top_buf, *bottom_buf, *out_buf;
if (!ff_bufqueue_peek(&b->queue_top, TOP) ||
!ff_bufqueue_peek(&b->queue_bottom, BOTTOM)) break;
......@@ -419,18 +419,17 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
top_buf = ff_bufqueue_get(&b->queue_top);
bottom_buf = ff_bufqueue_get(&b->queue_bottom);
out_buf = ff_get_video_buffer(outlink, AV_PERM_WRITE,
outlink->w, outlink->h);
out_buf = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out_buf) {
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out_buf, top_buf);
av_frame_copy_props(out_buf, top_buf);
b->frame_requested = 0;
blend_frame(ctx, top_buf, bottom_buf, out_buf);
ret = ff_filter_frame(ctx->outputs[0], out_buf);
avfilter_unref_buffer(top_buf);
avfilter_unref_buffer(bottom_buf);
av_frame_free(&top_buf);
av_frame_free(&bottom_buf);
}
return ret;
}
......@@ -441,12 +440,10 @@ static const AVFilterPad blend_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input_top,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_PRESERVE,
},{
.name = "bottom",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ | AV_PERM_PRESERVE,
},
{ NULL }
};
......
......@@ -328,23 +328,23 @@ static void vblur(uint8_t *dst, int dst_linesize, const uint8_t *src, int src_li
h, radius, power, temp);
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
BoxBlurContext *boxblur = ctx->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFilterBufferRef *out;
AVFrame *out;
int plane;
int cw = inlink->w >> boxblur->hsub, ch = in->video->h >> boxblur->vsub;
int cw = inlink->w >> boxblur->hsub, ch = in->height >> boxblur->vsub;
int w[4] = { inlink->w, cw, cw, inlink->w };
int h[4] = { in->video->h, ch, ch, in->video->h };
int h[4] = { in->height, ch, ch, in->height };
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
av_frame_copy_props(out, in);
for (plane = 0; in->data[plane] && plane < 4; plane++)
hblur(out->data[plane], out->linesize[plane],
......@@ -358,7 +358,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
w[plane], h[plane], boxblur->radius[plane], boxblur->power[plane],
boxblur->temp);
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
......@@ -369,7 +369,6 @@ static const AVFilterPad avfilter_vf_boxblur_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.filter_frame = filter_frame,
.min_perms = AV_PERM_READ
},
{ NULL }
};
......
......@@ -183,12 +183,12 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
}
static void process_frame_uyvy422(ColorMatrixContext *color,
AVFilterBufferRef *dst, AVFilterBufferRef *src)
AVFrame *dst, AVFrame *src)
{
const unsigned char *srcp = src->data[0];
const int src_pitch = src->linesize[0];
const int height = src->video->h;
const int width = src->video->w*2;
const int height = src->height;
const int width = src->width*2;
unsigned char *dstp = dst->data[0];
const int dst_pitch = dst->linesize[0];
const int c2 = color->yuv_convert[color->mode][0][1];
......@@ -215,15 +215,15 @@ static void process_frame_uyvy422(ColorMatrixContext *color,
}
static void process_frame_yuv422p(ColorMatrixContext *color,
AVFilterBufferRef *dst, AVFilterBufferRef *src)
AVFrame *dst, AVFrame *src)
{
const unsigned char *srcpU = src->data[1];
const unsigned char *srcpV = src->data[2];
const unsigned char *srcpY = src->data[0];
const int src_pitchY = src->linesize[0];
const int src_pitchUV = src->linesize[1];
const int height = src->video->h;
const int width = src->video->w;
const int height = src->height;
const int width = src->width;
unsigned char *dstpU = dst->data[1];
unsigned char *dstpV = dst->data[2];
unsigned char *dstpY = dst->data[0];
......@@ -257,7 +257,7 @@ static void process_frame_yuv422p(ColorMatrixContext *color,
}
static void process_frame_yuv420p(ColorMatrixContext *color,
AVFilterBufferRef *dst, AVFilterBufferRef *src)
AVFrame *dst, AVFrame *src)
{
const unsigned char *srcpU = src->data[1];
const unsigned char *srcpV = src->data[2];
......@@ -265,8 +265,8 @@ static void process_frame_yuv420p(ColorMatrixContext *color,
const unsigned char *srcpN = src->data[0] + src->linesize[0];
const int src_pitchY = src->linesize[0];
const int src_pitchUV = src->linesize[1];
const int height = src->video->h;
const int width = src->video->w;
const int height = src->height;
const int width = src->width;
unsigned char *dstpU = dst->data[1];
unsigned char *dstpV = dst->data[2];
unsigned char *dstpY = dst->data[0];
......@@ -332,19 +332,19 @@ static int query_formats(AVFilterContext *ctx)
return 0;
}
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *link, AVFrame *in)
{
AVFilterContext *ctx = link->dst;
ColorMatrixContext *color = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFilterBufferRef *out;
AVFrame *out;
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
av_frame_copy_props(out, in);
if (in->format == AV_PIX_FMT_YUV422P)
process_frame_yuv422p(color, out, in);
......@@ -353,7 +353,7 @@ static int filter_frame(AVFilterLink *link, AVFilterBufferRef *in)
else
process_frame_uyvy422(color, out, in);
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
......@@ -362,7 +362,6 @@ static const AVFilterPad colormatrix_inputs[] = {
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.config_props = config_input,
.min_perms = AV_PERM_READ,
.filter_frame = filter_frame,
},
{ NULL }
......
......@@ -21,17 +21,35 @@
* copy video filter
*/
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterLink *outlink = inlink->dst->outputs[0];
AVFrame *out = ff_get_video_buffer(outlink, in->width, in->height);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
av_image_copy(out->data, out->linesize, in->data, in->linesize,
in->format, in->width, in->height);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
static const AVFilterPad avfilter_vf_copy_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = ff_null_get_video_buffer,
.rej_perms = ~0
.filter_frame = filter_frame,
},
{ NULL }
};
......
......@@ -70,7 +70,6 @@ enum var_name {
VAR_X,
VAR_Y,
VAR_N,
VAR_POS,
VAR_T,
VAR_VARS_NB
};
......@@ -198,7 +197,6 @@ static int config_input(AVFilterLink *link)
crop->var_values[VAR_OUT_H] = crop->var_values[VAR_OH] = NAN;
crop->var_values[VAR_N] = 0;
crop->var_values[VAR_T] = NAN;
crop->var_values[VAR_POS] = NAN;
av_image_fill_max_pixsteps(crop->max_step, NULL, pix_desc);
crop->hsub = pix_desc->log2_chroma_w;
......@@ -277,19 +275,18 @@ static int config_output(AVFilterLink *link)
return 0;
}
static int filter_frame(AVFilterLink *link, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *link, AVFrame *frame)
{
AVFilterContext *ctx = link->dst;
CropContext *crop = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(link->format);
int i;
frame->video->w = crop->w;
frame->video->h = crop->h;
frame->width = crop->w;
frame->height = crop->h;
crop->var_values[VAR_T] = frame->pts == AV_NOPTS_VALUE ?
NAN : frame->pts * av_q2d(link->time_base);
crop->var_values[VAR_POS] = frame->pos == -1 ? NAN : frame->pos;
crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL);
crop->var_values[VAR_Y] = av_expr_eval(crop->y_pexpr, crop->var_values, NULL);
crop->var_values[VAR_X] = av_expr_eval(crop->x_pexpr, crop->var_values, NULL);
......
......@@ -117,7 +117,7 @@ static int config_input(AVFilterLink *inlink)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
CropDetectContext *cd = ctx->priv;
......@@ -128,36 +128,36 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
if (++cd->frame_nb > 0) {
// Reset the crop area every reset_count frames, if reset_count is > 0
if (cd->reset_count > 0 && cd->frame_nb > cd->reset_count) {
cd->x1 = frame->video->w-1;
cd->y1 = frame->video->h-1;
cd->x1 = frame->width - 1;
cd->y1 = frame->height - 1;
cd->x2 = 0;
cd->y2 = 0;
cd->frame_nb = 1;
}
for (y = 0; y < cd->y1; y++) {
if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->video->w, bpp) > cd->limit) {
if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > cd->limit) {
cd->y1 = y;
break;
}
}
for (y = frame->video->h-1; y > cd->y2; y--) {
if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->video->w, bpp) > cd->limit) {
for (y = frame->height - 1; y > cd->y2; y--) {
if (checkline(ctx, frame->data[0] + frame->linesize[0] * y, bpp, frame->width, bpp) > cd->limit) {
cd->y2 = y;
break;
}
}
for (y = 0; y < cd->x1; y++) {
if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->video->h, bpp) > cd->limit) {
if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > cd->limit) {
cd->x1 = y;
break;
}
}
for (y = frame->video->w-1; y > cd->x2; y--) {
if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->video->h, bpp) > cd->limit) {
for (y = frame->width - 1; y > cd->x2; y--) {
if (checkline(ctx, frame->data[0] + bpp*y, frame->linesize[0], frame->height, bpp) > cd->limit) {
cd->x2 = y;
break;
}
......@@ -187,8 +187,8 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *frame)
y += (shrink_by/2 + 1) & ~1;
av_log(ctx, AV_LOG_INFO,
"x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pos:%"PRId64" pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y, frame->pos, frame->pts,
"x1:%d x2:%d y1:%d y2:%d w:%d h:%d x:%d y:%d pts:%"PRId64" t:%f crop=%d:%d:%d:%d\n",
cd->x1, cd->x2, cd->y1, cd->y2, w, h, x, y, frame->pts,
frame->pts == AV_NOPTS_VALUE ? -1 : frame->pts * av_q2d(inlink->time_base),
w, h, x, y);
}
......
This diff is collapsed.
......@@ -209,27 +209,28 @@ static av_cold int init(AVFilterContext *ctx, const char *args)
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
DelogoContext *delogo = inlink->dst->priv;
AVFilterLink *outlink = inlink->dst->outputs[0];
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
AVFilterBufferRef *out;
AVFrame *out;
int hsub0 = desc->log2_chroma_w;
int vsub0 = desc->log2_chroma_h;
int direct = 0;
int plane;
if (in->perms & AV_PERM_WRITE) {
if (av_frame_is_writable(in)) {
direct = 1;
out = in;
} else {
out = ff_get_video_buffer(outlink, AV_PERM_WRITE, outlink->w, outlink->h);
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return AVERROR(ENOMEM);
}
avfilter_copy_buffer_ref_props(out, in);
av_frame_copy_props(out, in);
}
for (plane = 0; plane < 4 && in->data[plane]; plane++) {
......@@ -246,7 +247,7 @@ static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *in)
}
if (!direct)
avfilter_unref_bufferp(&in);
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
......@@ -257,7 +258,6 @@ static const AVFilterPad avfilter_vf_delogo_inputs[] = {
.type = AVMEDIA_TYPE_VIDEO,
.get_video_buffer = ff_null_get_video_buffer,
.filter_frame = filter_frame,
.min_perms = AV_PERM_WRITE | AV_PERM_READ,
},
{ NULL }
};
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment