Commit a3a0230a authored by Anton Khirnov

avconv: init filtergraphs only after we have a frame on each input

This makes sure the actual stream parameters are used, which matters mainly
for hardware decoding+filtering cases. Previously those required various
awkward workarounds to deal with the fact that a fake software graph had to
be constructed but was never used.

This should also improve behaviour in rare cases where
avformat_find_stream_info() does not provide accurate information.
parent 3e265ca5
This diff is collapsed.
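(For context: the collapsed diff above is where the new decode-side behaviour lives. Below is a rough, illustrative sketch of that flow, assuming only what this excerpt shows, namely the new InputFilter frame_queue, the format field initialised to -1, the buffersrc context in ifilter->filter, and ifilter_parameters_from_frame() as declared in the header hunk below. The function name and exact control flow are assumptions, not the collapsed code.)

/* Illustrative sketch only -- not the collapsed hunk itself. */
static int send_frame_to_filter_sketch(InputFilter *ifilter, AVFrame *frame)
{
    AVFrame *tmp;
    int ret;

    if (!ifilter->filter) {
        /* Graph not configured yet: record the parameters of the actual
         * decoded frame (real pixel/sample format, dimensions, any
         * hw_frames_ctx) instead of guessing from the decoder context. */
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;

        /* Buffer the frame until every input of the graph has seen one
         * and the graph can be configured with real parameters. */
        tmp = av_frame_clone(frame);
        if (!tmp)
            return AVERROR(ENOMEM);
        if (!av_fifo_space(ifilter->frame_queue)) {
            ret = av_fifo_realloc2(ifilter->frame_queue,
                                   2 * av_fifo_size(ifilter->frame_queue));
            if (ret < 0) {
                av_frame_free(&tmp);
                return ret;
            }
        }
        av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
        return 0;
    }

    /* Graph already configured: feed the frame straight into buffersrc. */
    return av_buffersrc_add_frame(ifilter->filter, frame);
}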
@@ -200,6 +200,8 @@ typedef struct InputFilter {
     struct FilterGraph *graph;
     uint8_t *name;
 
+    AVFifoBuffer *frame_queue;
+
     // parameters configured for this input
     int format;
@@ -272,14 +274,6 @@ typedef struct InputStream {
     AVRational framerate;    /* framerate forced with -r */
     int autorotate;
 
-    int resample_height;
-    int resample_width;
-    int resample_pix_fmt;
-    int resample_sample_fmt;
-    int resample_sample_rate;
-    int resample_channels;
-    uint64_t resample_channel_layout;
-
     /* decoded data from this stream goes into all those filters
      * currently video and audio only */
@@ -494,7 +488,6 @@ int init_simple_filtergraph(InputStream *ist, OutputStream *ost);
 int init_complex_filtergraph(FilterGraph *fg);
 
 int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame);
-int ifilter_parameters_from_decoder(InputFilter *ifilter, const AVCodecContext *avctx);
 
 int avconv_parse_options(int argc, char **argv);
......
@@ -100,6 +100,10 @@ int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
     fg->inputs[0]->graph  = fg;
     fg->inputs[0]->format = -1;
 
+    fg->inputs[0]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
+    if (!fg->inputs[0]->frame_queue)
+        exit_program(1);
+
     GROW_ARRAY(ist->filters, ist->nb_filters);
     ist->filters[ist->nb_filters - 1] = fg->inputs[0];
@@ -176,6 +180,10 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
     fg->inputs[fg->nb_inputs - 1]->graph = fg;
     fg->inputs[fg->nb_inputs - 1]->format = -1;
 
+    fg->inputs[fg->nb_inputs - 1]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
+    if (!fg->inputs[fg->nb_inputs - 1]->frame_queue)
+        exit_program(1);
+
     GROW_ARRAY(ist->filters, ist->nb_filters);
     ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
 }
@@ -786,31 +794,6 @@ int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
     return 0;
 }
 
-int ifilter_parameters_from_decoder(InputFilter *ifilter, const AVCodecContext *avctx)
-{
-    av_buffer_unref(&ifilter->hw_frames_ctx);
-
-    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO)
-        ifilter->format = avctx->pix_fmt;
-    else
-        ifilter->format = avctx->sample_fmt;
-
-    ifilter->width               = avctx->width;
-    ifilter->height              = avctx->height;
-    ifilter->sample_aspect_ratio = avctx->sample_aspect_ratio;
-
-    ifilter->sample_rate    = avctx->sample_rate;
-    ifilter->channel_layout = avctx->channel_layout;
-
-    if (avctx->hw_frames_ctx) {
-        ifilter->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
-        if (!ifilter->hw_frames_ctx)
-            return AVERROR(ENOMEM);
-    }
-
-    return 0;
-}
-
 int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
 {
     int i;
......
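(The declaration of ifilter_parameters_from_frame() is kept in the header hunk above, but its body is not part of this excerpt. The following is a sketch of what such a frame-based counterpart of the removed function looks like, mirroring the same InputFilter fields but reading them from the AVFrame, where format and hw_frames_ctx already describe the decoder's real output; it is not the actual body.)

/* Sketch of a frame-based parameter grab; the actual body of
 * ifilter_parameters_from_frame() is not shown in this excerpt. */
static int parameters_from_frame_sketch(InputFilter *ifilter, const AVFrame *frame)
{
    av_buffer_unref(&ifilter->hw_frames_ctx);

    /* frame->format is the pixel format for video and the sample format
     * for audio, so no codec_type branch is needed here. */
    ifilter->format              = frame->format;

    ifilter->width               = frame->width;
    ifilter->height              = frame->height;
    ifilter->sample_aspect_ratio = frame->sample_aspect_ratio;

    ifilter->sample_rate         = frame->sample_rate;
    ifilter->channel_layout      = frame->channel_layout;

    /* For hardware decoding, the frames context travels with the frame. */
    if (frame->hw_frames_ctx) {
        ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
        if (!ifilter->hw_frames_ctx)
            return AVERROR(ENOMEM);
    }

    return 0;
}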
@@ -553,10 +553,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
         switch (par->codec_type) {
         case AVMEDIA_TYPE_VIDEO:
-            ist->resample_height  = ist->dec_ctx->height;
-            ist->resample_width   = ist->dec_ctx->width;
-            ist->resample_pix_fmt = ist->dec_ctx->pix_fmt;
-
             MATCH_PER_STREAM_OPT(frame_rates, str, framerate, ic, st);
             if (framerate && av_parse_video_rate(&ist->framerate,
                                                  framerate) < 0) {
@@ -616,12 +612,6 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
             break;
         case AVMEDIA_TYPE_AUDIO:
             guess_input_channel_layout(ist);
-
-            ist->resample_sample_fmt     = ist->dec_ctx->sample_fmt;
-            ist->resample_sample_rate    = ist->dec_ctx->sample_rate;
-            ist->resample_channels       = ist->dec_ctx->channels;
-            ist->resample_channel_layout = ist->dec_ctx->channel_layout;
-
             break;
         case AVMEDIA_TYPE_DATA:
         case AVMEDIA_TYPE_SUBTITLE:
@@ -1497,33 +1487,6 @@ static int init_complex_filters(void)
     return 0;
 }
 
-static int configure_complex_filters(void)
-{
-    int i, j, ret = 0;
-
-    for (i = 0; i < nb_filtergraphs; i++) {
-        FilterGraph *fg = filtergraphs[i];
-
-        if (filtergraph_is_simple(fg))
-            continue;
-
-        for (j = 0; j < fg->nb_inputs; j++) {
-            ret = ifilter_parameters_from_decoder(fg->inputs[j],
-                                                  fg->inputs[j]->ist->dec_ctx);
-            if (ret < 0) {
-                av_log(NULL, AV_LOG_ERROR,
-                       "Error initializing filtergraph %d input %d\n", i, j);
-                return ret;
-            }
-        }
-
-        ret = configure_filtergraph(filtergraphs[i]);
-        if (ret < 0)
-            return ret;
-    }
-
-    return 0;
-}
-
 static int open_output_file(OptionsContext *o, const char *filename)
 {
     AVFormatContext *oc;
@@ -2471,13 +2434,6 @@ int avconv_parse_options(int argc, char **argv)
         goto fail;
     }
 
-    /* configure the complex filtergraphs */
-    ret = configure_complex_filters();
-    if (ret < 0) {
-        av_log(NULL, AV_LOG_FATAL, "Error configuring complex filters.\n");
-        goto fail;
-    }
-
 fail:
     uninit_parse_context(&octx);
     if (ret < 0) {
......
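(With configure_complex_filters() gone, complex graphs are no longer configured at option-parsing time; configuration is deferred until every input of a graph has had its parameters filled in from a real frame. A minimal sketch of such a readiness check follows; the helper name is an assumption, not actual avconv code.)

/* A graph is configured only once every input has seen a frame,
 * i.e. its format is no longer the -1 set at creation time. */
static int graph_inputs_ready_sketch(FilterGraph *fg)
{
    int i;

    for (i = 0; i < fg->nb_inputs; i++)
        if (fg->inputs[i]->format < 0)
            return 0;
    return 1;
}

/* ...used roughly like this at the point where a frame arrives:
 *     if (!fg->graph && graph_inputs_ready_sketch(fg)) {
 *         ret = configure_filtergraph(fg);
 *         if (ret < 0)
 *             return ret;
 *     }
 */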
@@ -257,7 +257,6 @@ int qsv_transcode_init(OutputStream *ost)
     ist->hwaccel_ctx      = qsv;
     ist->dec_ctx->pix_fmt = AV_PIX_FMT_QSV;
-    ist->resample_pix_fmt = AV_PIX_FMT_QSV;
 
     return 0;
......