Commit d0979357 authored by Michael Niedermayer

Merge commit '41776ba9'

* commit '41776ba9':
  avconv: do not use the stream codec context for decoding

Conflicts:
	ffmpeg.c
	ffmpeg_filter.c
	ffmpeg_opt.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 955b31a7 41776ba9
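
This merge gives each input stream its own decoder context (InputStream.dec_ctx) instead of decoding through the demuxer-owned st->codec. Below is a rough, self-contained sketch of that pattern with the API used in this tree; the helper name open_decoder_for_stream and its error handling are illustrative, not code from this commit:

    #include <libavformat/avformat.h>
    #include <libavcodec/avcodec.h>

    /* Allocate a decoder context of our own, seeded from the demuxer-filled
     * st->codec, so that decoding never writes to st->codec itself. */
    static AVCodecContext *open_decoder_for_stream(AVStream *st)
    {
        AVCodec *dec = avcodec_find_decoder(st->codec->codec_id);
        AVCodecContext *dec_ctx;

        if (!dec)
            return NULL;

        dec_ctx = avcodec_alloc_context3(dec);
        if (!dec_ctx)
            return NULL;

        /* Copy the stream parameters (dimensions, sample rate, extradata, ...)
         * into the private context, then open it for decoding. */
        if (avcodec_copy_context(dec_ctx, st->codec) < 0 ||
            avcodec_open2(dec_ctx, dec, NULL) < 0) {
            avcodec_free_context(&dec_ctx);
            return NULL;
        }

        return dec_ctx; /* caller releases it with avcodec_free_context() */
    }

In the diff below, add_input_streams() does the allocation and copy with avcodec_alloc_context3() and avcodec_copy_context(), init_input_stream() opens the context with avcodec_open2(), ffmpeg_cleanup() frees it with avcodec_free_context(), and the decoder-side accesses to ist->st->codec are switched to ist->dec_ctx.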
ffmpeg.c

@@ -503,6 +503,8 @@ static void ffmpeg_cleanup(int ret)
         av_freep(&ist->filters);
         av_freep(&ist->hwaccel_device);
 
+        avcodec_free_context(&ist->dec_ctx);
+
         av_freep(&input_streams[i]);
     }
@@ -1246,7 +1248,7 @@ static void print_final_stats(int64_t total_size)
         for (j = 0; j < f->nb_streams; j++) {
             InputStream *ist = input_streams[f->ist_index + j];
-            enum AVMediaType type = ist->st->codec->codec_type;
+            enum AVMediaType type = ist->dec_ctx->codec_type;
 
             total_size += ist->data_size;
             total_packets += ist->nb_packets;
@@ -1676,7 +1678,7 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p
 
 int guess_input_channel_layout(InputStream *ist)
 {
-    AVCodecContext *dec = ist->st->codec;
+    AVCodecContext *dec = ist->dec_ctx;
 
     if (!dec->channel_layout) {
         char layout_name[256];
@@ -1697,7 +1699,7 @@ int guess_input_channel_layout(InputStream *ist)
 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
 {
     AVFrame *decoded_frame, *f;
-    AVCodecContext *avctx = ist->st->codec;
+    AVCodecContext *avctx = ist->dec_ctx;
     int i, ret, err = 0, resample_changed;
     AVRational decoded_frame_tb;
@@ -1812,8 +1814,8 @@ static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
     pkt->pts = AV_NOPTS_VALUE;
     if (decoded_frame->pts != AV_NOPTS_VALUE)
         decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
-                                              (AVRational){1, ist->st->codec->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
-                                              (AVRational){1, ist->st->codec->sample_rate});
+                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
+                                              (AVRational){1, avctx->sample_rate});
     for (i = 0; i < ist->nb_filters; i++) {
         if (i < ist->nb_filters - 1) {
             f = ist->filter_frame;
@@ -1851,7 +1853,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
         pkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
 
     update_benchmark(NULL);
-    ret = avcodec_decode_video2(ist->st->codec,
+    ret = avcodec_decode_video2(ist->dec_ctx,
                                 decoded_frame, got_output, pkt);
     update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
@@ -1876,7 +1878,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output)
     ist->frames_decoded++;
 
     if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
-        err = ist->hwaccel_retrieve_data(ist->st->codec, decoded_frame);
+        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
         if (err < 0)
             goto fail;
     }
@@ -1956,7 +1958,7 @@ fail:
 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
 {
     AVSubtitle subtitle;
-    int i, ret = avcodec_decode_subtitle2(ist->st->codec,
+    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                           &subtitle, got_output, pkt);
 
     if (*got_output || ret<0 || pkt->size)
@@ -2066,7 +2068,7 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
             ist->showed_multi_packet_warning = 1;
         }
 
-        switch (ist->st->codec->codec_type) {
+        switch (ist->dec_ctx->codec_type) {
         case AVMEDIA_TYPE_AUDIO:
             ret = decode_audio (ist, &avpkt, &got_output);
             break;
@@ -2074,11 +2076,11 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
             ret = decode_video (ist, &avpkt, &got_output);
             if (avpkt.duration) {
                 duration = av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
-            } else if(ist->st->codec->time_base.num != 0 && ist->st->codec->time_base.den != 0) {
-                int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->st->codec->ticks_per_frame;
+            } else if(ist->dec_ctx->time_base.num != 0 && ist->dec_ctx->time_base.den != 0) {
+                int ticks= ist->st->parser ? ist->st->parser->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                 duration = ((int64_t)AV_TIME_BASE *
-                                ist->st->codec->time_base.num * ticks) /
-                                ist->st->codec->time_base.den;
+                                ist->dec_ctx->time_base.num * ticks) /
+                                ist->dec_ctx->time_base.den;
             } else
                 duration = 0;
@@ -2118,10 +2120,10 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
     /* handle stream copy */
     if (!ist->decoding_needed) {
         ist->dts = ist->next_dts;
-        switch (ist->st->codec->codec_type) {
+        switch (ist->dec_ctx->codec_type) {
         case AVMEDIA_TYPE_AUDIO:
-            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
-                             ist->st->codec->sample_rate;
+            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
+                             ist->dec_ctx->sample_rate;
             break;
         case AVMEDIA_TYPE_VIDEO:
             if (ist->framerate.num) {
@@ -2131,11 +2133,11 @@ static int output_packet(InputStream *ist, const AVPacket *pkt)
                 ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
             } else if (pkt->duration) {
                 ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
-            } else if(ist->st->codec->time_base.num != 0) {
-                int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
+            } else if(ist->dec_ctx->time_base.num != 0) {
+                int ticks= ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                 ist->next_dts += ((int64_t)AV_TIME_BASE *
-                                  ist->st->codec->time_base.num * ticks) /
-                                  ist->st->codec->time_base.den;
+                                  ist->dec_ctx->time_base.num * ticks) /
+                                  ist->dec_ctx->time_base.den;
             }
             break;
         }
@@ -2237,20 +2239,20 @@ static int init_input_stream(int ist_index, char *error, int error_len)
         AVCodec *codec = ist->dec;
         if (!codec) {
             snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
-                    avcodec_get_name(ist->st->codec->codec_id), ist->file_index, ist->st->index);
+                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
             return AVERROR(EINVAL);
         }
 
-        ist->st->codec->opaque = ist;
-        ist->st->codec->get_format = get_format;
-        ist->st->codec->get_buffer2 = get_buffer;
-        ist->st->codec->thread_safe_callbacks = 1;
+        ist->dec_ctx->opaque = ist;
+        ist->dec_ctx->get_format = get_format;
+        ist->dec_ctx->get_buffer2 = get_buffer;
+        ist->dec_ctx->thread_safe_callbacks = 1;
 
-        av_opt_set_int(ist->st->codec, "refcounted_frames", 1, 0);
+        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
 
         if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
             av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
-        if ((ret = avcodec_open2(ist->st->codec, codec, &ist->decoder_opts)) < 0) {
+        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
             if (ret == AVERROR_EXPERIMENTAL)
                 abort_codec_experimental(codec, 0);
@@ -2462,7 +2464,7 @@ static int transcode_init(void)
         enc_ctx = ost->st->codec;
 
         if (ist) {
-            dec_ctx = ist->st->codec;
+            dec_ctx = ist->dec_ctx;
 
             ost->st->disposition = ist->st->disposition;
             enc_ctx->bits_per_raw_sample = dec_ctx->bits_per_raw_sample;
@@ -2795,7 +2797,7 @@ static int transcode_init(void)
             AVCodecContext *dec = NULL;
 
             if ((ist = get_input_stream(ost)))
-                dec = ist->st->codec;
+                dec = ist->dec_ctx;
             if (dec && dec->subtitle_header) {
                 /* ASS code assumes this buffer is null terminated so add extra byte. */
                 ost->st->codec->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
@@ -3624,9 +3626,9 @@ static int transcode(void)
     for (i = 0; i < nb_input_streams; i++) {
         ist = input_streams[i];
         if (ist->decoding_needed) {
-            avcodec_close(ist->st->codec);
+            avcodec_close(ist->dec_ctx);
             if (ist->hwaccel_uninit)
-                ist->hwaccel_uninit(ist->st->codec);
+                ist->hwaccel_uninit(ist->dec_ctx);
         }
     }
ffmpeg.h

@@ -241,6 +241,7 @@ typedef struct InputStream {
     AVStream *st;
     int discard;             /* true if stream data should be discarded */
     int decoding_needed;     /* true if the packets must be decoded in 'raw_fifo' */
+    AVCodecContext *dec_ctx;
     AVCodec *dec;
     AVFrame *decoded_frame;
     AVFrame *filter_frame;   /* a ref of decoded_frame, to be sent to filters */
ffmpeg_filter.c

@@ -262,7 +262,7 @@ static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
         /* find the first unused stream of corresponding type */
         for (i = 0; i < nb_input_streams; i++) {
             ist = input_streams[i];
-            if (ist->st->codec->codec_type == type && ist->discard)
+            if (ist->dec_ctx->codec_type == type && ist->discard)
                 break;
         }
         if (i == nb_input_streams) {
@@ -654,7 +654,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
     sar = ist->st->sample_aspect_ratio.num ?
           ist->st->sample_aspect_ratio :
-          ist->st->codec->sample_aspect_ratio;
+          ist->dec_ctx->sample_aspect_ratio;
     if(!sar.den)
         sar = (AVRational){0,1};
     av_bprint_init(&args, 0, 1);
@@ -664,7 +664,7 @@ static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
              ist->resample_height,
              ist->hwaccel_retrieve_data ? ist->hwaccel_retrieved_pix_fmt : ist->resample_pix_fmt,
              tb.num, tb.den, sar.num, sar.den,
-             SWS_BILINEAR + ((ist->st->codec->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
+             SWS_BILINEAR + ((ist->dec_ctx->flags&CODEC_FLAG_BITEXACT) ? SWS_BITEXACT:0));
     if (fr.num && fr.den)
         av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
     snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
@@ -732,21 +732,21 @@ static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
     char name[255];
     int ret, pad_idx = 0;
 
-    if (ist->st->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
+    if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
         av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
         return AVERROR(EINVAL);
     }
 
     av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
     av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
-               1, ist->st->codec->sample_rate,
-               ist->st->codec->sample_rate,
-               av_get_sample_fmt_name(ist->st->codec->sample_fmt));
-    if (ist->st->codec->channel_layout)
+               1, ist->dec_ctx->sample_rate,
+               ist->dec_ctx->sample_rate,
+               av_get_sample_fmt_name(ist->dec_ctx->sample_fmt));
+    if (ist->dec_ctx->channel_layout)
         av_bprintf(&args, ":channel_layout=0x%"PRIx64,
-                   ist->st->codec->channel_layout);
+                   ist->dec_ctx->channel_layout);
     else
-        av_bprintf(&args, ":channels=%d", ist->st->codec->channels);
+        av_bprintf(&args, ":channels=%d", ist->dec_ctx->channels);
     snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
              ist->file_index, ist->st->index);
ffmpeg_opt.c

@@ -564,7 +564,7 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *
  * list of input streams. */
 static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
 {
-    int i;
+    int i, ret;
 
     for (i = 0; i < ic->nb_streams; i++) {
         AVStream *st = ic->streams[i];
@@ -604,6 +604,18 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
         ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE;
 
+        ist->dec_ctx = avcodec_alloc_context3(ist->dec);
+        if (!ist->dec_ctx) {
+            av_log(NULL, AV_LOG_ERROR, "Error allocating the decoder context.\n");
+            exit_program(1);
+        }
+
+        ret = avcodec_copy_context(ist->dec_ctx, dec);
+        if (ret < 0) {
+            av_log(NULL, AV_LOG_ERROR, "Error initializing the decoder context.\n");
+            exit_program(1);
+        }
+
         switch (dec->codec_type) {
         case AVMEDIA_TYPE_VIDEO:
             if(!ist->dec)
@@ -612,9 +624,9 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
                 dec->flags |= CODEC_FLAG_EMU_EDGE;
             }
 
-            ist->resample_height = dec->height;
-            ist->resample_width = dec->width;
-            ist->resample_pix_fmt = dec->pix_fmt;
+            ist->resample_height = ist->dec_ctx->height;
+            ist->resample_width = ist->dec_ctx->width;
+            ist->resample_pix_fmt = ist->dec_ctx->pix_fmt;
 
             MATCH_PER_STREAM_OPT(frame_rates, str, framerate, ic, st);
             if (framerate && av_parse_video_rate(&ist->framerate,
@@ -668,10 +680,10 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
             MATCH_PER_STREAM_OPT(guess_layout_max, i, ist->guess_layout_max, ic, st);
             guess_input_channel_layout(ist);
 
-            ist->resample_sample_fmt = dec->sample_fmt;
-            ist->resample_sample_rate = dec->sample_rate;
-            ist->resample_channels = dec->channels;
-            ist->resample_channel_layout = dec->channel_layout;
+            ist->resample_sample_fmt = ist->dec_ctx->sample_fmt;
+            ist->resample_sample_rate = ist->dec_ctx->sample_rate;
+            ist->resample_channels = ist->dec_ctx->channels;
+            ist->resample_channel_layout = ist->dec_ctx->channel_layout;
             break;
         case AVMEDIA_TYPE_DATA: