Commit 51bfaa21 authored by Michael Niedermayer

Merge remote-tracking branch 'qatar/master'

* qatar/master: (22 commits)
  g722dec: check output buffer size before decoding
  g722dec: cosmetics: reindent/linewrap
  g722dec: remove the use of lowres for half-rate decoding.
  tta: check for extradata allocation failure in tta demuxer
  tta: check for allocation failure of decode_buffer
  tta: use correct frame_length calculation.
  tta: add support for decoding 24-bit sample format
  cosmetics: indentation
  tta: remove pointless braces
  tta: check output buffer size after adjusting frame length for last frame
  tta: fix reading of format in TTA header.
  tta: remove useless commented-out lines
  tta: check remaining bitstream size while reading unary value
  lavf: deprecate AVStream.stream_copy
  avconv: split choose_codec() to choose_decoder/choose_encoder.
  lavf: simplify by using FFMAX/FFMIN.
  mpegenc: add preload private option.
  cosmetics: simplify latm_decode_init
  latm: avoid unnecessary reinit of the aac decoder
  aacdec: initialize sbr context only in new channel elements
  ...

Conflicts:
	avconv.c
	libavcodec/resample.c
	libavcodec/tta.c
	libavformat/utils.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents f5fdb12d a3a85721
......@@ -243,6 +243,7 @@ typedef struct OutputStream {
int64_t sws_flags;
AVDictionary *opts;
int is_past_recording_time;
int stream_copy;
} OutputStream;
#if HAVE_TERMIOS_H
......@@ -396,7 +397,6 @@ static void reset_options(OptionsContext *o)
memset(o, 0, sizeof(*o));
o->mux_preload = 0.5;
o->mux_max_delay = 0.7;
o->recording_time = INT64_MAX;
o->limit_filesize = UINT64_MAX;
......@@ -1407,7 +1407,7 @@ static void print_report(OutputFile *output_files,
float q = -1;
ost = &ost_table[i];
enc = ost->st->codec;
if (!ost->st->stream_copy && enc->coded_frame)
if (!ost->stream_copy && enc->coded_frame)
q = enc->coded_frame->quality/(float)FF_QP2LAMBDA;
if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
......@@ -2054,7 +2054,7 @@ static int transcode_init(OutputFile *output_files,
codec->bits_per_raw_sample= icodec->bits_per_raw_sample;
codec->chroma_sample_location = icodec->chroma_sample_location;
if (ost->st->stream_copy) {
if (ost->stream_copy) {
uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;
if (extra_size > INT_MAX) {
......@@ -2150,9 +2150,8 @@ static int transcode_init(OutputFile *output_files,
return AVERROR(ENOMEM);
}
ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE,AV_SAMPLE_FMT_NONE);
if (!codec->sample_rate) {
if (!codec->sample_rate)
codec->sample_rate = icodec->sample_rate;
}
choose_sample_rate(ost->st, ost->enc);
codec->time_base = (AVRational){1, codec->sample_rate};
if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
......@@ -2360,7 +2359,7 @@ static int transcode_init(OutputFile *output_files,
av_log(NULL, AV_LOG_INFO, " [sync #%d.%d]",
ost->sync_ist->file_index,
ost->sync_ist->st->index);
if (ost->st->stream_copy)
if (ost->stream_copy)
av_log(NULL, AV_LOG_INFO, " (copy)");
else
av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index].dec ?
......@@ -2643,7 +2642,7 @@ static int transcode(OutputFile *output_files,
for (i = 0; i < nb_output_streams; i++) {
ost = &output_streams[i];
if (ost) {
if (ost->st->stream_copy)
if (ost->stream_copy)
av_freep(&ost->st->codec->extradata);
if (ost->logfile) {
fclose(ost->logfile);
......@@ -2842,13 +2841,11 @@ static int opt_map_metadata(OptionsContext *o, const char *opt, const char *arg)
return 0;
}
static enum CodecID find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
{
const char *codec_string = encoder ? "encoder" : "decoder";
AVCodec *codec;
if(!name)
return CODEC_ID_NONE;
codec = encoder ?
avcodec_find_encoder_by_name(name) :
avcodec_find_decoder_by_name(name);
......@@ -2860,29 +2857,20 @@ static enum CodecID find_codec_or_die(const char *name, enum AVMediaType type, i
av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
exit_program(1);
}
return codec->id;
return codec;
}
static AVCodec *choose_codec(OptionsContext *o, AVFormatContext *s, AVStream *st, enum AVMediaType type)
static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
{
char *codec_name = NULL;
MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
if (!codec_name) {
if (s->oformat) {
st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename, NULL, type);
return avcodec_find_encoder(st->codec->codec_id);
}
} else if (!strcmp(codec_name, "copy"))
st->stream_copy = 1;
else {
st->codec->codec_id = find_codec_or_die(codec_name, type, s->iformat == NULL);
return s->oformat ? avcodec_find_encoder_by_name(codec_name) :
avcodec_find_decoder_by_name(codec_name);
}
return NULL;
if (codec_name) {
AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
st->codec->codec_id = codec->id;
return codec;
} else
return avcodec_find_decoder(st->codec->codec_id);
}
/**
......@@ -2909,9 +2897,7 @@ static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
MATCH_PER_STREAM_OPT(ts_scale, dbl, scale, ic, st);
ist->ts_scale = scale;
ist->dec = choose_codec(o, ic, st, dec->codec_type);
if (!ist->dec)
ist->dec = avcodec_find_decoder(dec->codec_id);
ist->dec = choose_decoder(o, ic, st);
switch (dec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
......@@ -3016,7 +3002,7 @@ static int opt_input_file(OptionsContext *o, const char *opt, const char *filena
/* apply forced codec ids */
for (i = 0; i < ic->nb_streams; i++)
choose_codec(o, ic, ic->streams[i], ic->streams[i]->codec->codec_type);
choose_decoder(o, ic, ic->streams[i]);
/* Set AVCodecContext options for avformat_find_stream_info */
opts = setup_find_stream_info_opts(ic, codec_opts);
......@@ -3134,6 +3120,23 @@ static int get_preset_file_2(const char *preset_name, const char *codec_name, AV
return ret;
}
static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
{
char *codec_name = NULL;
MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
if (!codec_name) {
ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
NULL, ost->st->codec->codec_type);
ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
} else if (!strcmp(codec_name, "copy"))
ost->stream_copy = 1;
else {
ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
ost->st->codec->codec_id = ost->enc->id;
}
}
static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
{
OutputStream *ost;
......@@ -3161,7 +3164,7 @@ static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, e
ost->index = idx;
ost->st = st;
st->codec->codec_type = type;
ost->enc = choose_codec(o, oc, st, type);
choose_encoder(o, oc, ost);
if (ost->enc) {
ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
}
......@@ -3262,7 +3265,7 @@ static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
st = ost->st;
video_enc = st->codec;
if (!st->stream_copy) {
if (!ost->stream_copy) {
const char *p = NULL;
char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
......@@ -3379,7 +3382,7 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
audio_enc = st->codec;
audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
if (!st->stream_copy) {
if (!ost->stream_copy) {
char *sample_fmt = NULL;
MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
......@@ -3399,12 +3402,10 @@ static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
{
AVStream *st;
OutputStream *ost;
ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
st = ost->st;
if (!st->stream_copy) {
if (!ost->stream_copy) {
av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
exit_program(1);
}
......@@ -3415,7 +3416,7 @@ static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
{
OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
ost->st->stream_copy = 1;
ost->stream_copy = 1;
return ost;
}
......@@ -3520,9 +3521,9 @@ static int read_ffserver_streams(OptionsContext *o, AVFormatContext *s, const ch
memcpy(st->info, ic->streams[i]->info, sizeof(*st->info));
avcodec_copy_context(st->codec, ic->streams[i]->codec);
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && !st->stream_copy)
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && !ost->stream_copy)
choose_sample_fmt(st, codec);
else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && !st->stream_copy)
else if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO && !ost->stream_copy)
choose_pixel_fmt(st, codec);
}
......@@ -3678,7 +3679,11 @@ static void opt_output_file(void *optctx, const char *filename)
}
}
oc->preload = (int)(o->mux_preload * AV_TIME_BASE);
if (o->mux_preload) {
uint8_t buf[64];
snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
av_dict_set(&output_files[nb_output_files - 1].opts, "preload", buf, 0);
}
oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
/* copy chapters */
......
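For context on the opt_output_file() hunk above: the value formerly stored in the deprecated oc->preload field now reaches the muxer as its private "preload" option, carried in the output file's options dictionary. A minimal caller-side sketch of that mechanism (the helper name is made up; stream setup, avio_open() and error paths are omitted), assuming the lavf-53-era avformat_write_header() that consumes an AVDictionary:

    #include <stdio.h>
    #include <libavformat/avformat.h>
    #include <libavutil/dict.h>

    /* Sketch only: forward a preload of `preload_sec` seconds to the muxer
     * through its private option instead of the deprecated oc->preload. */
    static int write_header_with_preload(AVFormatContext *oc, double preload_sec)
    {
        AVDictionary *opts = NULL;
        char buf[64];
        int ret;

        snprintf(buf, sizeof(buf), "%d", (int)(preload_sec * AV_TIME_BASE));
        av_dict_set(&opts, "preload", buf, 0);   /* microseconds, as in the hunk */

        ret = avformat_write_header(oc, &opts);  /* muxer consumes known options */
        av_dict_free(&opts);                     /* anything left was unrecognized */
        return ret;
    }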
......@@ -184,9 +184,11 @@ static av_cold int che_configure(AACContext *ac,
int type, int id, int *channels)
{
if (che_pos[type][id]) {
if (!ac->che[type][id] && !(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
return AVERROR(ENOMEM);
ff_aac_sbr_ctx_init(ac, &ac->che[type][id]->sbr);
if (!ac->che[type][id]) {
if (!(ac->che[type][id] = av_mallocz(sizeof(ChannelElement))))
return AVERROR(ENOMEM);
ff_aac_sbr_ctx_init(ac, &ac->che[type][id]->sbr);
}
if (type != TYPE_CCE) {
ac->output_data[(*channels)++] = ac->che[type][id]->ch[0].ret;
if (type == TYPE_CPE ||
......@@ -2521,8 +2523,9 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
*out_size = 0;
return avpkt->size;
} else {
aac_decode_close(avctx);
if ((err = aac_decode_init(avctx)) < 0)
if ((err = decode_audio_specific_config(
&latmctx->aac_ctx, avctx, &latmctx->aac_ctx.m4ac,
avctx->extradata, avctx->extradata_size, 8*avctx->extradata_size)) < 0)
return err;
latmctx->initialized = 1;
}
......@@ -2544,15 +2547,10 @@ static int latm_decode_frame(AVCodecContext *avctx, void *out, int *out_size,
av_cold static int latm_decode_init(AVCodecContext *avctx)
{
struct LATMContext *latmctx = avctx->priv_data;
int ret;
ret = aac_decode_init(avctx);
int ret = aac_decode_init(avctx);
if (avctx->extradata_size > 0) {
if (avctx->extradata_size > 0)
latmctx->initialized = !ret;
} else {
latmctx->initialized = 0;
}
return ret;
}
......
......@@ -66,9 +66,6 @@ static av_cold int g722_decode_init(AVCodecContext * avctx)
c->band[1].scale_factor = 2;
c->prev_samples_pos = 22;
if (avctx->lowres)
avctx->sample_rate /= 2;
return 0;
}
......@@ -88,15 +85,22 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
{
G722Context *c = avctx->priv_data;
int16_t *out_buf = data;
int j, out_len = 0;
int j, out_len;
const int skip = 8 - avctx->bits_per_coded_sample;
const int16_t *quantizer_table = low_inv_quants[skip];
GetBitContext gb;
out_len = avpkt->size * 2 * av_get_bytes_per_sample(avctx->sample_fmt);
if (*data_size < out_len) {
av_log(avctx, AV_LOG_ERROR, "Output buffer is too small\n");
return AVERROR(EINVAL);
}
init_get_bits(&gb, avpkt->data, avpkt->size * 8);
for (j = 0; j < avpkt->size; j++) {
int ilow, ihigh, rlow;
int ilow, ihigh, rlow, rhigh, dhigh;
int xout1, xout2;
ihigh = get_bits(&gb, 2);
ilow = get_bits(&gb, 6 - skip);
......@@ -107,31 +111,24 @@ static int g722_decode_frame(AVCodecContext *avctx, void *data,
ff_g722_update_low_predictor(&c->band[0], ilow >> (2 - skip));
if (!avctx->lowres) {
const int dhigh = c->band[1].scale_factor *
ff_g722_high_inv_quant[ihigh] >> 10;
const int rhigh = av_clip(dhigh + c->band[1].s_predictor,
-16384, 16383);
int xout1, xout2;
ff_g722_update_high_predictor(&c->band[1], dhigh, ihigh);
c->prev_samples[c->prev_samples_pos++] = rlow + rhigh;
c->prev_samples[c->prev_samples_pos++] = rlow - rhigh;
ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24,
&xout1, &xout2);
out_buf[out_len++] = av_clip_int16(xout1 >> 12);
out_buf[out_len++] = av_clip_int16(xout2 >> 12);
if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
memmove(c->prev_samples,
c->prev_samples + c->prev_samples_pos - 22,
22 * sizeof(c->prev_samples[0]));
c->prev_samples_pos = 22;
}
} else
out_buf[out_len++] = rlow;
dhigh = c->band[1].scale_factor * ff_g722_high_inv_quant[ihigh] >> 10;
rhigh = av_clip(dhigh + c->band[1].s_predictor, -16384, 16383);
ff_g722_update_high_predictor(&c->band[1], dhigh, ihigh);
c->prev_samples[c->prev_samples_pos++] = rlow + rhigh;
c->prev_samples[c->prev_samples_pos++] = rlow - rhigh;
ff_g722_apply_qmf(c->prev_samples + c->prev_samples_pos - 24,
&xout1, &xout2);
*out_buf++ = av_clip_int16(xout1 >> 12);
*out_buf++ = av_clip_int16(xout2 >> 12);
if (c->prev_samples_pos >= PREV_SAMPLES_BUF_SIZE) {
memmove(c->prev_samples, c->prev_samples + c->prev_samples_pos - 22,
22 * sizeof(c->prev_samples[0]));
c->prev_samples_pos = 22;
}
}
*data_size = out_len << 1;
*data_size = out_len;
return avpkt->size;
}
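The new size check above guards against callers that pass a buffer smaller than avpkt->size * 2 * av_get_bytes_per_sample(): with lowres gone, every coded G.722 byte always expands to two 16-bit samples, i.e. four output bytes. A hedged caller-side sketch, assuming the avcodec_decode_audio3() API this decoder is written against (the helper name and buffer handling are illustrative only):

    #include <libavcodec/avcodec.h>

    /* *data_size goes in as the capacity of `samples` in bytes and comes back
     * as the number of bytes written; a 160-byte G.722 packet therefore needs
     * at least 160 * 2 * 2 = 640 bytes of room. */
    static int decode_g722_packet(AVCodecContext *avctx, AVPacket *pkt,
                                  int16_t *samples /* AVCODEC_MAX_AUDIO_FRAME_SIZE bytes */)
    {
        int out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
        int used = avcodec_decode_audio3(avctx, samples, &out_size, pkt);
        if (used < 0)
            return used;       /* includes the new AVERROR(EINVAL) path above */
        return out_size;       /* 4 * pkt->size for full-rate G.722 */
    }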
......@@ -143,5 +140,4 @@ AVCodec ff_adpcm_g722_decoder = {
.init = g722_decode_init,
.decode = g722_decode_frame,
.long_name = NULL_IF_CONFIG_SMALL("G.722 ADPCM"),
.max_lowres = 1,
};
......@@ -265,7 +265,6 @@ ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
}
}
#define TAPS 16
s->resample_context = av_resample_init(output_rate, input_rate,
filter_length, log2_phase_count,
linear, cutoff);
......
......@@ -540,8 +540,10 @@ typedef struct AVStream {
*/
AVRational time_base;
int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */
#if FF_API_STREAM_COPY
/* ffmpeg.c private use */
int stream_copy; /**< If set, just copy stream. */
attribute_deprecated int stream_copy; /**< If set, just copy stream. */
#endif
enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed.
#if FF_API_AVSTREAM_QUALITY
......@@ -772,7 +774,9 @@ typedef struct AVFormatContext {
attribute_deprecated int mux_rate;
#endif
unsigned int packet_size;
int preload;
#if FF_API_PRELOAD
attribute_deprecated int preload;
#endif
int max_delay;
#if FF_API_LOOP_OUTPUT
......
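The guards added above pair attribute_deprecated (which, on GCC builds, expands to __attribute__((deprecated))) with FF_API_* switches: applications that still touch these fields get a compile-time warning now and lose the field entirely at the next major bump. A small sketch of how downstream code can keep building across that transition (the function is hypothetical):

    #include <libavformat/avformat.h>

    static void mark_stream_copy(AVStream *st)
    {
    #if FF_API_STREAM_COPY
        st->stream_copy = 1;   /* still compiles on lavf 53, with a deprecation warning */
    #else
        (void)st;              /* on lavf >= 54 the field no longer exists */
    #endif
    }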
......@@ -77,6 +77,7 @@ typedef struct {
double vcd_padding_bitrate; //FIXME floats
int64_t vcd_padding_bytes_written;
int preload;
} MpegMuxContext;
extern AVOutputFormat ff_mpeg1vcd_muxer;
......@@ -1158,9 +1159,15 @@ static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt)
StreamInfo *stream = st->priv_data;
int64_t pts, dts;
PacketDesc *pkt_desc;
const int preload= av_rescale(ctx->preload, 90000, AV_TIME_BASE);
int preload;
const int is_iframe = st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (pkt->flags & AV_PKT_FLAG_KEY);
#if FF_API_PRELOAD
if (ctx->preload)
s->preload = ctx->preload;
#endif
preload = av_rescale(s->preload, 90000, AV_TIME_BASE);
pts= pkt->pts;
dts= pkt->dts;
......@@ -1237,6 +1244,7 @@ static int mpeg_mux_end(AVFormatContext *ctx)
#define E AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{ "muxrate", NULL, OFFSET(mux_rate), AV_OPT_TYPE_INT, {0}, 0, INT_MAX, E },
{ "preload", "Initial demux-decode delay in microseconds.", OFFSET(preload), AV_OPT_TYPE_INT, {500000}, 0, INT_MAX, E},
{ NULL },
};
......
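For the rescale in mpeg_mux_write_packet() above, a worked example with the option's default of 500000 microseconds: the value is converted from AV_TIME_BASE units to the 90 kHz MPEG system clock, giving 45000 ticks, i.e. roughly half a second of demux-decode delay ahead of the first DTS. A tiny illustrative helper (name assumed):

    #include <libavutil/avutil.h>
    #include <libavutil/mathematics.h>

    /* 500000 us * 90000 / AV_TIME_BASE(=1000000) == 45000 ticks of the 90 kHz clock */
    static int64_t preload_to_90khz(int64_t preload_us)
    {
        return av_rescale(preload_us, 90000, AV_TIME_BASE);
    }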
......@@ -107,6 +107,10 @@ static int tta_read_header(AVFormatContext *s, AVFormatParameters *ap)
return -1;
}
st->codec->extradata = av_mallocz(st->codec->extradata_size+FF_INPUT_BUFFER_PADDING_SIZE);
if (!st->codec->extradata) {
st->codec->extradata_size = 0;
return AVERROR(ENOMEM);
}
avio_seek(s->pb, start_offset, SEEK_SET);
avio_read(s->pb, st->codec->extradata, st->codec->extradata_size);
......
......@@ -1932,29 +1932,24 @@ static void update_stream_timings(AVFormatContext *ic)
if (start_time1 < start_time_text)
start_time_text = start_time1;
} else
if (start_time1 < start_time)
start_time = start_time1;
start_time = FFMIN(start_time, start_time1);
if (st->duration != AV_NOPTS_VALUE) {
end_time1 = start_time1
+ av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
if (end_time1 > end_time)
end_time = end_time1;
end_time = FFMAX(end_time, end_time1);
}
}
if (st->duration != AV_NOPTS_VALUE) {
duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
if (duration1 > duration)
duration = duration1;
duration = FFMAX(duration, duration1);
}
}
if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
start_time = start_time_text;
if (start_time != INT64_MAX) {
ic->start_time = start_time;
if (end_time != INT64_MIN) {
if (end_time - start_time > duration)
duration = end_time - start_time;
}
if (end_time != INT64_MIN)
duration = FFMAX(duration, end_time - start_time);
}
if (duration != INT64_MIN && ic->duration == AV_NOPTS_VALUE) {
ic->duration = duration;
......@@ -2108,8 +2103,7 @@ static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
file_size = 0;
} else {
file_size = avio_size(ic->pb);
if (file_size < 0)
file_size = 0;
file_size = FFMAX(0, file_size);
}
if ((!strcmp(ic->iformat->name, "mpeg") ||
......
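The utils.c hunks above are purely mechanical: FFMAX/FFMIN from libavutil/common.h expand to the same comparisons that were open-coded before, so behavior is unchanged. For reference, their definitions (as found in common.h of this era; note the usual double-evaluation caveat of such macros, harmless here since the operands are plain variables):

    #define FFMAX(a, b) ((a) > (b) ? (a) : (b))
    #define FFMIN(a, b) ((a) > (b) ? (b) : (a))

    /* e.g.  start_time = FFMIN(start_time, start_time1);
     * expands to
     *        start_time = ((start_time) > (start_time1) ? (start_time1) : (start_time)); */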
......@@ -101,5 +101,11 @@
#ifndef FF_API_NEW_STREAM
#define FF_API_NEW_STREAM (LIBAVFORMAT_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_PRELOAD
#define FF_API_PRELOAD (LIBAVFORMAT_VERSION_MAJOR < 54)
#endif
#ifndef FF_API_STREAM_COPY
#define FF_API_STREAM_COPY (LIBAVFORMAT_VERSION_MAJOR < 54)
#endif
#endif /* AVFORMAT_VERSION_H */
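These FF_API_* switches implement the scheduled-removal convention used throughout this merge: with LIBAVFORMAT_VERSION_MAJOR at 53, (53 < 54) evaluates to 1 and every #if FF_API_PRELOAD / #if FF_API_STREAM_COPY block stays in; at the 54 bump they evaluate to 0 and the deprecated fields plus their fallback code compile out with no further edits. A compile-time sketch of the expansion:

    #include <libavformat/version.h>

    /* LIBAVFORMAT_VERSION_MAJOR == 53  ->  FF_API_PRELOAD == (53 < 54) == 1
     * LIBAVFORMAT_VERSION_MAJOR == 54  ->  FF_API_PRELOAD == (54 < 54) == 0 */
    #if FF_API_PRELOAD && FF_API_STREAM_COPY
    /* lavf 53: both deprecated fields are still present */
    #endif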