Commit 0f1446a4 authored by Michael Niedermayer

Merge commit 'ab35ec29'

* commit 'ab35ec29':
  vf_overlay: get rid of pointless messing with timebase.
  samplefmt: make av_samples_alloc() initialize the data to silence.
  libspeexdec: handle NULL return value from speex_packet_to_header()
  h264probe: Don't error out on bits that no longer are reserved
  mpegvideo: set extended_data in ff_update_duplicate_context()
  libspeexdec: properly handle DTX for multiple frames-per-packet
  libspeexdec: move the SpeexHeader from LibSpeexContext to where it is used
  libspeexdec: simplify setting of frame_size
  libspeexdec: set channel_layout

Conflicts:
	libavfilter/vf_overlay.c
	libavformat/h264dec.c
	libavutil/version.h
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents 741f5b02 ab35ec29
libavcodec/libspeexdec.c
@@ -22,15 +22,16 @@
 #include <speex/speex_header.h>
 #include <speex/speex_stereo.h>
 #include <speex/speex_callbacks.h>
-#include "avcodec.h"
+#include "libavutil/audioconvert.h"
 #include "libavutil/common.h"
+#include "avcodec.h"
 
 typedef struct {
     AVFrame frame;
     SpeexBits bits;
     SpeexStereoState stereo;
     void *dec_state;
-    SpeexHeader *header;
     int frame_size;
 } LibSpeexContext;
@@ -39,16 +40,20 @@ static av_cold int libspeex_decode_init(AVCodecContext *avctx)
 {
     LibSpeexContext *s = avctx->priv_data;
     const SpeexMode *mode;
+    SpeexHeader *header = NULL;
     int spx_mode;
 
-    if (avctx->extradata_size >= 80)
-        s->header = speex_packet_to_header(avctx->extradata, avctx->extradata_size);
-
     avctx->sample_fmt = AV_SAMPLE_FMT_S16;
-    if (s->header) {
-        avctx->channels = s->header->nb_channels;
-        s->frame_size = s->header->frame_size;
-        spx_mode = s->header->mode;
+    if (avctx->extradata && avctx->extradata_size >= 80) {
+        header = speex_packet_to_header(avctx->extradata,
+                                        avctx->extradata_size);
+        if (!header)
+            av_log(avctx, AV_LOG_WARNING, "Invalid Speex header\n");
+    }
+    if (header) {
+        avctx->channels = header->nb_channels;
+        spx_mode = header->mode;
+        speex_header_free(header);
     } else {
         switch (avctx->sample_rate) {
         case 8000: spx_mode = 0; break;
@@ -69,6 +74,7 @@ static av_cold int libspeex_decode_init(AVCodecContext *avctx)
         return AVERROR_INVALIDDATA;
     }
     avctx->sample_rate = 8000 << spx_mode;
+    s->frame_size = 160 << spx_mode;
 
     if (avctx->channels < 1 || avctx->channels > 2) {
         /* libspeex can handle mono or stereo if initialized as stereo */
@@ -76,6 +82,8 @@ static av_cold int libspeex_decode_init(AVCodecContext *avctx)
                "Decoding as stereo.\n", avctx->channels);
         avctx->channels = 2;
     }
+    avctx->channel_layout = avctx->channels == 2 ? AV_CH_LAYOUT_STEREO :
+                                                   AV_CH_LAYOUT_MONO;
 
     speex_bits_init(&s->bits);
     s->dec_state = speex_decoder_init(mode);
@@ -84,10 +92,6 @@ static av_cold int libspeex_decode_init(AVCodecContext *avctx)
         return -1;
     }
 
-    if (!s->header) {
-        speex_decoder_ctl(s->dec_state, SPEEX_GET_FRAME_SIZE, &s->frame_size);
-    }
-
     if (avctx->channels == 2) {
         SpeexCallback callback;
         callback.callback_id = SPEEX_INBAND_STEREO;
@@ -120,10 +124,12 @@ static int libspeex_decode_frame(AVCodecContext *avctx, void *data,
     }
     output = (int16_t *)s->frame.data[0];
 
-    /* if there is not enough data left for the smallest possible frame,
-       reset the libspeex buffer using the current packet, otherwise ignore
-       the current packet and keep decoding frames from the libspeex buffer. */
-    if (speex_bits_remaining(&s->bits) < 43) {
+    /* if there is not enough data left for the smallest possible frame or the
+       next 5 bits are a terminator code, reset the libspeex buffer using the
+       current packet, otherwise ignore the current packet and keep decoding
+       frames from the libspeex buffer. */
+    if (speex_bits_remaining(&s->bits) < 5 ||
+        speex_bits_peek_unsigned(&s->bits, 5) == 0x1F) {
         /* check for flush packet */
         if (!buf || !buf_size) {
             *got_frame_ptr = 0;
@@ -153,7 +159,6 @@ static av_cold int libspeex_decode_close(AVCodecContext *avctx)
 {
     LibSpeexContext *s = avctx->priv_data;
 
-    speex_header_free(s->header);
     speex_bits_destroy(&s->bits);
     speex_decoder_destroy(s->dec_state);
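Note (not part of the commit): the new end-of-packet test above peeks at the next 5 bits without consuming them and compares them against the terminator code 0x1F, as the updated comment in the hunk describes, instead of relying on the old "fewer than 43 bits left" heuristic. A minimal standalone sketch of that check, assuming a SpeexBits structure that has already been filled with speex_bits_read_from(); the helper name is purely illustrative:

    #include <speex/speex.h>

    /* Illustrative helper: returns nonzero when no further frame can be
     * decoded from the buffered packet data. */
    static int spx_packet_exhausted(SpeexBits *bits)
    {
        /* fewer than 5 bits left: nothing decodable remains */
        if (speex_bits_remaining(bits) < 5)
            return 1;
        /* peek (do not consume) the next 5 bits; 0x1F is the terminator code */
        return speex_bits_peek_unsigned(bits, 5) == 0x1F;
    }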
libavcodec/mpegvideo.c
@@ -531,6 +531,7 @@ void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
 int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                   const AVCodecContext *src)
 {
+    int i;
     MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
 
     if (dst == src)
@@ -574,6 +575,10 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
     memcpy(&s->last_picture, &s1->last_picture,
            (char *) &s1->last_picture_ptr - (char *) &s1->last_picture);
 
+    // reset s->picture[].f.extended_data to s->picture[].f.data
+    for (i = 0; i < s->picture_count; i++)
+        s->picture[i].f.extended_data = s->picture[i].f.data;
+
     s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
     s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
     s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
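Note (not part of the commit): the loop added above restores an AVFrame invariant after the struct-level memcpy of the picture array in the frame-threading update. For video frames, extended_data carries no additional planes and must simply alias the frame's own data[] array; after a raw memcpy the copied pointer would still refer to the source context's frame. A minimal sketch of the invariant, with a hypothetical helper name:

    #include <libavcodec/avcodec.h>

    /* Illustrative helper: for video frames, extended_data must point at the
     * frame's own data[] array rather than at another frame's planes. */
    static void reset_extended_data(AVFrame *frame)
    {
        frame->extended_data = frame->data;
    }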
libavfilter/vf_overlay.c
@@ -276,24 +276,10 @@ fail:
 static int config_output(AVFilterLink *outlink)
 {
     AVFilterContext *ctx = outlink->src;
-    int exact;
-    // common timebase computation:
-    AVRational tb1 = ctx->inputs[MAIN   ]->time_base;
-    AVRational tb2 = ctx->inputs[OVERLAY]->time_base;
-    AVRational *tb = &ctx->outputs[0]->time_base;
-    exact = av_reduce(&tb->num, &tb->den,
-                      av_gcd((int64_t)tb1.num * tb2.den,
-                             (int64_t)tb2.num * tb1.den),
-                      (int64_t)tb1.den * tb2.den, INT_MAX);
-    av_log(ctx, AV_LOG_VERBOSE,
-           "main_tb:%d/%d overlay_tb:%d/%d -> tb:%d/%d exact:%d\n",
-           tb1.num, tb1.den, tb2.num, tb2.den, tb->num, tb->den, exact);
-    if (!exact)
-        av_log(ctx, AV_LOG_WARNING,
-               "Timestamp conversion inexact, timestamp information loss may occurr\n");
 
     outlink->w = ctx->inputs[MAIN]->w;
     outlink->h = ctx->inputs[MAIN]->h;
+    outlink->time_base = ctx->inputs[MAIN]->time_base;
 
     return 0;
 }
@@ -448,7 +434,8 @@ static int try_start_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic)
      * before the main frame, we can drop the current overlay. */
     while (1) {
         next_overpic = ff_bufqueue_peek(&over->queue_over, 0);
-        if (!next_overpic || next_overpic->pts > mainpic->pts)
+        if (!next_overpic || av_compare_ts(next_overpic->pts, ctx->inputs[OVERLAY]->time_base,
+                                           mainpic->pts     , ctx->inputs[MAIN]->time_base) > 0)
             break;
         ff_bufqueue_get(&over->queue_over);
         avfilter_unref_buffer(over->overpicref);
@@ -457,7 +444,8 @@ static int try_start_frame(AVFilterContext *ctx, AVFilterBufferRef *mainpic)
     /* If there is no next frame and no EOF and the overlay frame is before
      * the main frame, we can not know yet if it will be superseded. */
     if (!over->queue_over.available && !over->overlay_eof &&
-        (!over->overpicref || over->overpicref->pts < mainpic->pts))
+        (!over->overpicref || av_compare_ts(over->overpicref->pts, ctx->inputs[OVERLAY]->time_base,
+                                            mainpic->pts          , ctx->inputs[MAIN]->time_base) < 0))
         return AVERROR(EAGAIN);
     /* At this point, we know that the current overlay frame extends to the
      * time of the main frame. */
@@ -525,8 +513,6 @@ static int start_frame_main(AVFilterLink *inlink, AVFilterBufferRef *inpicref)
     if ((ret = flush_frames(ctx)) < 0)
         return ret;
-    inpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[MAIN]->time_base,
-                                 ctx->outputs[0]->time_base);
     if ((ret = try_start_frame(ctx, inpicref)) < 0) {
         if (ret != AVERROR(EAGAIN))
             return ret;
@@ -583,8 +569,6 @@ static int end_frame_over(AVFilterLink *inlink)
     if ((ret = flush_frames(ctx)) < 0)
         return ret;
-    inpicref->pts = av_rescale_q(inpicref->pts, ctx->inputs[OVERLAY]->time_base,
-                                 ctx->outputs[0]->time_base);
     ff_bufqueue_add(ctx, &over->queue_over, inpicref);
     ret = try_push_frame(ctx);
     return ret == AVERROR(EAGAIN) ? 0 : ret;
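Note (not part of the commit): the replacement logic compares the overlay and main timestamps directly in their own timebases with av_compare_ts() instead of rescaling both inputs to a common output timebase. A small self-contained sketch of that comparison, with made-up values and timebases:

    #include <libavutil/mathematics.h>

    /* Illustrative values only: 400/1000 s and 10/25 s are the same instant,
     * so av_compare_ts() returns 0; it returns -1 or 1 when the first
     * timestamp is earlier or later, like a qsort comparator. */
    int compare_example(void)
    {
        AVRational tb_overlay = { 1, 1000 };
        AVRational tb_main    = { 1, 25 };
        return av_compare_ts(400, tb_overlay, 10, tb_main); /* 0 */
    }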
libavformat/h264dec.c
@@ -54,7 +54,7 @@ static int h264_probe(AVProbeData *p)
         case 1: sli++; break;
         case 5: idr++; break;
         case 7:
-            if(p->buf[i+2]&0x03)
+            if (p->buf[i + 2] & 0x03)
                 return 0;
             sps++;
             break;
libavutil/samplefmt.c
@@ -183,7 +183,7 @@ int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,
     if (size < 0)
         return size;
 
-    buf = av_mallocz(size);
+    buf = av_malloc(size);
     if (!buf)
         return AVERROR(ENOMEM);
@@ -193,6 +193,9 @@ int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,
         av_free(buf);
         return size;
     }
+
+    av_samples_set_silence(audio_data, 0, nb_samples, nb_channels, sample_fmt);
+
     return 0;
 }
libavutil/samplefmt.h
@@ -194,6 +194,7 @@ int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
  * Allocate a samples buffer for nb_samples samples, and fill data pointers and
  * linesize accordingly.
  * The allocated samples buffer can be freed by using av_freep(&audio_data[0])
+ * Allocated data will be initialized to silence.
  *
  * @see enum AVSampleFormat
  * The documentation for AVSampleFormat describes the data layout.
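Note (not part of the commit): with this change a buffer coming from av_samples_alloc() is already silenced (the silence value depends on the format, e.g. 0x80 for AV_SAMPLE_FMT_U8 and 0 for signed formats), so callers no longer need an explicit av_samples_set_silence() after allocation. A small usage sketch with arbitrary channel count, sample count and format:

    #include <libavutil/samplefmt.h>
    #include <libavutil/mem.h>

    /* Allocate a planar S16 buffer for 2 channels x 1024 samples; the
     * parameters here are arbitrary.  After the change above the returned
     * planes already contain silence. */
    int alloc_silent(void)
    {
        uint8_t *data[2];   /* one pointer per channel for planar layouts */
        int linesize, ret;

        ret = av_samples_alloc(data, &linesize, 2, 1024, AV_SAMPLE_FMT_S16P, 0);
        if (ret < 0)
            return ret;
        /* ... use data[0] / data[1] ... */
        av_freep(&data[0]); /* one free releases the whole buffer */
        return 0;
    }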
libavutil/version.h
@@ -40,7 +40,7 @@
 #define LIBAVUTIL_VERSION_MAJOR 51
 #define LIBAVUTIL_VERSION_MINOR 73
-#define LIBAVUTIL_VERSION_MICRO 101
+#define LIBAVUTIL_VERSION_MICRO 102
 
 #define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
                                              LIBAVUTIL_VERSION_MINOR, \