Commit b9d887c2 authored by Michael Niedermayer

Merge commit '072be3e8'

* commit '072be3e8':
  h264: set parameters from SPS whenever it changes
  asyncts: cosmetics: reindent

Conflicts:
	libavcodec/h264.c
Merged-by: Michael Niedermayer <michaelni@gmx.at>
parents b6e7041f 072be3e8
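
Orientation for the h264 hunks below: the merged change keeps the id of the SPS currently in use plus a per-SPS "new" flag, and copies the SPS / re-derives decoder parameters only when either of them changes. A minimal, self-contained sketch of that pattern follows; ToySPS, ToyContext and activate_sps are invented names for illustration only, not the FFmpeg structs or functions.

    #include <stdio.h>

    /* Simplified stand-ins for SPS / H264Context; illustrative only. */
    typedef struct {
        int bit_depth_luma;
        int chroma_format_idc;
        int new;               /* set when this SPS is (re)parsed */
    } ToySPS;

    typedef struct {
        unsigned current_sps_id;
        ToySPS  *sps_buffers[32];
        ToySPS   sps;          /* active copy */
    } ToyContext;

    /* Copy the referenced SPS into the context only if the id changed
     * or the stored SPS was re-parsed since it was last activated. */
    static int activate_sps(ToyContext *h, unsigned sps_id)
    {
        if (!h->sps_buffers[sps_id])
            return -1;
        if (sps_id != h->current_sps_id || h->sps_buffers[sps_id]->new) {
            h->sps_buffers[sps_id]->new = 0;
            h->current_sps_id = sps_id;
            h->sps = *h->sps_buffers[sps_id];
            /* ...per-SPS parameters would be re-derived here... */
            printf("reinit for SPS %u (bit depth %d)\n",
                   sps_id, h->sps.bit_depth_luma);
        }
        return 0;
    }

    int main(void)
    {
        ToySPS sps0 = { .bit_depth_luma = 8, .chroma_format_idc = 1, .new = 1 };
        ToyContext h = { .current_sps_id = 0 };
        h.sps_buffers[0] = &sps0;

        activate_sps(&h, 0); /* first use: 'new' is set, parameters derived */
        activate_sps(&h, 0); /* unchanged SPS: nothing to re-derive */
        sps0.new = 1;        /* pretend the SPS was re-parsed */
        activate_sps(&h, 0); /* re-derives again */
        return 0;
    }
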
@@ -2357,6 +2357,54 @@ int ff_h264_get_profile(SPS *sps)
     return profile;
 }
+
+static int h264_set_parameter_from_sps(H264Context *h)
+{
+    MpegEncContext *s = &h->s;
+
+    if (s->flags & CODEC_FLAG_LOW_DELAY ||
+        (h->sps.bitstream_restriction_flag &&
+         !h->sps.num_reorder_frames)) {
+        if (s->avctx->has_b_frames > 1 || h->delayed_pic[0])
+            av_log(h->s.avctx, AV_LOG_WARNING, "Delayed frames seen. "
+                   "Reenabling low delay requires a codec flush.\n");
+        else
+            s->low_delay = 1;
+    }
+
+    if (s->avctx->has_b_frames < 2)
+        s->avctx->has_b_frames = !s->low_delay;
+
+    if (s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
+        h->cur_chroma_format_idc != h->sps.chroma_format_idc) {
+        if (s->avctx->codec &&
+            s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU &&
+            (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) {
+            av_log(s->avctx, AV_LOG_ERROR,
+                   "VDPAU decoding does not support video colorspace.\n");
+            return AVERROR_INVALIDDATA;
+        }
+        if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 14 &&
+            h->sps.bit_depth_luma != 11 && h->sps.bit_depth_luma != 13 &&
+            (h->sps.bit_depth_luma != 9 || !CHROMA422)) {
+            s->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
+            h->cur_chroma_format_idc = h->sps.chroma_format_idc;
+            h->pixel_shift = h->sps.bit_depth_luma > 8;
+            ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma,
+                            h->sps.chroma_format_idc);
+            ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma,
+                              h->sps.chroma_format_idc);
+            s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
+            ff_dsputil_init(&s->dsp, s->avctx);
+        } else {
+            av_log(s->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d\n",
+                   h->sps.bit_depth_luma);
+            return AVERROR_INVALIDDATA;
+        }
+    }
+
+    return 0;
+}
 
 /**
  * Decode a slice header.
  * This will also call ff_MPV_common_init() and frame_start() as needed.
@@ -2373,7 +2421,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
     MpegEncContext *const s0 = &h0->s;
     unsigned int first_mb_in_slice;
     unsigned int pps_id;
-    int num_ref_idx_active_override_flag;
+    int num_ref_idx_active_override_flag, ret;
     unsigned int slice_type, tmp, i, j;
     int default_ref_list_done = 0;
     int last_pic_structure, last_pic_droppable;
@@ -2450,7 +2498,14 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
                h->pps.sps_id);
         return -1;
     }
-    h->sps = *h0->sps_buffers[h->pps.sps_id];
+
+    if (h->pps.sps_id != h->current_sps_id ||
+        h0->sps_buffers[h->pps.sps_id]->new) {
+        h0->sps_buffers[h->pps.sps_id]->new = 0;
+
+        h->current_sps_id = h->pps.sps_id;
+        h->sps = *h0->sps_buffers[h->pps.sps_id];
+    }
 
     s->avctx->profile = ff_h264_get_profile(&h->sps);
     s->avctx->level = h->sps.level_idc;
@@ -2508,33 +2563,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
         s->avctx->sample_aspect_ratio = h->sps.sar;
         av_assert0(s->avctx->sample_aspect_ratio.den);
 
-        if (s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU
-            && (h->sps.bit_depth_luma != 8 ||
-                h->sps.chroma_format_idc > 1)) {
-            av_log(s->avctx, AV_LOG_ERROR,
-                   "VDPAU decoding does not support video "
-                   "colorspace\n");
-            return -1;
-        }
-
-        if (s->avctx->bits_per_raw_sample != h->sps.bit_depth_luma ||
-            h->cur_chroma_format_idc != h->sps.chroma_format_idc) {
-            if (h->sps.bit_depth_luma >= 8 && h->sps.bit_depth_luma <= 14 && h->sps.bit_depth_luma != 11 && h->sps.bit_depth_luma != 13 &&
-                (h->sps.bit_depth_luma != 9 || !CHROMA422)) {
-                s->avctx->bits_per_raw_sample = h->sps.bit_depth_luma;
-                h->cur_chroma_format_idc = h->sps.chroma_format_idc;
-                h->pixel_shift = h->sps.bit_depth_luma > 8;
-
-                ff_h264dsp_init(&h->h264dsp, h->sps.bit_depth_luma, h->sps.chroma_format_idc);
-                ff_h264_pred_init(&h->hpc, s->codec_id, h->sps.bit_depth_luma, h->sps.chroma_format_idc);
-                s->dsp.dct_bits = h->sps.bit_depth_luma > 8 ? 32 : 16;
-                ff_dsputil_init(&s->dsp, s->avctx);
-            } else {
-                av_log(s->avctx, AV_LOG_ERROR, "Unsupported bit depth: %d chroma_idc: %d\n",
-                       h->sps.bit_depth_luma, h->sps.chroma_format_idc);
-                return -1;
-            }
-        }
+        if ((ret = h264_set_parameter_from_sps(h)) < 0)
+            return ret;
 
         if (h->sps.video_signal_type_present_flag) {
             s->avctx->color_range = h->sps.full_range>0 ? AVCOL_RANGE_JPEG
@@ -4027,6 +4057,7 @@ again:
                 ff_h264_decode_seq_parameter_set(h);
             }
 
             if (s->flags & CODEC_FLAG_LOW_DELAY ||
                 (h->sps.bitstream_restriction_flag &&
                  !h->sps.num_reorder_frames)) {
...
@@ -207,6 +207,7 @@ typedef struct SPS {
     int bit_depth_chroma;              ///< bit_depth_chroma_minus8 + 8
     int residual_color_transform_flag; ///< residual_colour_transform_flag
     int constraint_set_flags;          ///< constraint_set[0-3]_flag
+    int new;                           ///< flag to keep track if the decoder context needs re-init due to changed SPS
 } SPS;
 
 /**
@@ -333,6 +334,7 @@ typedef struct H264Context {
     int emu_edge_width;
     int emu_edge_height;
 
+    unsigned current_sps_id; ///< id of the current SPS
     SPS sps; ///< current sps
 
     /**
...
@@ -517,10 +517,13 @@ int ff_h264_decode_seq_parameter_set(H264Context *h){
                h->sps.bitstream_restriction_flag ? sps->num_reorder_frames : -1
                );
     }
+    sps->new = 1;
 
     av_free(h->sps_buffers[sps_id]);
-    h->sps_buffers[sps_id]= sps;
+    h->sps_buffers[sps_id] = sps;
     h->sps = *sps;
+    h->current_sps_id = sps_id;
+
     return 0;
 fail:
     av_free(sps);
...
@@ -152,19 +152,19 @@ static int request_frame(AVFilterLink *link)
         handle_trimming(ctx);
 
         if (nb_samples = get_delay(s)) {
             AVFilterBufferRef *buf = ff_get_audio_buffer(link, AV_PERM_WRITE,
                                                          nb_samples);
             if (!buf)
                 return AVERROR(ENOMEM);
 
             ret = avresample_convert(s->avr, buf->extended_data,
                                      buf->linesize[0], nb_samples, NULL, 0, 0);
             if (ret <= 0) {
                 avfilter_unref_bufferp(&buf);
                 return (ret < 0) ? ret : AVERROR_EOF;
             }
 
             buf->pts = s->pts;
             return ff_filter_frame(link, buf);
         }
     }
...