Commit 3176217c authored by Anton Khirnov's avatar Anton Khirnov

h264: decouple h264_ps from the h264 decoder

Make the SPS/PPS parsing independent of the H264Context, to allow
decoupling the parser from the decoder. The change is modelled after the
one done earlier for HEVC.

Move the dequant buffers to the PPS to avoid complex checks whether they
changed and an expensive copy for frame threads.
parent 44d16df4
......@@ -50,6 +50,8 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
DXVA_PicParams_H264 *pp)
{
const H264Picture *current_picture = h->cur_pic_ptr;
const SPS *sps = h->ps.sps;
const PPS *pps = h->ps.pps;
int i, j;
memset(pp, 0, sizeof(*pp));
......@@ -94,30 +96,30 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
pp->wFrameWidthInMbsMinus1 = h->mb_width - 1;
pp->wFrameHeightInMbsMinus1 = h->mb_height - 1;
pp->num_ref_frames = h->sps.ref_frame_count;
pp->num_ref_frames = sps->ref_frame_count;
pp->wBitFields = ((h->picture_structure != PICT_FRAME) << 0) |
((h->sps.mb_aff &&
((sps->mb_aff &&
(h->picture_structure == PICT_FRAME)) << 1) |
(h->sps.residual_color_transform_flag << 2) |
(sps->residual_color_transform_flag << 2) |
/* sp_for_switch_flag (not implemented by Libav) */
(0 << 3) |
(h->sps.chroma_format_idc << 4) |
(sps->chroma_format_idc << 4) |
((h->nal_ref_idc != 0) << 6) |
(h->pps.constrained_intra_pred << 7) |
(h->pps.weighted_pred << 8) |
(h->pps.weighted_bipred_idc << 9) |
(pps->constrained_intra_pred << 7) |
(pps->weighted_pred << 8) |
(pps->weighted_bipred_idc << 9) |
/* MbsConsecutiveFlag */
(1 << 11) |
(h->sps.frame_mbs_only_flag << 12) |
(h->pps.transform_8x8_mode << 13) |
((h->sps.level_idc >= 31) << 14) |
(sps->frame_mbs_only_flag << 12) |
(pps->transform_8x8_mode << 13) |
((sps->level_idc >= 31) << 14) |
/* IntraPicFlag (Modified if we detect a non
* intra slice in dxva2_h264_decode_slice) */
(1 << 15);
pp->bit_depth_luma_minus8 = h->sps.bit_depth_luma - 8;
pp->bit_depth_chroma_minus8 = h->sps.bit_depth_chroma - 8;
pp->bit_depth_luma_minus8 = sps->bit_depth_luma - 8;
pp->bit_depth_chroma_minus8 = sps->bit_depth_chroma - 8;
if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG)
pp->Reserved16Bits = 0;
else if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO)
......@@ -133,28 +135,28 @@ static void fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *
if ((h->picture_structure & PICT_BOTTOM_FIELD) &&
current_picture->field_poc[1] != INT_MAX)
pp->CurrFieldOrderCnt[1] = current_picture->field_poc[1];
pp->pic_init_qs_minus26 = h->pps.init_qs - 26;
pp->chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0];
pp->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
pp->pic_init_qs_minus26 = pps->init_qs - 26;
pp->chroma_qp_index_offset = pps->chroma_qp_index_offset[0];
pp->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1];
pp->ContinuationFlag = 1;
pp->pic_init_qp_minus26 = h->pps.init_qp - 26;
pp->num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1;
pp->num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1;
pp->pic_init_qp_minus26 = pps->init_qp - 26;
pp->num_ref_idx_l0_active_minus1 = pps->ref_count[0] - 1;
pp->num_ref_idx_l1_active_minus1 = pps->ref_count[1] - 1;
pp->Reserved8BitsA = 0;
pp->frame_num = h->frame_num;
pp->log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4;
pp->pic_order_cnt_type = h->sps.poc_type;
if (h->sps.poc_type == 0)
pp->log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4;
else if (h->sps.poc_type == 1)
pp->delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
pp->direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag;
pp->entropy_coding_mode_flag = h->pps.cabac;
pp->pic_order_present_flag = h->pps.pic_order_present;
pp->num_slice_groups_minus1 = h->pps.slice_group_count - 1;
pp->slice_group_map_type = h->pps.mb_slice_group_map_type;
pp->deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
pp->redundant_pic_cnt_present_flag= h->pps.redundant_pic_cnt_present;
pp->log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4;
pp->pic_order_cnt_type = sps->poc_type;
if (sps->poc_type == 0)
pp->log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4;
else if (sps->poc_type == 1)
pp->delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag;
pp->direct_8x8_inference_flag = sps->direct_8x8_inference_flag;
pp->entropy_coding_mode_flag = pps->cabac;
pp->pic_order_present_flag = pps->pic_order_present;
pp->num_slice_groups_minus1 = pps->slice_group_count - 1;
pp->slice_group_map_type = pps->mb_slice_group_map_type;
pp->deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
pp->redundant_pic_cnt_present_flag= pps->redundant_pic_cnt_present;
pp->Reserved8BitsB = 0;
pp->slice_group_change_rate_minus1= 0; /* XXX not implemented by Libav */
//pp->SliceGroupMap[810]; /* XXX not implemented by Libav */
......@@ -167,20 +169,20 @@ static void fill_scaling_lists(const AVCodecContext *avctx, AVDXVAContext *ctx,
if (DXVA_CONTEXT_WORKAROUND(avctx, ctx) & FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG) {
for (i = 0; i < 6; i++)
for (j = 0; j < 16; j++)
qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][j];
qm->bScalingLists4x4[i][j] = pps->scaling_matrix4[i][j];
for (i = 0; i < 64; i++) {
qm->bScalingLists8x8[0][i] = h->pps.scaling_matrix8[0][i];
qm->bScalingLists8x8[1][i] = h->pps.scaling_matrix8[3][i];
qm->bScalingLists8x8[0][i] = pps->scaling_matrix8[0][i];
qm->bScalingLists8x8[1][i] = pps->scaling_matrix8[3][i];
}
} else {
for (i = 0; i < 6; i++)
for (j = 0; j < 16; j++)
qm->bScalingLists4x4[i][j] = h->pps.scaling_matrix4[i][ff_zigzag_scan[j]];
qm->bScalingLists4x4[i][j] = pps->scaling_matrix4[i][ff_zigzag_scan[j]];
for (i = 0; i < 64; i++) {
qm->bScalingLists8x8[0][i] = h->pps.scaling_matrix8[0][ff_zigzag_direct[i]];
qm->bScalingLists8x8[1][i] = h->pps.scaling_matrix8[3][ff_zigzag_direct[i]];
qm->bScalingLists8x8[0][i] = pps->scaling_matrix8[0][ff_zigzag_direct[i]];
qm->bScalingLists8x8[1][i] = pps->scaling_matrix8[3][ff_zigzag_direct[i]];
}
}
}
......@@ -280,11 +282,11 @@ static void fill_slice_long(AVCodecContext *avctx, DXVA_Slice_H264_Long *slice,
}
}
slice->slice_qs_delta = 0; /* XXX not implemented by Libav */
slice->slice_qp_delta = sl->qscale - h->pps.init_qp;
slice->slice_qp_delta = sl->qscale - h->ps.pps->init_qp;
slice->redundant_pic_cnt = sl->redundant_pic_count;
if (sl->slice_type == AV_PICTURE_TYPE_B)
slice->direct_spatial_mv_pred_flag = sl->direct_spatial_mv_pred;
slice->cabac_init_idc = h->pps.cabac ? sl->cabac_init_idc : 0;
slice->cabac_init_idc = h->ps.pps->cabac ? sl->cabac_init_idc : 0;
if (sl->deblocking_filter < 2)
slice->disable_deblocking_filter_idc = 1 - sl->deblocking_filter;
else
......
......@@ -206,9 +206,6 @@ int ff_h264_alloc_tables(H264Context *h)
h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
}
if (!h->dequant4_coeff[0])
ff_h264_init_dequant_tables(h);
return 0;
fail:
......@@ -397,7 +394,6 @@ static int h264_init_context(AVCodecContext *avctx, H264Context *h)
int i;
h->avctx = avctx;
h->dequant_coeff_pps = -1;
h->picture_structure = PICT_FRAME;
h->slice_context_count = 1;
......@@ -474,9 +470,9 @@ av_cold int ff_h264_decode_init(AVCodecContext *avctx)
}
}
if (h->sps.bitstream_restriction_flag &&
h->avctx->has_b_frames < h->sps.num_reorder_frames) {
h->avctx->has_b_frames = h->sps.num_reorder_frames;
if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
h->low_delay = 0;
}
......@@ -520,6 +516,7 @@ static int decode_init_thread_copy(AVCodecContext *avctx)
*/
static void decode_postinit(H264Context *h, int setup_finished)
{
const SPS *sps = h->ps.sps;
H264Picture *out = h->cur_pic_ptr;
H264Picture *cur = h->cur_pic_ptr;
int i, pics, out_of_order, out_idx;
......@@ -547,7 +544,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
/* Prioritize picture timing SEI information over used
* decoding process if it exists. */
if (h->sps.pic_struct_present_flag) {
if (sps->pic_struct_present_flag) {
switch (h->sei_pic_struct) {
case SEI_PIC_STRUCT_FRAME:
break;
......@@ -591,7 +588,7 @@ static void decode_postinit(H264Context *h, int setup_finished)
/* Derive top_field_first from field pocs. */
cur->f->top_field_first = cur->field_poc[0] < cur->field_poc[1];
} else {
if (cur->f->interlaced_frame || h->sps.pic_struct_present_flag) {
if (cur->f->interlaced_frame || sps->pic_struct_present_flag) {
/* Use picture timing SEI information. Even if it is a
* information of a past frame, better than nothing. */
if (h->sei_pic_struct == SEI_PIC_STRUCT_TOP_BOTTOM ||
......@@ -684,9 +681,9 @@ static void decode_postinit(H264Context *h, int setup_finished)
// FIXME do something with unavailable reference frames
/* Sort B-frames into display order */
if (h->sps.bitstream_restriction_flag ||
if (sps->bitstream_restriction_flag ||
h->avctx->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, h->sps.num_reorder_frames);
h->avctx->has_b_frames = FFMAX(h->avctx->has_b_frames, sps->num_reorder_frames);
}
h->low_delay = !h->avctx->has_b_frames;
......@@ -746,8 +743,8 @@ static void decode_postinit(H264Context *h, int setup_finished)
out_of_order = !out->f->key_frame && !h->mmco_reset &&
(out->poc < h->next_outputed_poc);
if (h->sps.bitstream_restriction_flag &&
h->avctx->has_b_frames >= h->sps.num_reorder_frames) {
if (sps->bitstream_restriction_flag &&
h->avctx->has_b_frames >= sps->num_reorder_frames) {
} else if (out_of_order && pics - 1 == h->avctx->has_b_frames &&
h->avctx->has_b_frames < MAX_DELAYED_PIC_COUNT) {
if (invalid + cnt < MAX_DELAYED_PIC_COUNT) {
......@@ -862,15 +859,16 @@ static void flush_dpb(AVCodecContext *avctx)
int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
{
const int max_frame_num = 1 << h->sps.log2_max_frame_num;
const SPS *sps = h->ps.sps;
const int max_frame_num = 1 << sps->log2_max_frame_num;
int field_poc[2];
h->frame_num_offset = h->prev_frame_num_offset;
if (h->frame_num < h->prev_frame_num)
h->frame_num_offset += max_frame_num;
if (h->sps.poc_type == 0) {
const int max_poc_lsb = 1 << h->sps.log2_max_poc_lsb;
if (sps->poc_type == 0) {
const int max_poc_lsb = 1 << sps->log2_max_poc_lsb;
if (h->poc_lsb < h->prev_poc_lsb &&
h->prev_poc_lsb - h->poc_lsb >= max_poc_lsb / 2)
......@@ -884,11 +882,11 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
field_poc[1] = h->poc_msb + h->poc_lsb;
if (h->picture_structure == PICT_FRAME)
field_poc[1] += h->delta_poc_bottom;
} else if (h->sps.poc_type == 1) {
} else if (sps->poc_type == 1) {
int abs_frame_num, expected_delta_per_poc_cycle, expectedpoc;
int i;
if (h->sps.poc_cycle_length != 0)
if (sps->poc_cycle_length != 0)
abs_frame_num = h->frame_num_offset + h->frame_num;
else
abs_frame_num = 0;
......@@ -897,25 +895,25 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
abs_frame_num--;
expected_delta_per_poc_cycle = 0;
for (i = 0; i < h->sps.poc_cycle_length; i++)
for (i = 0; i < sps->poc_cycle_length; i++)
// FIXME integrate during sps parse
expected_delta_per_poc_cycle += h->sps.offset_for_ref_frame[i];
expected_delta_per_poc_cycle += sps->offset_for_ref_frame[i];
if (abs_frame_num > 0) {
int poc_cycle_cnt = (abs_frame_num - 1) / h->sps.poc_cycle_length;
int frame_num_in_poc_cycle = (abs_frame_num - 1) % h->sps.poc_cycle_length;
int poc_cycle_cnt = (abs_frame_num - 1) / sps->poc_cycle_length;
int frame_num_in_poc_cycle = (abs_frame_num - 1) % sps->poc_cycle_length;
expectedpoc = poc_cycle_cnt * expected_delta_per_poc_cycle;
for (i = 0; i <= frame_num_in_poc_cycle; i++)
expectedpoc = expectedpoc + h->sps.offset_for_ref_frame[i];
expectedpoc = expectedpoc + sps->offset_for_ref_frame[i];
} else
expectedpoc = 0;
if (h->nal_ref_idc == 0)
expectedpoc = expectedpoc + h->sps.offset_for_non_ref_pic;
expectedpoc = expectedpoc + sps->offset_for_non_ref_pic;
field_poc[0] = expectedpoc + h->delta_poc[0];
field_poc[1] = field_poc[0] + h->sps.offset_for_top_to_bottom_field;
field_poc[1] = field_poc[0] + sps->offset_for_top_to_bottom_field;
if (h->picture_structure == PICT_FRAME)
field_poc[1] += h->delta_poc[1];
......@@ -945,7 +943,7 @@ int ff_init_poc(H264Context *h, int pic_field_poc[2], int *pic_poc)
*
* @return profile as defined by FF_PROFILE_H264_*
*/
int ff_h264_get_profile(SPS *sps)
int ff_h264_get_profile(const SPS *sps)
{
int profile = sps->profile_idc;
......@@ -1067,7 +1065,7 @@ again:
if (h->sei_recovery_frame_cnt >= 0 && h->recovery_frame < 0) {
h->recovery_frame = (h->frame_num + h->sei_recovery_frame_cnt) &
((1 << h->sps.log2_max_frame_num) - 1);
((1 << h->ps.sps->log2_max_frame_num) - 1);
}
h->cur_pic_ptr->f->key_frame |=
......@@ -1121,14 +1119,13 @@ again:
goto end;
break;
case NAL_SPS:
h->gb = nal->gb;
ret = ff_h264_decode_seq_parameter_set(h);
ret = ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
goto end;
break;
case NAL_PPS:
h->gb = nal->gb;
ret = ff_h264_decode_picture_parameter_set(h, nal->size_bits);
ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
nal->size_bits);
if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
goto end;
break;
......@@ -1201,14 +1198,14 @@ static int output_frame(H264Context *h, AVFrame *dst, AVFrame *src)
if (ret < 0)
return ret;
if (!h->sps.crop)
if (!h->ps.sps || !h->ps.sps->crop)
return 0;
for (i = 0; i < 3; i++) {
int hshift = (i > 0) ? h->chroma_x_shift : 0;
int vshift = (i > 0) ? h->chroma_y_shift : 0;
int off = ((h->sps.crop_left >> hshift) << h->pixel_shift) +
(h->sps.crop_top >> vshift) * dst->linesize[i];
int off = ((h->ps.sps->crop_left >> hshift) << h->pixel_shift) +
(h->ps.sps->crop_top >> vshift) * dst->linesize[i];
dst->data[i] += off;
}
return 0;
......@@ -1321,10 +1318,10 @@ av_cold void ff_h264_free_context(H264Context *h)
h->nb_slice_ctx = 0;
for (i = 0; i < MAX_SPS_COUNT; i++)
av_freep(h->sps_buffers + i);
av_buffer_unref(&h->ps.sps_list[i]);
for (i = 0; i < MAX_PPS_COUNT; i++)
av_freep(h->pps_buffers + i);
av_buffer_unref(&h->ps.pps_list[i]);
ff_h2645_packet_uninit(&h->pkt);
}
......
......@@ -28,6 +28,7 @@
#ifndef AVCODEC_H264_H
#define AVCODEC_H264_H
#include "libavutil/buffer.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/thread.h"
#include "cabac.h"
......@@ -92,11 +93,11 @@
#define FIELD_OR_MBAFF_PICTURE(h) (FRAME_MBAFF(h) || FIELD_PICTURE(h))
#ifndef CABAC
#define CABAC(h) h->pps.cabac
#define CABAC(h) h->ps.pps->cabac
#endif
#define CHROMA422(h) (h->sps.chroma_format_idc == 2)
#define CHROMA444(h) (h->sps.chroma_format_idc == 3)
#define CHROMA422(h) (h->ps.sps->chroma_format_idc == 2)
#define CHROMA444(h) (h->ps.sps->chroma_format_idc == 3)
#define EXTENDED_SAR 255
......@@ -214,7 +215,6 @@ typedef struct SPS {
int bit_depth_chroma; ///< bit_depth_chroma_minus8 + 8
int residual_color_transform_flag; ///< residual_colour_transform_flag
int constraint_set_flags; ///< constraint_set[0-3]_flag
int new; ///< flag to keep track if the decoder context needs re-init due to changed SPS
} SPS;
/**
......@@ -240,8 +240,23 @@ typedef struct PPS {
uint8_t scaling_matrix8[6][64];
uint8_t chroma_qp_table[2][64]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
int chroma_qp_diff;
uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16];
uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
uint32_t(*dequant4_coeff[6])[16];
uint32_t(*dequant8_coeff[6])[64];
} PPS;
typedef struct H264ParamSets {
AVBufferRef *sps_list[MAX_SPS_COUNT];
AVBufferRef *pps_list[MAX_PPS_COUNT];
/* currently active parameters sets */
const PPS *pps;
// FIXME this should properly be const
SPS *sps;
} H264ParamSets;
/**
* Memory management control operation opcode.
*/
......@@ -506,14 +521,6 @@ typedef struct H264Context {
uint32_t *mb2br_xy;
int b_stride; // FIXME use s->b4_stride
SPS sps; ///< current sps
PPS pps; ///< current pps
uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16]; // FIXME should these be moved down?
uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
uint32_t(*dequant4_coeff[6])[16];
uint32_t(*dequant8_coeff[6])[64];
uint16_t *slice_table; ///< slice_table_base + 2*mb_stride + 1
// interlacing specific flags
......@@ -566,10 +573,7 @@ typedef struct H264Context {
int bit_depth_luma; ///< luma bit depth from sps to detect changes
int chroma_format_idc; ///< chroma format from sps to detect changes
SPS *sps_buffers[MAX_SPS_COUNT];
PPS *pps_buffers[MAX_PPS_COUNT];
int dequant_coeff_pps; ///< reinit tables when pps changes
H264ParamSets ps;
uint16_t *slice_table_base;
......@@ -756,17 +760,19 @@ int ff_h264_decode_sei(H264Context *h);
/**
* Decode SPS
*/
int ff_h264_decode_seq_parameter_set(H264Context *h);
int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
H264ParamSets *ps);
/**
* compute profile from sps
*/
int ff_h264_get_profile(SPS *sps);
int ff_h264_get_profile(const SPS *sps);
/**
* Decode PPS
*/
int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length);
int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
H264ParamSets *ps, int bit_length);
/**
* Free any data that may have been allocated in the H264 context
......@@ -912,7 +918,7 @@ static av_always_inline uint16_t pack8to16(int a, int b)
*/
static av_always_inline int get_chroma_qp(const H264Context *h, int t, int qscale)
{
return h->pps.chroma_qp_table[t][qscale];
return h->ps.pps->chroma_qp_table[t][qscale];
}
/**
......@@ -1035,7 +1041,7 @@ static av_always_inline void write_back_motion(const H264Context *h,
static av_always_inline int get_dct8x8_allowed(const H264Context *h, H264SliceContext *sl)
{
if (h->sps.direct_8x8_inference_flag)
if (h->ps.sps->direct_8x8_inference_flag)
return !(AV_RN64A(sl->sub_mb_type) &
((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) *
0x0001000100010001ULL));
......
......@@ -1265,7 +1265,7 @@ void ff_h264_init_cabac_states(const H264Context *h, H264SliceContext *sl)
{
int i;
const int8_t (*tab)[2];
const int slice_qp = av_clip(sl->qscale - 6*(h->sps.bit_depth_luma-8), 0, 51);
const int slice_qp = av_clip(sl->qscale - 6*(h->ps.sps->bit_depth_luma-8), 0, 51);
if (sl->slice_type_nos == AV_PICTURE_TYPE_I) tab = cabac_context_init_I;
else tab = cabac_context_init_PB[sl->cabac_init_idc];
......@@ -1870,7 +1870,7 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2
decode_cabac_residual_dc(h, sl, sl->mb_luma_dc[p], ctx_cat[0][p], LUMA_DC_BLOCK_INDEX+p, scan, 16);
if( cbp&15 ) {
qmul = h->dequant4_coeff[p][qscale];
qmul = h->ps.pps->dequant4_coeff[p][qscale];
for( i4x4 = 0; i4x4 < 16; i4x4++ ) {
const int index = 16*p + i4x4;
decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), ctx_cat[1][p], index, scan + 1, qmul, 15);
......@@ -1885,9 +1885,9 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2
if( IS_8x8DCT(mb_type) ) {
const int index = 16*p + 4*i8x8;
decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), ctx_cat[3][p], index,
scan8x8, h->dequant8_coeff[cqm][qscale], 64);
scan8x8, h->ps.pps->dequant8_coeff[cqm][qscale], 64);
} else {
qmul = h->dequant4_coeff[cqm][qscale];
qmul = h->ps.pps->dequant4_coeff[cqm][qscale];
for( i4x4 = 0; i4x4 < 4; i4x4++ ) {
const int index = 16*p + 4*i8x8 + i4x4;
//START_TIMER
......@@ -1908,10 +1908,11 @@ static av_always_inline void decode_cabac_luma_residual(const H264Context *h, H2
*/
int ff_h264_decode_mb_cabac(const H264Context *h, H264SliceContext *sl)
{
const SPS *sps = h->ps.sps;
int mb_xy;
int mb_type, partition_count, cbp = 0;
int dct8x8_allowed= h->pps.transform_8x8_mode;
int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2;
int dct8x8_allowed= h->ps.pps->transform_8x8_mode;
int decode_chroma = sps->chroma_format_idc == 1 || sps->chroma_format_idc == 2;
const int pixel_shift = h->pixel_shift;
mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;
......@@ -2021,8 +2022,8 @@ decode_intra_mb:
h->slice_table[mb_xy] = sl->slice_num;
if(IS_INTRA_PCM(mb_type)) {
const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] *
h->sps.bit_depth_luma >> 3;
const int mb_size = ff_h264_mb_sizes[sps->chroma_format_idc] *
sps->bit_depth_luma >> 3;
const uint8_t *ptr;
// We assume these blocks are very rare so we do not optimize it.
......@@ -2206,7 +2207,7 @@ decode_intra_mb:
ff_h264_pred_direct_motion(h, sl, &mb_type);
fill_rectangle(sl->mvd_cache[0][scan8[0]], 4, 4, 8, 0, 2);
fill_rectangle(sl->mvd_cache[1][scan8[0]], 4, 4, 8, 0, 2);
dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
dct8x8_allowed &= sps->direct_8x8_inference_flag;
} else {
int list, i;
if(IS_16X16(mb_type)){
......@@ -2365,7 +2366,7 @@ decode_intra_mb:
if(get_cabac_noinline( &sl->cabac, &sl->cabac_state[60 + (sl->last_qscale_diff != 0)])){
int val = 1;
int ctx= 2;
const int max_qp = 51 + 6*(h->sps.bit_depth_luma-8);
const int max_qp = 51 + 6*(sps->bit_depth_luma-8);
while( get_cabac_noinline( &sl->cabac, &sl->cabac_state[60 + ctx] ) ) {
ctx= 3;
......@@ -2408,7 +2409,7 @@ decode_intra_mb:
int c, i, i8x8;
for( c = 0; c < 2; c++ ) {
int16_t *mb = sl->mb + (16*(16 + 16*c) << pixel_shift);
qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
qmul = h->ps.pps->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
for (i8x8 = 0; i8x8 < 2; i8x8++) {
for (i = 0; i < 4; i++) {
const int index = 16 + 16 * c + 8*i8x8 + i;
......@@ -2432,7 +2433,7 @@ decode_intra_mb:
if( cbp&0x20 ) {
int c, i;
for( c = 0; c < 2; c++ ) {
qmul = h->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
qmul = h->ps.pps->dequant4_coeff[c+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[c]];
for( i = 0; i < 4; i++ ) {
const int index = 16 + 16 * c + i;
decode_cabac_residual_nondc(h, sl, sl->mb + (16*index << pixel_shift), 4, index, scan + 1, qmul, 15);
......
......@@ -649,7 +649,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
for(i4x4=0; i4x4<4; i4x4++){
const int index= i4x4 + 4*i8x8 + p*16;
if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift),
index, scan + 1, h->dequant4_coeff[p][qscale], 15) < 0 ){
index, scan + 1, h->ps.pps->dequant4_coeff[p][qscale], 15) < 0 ){
return -1;
}
}
......@@ -671,7 +671,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
for(i4x4=0; i4x4<4; i4x4++){
const int index= i4x4 + 4*i8x8 + p*16;
if( decode_residual(h, sl, gb, buf, index, scan8x8+16*i4x4,
h->dequant8_coeff[cqm][qscale], 16) < 0 )
h->ps.pps->dequant8_coeff[cqm][qscale], 16) < 0 )
return -1;
}
nnz = &sl->non_zero_count_cache[scan8[4 * i8x8 + p * 16]];
......@@ -681,7 +681,7 @@ int decode_luma_residual(const H264Context *h, H264SliceContext *sl,
for(i4x4=0; i4x4<4; i4x4++){
const int index= i4x4 + 4*i8x8 + p*16;
if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift), index,
scan, h->dequant4_coeff[cqm][qscale], 16) < 0 ){
scan, h->ps.pps->dequant4_coeff[cqm][qscale], 16) < 0 ){
return -1;
}
new_cbp |= sl->non_zero_count_cache[scan8[index]] << i8x8;
......@@ -701,8 +701,8 @@ int ff_h264_decode_mb_cavlc(const H264Context *h, H264SliceContext *sl)
int mb_xy;
int partition_count;
unsigned int mb_type, cbp;
int dct8x8_allowed= h->pps.transform_8x8_mode;
int decode_chroma = h->sps.chroma_format_idc == 1 || h->sps.chroma_format_idc == 2;
int dct8x8_allowed= h->ps.pps->transform_8x8_mode;
int decode_chroma = h->ps.sps->chroma_format_idc == 1 || h->ps.sps->chroma_format_idc == 2;
const int pixel_shift = h->pixel_shift;
mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;
......@@ -768,8 +768,8 @@ decode_intra_mb:
h->slice_table[mb_xy] = sl->slice_num;
if(IS_INTRA_PCM(mb_type)){
const int mb_size = ff_h264_mb_sizes[h->sps.chroma_format_idc] *
h->sps.bit_depth_luma;
const int mb_size = ff_h264_mb_sizes[h->ps.sps->chroma_format_idc] *
h->ps.sps->bit_depth_luma;
// We assume these blocks are very rare so we do not optimize it.
sl->intra_pcm_ptr = align_get_bits(&sl->gb);
......@@ -942,7 +942,7 @@ decode_intra_mb:
}
}else if(IS_DIRECT(mb_type)){
ff_h264_pred_direct_motion(h, sl, &mb_type);
dct8x8_allowed &= h->sps.direct_8x8_inference_flag;
dct8x8_allowed &= h->ps.sps->direct_8x8_inference_flag;
}else{
int list, mx, my, i;
//FIXME we should set ref_idx_l? to 0 if we use that later ...
......@@ -1092,7 +1092,7 @@ decode_intra_mb:
int ret;
GetBitContext *gb = &sl->gb;
const uint8_t *scan, *scan8x8;
const int max_qp = 51 + 6*(h->sps.bit_depth_luma-8);
const int max_qp = 51 + 6 * (h->ps.sps->bit_depth_luma - 8);
if(IS_INTERLACED(mb_type)){
scan8x8 = sl->qscale ? h->field_scan8x8_cavlc : h->field_scan8x8_cavlc_q0;
......@@ -1141,7 +1141,7 @@ decode_intra_mb:
if(cbp&0x20){
for(chroma_idx=0; chroma_idx<2; chroma_idx++){
const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
const uint32_t *qmul = h->ps.pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
int16_t *mb = sl->mb + (16*(16 + 16*chroma_idx) << pixel_shift);
for (i8x8 = 0; i8x8 < 2; i8x8++) {
for (i4x4 = 0; i4x4 < 4; i4x4++) {
......@@ -1167,7 +1167,7 @@ decode_intra_mb:
if(cbp&0x20){
for(chroma_idx=0; chroma_idx<2; chroma_idx++){
const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
const uint32_t *qmul = h->ps.pps->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][sl->chroma_qp[chroma_idx]];
for(i4x4=0; i4x4<4; i4x4++){
const int index= 16 + 16*chroma_idx + i4x4;
if( decode_residual(h, sl, gb, sl->mb + (16*index << pixel_shift), index, scan + 1, qmul, 15) < 0){
......
......@@ -310,7 +310,7 @@ single_col:
*mb_type |= MB_TYPE_DIRECT2 |
(mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
} else {
if (!h->sps.direct_8x8_inference_flag) {
if (!h->ps.sps->direct_8x8_inference_flag) {
/* FIXME: Save sub mb types from previous frames (or derive
* from MVs) so we know exactly what block size to use. */
sub_mb_type += (MB_TYPE_8x8 - MB_TYPE_16x16); /* B_SUB_4x4 */
......@@ -533,7 +533,7 @@ single_col:
*mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 |
(mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
} else {
if (!h->sps.direct_8x8_inference_flag) {
if (!h->ps.sps->direct_8x8_inference_flag) {
/* FIXME: save sub mb types from previous frames (or derive
* from MVs) so we know exactly what block size to use */
sub_mb_type = MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
......@@ -574,7 +574,7 @@ single_col:
if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
int y_shift = 2 * !IS_INTERLACED(*mb_type);
assert(h->sps.direct_8x8_inference_flag);
assert(h->ps.sps->direct_8x8_inference_flag);
for (i8 = 0; i8 < 4; i8++) {
const int x8 = i8 & 1;
......
......@@ -252,7 +252,7 @@ static av_always_inline void h264_filter_mb_fast_internal(const H264Context *h,
int left_type = sl->left_type[LTOP];
int top_type = sl->top_type;
int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
int b = 52 + sl->slice_beta_offset - qp_bd_offset;
......@@ -422,7 +422,7 @@ void ff_h264_filter_mb_fast(const H264Context *h, H264SliceContext *sl,
unsigned int linesize, unsigned int uvlinesize)
{
assert(!FRAME_MBAFF(h));
if(!h->h264dsp.h264_loop_filter_strength || h->pps.chroma_qp_diff) {
if(!h->h264dsp.h264_loop_filter_strength || h->ps.pps->chroma_qp_diff) {
ff_h264_filter_mb(h, sl, mb_x, mb_y, img_y, img_cb, img_cr, linesize, uvlinesize);
return;
}
......@@ -722,7 +722,7 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl,
const int mvy_limit = IS_INTERLACED(mb_type) ? 2 : 4;
int first_vertical_edge_done = 0;
int chroma = !(CONFIG_GRAY && (h->flags & AV_CODEC_FLAG_GRAY));
int qp_bd_offset = 6 * (h->sps.bit_depth_luma - 8);
int qp_bd_offset = 6 * (h->ps.sps->bit_depth_luma - 8);
int a = 52 + sl->slice_alpha_c0_offset - qp_bd_offset;
int b = 52 + sl->slice_beta_offset - qp_bd_offset;
......@@ -765,7 +765,7 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl,
bS[i] = 4;
else{
bS[i] = 1 + !!(sl->non_zero_count_cache[12+8*(i>>1)] |
((!h->pps.cabac && IS_8x8DCT(mbn_type)) ?
((!h->ps.pps->cabac && IS_8x8DCT(mbn_type)) ?
(h->cbp_table[mbn_xy] & (((MB_FIELD(sl) ? (i&2) : (mb_y&1)) ? 8 : 2) << 12))
:
h->non_zero_count[mbn_xy][ off[i] ]));
......
......@@ -634,7 +634,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
for (i = 0; i < 16; i += 4) {
uint8_t *const ptr = dest_y + block_offset[i];
const int dir = sl->intra4x4_pred_mode_cache[scan8[i]];
if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
h->hpc.pred8x8l_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
} else {
const int nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
......@@ -660,7 +660,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
uint8_t *const ptr = dest_y + block_offset[i];
const int dir = sl->intra4x4_pred_mode_cache[scan8[i]];
if (transform_bypass && h->sps.profile_idc == 244 && dir <= 1) {
if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
h->hpc.pred4x4_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
} else {
uint8_t *topright;
......@@ -699,7 +699,7 @@ static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
if (!transform_bypass)
h->h264dsp.h264_luma_dc_dequant_idct(sl->mb + (p * 256 << pixel_shift),
sl->mb_luma_dc[p],
h->dequant4_coeff[p][qscale][0]);
h->ps.pps->dequant4_coeff[p][qscale][0]);
else {
static const uint8_t dc_mapping[16] = {
0 * 16, 1 * 16, 4 * 16, 5 * 16,
......@@ -731,7 +731,7 @@ static av_always_inline void hl_decode_mb_idct_luma(const H264Context *h, H264Sl
if (!IS_INTRA4x4(mb_type)) {
if (IS_INTRA16x16(mb_type)) {
if (transform_bypass) {
if (h->sps.profile_idc == 244 &&
if (h->ps.sps->profile_idc == 244 &&
(sl->intra16x16_pred_mode == VERT_PRED8x8 ||
sl->intra16x16_pred_mode == HOR_PRED8x8)) {
h->hpc.pred16x16_add[sl->intra16x16_pred_mode](dest_y, block_offset,
......
......@@ -48,7 +48,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
int linesize, uvlinesize /*dct_offset*/;
int i, j;
const int *block_offset = &h->block_offset[0];
const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps.transform_bypass);
const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->ps.sps->transform_bypass);
void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
const int block_h = 16 >> h->chroma_y_shift;
const int chroma422 = CHROMA422(h);
......@@ -97,11 +97,11 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
if (PIXEL_SHIFT) {
const int bit_depth = h->sps.bit_depth_luma;
const int bit_depth = h->ps.sps->bit_depth_luma;
int j;
GetBitContext gb;
init_get_bits(&gb, sl->intra_pcm_ptr,
ff_h264_mb_sizes[h->sps.chroma_format_idc] * bit_depth);
ff_h264_mb_sizes[h->ps.sps->chroma_format_idc] * bit_depth);
for (i = 0; i < 16; i++) {
uint16_t *tmp_y = (uint16_t *)(dest_y + i * linesize);
......@@ -109,7 +109,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
tmp_y[j] = get_bits(&gb, bit_depth);
}
if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
if (!h->sps.chroma_format_idc) {
if (!h->ps.sps->chroma_format_idc) {
for (i = 0; i < block_h; i++) {
uint16_t *tmp_cb = (uint16_t *)(dest_cb + i * uvlinesize);
for (j = 0; j < 8; j++)
......@@ -137,7 +137,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
for (i = 0; i < 16; i++)
memcpy(dest_y + i * linesize, sl->intra_pcm_ptr + i * 16, 16);
if (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
if (!h->sps.chroma_format_idc) {
if (!h->ps.sps->chroma_format_idc) {
for (i = 0; i < block_h; i++) {
memset(dest_cb + i * uvlinesize, 128, 8);
memset(dest_cr + i * uvlinesize, 128, 8);
......@@ -193,7 +193,7 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
(sl->cbp & 0x30)) {
uint8_t *dest[2] = { dest_cb, dest_cr };
if (transform_bypass) {
if (IS_INTRA(mb_type) && h->sps.profile_idc == 244 &&
if (IS_INTRA(mb_type) && h->ps.sps->profile_idc == 244 &&
(sl->chroma_pred_mode == VERT_PRED8x8 ||
sl->chroma_pred_mode == HOR_PRED8x8)) {
h->hpc.pred8x8_add[sl->chroma_pred_mode](dest[0],
......@@ -234,10 +234,10 @@ static av_noinline void FUNC(hl_decode_mb)(const H264Context *h, H264SliceContex
}
if (sl->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 0]])
h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + (16 * 16 * 1 << PIXEL_SHIFT),
h->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]);
h->ps.pps->dequant4_coeff[IS_INTRA(mb_type) ? 1 : 4][qp[0]][0]);
if (sl->non_zero_count_cache[scan8[CHROMA_DC_BLOCK_INDEX + 1]])
h->h264dsp.h264_chroma_dc_dequant_idct(sl->mb + (16 * 16 * 2 << PIXEL_SHIFT),
h->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]);
h->ps.pps->dequant4_coeff[IS_INTRA(mb_type) ? 2 : 5][qp[1]][0]);
h->h264dsp.h264_idct_add8(dest, block_offset,
sl->mb, uvlinesize,
sl->non_zero_count_cache);
......@@ -262,7 +262,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceCo
int linesize;
int i, j, p;
const int *block_offset = &h->block_offset[0];
const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->sps.transform_bypass);
const int transform_bypass = !SIMPLE && (sl->qscale == 0 && h->ps.sps->transform_bypass);
const int plane_count = (SIMPLE || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) ? 3 : 1;
for (p = 0; p < plane_count; p++) {
......@@ -304,7 +304,7 @@ static av_noinline void FUNC(hl_decode_mb_444)(const H264Context *h, H264SliceCo
if (!SIMPLE && IS_INTRA_PCM(mb_type)) {
if (PIXEL_SHIFT) {
const int bit_depth = h->sps.bit_depth_luma;
const int bit_depth = h->ps.sps->bit_depth_luma;
GetBitContext gb;
init_get_bits(&gb, sl->intra_pcm_ptr, 768 * bit_depth);
......
......@@ -464,7 +464,7 @@ static void fill_decode_caches(const H264Context *h, H264SliceContext *sl, int m
if (!IS_SKIP(mb_type)) {
if (IS_INTRA(mb_type)) {
int type_mask = h->pps.constrained_intra_pred ? IS_INTRA(-1) : -1;
int type_mask = h->ps.pps->constrained_intra_pred ? IS_INTRA(-1) : -1;
sl->topleft_samples_available =
sl->top_samples_available =
sl->left_samples_available = 0xFFFF;
......
......@@ -45,6 +45,7 @@
typedef struct H264ParseContext {
H264Context h;
ParseContext pc;
H264ParamSets ps;
int got_first;
} H264ParseContext;
......@@ -115,13 +116,13 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb)
int list_count, ref_count[2];
if (h->pps.redundant_pic_cnt_present)
if (p->ps.pps->redundant_pic_cnt_present)
get_ue_golomb(gb); // redundant_pic_count
if (slice_type_nos == AV_PICTURE_TYPE_B)
get_bits1(gb); // direct_spatial_mv_pred
if (ff_h264_parse_ref_count(&list_count, ref_count, gb, &h->pps,
if (ff_h264_parse_ref_count(&list_count, ref_count, gb, p->ps.pps,
slice_type_nos, h->picture_structure) < 0)
return AVERROR_INVALIDDATA;
......@@ -153,9 +154,9 @@ static int scan_mmco_reset(AVCodecParserContext *s, GetBitContext *gb)
}
}
if ((h->pps.weighted_pred && slice_type_nos == AV_PICTURE_TYPE_P) ||
(h->pps.weighted_bipred_idc == 1 && slice_type_nos == AV_PICTURE_TYPE_B))
ff_h264_pred_weight_table(gb, &h->sps, ref_count, slice_type_nos,
if ((p->ps.pps->weighted_pred && slice_type_nos == AV_PICTURE_TYPE_P) ||
(p->ps.pps->weighted_bipred_idc == 1 && slice_type_nos == AV_PICTURE_TYPE_B))
ff_h264_pred_weight_table(gb, p->ps.sps, ref_count, slice_type_nos,
&pwt);
if (get_bits1(gb)) { // adaptive_ref_pic_marking_mode_flag
......@@ -220,6 +221,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
return 0;
for (;;) {
const SPS *sps;
int src_length, consumed;
buf = avpriv_find_start_code(buf, buf_end, &state);
if (buf >= buf_end)
......@@ -260,10 +262,11 @@ static inline int parse_nal_units(AVCodecParserContext *s,
switch (h->nal_unit_type) {
case NAL_SPS:
ff_h264_decode_seq_parameter_set(h);
ff_h264_decode_seq_parameter_set(&nal.gb, avctx, &p->ps);
break;
case NAL_PPS:
ff_h264_decode_picture_parameter_set(h, h->gb.size_in_bits);
ff_h264_decode_picture_parameter_set(&nal.gb, avctx, &p->ps,
nal.size_bits);
break;
case NAL_SEI:
ff_h264_decode_sei(h);
......@@ -290,30 +293,35 @@ static inline int parse_nal_units(AVCodecParserContext *s,
"pps_id %u out of range\n", pps_id);
goto fail;
}
if (!h->pps_buffers[pps_id]) {
if (!p->ps.pps_list[pps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
"non-existing PPS %u referenced\n", pps_id);
goto fail;
}
h->pps = *h->pps_buffers[pps_id];
if (!h->sps_buffers[h->pps.sps_id]) {
p->ps.pps = (const PPS*)p->ps.pps_list[pps_id]->data;
if (!p->ps.sps_list[p->ps.pps->sps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
"non-existing SPS %u referenced\n", h->pps.sps_id);
"non-existing SPS %u referenced\n", p->ps.pps->sps_id);
goto fail;
}
h->sps = *h->sps_buffers[h->pps.sps_id];
h->frame_num = get_bits(&nal.gb, h->sps.log2_max_frame_num);
p->ps.sps = (SPS*)p->ps.sps_list[p->ps.pps->sps_id]->data;
s->coded_width = 16 * h->sps.mb_width;
s->coded_height = 16 * h->sps.mb_height;
s->width = s->coded_width - (h->sps.crop_right + h->sps.crop_left);
s->height = s->coded_height - (h->sps.crop_top + h->sps.crop_bottom);
h->ps.sps = p->ps.sps;
h->ps.pps = p->ps.pps;
sps = p->ps.sps;
h->frame_num = get_bits(&nal.gb, sps->log2_max_frame_num);
s->coded_width = 16 * sps->mb_width;
s->coded_height = 16 * sps->mb_height;
s->width = s->coded_width - (sps->crop_right + sps->crop_left);
s->height = s->coded_height - (sps->crop_top + sps->crop_bottom);
if (s->width <= 0 || s->height <= 0) {
s->width = s->coded_width;
s->height = s->coded_height;
}
switch (h->sps.bit_depth_luma) {
switch (sps->bit_depth_luma) {
case 9:
if (CHROMA444(h)) s->format = AV_PIX_FMT_YUV444P9;
else if (CHROMA422(h)) s->format = AV_PIX_FMT_YUV422P9;
......@@ -333,10 +341,10 @@ static inline int parse_nal_units(AVCodecParserContext *s,
s->format = AV_PIX_FMT_NONE;
}
avctx->profile = ff_h264_get_profile(&h->sps);
avctx->level = h->sps.level_idc;
avctx->profile = ff_h264_get_profile(sps);
avctx->level = sps->level_idc;
if (h->sps.frame_mbs_only_flag) {
if (sps->frame_mbs_only_flag) {
h->picture_structure = PICT_FRAME;
} else {
if (get_bits1(&nal.gb)) { // field_pic_flag
......@@ -348,19 +356,19 @@ static inline int parse_nal_units(AVCodecParserContext *s,
if (h->nal_unit_type == NAL_IDR_SLICE)
get_ue_golomb(&nal.gb); /* idr_pic_id */
if (h->sps.poc_type == 0) {
h->poc_lsb = get_bits(&nal.gb, h->sps.log2_max_poc_lsb);
if (sps->poc_type == 0) {
h->poc_lsb = get_bits(&nal.gb, sps->log2_max_poc_lsb);
if (h->pps.pic_order_present == 1 &&
if (p->ps.pps->pic_order_present == 1 &&
h->picture_structure == PICT_FRAME)
h->delta_poc_bottom = get_se_golomb(&nal.gb);
}
if (h->sps.poc_type == 1 &&
!h->sps.delta_pic_order_always_zero_flag) {
if (sps->poc_type == 1 &&
!sps->delta_pic_order_always_zero_flag) {
h->delta_poc[0] = get_se_golomb(&nal.gb);
if (h->pps.pic_order_present == 1 &&
if (p->ps.pps->pic_order_present == 1 &&
h->picture_structure == PICT_FRAME)
h->delta_poc[1] = get_se_golomb(&nal.gb);
}
......@@ -394,7 +402,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
}
}
if (h->sps.pic_struct_present_flag) {
if (sps->pic_struct_present_flag) {
switch (h->sei_pic_struct) {
case SEI_PIC_STRUCT_TOP_FIELD:
case SEI_PIC_STRUCT_BOTTOM_FIELD:
......@@ -425,7 +433,7 @@ static inline int parse_nal_units(AVCodecParserContext *s,
if (h->picture_structure == PICT_FRAME) {
s->picture_structure = AV_PICTURE_STRUCTURE_FRAME;
if (h->sps.pic_struct_present_flag) {
if (sps->pic_struct_present_flag) {
switch (h->sei_pic_struct) {
case SEI_PIC_STRUCT_TOP_BOTTOM:
case SEI_PIC_STRUCT_TOP_BOTTOM_TOP:
......@@ -566,9 +574,16 @@ static void h264_close(AVCodecParserContext *s)
H264ParseContext *p = s->priv_data;
H264Context *h = &p->h;
ParseContext *pc = &p->pc;
int i;
av_free(pc->buffer);
ff_h264_free_context(h);
for (i = 0; i < FF_ARRAY_ELEMS(p->ps.sps_list); i++)
av_buffer_unref(&p->ps.sps_list[i]);
for (i = 0; i < FF_ARRAY_ELEMS(p->ps.pps_list); i++)
av_buffer_unref(&p->ps.pps_list[i]);
}
static av_cold int init(AVCodecParserContext *s)
......
This diff is collapsed.
......@@ -526,10 +526,10 @@ int ff_generate_sliding_window_mmcos(H264Context *h, int first_slice)
MMCO mmco_temp[MAX_MMCO_COUNT], *mmco = first_slice ? h->mmco : mmco_temp;
int mmco_index = 0, i = 0;
assert(h->long_ref_count + h->short_ref_count <= h->sps.ref_frame_count);
assert(h->long_ref_count + h->short_ref_count <= h->ps.sps->ref_frame_count);
if (h->short_ref_count &&
h->long_ref_count + h->short_ref_count == h->sps.ref_frame_count &&
h->long_ref_count + h->short_ref_count == h->ps.sps->ref_frame_count &&
!(FIELD_PICTURE(h) && !h->first_field && h->cur_pic_ptr->reference)) {
mmco[0].opcode = MMCO_SHORT2UNUSED;
mmco[0].short_pic_num = h->short_ref[h->short_ref_count - 1]->frame_num;
......@@ -698,7 +698,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
}
if (h->long_ref_count + h->short_ref_count -
(h->short_ref[0] == h->cur_pic_ptr) > h->sps.ref_frame_count) {
(h->short_ref[0] == h->cur_pic_ptr) > h->ps.sps->ref_frame_count) {
/* We have too many reference frames, probably due to corrupted
* stream. Need to discard one frame. Prevents overrun of the
......@@ -707,7 +707,7 @@ int ff_h264_execute_ref_pic_marking(H264Context *h, MMCO *mmco, int mmco_count)
av_log(h->avctx, AV_LOG_ERROR,
"number of reference frames (%d+%d) exceeds max (%d; probably "
"corrupt input), discarding one\n",
h->long_ref_count, h->short_ref_count, h->sps.ref_frame_count);
h->long_ref_count, h->short_ref_count, h->ps.sps->ref_frame_count);
err = AVERROR_INVALIDDATA;
if (h->long_ref_count && !h->short_ref_count) {
......
......@@ -50,14 +50,19 @@ void ff_h264_reset_sei(H264Context *h)
static int decode_picture_timing(H264Context *h)
{
if (h->sps.nal_hrd_parameters_present_flag ||
h->sps.vcl_hrd_parameters_present_flag) {
const SPS *sps = h->ps.sps;
if (!sps)
return AVERROR_INVALIDDATA;
if (sps->nal_hrd_parameters_present_flag ||
sps->vcl_hrd_parameters_present_flag) {
h->sei_cpb_removal_delay = get_bits(&h->gb,
h->sps.cpb_removal_delay_length);
sps->cpb_removal_delay_length);
h->sei_dpb_output_delay = get_bits(&h->gb,
h->sps.dpb_output_delay_length);
sps->dpb_output_delay_length);
}
if (h->sps.pic_struct_present_flag) {
if (sps->pic_struct_present_flag) {
unsigned int i, num_clock_ts;
h->sei_pic_struct = get_bits(&h->gb, 4);
......@@ -93,9 +98,9 @@ static int decode_picture_timing(H264Context *h)
}
}
}
if (h->sps.time_offset_length > 0)
if (sps->time_offset_length > 0)
skip_bits(&h->gb,
h->sps.time_offset_length); /* time_offset */
sps->time_offset_length); /* time_offset */
}
}
......@@ -259,12 +264,12 @@ static int decode_buffering_period(H264Context *h)
SPS *sps;
sps_id = get_ue_golomb_31(&h->gb);
if (sps_id > 31 || !h->sps_buffers[sps_id]) {
if (sps_id > 31 || !h->ps.sps_list[sps_id]) {
av_log(h->avctx, AV_LOG_ERROR,
"non-existing SPS %d referenced in buffering period\n", sps_id);
return AVERROR_INVALIDDATA;
}
sps = h->sps_buffers[sps_id];
sps = (SPS*)h->ps.sps_list[sps_id]->data;
// NOTE: This is really so duplicated in the standard... See H.264, D.1.1
if (sps->nal_hrd_parameters_present_flag) {
......
This diff is collapsed.
......@@ -227,6 +227,8 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx,
{
H264Context * const h = avctx->priv_data;
struct vaapi_context * const vactx = avctx->hwaccel_context;
const PPS *pps = h->ps.pps;
const SPS *sps = h->ps.sps;
VAPictureParameterBufferH264 *pic_param;
VAIQMatrixBufferH264 *iq_matrix;
......@@ -243,38 +245,38 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx,
return -1;
pic_param->picture_width_in_mbs_minus1 = h->mb_width - 1;
pic_param->picture_height_in_mbs_minus1 = h->mb_height - 1;
pic_param->bit_depth_luma_minus8 = h->sps.bit_depth_luma - 8;
pic_param->bit_depth_chroma_minus8 = h->sps.bit_depth_chroma - 8;
pic_param->num_ref_frames = h->sps.ref_frame_count;
pic_param->bit_depth_luma_minus8 = sps->bit_depth_luma - 8;
pic_param->bit_depth_chroma_minus8 = sps->bit_depth_chroma - 8;
pic_param->num_ref_frames = sps->ref_frame_count;
pic_param->seq_fields.value = 0; /* reset all bits */
pic_param->seq_fields.bits.chroma_format_idc = h->sps.chroma_format_idc;
pic_param->seq_fields.bits.residual_colour_transform_flag = h->sps.residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */
pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = h->sps.gaps_in_frame_num_allowed_flag;
pic_param->seq_fields.bits.frame_mbs_only_flag = h->sps.frame_mbs_only_flag;
pic_param->seq_fields.bits.mb_adaptive_frame_field_flag = h->sps.mb_aff;
pic_param->seq_fields.bits.direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag;
pic_param->seq_fields.bits.MinLumaBiPredSize8x8 = h->sps.level_idc >= 31; /* A.3.3.2 */
pic_param->seq_fields.bits.log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4;
pic_param->seq_fields.bits.pic_order_cnt_type = h->sps.poc_type;
pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = h->sps.log2_max_poc_lsb - 4;
pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
pic_param->num_slice_groups_minus1 = h->pps.slice_group_count - 1;
pic_param->slice_group_map_type = h->pps.mb_slice_group_map_type;
pic_param->seq_fields.bits.chroma_format_idc = sps->chroma_format_idc;
pic_param->seq_fields.bits.residual_colour_transform_flag = sps->residual_color_transform_flag; /* XXX: only for 4:4:4 high profile? */
pic_param->seq_fields.bits.gaps_in_frame_num_value_allowed_flag = sps->gaps_in_frame_num_allowed_flag;
pic_param->seq_fields.bits.frame_mbs_only_flag = sps->frame_mbs_only_flag;
pic_param->seq_fields.bits.mb_adaptive_frame_field_flag = sps->mb_aff;
pic_param->seq_fields.bits.direct_8x8_inference_flag = sps->direct_8x8_inference_flag;
pic_param->seq_fields.bits.MinLumaBiPredSize8x8 = sps->level_idc >= 31; /* A.3.3.2 */
pic_param->seq_fields.bits.log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4;
pic_param->seq_fields.bits.pic_order_cnt_type = sps->poc_type;
pic_param->seq_fields.bits.log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4;
pic_param->seq_fields.bits.delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag;
pic_param->num_slice_groups_minus1 = pps->slice_group_count - 1;
pic_param->slice_group_map_type = pps->mb_slice_group_map_type;
pic_param->slice_group_change_rate_minus1 = 0; /* XXX: unimplemented in Libav */
pic_param->pic_init_qp_minus26 = h->pps.init_qp - 26;
pic_param->pic_init_qs_minus26 = h->pps.init_qs - 26;
pic_param->chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0];
pic_param->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
pic_param->pic_init_qp_minus26 = pps->init_qp - 26;
pic_param->pic_init_qs_minus26 = pps->init_qs - 26;
pic_param->chroma_qp_index_offset = pps->chroma_qp_index_offset[0];
pic_param->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1];
pic_param->pic_fields.value = 0; /* reset all bits */
pic_param->pic_fields.bits.entropy_coding_mode_flag = h->pps.cabac;
pic_param->pic_fields.bits.weighted_pred_flag = h->pps.weighted_pred;
pic_param->pic_fields.bits.weighted_bipred_idc = h->pps.weighted_bipred_idc;
pic_param->pic_fields.bits.transform_8x8_mode_flag = h->pps.transform_8x8_mode;
pic_param->pic_fields.bits.entropy_coding_mode_flag = pps->cabac;
pic_param->pic_fields.bits.weighted_pred_flag = pps->weighted_pred;
pic_param->pic_fields.bits.weighted_bipred_idc = pps->weighted_bipred_idc;
pic_param->pic_fields.bits.transform_8x8_mode_flag = pps->transform_8x8_mode;
pic_param->pic_fields.bits.field_pic_flag = h->picture_structure != PICT_FRAME;
pic_param->pic_fields.bits.constrained_intra_pred_flag = h->pps.constrained_intra_pred;
pic_param->pic_fields.bits.pic_order_present_flag = h->pps.pic_order_present;
pic_param->pic_fields.bits.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
pic_param->pic_fields.bits.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present;
pic_param->pic_fields.bits.constrained_intra_pred_flag = pps->constrained_intra_pred;
pic_param->pic_fields.bits.pic_order_present_flag = pps->pic_order_present;
pic_param->pic_fields.bits.deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
pic_param->pic_fields.bits.redundant_pic_cnt_present_flag = pps->redundant_pic_cnt_present;
pic_param->pic_fields.bits.reference_pic_flag = h->nal_ref_idc != 0;
pic_param->frame_num = h->frame_num;
......@@ -282,9 +284,9 @@ static int vaapi_h264_start_frame(AVCodecContext *avctx,
iq_matrix = ff_vaapi_alloc_iq_matrix(vactx, sizeof(VAIQMatrixBufferH264));
if (!iq_matrix)
return -1;
memcpy(iq_matrix->ScalingList4x4, h->pps.scaling_matrix4, sizeof(iq_matrix->ScalingList4x4));
memcpy(iq_matrix->ScalingList8x8[0], h->pps.scaling_matrix8[0], sizeof(iq_matrix->ScalingList8x8[0]));
memcpy(iq_matrix->ScalingList8x8[1], h->pps.scaling_matrix8[3], sizeof(iq_matrix->ScalingList8x8[0]));
memcpy(iq_matrix->ScalingList4x4, pps->scaling_matrix4, sizeof(iq_matrix->ScalingList4x4));
memcpy(iq_matrix->ScalingList8x8[0], pps->scaling_matrix8[0], sizeof(iq_matrix->ScalingList8x8[0]));
memcpy(iq_matrix->ScalingList8x8[1], pps->scaling_matrix8[3], sizeof(iq_matrix->ScalingList8x8[0]));
return 0;
}
......@@ -335,7 +337,7 @@ static int vaapi_h264_decode_slice(AVCodecContext *avctx,
slice_param->num_ref_idx_l0_active_minus1 = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0;
slice_param->num_ref_idx_l1_active_minus1 = sl->list_count > 1 ? sl->ref_count[1] - 1 : 0;
slice_param->cabac_init_idc = sl->cabac_init_idc;
slice_param->slice_qp_delta = sl->qscale - h->pps.init_qp;
slice_param->slice_qp_delta = sl->qscale - h->ps.pps->init_qp;
slice_param->disable_deblocking_filter_idc = sl->deblocking_filter < 2 ? !sl->deblocking_filter : sl->deblocking_filter;
slice_param->slice_alpha_c0_offset_div2 = sl->slice_alpha_c0_offset / 2;
slice_param->slice_beta_offset_div2 = sl->slice_beta_offset / 2;
......
......@@ -120,6 +120,8 @@ static int vdpau_h264_start_frame(AVCodecContext *avctx,
const uint8_t *buffer, uint32_t size)
{
H264Context * const h = avctx->priv_data;
const PPS *pps = h->ps.pps;
const SPS *sps = h->ps.sps;
H264Picture *pic = h->cur_pic_ptr;
struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
VdpPictureInfoH264 *info = &pic_ctx->info.h264;
......@@ -135,37 +137,37 @@ static int vdpau_h264_start_frame(AVCodecContext *avctx,
info->frame_num = h->frame_num;
info->field_pic_flag = h->picture_structure != PICT_FRAME;
info->bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD;
info->num_ref_frames = h->sps.ref_frame_count;
info->mb_adaptive_frame_field_flag = h->sps.mb_aff && !info->field_pic_flag;
info->constrained_intra_pred_flag = h->pps.constrained_intra_pred;
info->weighted_pred_flag = h->pps.weighted_pred;
info->weighted_bipred_idc = h->pps.weighted_bipred_idc;
info->frame_mbs_only_flag = h->sps.frame_mbs_only_flag;
info->transform_8x8_mode_flag = h->pps.transform_8x8_mode;
info->chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0];
info->second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1];
info->pic_init_qp_minus26 = h->pps.init_qp - 26;
info->num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1;
info->num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1;
info->log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4;
info->pic_order_cnt_type = h->sps.poc_type;
info->log2_max_pic_order_cnt_lsb_minus4 = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
info->delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag;
info->direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag;
info->num_ref_frames = sps->ref_frame_count;
info->mb_adaptive_frame_field_flag = sps->mb_aff && !info->field_pic_flag;
info->constrained_intra_pred_flag = pps->constrained_intra_pred;
info->weighted_pred_flag = pps->weighted_pred;
info->weighted_bipred_idc = pps->weighted_bipred_idc;
info->frame_mbs_only_flag = sps->frame_mbs_only_flag;
info->transform_8x8_mode_flag = pps->transform_8x8_mode;
info->chroma_qp_index_offset = pps->chroma_qp_index_offset[0];
info->second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1];
info->pic_init_qp_minus26 = pps->init_qp - 26;
info->num_ref_idx_l0_active_minus1 = pps->ref_count[0] - 1;
info->num_ref_idx_l1_active_minus1 = pps->ref_count[1] - 1;
info->log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4;
info->pic_order_cnt_type = sps->poc_type;
info->log2_max_pic_order_cnt_lsb_minus4 = sps->poc_type ? 0 : sps->log2_max_poc_lsb - 4;
info->delta_pic_order_always_zero_flag = sps->delta_pic_order_always_zero_flag;
info->direct_8x8_inference_flag = sps->direct_8x8_inference_flag;
#ifdef VDP_DECODER_PROFILE_H264_HIGH_444_PREDICTIVE
info2->qpprime_y_zero_transform_bypass_flag = h->sps.transform_bypass;
info2->separate_colour_plane_flag = h->sps.residual_color_transform_flag;
info2->qpprime_y_zero_transform_bypass_flag = sps->transform_bypass;
info2->separate_colour_plane_flag = sps->residual_color_transform_flag;
#endif
info->entropy_coding_mode_flag = h->pps.cabac;
info->pic_order_present_flag = h->pps.pic_order_present;
info->deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
info->redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present;
info->entropy_coding_mode_flag = pps->cabac;
info->pic_order_present_flag = pps->pic_order_present;
info->deblocking_filter_control_present_flag = pps->deblocking_filter_parameters_present;
info->redundant_pic_cnt_present_flag = pps->redundant_pic_cnt_present;
memcpy(info->scaling_lists_4x4, h->pps.scaling_matrix4,
memcpy(info->scaling_lists_4x4, pps->scaling_matrix4,
sizeof(info->scaling_lists_4x4));
memcpy(info->scaling_lists_8x8[0], h->pps.scaling_matrix8[0],
memcpy(info->scaling_lists_8x8[0], pps->scaling_matrix8[0],
sizeof(info->scaling_lists_8x8[0]));
memcpy(info->scaling_lists_8x8[1], h->pps.scaling_matrix8[3],
memcpy(info->scaling_lists_8x8[1], pps->scaling_matrix8[3],
sizeof(info->scaling_lists_8x8[1]));
vdpau_h264_set_reference_frames(avctx);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment