Commit 6e92181b authored by Anton Khirnov

h264: pass just the PPS to get_chroma_qp()

It does not need the whole context. This will simplify the following
commit.
parent d1f539c9
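
For orientation, a minimal before/after sketch of the interface change, distilled from the first hunk below (not a verbatim excerpt of the tree; surrounding code is elided):

/* Before: the helper took the whole decoder context and reached
 * into it for the active PPS. */
static av_always_inline int get_chroma_qp(const H264Context *h, int t, int qscale)
{
    return h->ps.pps->chroma_qp_table[t][qscale];
}

/* After: callers pass the PPS directly, so code that already holds a
 * PPS pointer (e.g. the slice header parser) no longer has to go
 * through a fully set-up H264Context, which appears to be the
 * simplification the commit message refers to. */
static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
{
    return pps->chroma_qp_table[t][qscale];
}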
@@ -783,9 +783,9 @@ static av_always_inline uint16_t pack8to16(int a, int b)
 /**
  * Get the chroma qp.
  */
-static av_always_inline int get_chroma_qp(const H264Context *h, int t, int qscale)
+static av_always_inline int get_chroma_qp(const PPS *pps, int t, int qscale)
 {
-    return h->ps.pps->chroma_qp_table[t][qscale];
+    return pps->chroma_qp_table[t][qscale];
 }
 
 /**

@@ -2385,8 +2385,8 @@ decode_intra_mb:
             if (sl->qscale < 0) sl->qscale += max_qp + 1;
             else                sl->qscale -= max_qp + 1;
         }
-        sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale);
-        sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale);
+        sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
+        sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
     }else
         sl->last_qscale_diff=0;

@@ -1110,8 +1110,8 @@ decode_intra_mb:
             }
         }
-        sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale);
-        sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale);
+        sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
+        sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
 
         if ((ret = decode_luma_residual(h, sl, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 0)) < 0 ) {
             return -1;

@@ -260,9 +260,9 @@ static av_always_inline void h264_filter_mb_fast_internal(const H264Context *h,
     int qp = h->cur_pic.qscale_table[mb_xy];
     int qp0 = h->cur_pic.qscale_table[mb_xy - 1];
     int qp1 = h->cur_pic.qscale_table[sl->top_mb_xy];
-    int qpc = get_chroma_qp( h, 0, qp );
-    int qpc0 = get_chroma_qp( h, 0, qp0 );
-    int qpc1 = get_chroma_qp( h, 0, qp1 );
+    int qpc = get_chroma_qp(h->ps.pps, 0, qp);
+    int qpc0 = get_chroma_qp(h->ps.pps, 0, qp0);
+    int qpc1 = get_chroma_qp(h->ps.pps, 0, qp1);
     qp0 = (qp + qp0 + 1) >> 1;
     qp1 = (qp + qp1 + 1) >> 1;
     qpc0 = (qpc + qpc0 + 1) >> 1;

@@ -531,8 +531,8 @@ static av_always_inline void filter_mb_dir(const H264Context *h, H264SliceContex
                         ff_tlog(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, tmp_linesize, tmp_uvlinesize);
                         { int i; for (i = 0; i < 4; i++) ff_tlog(h->avctx, " bS[%d]:%d", i, bS[i]); ff_tlog(h->avctx, "\n"); }
                         filter_mb_edgeh( &img_y[j*linesize], tmp_linesize, bS, qp, a, b, h, 0 );
-                        chroma_qp_avg[0] = (sl->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
-                        chroma_qp_avg[1] = (sl->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
+                        chroma_qp_avg[0] = (sl->chroma_qp[0] + get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
+                        chroma_qp_avg[1] = (sl->chroma_qp[1] + get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mbn_xy]) + 1) >> 1;
                         if (chroma) {
                             if (chroma444) {
                                 filter_mb_edgeh (&img_cb[j*uvlinesize], tmp_uvlinesize, bS, chroma_qp_avg[0], a, b, h, 0);

@@ -594,8 +594,8 @@ static av_always_inline void filter_mb_dir(const H264Context *h, H264SliceContex
            if(bS[0]+bS[1]+bS[2]+bS[3]){
                qp = (h->cur_pic.qscale_table[mb_xy] + h->cur_pic.qscale_table[mbm_xy] + 1) >> 1;
                ff_tlog(h->avctx, "filter mb:%d/%d dir:%d edge:%d, QPy:%d ls:%d uvls:%d", mb_x, mb_y, dir, edge, qp, linesize, uvlinesize);
-               chroma_qp_avg[0] = (sl->chroma_qp[0] + get_chroma_qp(h, 0, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
-               chroma_qp_avg[1] = (sl->chroma_qp[1] + get_chroma_qp(h, 1, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
+               chroma_qp_avg[0] = (sl->chroma_qp[0] + get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
+               chroma_qp_avg[1] = (sl->chroma_qp[1] + get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mbm_xy]) + 1) >> 1;
                if( dir == 0 ) {
                    filter_mb_edgev( &img_y[0], linesize, bS, qp, a, b, h, 1 );
                    if (chroma) {

@@ -777,15 +777,15 @@ void ff_h264_filter_mb(const H264Context *h, H264SliceContext *sl,
            mbn0_qp = h->cur_pic.qscale_table[sl->left_mb_xy[0]];
            mbn1_qp = h->cur_pic.qscale_table[sl->left_mb_xy[1]];
            qp[0] = ( mb_qp + mbn0_qp + 1 ) >> 1;
-           bqp[0] = ( get_chroma_qp( h, 0, mb_qp ) +
-                      get_chroma_qp( h, 0, mbn0_qp ) + 1 ) >> 1;
-           rqp[0] = ( get_chroma_qp( h, 1, mb_qp ) +
-                      get_chroma_qp( h, 1, mbn0_qp ) + 1 ) >> 1;
+           bqp[0] = (get_chroma_qp(h->ps.pps, 0, mb_qp) +
+                     get_chroma_qp(h->ps.pps, 0, mbn0_qp) + 1) >> 1;
+           rqp[0] = (get_chroma_qp(h->ps.pps, 1, mb_qp) +
+                     get_chroma_qp(h->ps.pps, 1, mbn0_qp) + 1) >> 1;
            qp[1] = ( mb_qp + mbn1_qp + 1 ) >> 1;
-           bqp[1] = ( get_chroma_qp( h, 0, mb_qp ) +
-                      get_chroma_qp( h, 0, mbn1_qp ) + 1 ) >> 1;
-           rqp[1] = ( get_chroma_qp( h, 1, mb_qp ) +
-                      get_chroma_qp( h, 1, mbn1_qp ) + 1 ) >> 1;
+           bqp[1] = (get_chroma_qp(h->ps.pps, 0, mb_qp) +
+                     get_chroma_qp(h->ps.pps, 0, mbn1_qp) + 1 ) >> 1;
+           rqp[1] = (get_chroma_qp(h->ps.pps, 1, mb_qp) +
+                     get_chroma_qp(h->ps.pps, 1, mbn1_qp) + 1 ) >> 1;
 
            /* Filter edge */
            ff_tlog(h->avctx, "filter mb:%d/%d MBAFF, QPy:%d/%d, QPb:%d/%d QPr:%d/%d ls:%d uvls:%d", mb_x, mb_y, qp[0], qp[1], bqp[0], bqp[1], rqp[0], rqp[1], linesize, uvlinesize);

@@ -1361,8 +1361,8 @@ static int h264_slice_header_parse(H264Context *h, H264SliceContext *sl)
        return AVERROR_INVALIDDATA;
    }
    sl->qscale = tmp;
-   sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale);
-   sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale);
+   sl->chroma_qp[0] = get_chroma_qp(pps, 0, sl->qscale);
+   sl->chroma_qp[1] = get_chroma_qp(pps, 1, sl->qscale);
    // FIXME qscale / qp ... stuff
    if (sl->slice_type == AV_PICTURE_TYPE_SP)
        get_bits1(&sl->gb); /* sp_for_switch_flag */

@@ -1846,8 +1846,8 @@ static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x,
                                 uvlinesize, 0);
            if (fill_filter_caches(h, sl, mb_type))
                continue;
-           sl->chroma_qp[0] = get_chroma_qp(h, 0, h->cur_pic.qscale_table[mb_xy]);
-           sl->chroma_qp[1] = get_chroma_qp(h, 1, h->cur_pic.qscale_table[mb_xy]);
+           sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, h->cur_pic.qscale_table[mb_xy]);
+           sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, h->cur_pic.qscale_table[mb_xy]);
 
            if (FRAME_MBAFF(h)) {
                ff_h264_filter_mb(h, sl, mb_x, mb_y, dest_y, dest_cb, dest_cr,

@@ -1861,8 +1861,8 @@ static void loop_filter(const H264Context *h, H264SliceContext *sl, int start_x,
    sl->slice_type = old_slice_type;
    sl->mb_x = end_x;
    sl->mb_y = end_mb_y - FRAME_MBAFF(h);
-   sl->chroma_qp[0] = get_chroma_qp(h, 0, sl->qscale);
-   sl->chroma_qp[1] = get_chroma_qp(h, 1, sl->qscale);
+   sl->chroma_qp[0] = get_chroma_qp(h->ps.pps, 0, sl->qscale);
+   sl->chroma_qp[1] = get_chroma_qp(h->ps.pps, 1, sl->qscale);
 }
 
 static void predict_field_decoding_flag(const H264Context *h, H264SliceContext *sl)