Commit 875f6955 authored by Clément Bœsch

lavc/vp9: misc cosmetics

Imported from Libav
parent ff8436ba
@@ -252,7 +252,7 @@ static int update_block_buffers(AVCodecContext *avctx)
     return 0;
 }
 
-// for some reason the sign bit is at the end, not the start, of a bit sequence
+// The sign bit is at the end, not the start, of a bit sequence
 static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
 {
     int v = get_bits(gb, n);
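
Note: the helper under the renamed comment is exactly what the comment describes; the bitstream stores the magnitude first and the sign bit after it. A minimal sketch of the full function (only its opening lines are visible in this hunk, so the body below is an assumption):

    static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
    {
        int v = get_bits(gb, n);       // magnitude: n bits
        return get_bits1(gb) ? -v : v; // sign bit trails the magnitude
    }
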
@@ -292,13 +292,13 @@ static int update_prob(VP56RangeCoder *c, int p)
 /* This code is trying to do a differential probability update. For a
  * current probability A in the range [1, 255], the difference to a new
- * probability of any value can be expressed differentially as 1-A,255-A
+ * probability of any value can be expressed differentially as 1-A, 255-A
  * where some part of this (absolute range) exists both in positive as
  * well as the negative part, whereas another part only exists in one
  * half. We're trying to code this shared part differentially, i.e.
  * times two where the value of the lowest bit specifies the sign, and
  * the single part is then coded on top of this. This absolute difference
- * then again has a value of [0,254], but a bigger value in this range
+ * then again has a value of [0, 254], but a bigger value in this range
  * indicates that we're further away from the original value A, so we
  * can code this as a VLC code, since higher values are increasingly
  * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
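
Note: the "shared part coded times two, low bit is the sign" scheme in this comment is inverse recentering. A hedged sketch of the decode-side mapping (libvpx names this inv_recenter_nonneg; the FFmpeg helper may differ in detail):

    // Map coded value v back to a delta around pivot m: small v alternate
    // below/above m; beyond the shared range (v > 2*m), v is taken as-is.
    static int inv_recenter_nonneg(int v, int m)
    {
        if (v > 2 * m)
            return v;
        return (v & 1) ? m - ((v + 1) >> 1) : m + (v >> 1);
    }
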
@@ -414,12 +414,15 @@ static int decode_frame_header(AVCodecContext *avctx,
         *ref = get_bits(&s->gb, 3);
         return 0;
     }
+
     s->last_keyframe = s->s.h.keyframe;
     s->s.h.keyframe = !get_bits1(&s->gb);
+
     last_invisible = s->s.h.invisible;
     s->s.h.invisible = !get_bits1(&s->gb);
     s->s.h.errorres = get_bits1(&s->gb);
     s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
+
     if (s->s.h.keyframe) {
         if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
             av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
@@ -565,12 +568,11 @@ static int decode_frame_header(AVCodecContext *avctx,
             for (i = 0; i < 7; i++)
                 s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
                     get_bits(&s->gb, 8) : 255;
-            if ((s->s.h.segmentation.temporal = get_bits1(&s->gb))) {
+            if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
                 for (i = 0; i < 3; i++)
                     s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
                         get_bits(&s->gb, 8) : 255;
-            }
         }
 
         if (get_bits1(&s->gb)) {
             s->s.h.segmentation.absolute_vals = get_bits1(&s->gb);
@@ -734,9 +736,9 @@ static int decode_frame_header(AVCodecContext *avctx,
     } else {
         memset(&s->counts, 0, sizeof(s->counts));
     }
-    // FIXME is it faster to not copy here, but do it down in the fw updates
-    // as explicit copies if the fw update is missing (and skip the copy upon
-    // fw update)?
+    /* FIXME is it faster to not copy here, but do it down in the fw updates
+     * as explicit copies if the fw update is missing (and skip the copy upon
+     * fw update)? */
     s->prob.p = s->prob_ctx[c].p;
 
     // txfm updates
@@ -777,12 +779,11 @@ static int decode_frame_header(AVCodecContext *avctx,
                     if (m >= 3 && l == 0) // dc only has 3 pt
                         break;
                     for (n = 0; n < 3; n++) {
-                        if (vp56_rac_get_prob_branchy(&s->c, 252)) {
+                        if (vp56_rac_get_prob_branchy(&s->c, 252))
                             p[n] = update_prob(&s->c, r[n]);
-                        } else {
+                        else
                             p[n] = r[n];
-                        }
                     }
                     p[3] = 0;
                 }
             } else {
@@ -866,7 +867,8 @@ static int decode_frame_header(AVCodecContext *avctx,
                 for (k = 0; k < 3; k++)
                     if (vp56_rac_get_prob_branchy(&s->c, 252))
                         s->prob.p.partition[3 - i][j][k] =
-                            update_prob(&s->c, s->prob.p.partition[3 - i][j][k]);
+                            update_prob(&s->c,
+                                        s->prob.p.partition[3 - i][j][k]);
 
     // mv fields don't use the update_prob subexp model for some reason
     for (i = 0; i < 3; i++)
@@ -875,7 +877,8 @@ static int decode_frame_header(AVCodecContext *avctx,
     for (i = 0; i < 2; i++) {
         if (vp56_rac_get_prob_branchy(&s->c, 252))
-            s->prob.p.mv_comp[i].sign = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+            s->prob.p.mv_comp[i].sign =
+                (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
         for (j = 0; j < 10; j++)
             if (vp56_rac_get_prob_branchy(&s->c, 252))
@@ -883,7 +886,8 @@ static int decode_frame_header(AVCodecContext *avctx,
                     (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
         if (vp56_rac_get_prob_branchy(&s->c, 252))
-            s->prob.p.mv_comp[i].class0 = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+            s->prob.p.mv_comp[i].class0 =
+                (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
         for (j = 0; j < 10; j++)
             if (vp56_rac_get_prob_branchy(&s->c, 252))
@@ -1210,11 +1214,11 @@ static void loopfilter_sb(AVCodecContext *avctx, struct VP9Filter *lflvl,
     uint8_t (*uv_masks)[8][4] = lflvl->mask[s->ss_h | s->ss_v];
     int p;
 
-    // FIXME in how far can we interleave the v/h loopfilter calls? E.g.
-    // if you think of them as acting on a 8x8 block max, we can interleave
-    // each v/h within the single x loop, but that only works if we work on
-    // 8 pixel blocks, and we won't always do that (we want at least 16px
-    // to use SSE2 optimizations, perhaps 32 for AVX2)
+    /* FIXME: In how far can we interleave the v/h loopfilter calls? E.g.
+     * if you think of them as acting on a 8x8 block max, we can interleave
+     * each v/h within the single x loop, but that only works if we work on
+     * 8 pixel blocks, and we won't always do that (we want at least 16px
+     * to use SSE2 optimizations, perhaps 32 for AVX2) */
     filter_plane_cols(s, col, 0, 0, lflvl->level, lflvl->mask[0][0], dst, ls_y);
     filter_plane_rows(s, row, 0, 0, lflvl->level, lflvl->mask[0][1], dst, ls_y);
@@ -1485,14 +1489,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
                                    yoff2, uvoff2, BL_64X64);
                 }
             }
-            if (s->pass != 2) {
+            if (s->pass != 2)
                 memcpy(&s->c_b[tile_col], &s->c, sizeof(s->c));
-            }
         }
 
-        if (s->pass == 1) {
+        if (s->pass == 1)
             continue;
-        }
 
         // backup pre-loopfilter reconstruction data for intra
         // prediction of next row of sb64s
...
@@ -231,33 +231,45 @@ static void decode_mode(AVCodecContext *avctx)
             // FIXME the memory storage intermediates here aren't really
             // necessary, they're just there to make the code slightly
             // simpler for now
-            b->mode[0] = a[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
+            b->mode[0] =
+            a[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                 ff_vp9_default_kf_ymode_probs[a[0]][l[0]]);
             if (b->bs != BS_8x4) {
                 b->mode[1] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                     ff_vp9_default_kf_ymode_probs[a[1]][b->mode[0]]);
-                l[0] = a[1] = b->mode[1];
+                l[0] =
+                a[1] = b->mode[1];
             } else {
-                l[0] = a[1] = b->mode[1] = b->mode[0];
+                l[0] =
+                a[1] =
+                b->mode[1] = b->mode[0];
             }
             if (b->bs != BS_4x8) {
-                b->mode[2] = a[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
+                b->mode[2] =
+                a[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                     ff_vp9_default_kf_ymode_probs[a[0]][l[1]]);
                 if (b->bs != BS_8x4) {
                     b->mode[3] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                         ff_vp9_default_kf_ymode_probs[a[1]][b->mode[2]]);
-                    l[1] = a[1] = b->mode[3];
+                    l[1] =
+                    a[1] = b->mode[3];
                 } else {
-                    l[1] = a[1] = b->mode[3] = b->mode[2];
+                    l[1] =
+                    a[1] =
+                    b->mode[3] = b->mode[2];
                 }
             } else {
                 b->mode[2] = b->mode[0];
-                l[1] = a[1] = b->mode[3] = b->mode[1];
+                l[1] =
+                a[1] =
+                b->mode[3] = b->mode[1];
             }
         } else {
             b->mode[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                 ff_vp9_default_kf_ymode_probs[*a][*l]);
-            b->mode[3] = b->mode[2] = b->mode[1] = b->mode[0];
+            b->mode[3] =
+            b->mode[2] =
+            b->mode[1] = b->mode[0];
             // FIXME this can probably be optimized
             memset(a, b->mode[0], bwh_tab[0][b->bs][0]);
             memset(l, b->mode[0], bwh_tab[0][b->bs][1]);
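
Note on the pattern being reindented here: on keyframes the y mode of each sub-block is read from a tree whose probability table is selected by the above/left neighbor modes, and the decoded mode is written straight back into the a[]/l[] context arrays so the next sub-block conditions on it. Schematically:

    mode  = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                             ff_vp9_default_kf_ymode_probs[above][left]);
    above = left = mode; // becomes the context for the next 4x4 sub-block
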
@@ -300,7 +312,9 @@ static void decode_mode(AVCodecContext *avctx)
             b->mode[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                           s->prob.p.y_mode[sz]);
-            b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0];
+            b->mode[1] =
+            b->mode[2] =
+            b->mode[3] = b->mode[0];
             s->counts.y_mode[sz][b->mode[3]]++;
         }
         b->uvmode = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
@@ -369,7 +383,7 @@ static void decode_mode(AVCodecContext *avctx)
         // read actual references
         // FIXME probably cache a few variables here to prevent repetitive
         // memory accesses below
-        if (b->comp) /* two references */ {
+        if (b->comp) { /* two references */
             int fix_idx = s->s.h.signbias[s->s.h.fixcompref], var_idx = !fix_idx, c, bit;
 
             b->ref[fix_idx] = s->s.h.fixcompref;
@@ -570,7 +584,10 @@ static void decode_mode(AVCodecContext *avctx)
     if (b->bs <= BS_8x8) {
         if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].skip_enabled) {
-            b->mode[0] = b->mode[1] = b->mode[2] = b->mode[3] = ZEROMV;
+            b->mode[0] =
+            b->mode[1] =
+            b->mode[2] =
+            b->mode[3] = ZEROMV;
         } else {
             static const uint8_t off[10] = {
                 3, 0, 0, 1, 0, 0, 0, 0, 0, 0
@@ -583,7 +600,9 @@ static void decode_mode(AVCodecContext *avctx)
             b->mode[0] = vp8_rac_get_tree(&s->c, ff_vp9_inter_mode_tree,
                                           s->prob.p.mv_mode[c]);
-            b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0];
+            b->mode[1] =
+            b->mode[2] =
+            b->mode[3] = b->mode[0];
             s->counts.mv_mode[c][b->mode[0] - 10]++;
         }
     }
@@ -810,7 +829,7 @@ decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
         if (!val)
             break;
     skip_eob:
         if (!vp56_rac_get_prob_branchy(c, tp[1])) { // zero
             cnt[band][nnz][0]++;
             if (!--band_left)
@@ -844,9 +863,9 @@ decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
             } else if (!vp56_rac_get_prob_branchy(c, tp[6])) { // cat1/2
                 cache[rc] = 4;
                 if (!vp56_rac_get_prob_branchy(c, tp[7])) {
-                    val = 5 + vp56_rac_get_prob(c, 159);
+                    val = vp56_rac_get_prob(c, 159) + 5;
                 } else {
-                    val = 7 + (vp56_rac_get_prob(c, 165) << 1);
+                    val  = (vp56_rac_get_prob(c, 165) << 1) + 7;
                     val += vp56_rac_get_prob(c, 145);
                 }
             } else { // cat 3-6
@@ -863,7 +882,7 @@ decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
                         val += vp56_rac_get_prob(c, 135);
                     }
                 } else if (!vp56_rac_get_prob_branchy(c, tp[10])) {
-                    val = 35 + (vp56_rac_get_prob(c, 180) << 4);
+                    val  = (vp56_rac_get_prob(c, 180) << 4) + 35;
                     val += (vp56_rac_get_prob(c, 157) << 3);
                     val += (vp56_rac_get_prob(c, 141) << 2);
                     val += (vp56_rac_get_prob(c, 134) << 1);
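
Note: these branches decode the coefficient token categories, each one a base value plus a string of fixed-probability extra bits; the reordering above only moves the constant to the end of the expression. The ranges recoverable from this hunk (hedged summary, the full list lives in the decoder):

    // cat1: base  5 + 1 extra bit  -> values  5..6   (the tp[7] branch)
    // cat2: base  7 + 2 extra bits -> values  7..10
    // cat5: base 35 + 5 extra bits -> values 35..66  (the tp[10] branch)
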
@@ -966,7 +985,7 @@ static av_always_inline int decode_coeffs(AVCodecContext *avctx, int is8bitsperp
     int w4 = bwh_tab[1][b->bs][0] << 1, h4 = bwh_tab[1][b->bs][1] << 1;
     int end_x = FFMIN(2 * (s->cols - col), w4);
     int end_y = FFMIN(2 * (s->rows - row), h4);
-    int n, pl, x, y, res;
+    int n, pl, x, y, ret;
     int16_t (*qmul)[2] = s->s.h.segmentation.feat[b->seg_id].qmul;
     int tx = 4 * s->s.h.lossless + b->tx;
     const int16_t * const *yscans = ff_vp9_scans[tx];
@@ -999,16 +1018,16 @@ static av_always_inline int decode_coeffs(AVCodecContext *avctx, int is8bitsperp
     for (n = 0, y = 0; y < end_y; y += step) { \
         for (x = 0; x < end_x; x += step, n += step * step) { \
             enum TxfmType txtp = ff_vp9_intra_txfm_type[b->mode[mode_index]]; \
-            res = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
+            ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
                       (s, s->block + 16 * n * bytesperpixel, 16 * step * step, \
                        c, e, p, a[x] + l[y], yscans[txtp], \
                        ynbs[txtp], y_band_counts, qmul[0]); \
-            a[x] = l[y] = !!res; \
-            total_coeff |= !!res; \
+            a[x] = l[y] = !!ret; \
+            total_coeff |= !!ret; \
             if (step >= 4) { \
-                AV_WN16A(&s->eob[n], res); \
+                AV_WN16A(&s->eob[n], ret); \
             } else { \
-                s->eob[n] = res; \
+                s->eob[n] = ret; \
             } \
         } \
     }
@@ -1073,16 +1092,16 @@ static av_always_inline int decode_coeffs(AVCodecContext *avctx, int is8bitsperp
 #define DECODE_UV_COEF_LOOP(step, v) \
     for (n = 0, y = 0; y < end_y; y += step) { \
         for (x = 0; x < end_x; x += step, n += step * step) { \
-            res = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
+            ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
                       (s, s->uvblock[pl] + 16 * n * bytesperpixel, \
                        16 * step * step, c, e, p, a[x] + l[y], \
                        uvscan, uvnb, uv_band_counts, qmul[1]); \
-            a[x] = l[y] = !!res; \
-            total_coeff |= !!res; \
+            a[x] = l[y] = !!ret; \
+            total_coeff |= !!ret; \
             if (step >= 4) { \
-                AV_WN16A(&s->uveob[pl][n], res); \
+                AV_WN16A(&s->uveob[pl][n], ret); \
             } else { \
-                s->uveob[pl][n] = res; \
+                s->uveob[pl][n] = ret; \
             } \
         } \
     }
@@ -1176,12 +1195,16 @@ static av_always_inline int check_intra_mode(VP9Context *s, int mode, uint8_t **
         [HOR_PRED]             = { .needs_left = 1 },
         [DC_PRED]              = { .needs_top = 1, .needs_left = 1 },
         [DIAG_DOWN_LEFT_PRED]  = { .needs_top = 1, .needs_topright = 1 },
-        [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
-        [VERT_RIGHT_PRED]      = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
-        [HOR_DOWN_PRED]        = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
+        [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1,
+                                   .needs_topleft = 1 },
+        [VERT_RIGHT_PRED]      = { .needs_left = 1, .needs_top = 1,
+                                   .needs_topleft = 1 },
+        [HOR_DOWN_PRED]        = { .needs_left = 1, .needs_top = 1,
+                                   .needs_topleft = 1 },
         [VERT_LEFT_PRED]       = { .needs_top = 1, .needs_topright = 1 },
         [HOR_UP_PRED]          = { .needs_left = 1, .invert_left = 1 },
-        [TM_VP8_PRED]          = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
+        [TM_VP8_PRED]          = { .needs_left = 1, .needs_top = 1,
+                                   .needs_topleft = 1 },
         [LEFT_DC_PRED]         = { .needs_left = 1 },
         [TOP_DC_PRED]          = { .needs_top = 1 },
         [DC_128_PRED]          = { 0 },
@@ -1679,6 +1702,7 @@ static av_always_inline void inter_recon(AVCodecContext *avctx, int bytesperpixe
             inter_pred_16bpp(avctx);
         }
     }
+
     if (!b->skip) {
         /* mostly copied intra_recon() */
@@ -1808,8 +1832,8 @@ static av_always_inline void mask_edges(uint8_t (*mask)[8][4], int ss_h, int ss_
     if (!skip_inter) {
         int mask_id = (tx == TX_8X8);
-        static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
         int l2 = tx + ss_h - 1, step1d;
+        static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
         int m_row = m_col & masks[l2];
 
         // at odd UV col/row edges tx16/tx32 loopfilter edges, force
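
Reading note (an interpretation, not from the commit message): masks[] appears to thin out the per-8px edge bits as the transform grows, so ANDing m_col with masks[l2] keeps only columns that start a transform block:

    // masks[l2]: 0xff every 8px edge, 0x55 every 16px,
    //            0x11 every 32px,     0x01 every 64px
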
@@ -1874,10 +1898,12 @@ void ff_vp9_decode_block(AVCodecContext *avctx, int row, int col,
     s->row7 = row & 7;
     s->col = col;
     s->col7 = col & 7;
+
     s->min_mv.x = -(128 + col * 64);
     s->min_mv.y = -(128 + row * 64);
     s->max_mv.x = 128 + (s->cols - col - w4) * 64;
     s->max_mv.y = 128 + (s->rows - row - h4) * 64;
+
     if (s->pass < 2) {
         b->bs = bs;
         b->bl = bl;
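
Note: the clamp constants are in 1/8-pel units, so one 8-pixel block column is 64 units and the extra 128 units let a vector reach 16 pixels outside the frame. A worked example with hypothetical values, just to show the arithmetic:

    // col = 2, cols = 10, w4 = 2 (block 16px from the left edge, 16px wide):
    // min_mv.x = -(128 + 2 * 64)          = -256  (-32px: 16px past the left edge)
    // max_mv.x =  128 + (10 - 2 - 2) * 64 =  512  (+64px: 16px past the right edge)
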
...
@@ -1445,13 +1445,13 @@ const ProbContext ff_vp9_default_probs = {
         { 34, 3, },
         { 149, 144, },
     }, { /* mv_mode */
-        { 2, 173, 34},  // 0 = both zero mv
-        { 7, 145, 85},  // 1 = one zero mv + one a predicted mv
-        { 7, 166, 63},  // 2 = two predicted mvs
-        { 7, 94, 66},   // 3 = one predicted/zero and one new mv
-        { 8, 64, 46},   // 4 = two new mvs
-        { 17, 81, 31},  // 5 = one intra neighbour + x
-        { 25, 29, 30},  // 6 = two intra neighbours
+        { 2, 173, 34 },  // 0 = both zero mv
+        { 7, 145, 85 },  // 1 = one zero mv + one a predicted mv
+        { 7, 166, 63 },  // 2 = two predicted mvs
+        { 7, 94, 66 },   // 3 = one predicted/zero and one new mv
+        { 8, 64, 46 },   // 4 = two new mvs
+        { 17, 81, 31 },  // 5 = one intra neighbor + x
+        { 25, 29, 30 },  // 6 = two intra neighbors
     }, { /* intra */
         9, 102, 187, 225
     }, { /* comp */
@@ -1481,7 +1481,7 @@ const ProbContext ff_vp9_default_probs = {
         128, /* sign */
         { 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 }, /* class */
         216, /* class0 */
-        { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, /* bits */
+        { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240 }, /* bits */
         { /* class0_fp */
             { 128, 128, 64 },
             { 96, 112, 64 }
...
@@ -37,32 +37,32 @@ static void find_ref_mvs(VP9Context *s,
                          VP56mv *pmv, int ref, int z, int idx, int sb)
 {
     static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
-        [BS_64x64] = {{  3, -1 }, { -1,  3 }, {  4, -1 }, { -1,  4 },
-                      { -1, -1 }, {  0, -1 }, { -1,  0 }, {  6, -1 }},
-        [BS_64x32] = {{  0, -1 }, { -1,  0 }, {  4, -1 }, { -1,  2 },
-                      { -1, -1 }, {  0, -3 }, { -3,  0 }, {  2, -1 }},
-        [BS_32x64] = {{ -1,  0 }, {  0, -1 }, { -1,  4 }, {  2, -1 },
-                      { -1, -1 }, { -3,  0 }, {  0, -3 }, { -1,  2 }},
-        [BS_32x32] = {{  1, -1 }, { -1,  1 }, {  2, -1 }, { -1,  2 },
-                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
-        [BS_32x16] = {{  0, -1 }, { -1,  0 }, {  2, -1 }, { -1, -1 },
-                      { -1,  1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
-        [BS_16x32] = {{ -1,  0 }, {  0, -1 }, { -1,  2 }, { -1, -1 },
-                      {  1, -1 }, { -3,  0 }, {  0, -3 }, { -3, -3 }},
-        [BS_16x16] = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1,  1 },
-                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
-        [BS_16x8]  = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1, -1 },
-                      {  0, -2 }, { -2,  0 }, { -2, -1 }, { -1, -2 }},
-        [BS_8x16]  = {{ -1,  0 }, {  0, -1 }, { -1,  1 }, { -1, -1 },
-                      { -2,  0 }, {  0, -2 }, { -1, -2 }, { -2, -1 }},
-        [BS_8x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
-                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
-        [BS_8x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
-                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
-        [BS_4x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
-                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
-        [BS_4x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
-                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
+        [BS_64x64] = { {  3, -1 }, { -1,  3 }, {  4, -1 }, { -1,  4 },
+                       { -1, -1 }, {  0, -1 }, { -1,  0 }, {  6, -1 } },
+        [BS_64x32] = { {  0, -1 }, { -1,  0 }, {  4, -1 }, { -1,  2 },
+                       { -1, -1 }, {  0, -3 }, { -3,  0 }, {  2, -1 } },
+        [BS_32x64] = { { -1,  0 }, {  0, -1 }, { -1,  4 }, {  2, -1 },
+                       { -1, -1 }, { -3,  0 }, {  0, -3 }, { -1,  2 } },
+        [BS_32x32] = { {  1, -1 }, { -1,  1 }, {  2, -1 }, { -1,  2 },
+                       { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 } },
+        [BS_32x16] = { {  0, -1 }, { -1,  0 }, {  2, -1 }, { -1, -1 },
+                       { -1,  1 }, {  0, -3 }, { -3,  0 }, { -3, -3 } },
+        [BS_16x32] = { { -1,  0 }, {  0, -1 }, { -1,  2 }, { -1, -1 },
+                       {  1, -1 }, { -3,  0 }, {  0, -3 }, { -3, -3 } },
+        [BS_16x16] = { {  0, -1 }, { -1,  0 }, {  1, -1 }, { -1,  1 },
+                       { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 } },
+        [BS_16x8]  = { {  0, -1 }, { -1,  0 }, {  1, -1 }, { -1, -1 },
+                       {  0, -2 }, { -2,  0 }, { -2, -1 }, { -1, -2 } },
+        [BS_8x16]  = { { -1,  0 }, {  0, -1 }, { -1,  1 }, { -1, -1 },
+                       { -2,  0 }, {  0, -2 }, { -1, -2 }, { -2, -1 } },
+        [BS_8x8]   = { {  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
+                       { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
+        [BS_8x4]   = { {  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
+                       { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
+        [BS_4x8]   = { {  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
+                       { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
+        [BS_4x4]   = { {  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
+                       { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
     };
     VP9Block *b = s->b;
     int row = s->row, col = s->col, row7 = s->row7;
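
Note: as the scan loop further down uses them (c = p[i][0] + col, r = p[i][1] + row), each pair in this table is a { column, row } offset in 8x8-block units pointing at a candidate motion-vector neighbor, ordered roughly from most to least likely to match. For instance:

    // [BS_16x16] entry 0 is {  0, -1 }: same column, one row up (block above);
    //            entry 1 is { -1,  0 }: the block directly to the left.
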
@@ -136,39 +136,37 @@ static void find_ref_mvs(VP9Context *s,
         if (row > 0) {
             struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[(row - 1) * s->sb_cols * 8 + col];
 
-            if (mv->ref[0] == ref) {
+            if (mv->ref[0] == ref)
                 RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]);
-            } else if (mv->ref[1] == ref) {
+            else if (mv->ref[1] == ref)
                 RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]);
-            }
         }
         if (col > s->tile_col_start) {
             struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[row * s->sb_cols * 8 + col - 1];
 
-            if (mv->ref[0] == ref) {
+            if (mv->ref[0] == ref)
                 RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]);
-            } else if (mv->ref[1] == ref) {
+            else if (mv->ref[1] == ref)
                 RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][1]);
-            }
         }
         i = 2;
     } else {
        i = 0;
     }
 
-    // previously coded MVs in this neighbourhood, using same reference frame
+    // previously coded MVs in this neighborhood, using same reference frame
     for (; i < 8; i++) {
         int c = p[i][0] + col, r = p[i][1] + row;
 
-        if (c >= s->tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
+        if (c >= s->tile_col_start && c < s->cols &&
+            r >= 0 && r < s->rows) {
             struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];
 
-            if (mv->ref[0] == ref) {
+            if (mv->ref[0] == ref)
                 RETURN_MV(mv->mv[0]);
-            } else if (mv->ref[1] == ref) {
+            else if (mv->ref[1] == ref)
                 RETURN_MV(mv->mv[1]);
-            }
         }
     }
 
     // MV at this position in previous frame, using same reference frame
     if (s->s.h.use_last_frame_mvs) {
@@ -176,12 +174,11 @@ static void find_ref_mvs(VP9Context *s,
         if (!s->s.frames[REF_FRAME_MVPAIR].uses_2pass)
             ff_thread_await_progress(&s->s.frames[REF_FRAME_MVPAIR].tf, row >> 3, 0);
-        if (mv->ref[0] == ref) {
+        if (mv->ref[0] == ref)
             RETURN_MV(mv->mv[0]);
-        } else if (mv->ref[1] == ref) {
+        else if (mv->ref[1] == ref)
             RETURN_MV(mv->mv[1]);
-        }
     }
 
 #define RETURN_SCALE_MV(mv, scale) \
     do { \
@@ -193,16 +190,16 @@ static void find_ref_mvs(VP9Context *s,
         } \
     } while (0)
 
-    // previously coded MVs in this neighbourhood, using different reference frame
+    // previously coded MVs in this neighborhood, using different reference frame
     for (i = 0; i < 8; i++) {
         int c = p[i][0] + col, r = p[i][1] + row;
 
         if (c >= s->tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
             struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];
 
-            if (mv->ref[0] != ref && mv->ref[0] >= 0) {
-                RETURN_SCALE_MV(mv->mv[0], s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
-            }
+            if (mv->ref[0] != ref && mv->ref[0] >= 0)
+                RETURN_SCALE_MV(mv->mv[0],
+                                s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
             if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
                 // BUG - libvpx has this condition regardless of whether
                 // we used the first ref MV and pre-scaling
@@ -217,9 +214,8 @@ static void find_ref_mvs(VP9Context *s,
         struct VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];
 
         // no need to await_progress, because we already did that above
-        if (mv->ref[0] != ref && mv->ref[0] >= 0) {
+        if (mv->ref[0] != ref && mv->ref[0] >= 0)
             RETURN_SCALE_MV(mv->mv[0], s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
-        }
         if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
             // BUG - libvpx has this condition regardless of whether
             // we used the first ref MV and pre-scaling
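
Note: the scale argument of RETURN_SCALE_MV only ever mirrors the candidate; when the two references sit on opposite temporal sides of the current frame (differing signbias), the borrowed vector must be negated. A sketch of the macro body, which this diff elides (assumed from the call sites, not shown in the commit):

    #define RETURN_SCALE_MV(mv, scale)                 \
        do {                                           \
            if (scale) {                               \
                VP56mv mv_temp = { -(mv).x, -(mv).y }; \
                RETURN_MV(mv_temp);                    \
            } else {                                   \
                RETURN_MV(mv);                         \
            }                                          \
        } while (0)
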
@@ -252,7 +248,8 @@ static av_always_inline int read_mv_component(VP9Context *s, int idx, int hp)
             s->counts.mv_comp[idx].bits[m][bit]++;
         }
         n <<= 3;
-        bit = vp8_rac_get_tree(&s->c, ff_vp9_mv_fp_tree, s->prob.p.mv_comp[idx].fp);
+        bit = vp8_rac_get_tree(&s->c, ff_vp9_mv_fp_tree,
+                               s->prob.p.mv_comp[idx].fp);
         n |= bit << 1;
         s->counts.mv_comp[idx].fp[bit]++;
         if (hp) {
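
Note on the shifts: a motion-vector component is built up in 1/8-pel units as an integer magnitude, then a 2-bit fractional (quarter-pel) part, then one half-precision bit (hedged summary of the layout, not a quote from the source):

    // n <<= 3       make room for fp and hp below the integer part
    // n |= fp << 1  2-bit fractional (quarter-pel) part
    // n |= hp       final 1/8-pel bit; the sign is applied afterwards
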
@@ -302,7 +299,8 @@ void ff_vp9_fill_mv(VP9Context *s, VP56mv *mv, int mode, int sb)
                      mode == NEWMV ? -1 : sb);
         // FIXME maybe move this code into find_ref_mvs()
         if ((mode == NEWMV || sb == -1) &&
-            !(hp = s->s.h.highprecisionmvs && abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
+            !(hp = s->s.h.highprecisionmvs &&
+                   abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
             if (mv[0].y & 1) {
                 if (mv[0].y < 0)
                     mv[0].y++;
@@ -332,7 +330,8 @@ void ff_vp9_fill_mv(VP9Context *s, VP56mv *mv, int mode, int sb)
         find_ref_mvs(s, &mv[1], b->ref[1], 1, mode == NEARMV,
                      mode == NEWMV ? -1 : sb);
         if ((mode == NEWMV || sb == -1) &&
-            !(hp = s->s.h.highprecisionmvs && abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
+            !(hp = s->s.h.highprecisionmvs &&
+                   abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
             if (mv[1].y & 1) {
                 if (mv[1].y < 0)
                     mv[1].y++;
...
@@ -76,16 +76,19 @@ void ff_vp9_adapt_probs(VP9Context *s)
     // skip flag
     for (i = 0; i < 3; i++)
-        adapt_prob(&p->skip[i], s->counts.skip[i][0], s->counts.skip[i][1], 20, 128);
+        adapt_prob(&p->skip[i], s->counts.skip[i][0],
+                   s->counts.skip[i][1], 20, 128);
 
     // intra/inter flag
     for (i = 0; i < 4; i++)
-        adapt_prob(&p->intra[i], s->counts.intra[i][0], s->counts.intra[i][1], 20, 128);
+        adapt_prob(&p->intra[i], s->counts.intra[i][0],
+                   s->counts.intra[i][1], 20, 128);
 
     // comppred flag
     if (s->s.h.comppredmode == PRED_SWITCHABLE) {
         for (i = 0; i < 5; i++)
-            adapt_prob(&p->comp[i], s->counts.comp[i][0], s->counts.comp[i][1], 20, 128);
+            adapt_prob(&p->comp[i], s->counts.comp[i][0],
+                       s->counts.comp[i][1], 20, 128);
     }
 
     // reference frames
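
Note: adapt_prob() merges the coded probability toward the empirical one observed in the just-decoded frame, with the step scaled by how many samples were counted, saturating at max_count (20 here) and capped by update_factor (128, i.e. at most half the remaining distance). A hedged sketch of that merge, assuming FFmpeg's av_clip/FFMIN helpers; the real implementation may differ in rounding details:

    static void adapt_prob_sketch(uint8_t *p, unsigned ct0, unsigned ct1,
                                  int max_count, int update_factor)
    {
        unsigned ct = ct0 + ct1;
        int empirical, factor;

        if (!ct)
            return; // no samples seen: keep the previous probability
        empirical = av_clip(((int64_t)ct0 * 256 + (ct >> 1)) / ct, 1, 255);
        factor    = update_factor * FFMIN(ct, max_count) / max_count;
        *p       += ((empirical - *p) * factor + 128) >> 8;
    }
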
@@ -121,7 +124,8 @@ void ff_vp9_adapt_probs(VP9Context *s)
     for (i = 0; i < 2; i++) {
         unsigned *c16 = s->counts.tx16p[i], *c32 = s->counts.tx32p[i];
 
-        adapt_prob(&p->tx8p[i], s->counts.tx8p[i][0], s->counts.tx8p[i][1], 20, 128);
+        adapt_prob(&p->tx8p[i], s->counts.tx8p[i][0],
+                   s->counts.tx8p[i][1], 20, 128);
         adapt_prob(&p->tx16p[i][0], c16[0], c16[1] + c16[2], 20, 128);
         adapt_prob(&p->tx16p[i][1], c16[1], c16[2], 20, 128);
         adapt_prob(&p->tx32p[i][0], c32[0], c32[1] + c32[2] + c32[3], 20, 128);
@@ -171,7 +175,8 @@ void ff_vp9_adapt_probs(VP9Context *s)
         pp = p->mv_comp[i].classes;
         c = s->counts.mv_comp[i].classes;
-        sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
+        sum = c[1] + c[2] + c[3] + c[4] + c[5] +
+              c[6] + c[7] + c[8] + c[9] + c[10];
         adapt_prob(&pp[0], c[0], sum, 20, 128);
         sum -= c[1];
         adapt_prob(&pp[1], c[1], sum, 20, 128);
@@ -208,7 +213,8 @@ void ff_vp9_adapt_probs(VP9Context *s)
             adapt_prob(&pp[2], c[2], c[3], 20, 128);
             if (s->s.h.highprecisionmvs) {
-                adapt_prob(&p->mv_comp[i].class0_hp, s->counts.mv_comp[i].class0_hp[0],
+                adapt_prob(&p->mv_comp[i].class0_hp,
+                           s->counts.mv_comp[i].class0_hp[0],
                            s->counts.mv_comp[i].class0_hp[1], 20, 128);
                 adapt_prob(&p->mv_comp[i].hp, s->counts.mv_comp[i].hp[0],
                            s->counts.mv_comp[i].hp[1], 20, 128);
@@ -231,7 +237,8 @@ void ff_vp9_adapt_probs(VP9Context *s)
         adapt_prob(&pp[3], s2, sum, 20, 128);
         s2 -= c[HOR_PRED];
         adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
-        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
+        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED],
+                   20, 128);
         sum -= c[DIAG_DOWN_LEFT_PRED];
         adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
         sum -= c[VERT_LEFT_PRED];
@@ -255,7 +262,8 @@ void ff_vp9_adapt_probs(VP9Context *s)
         adapt_prob(&pp[3], s2, sum, 20, 128);
         s2 -= c[HOR_PRED];
         adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
-        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
+        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED],
+                   20, 128);
         sum -= c[DIAG_DOWN_LEFT_PRED];
        adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
         sum -= c[VERT_LEFT_PRED];
...