Commit 875f6955 authored by Clément Bœsch

lavc/vp9: misc cosmetics

Imported from Libav
parent ff8436ba
@@ -252,7 +252,7 @@ static int update_block_buffers(AVCodecContext *avctx)
     return 0;
 }
 
-// for some reason the sign bit is at the end, not the start, of a bit sequence
+// The sign bit is at the end, not the start, of a bit sequence
 static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
 {
     int v = get_bits(gb, n);
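
get_sbits_inv() exists because VP9 writes the magnitude of a signed field first and the sign bit last. A minimal sketch of the complete helper (the hunk above shows only its first statement; the return expression here is inferred from that comment):

    // Read an n-bit magnitude, then one trailing sign bit (1 = negative).
    static av_always_inline int get_sbits_inv(GetBitContext *gb, int n)
    {
        int v = get_bits(gb, n);       // magnitude comes first ...
        return get_bits1(gb) ? -v : v; // ... the sign bit is at the end
    }
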
@@ -292,13 +292,13 @@ static int update_prob(VP56RangeCoder *c, int p)
     /* This code is trying to do a differential probability update. For a
      * current probability A in the range [1, 255], the difference to a new
-     * probability of any value can be expressed differentially as 1-A,255-A
+     * probability of any value can be expressed differentially as 1-A, 255-A
      * where some part of this (absolute range) exists both in positive as
      * well as the negative part, whereas another part only exists in one
      * half. We're trying to code this shared part differentially, i.e.
      * times two where the value of the lowest bit specifies the sign, and
      * the single part is then coded on top of this. This absolute difference
-     * then again has a value of [0,254], but a bigger value in this range
+     * then again has a value of [0, 254], but a bigger value in this range
      * indicates that we're further away from the original value A, so we
      * can code this as a VLC code, since higher values are increasingly
      * unlikely. The first 20 values in inv_map_table[] allow 'cheap, rough'
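
The recentering scheme this comment describes - the shared part coded times two with the low bit as sign, the one-sided remainder stacked on top - is easiest to see in its inverse mapping. A sketch in the style of libvpx's inv_recenter_nonneg(), shown for illustration only:

    // v: decoded non-negative delta code; m: distance from the old
    // probability to the nearer end of [1, 255].
    static int inv_recenter_nonneg(int v, int m)
    {
        if (v > 2 * m)                 // one-sided part beyond the shared range
            return v;
        if (v & 1)                     // shared part: the low bit picks the sign ...
            return m - ((v + 1) >> 1);
        return m + (v >> 1);           // ... the remaining bits are the magnitude
    }
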
@@ -414,12 +414,15 @@ static int decode_frame_header(AVCodecContext *avctx,
         *ref = get_bits(&s->gb, 3);
         return 0;
     }
+
     s->last_keyframe = s->s.h.keyframe;
     s->s.h.keyframe = !get_bits1(&s->gb);
+
     last_invisible = s->s.h.invisible;
     s->s.h.invisible = !get_bits1(&s->gb);
     s->s.h.errorres = get_bits1(&s->gb);
+
     s->s.h.use_last_frame_mvs = !s->s.h.errorres && !last_invisible;
     if (s->s.h.keyframe) {
         if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
             av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
@@ -434,8 +437,8 @@ static int decode_frame_header(AVCodecContext *avctx,
         if (get_bits1(&s->gb)) // display size
             skip_bits(&s->gb, 32);
     } else {
         s->s.h.intraonly = s->s.h.invisible ? get_bits1(&s->gb) : 0;
-        s->s.h.resetctx = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
+        s->s.h.resetctx  = s->s.h.errorres ? 0 : get_bits(&s->gb, 2);
         if (s->s.h.intraonly) {
             if (get_bits_long(&s->gb, 24) != VP9_SYNCCODE) { // synccode
                 av_log(avctx, AV_LOG_ERROR, "Invalid sync code\n");
@@ -565,11 +568,10 @@ static int decode_frame_header(AVCodecContext *avctx,
             for (i = 0; i < 7; i++)
                 s->s.h.segmentation.prob[i] = get_bits1(&s->gb) ?
                     get_bits(&s->gb, 8) : 255;
-            if ((s->s.h.segmentation.temporal = get_bits1(&s->gb))) {
+            if ((s->s.h.segmentation.temporal = get_bits1(&s->gb)))
                 for (i = 0; i < 3; i++)
                     s->s.h.segmentation.pred_prob[i] = get_bits1(&s->gb) ?
                         get_bits(&s->gb, 8) : 255;
-            }
         }
 
         if (get_bits1(&s->gb)) {
@@ -734,9 +736,9 @@ static int decode_frame_header(AVCodecContext *avctx,
     } else {
         memset(&s->counts, 0, sizeof(s->counts));
     }
-    // FIXME is it faster to not copy here, but do it down in the fw updates
-    // as explicit copies if the fw update is missing (and skip the copy upon
-    // fw update)?
+    /* FIXME is it faster to not copy here, but do it down in the fw updates
+     * as explicit copies if the fw update is missing (and skip the copy upon
+     * fw update)? */
     s->prob.p = s->prob_ctx[c].p;
 
     // txfm updates
@@ -777,11 +779,10 @@ static int decode_frame_header(AVCodecContext *avctx,
                             if (m >= 3 && l == 0) // dc only has 3 pt
                                 break;
                             for (n = 0; n < 3; n++) {
-                                if (vp56_rac_get_prob_branchy(&s->c, 252)) {
+                                if (vp56_rac_get_prob_branchy(&s->c, 252))
                                     p[n] = update_prob(&s->c, r[n]);
-                                } else {
+                                else
                                     p[n] = r[n];
-                                }
                             }
                             p[3] = 0;
                         }
@@ -866,7 +867,8 @@ static int decode_frame_header(AVCodecContext *avctx,
                 for (k = 0; k < 3; k++)
                     if (vp56_rac_get_prob_branchy(&s->c, 252))
                         s->prob.p.partition[3 - i][j][k] =
-                            update_prob(&s->c, s->prob.p.partition[3 - i][j][k]);
+                            update_prob(&s->c,
+                                        s->prob.p.partition[3 - i][j][k]);
 
         // mv fields don't use the update_prob subexp model for some reason
         for (i = 0; i < 3; i++)
@@ -875,7 +877,8 @@ static int decode_frame_header(AVCodecContext *avctx,
         for (i = 0; i < 2; i++) {
             if (vp56_rac_get_prob_branchy(&s->c, 252))
-                s->prob.p.mv_comp[i].sign = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+                s->prob.p.mv_comp[i].sign =
+                    (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
 
             for (j = 0; j < 10; j++)
                 if (vp56_rac_get_prob_branchy(&s->c, 252))
@@ -883,7 +886,8 @@ static int decode_frame_header(AVCodecContext *avctx,
                         (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
 
             if (vp56_rac_get_prob_branchy(&s->c, 252))
-                s->prob.p.mv_comp[i].class0 = (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
+                s->prob.p.mv_comp[i].class0 =
+                    (vp8_rac_get_uint(&s->c, 7) << 1) | 1;
 
             for (j = 0; j < 10; j++)
                 if (vp56_rac_get_prob_branchy(&s->c, 252))
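
The '(vp8_rac_get_uint(&s->c, 7) << 1) | 1' pattern in these hunks is the whole story behind the comment above: mv probabilities are replaced by a raw 7-bit value rather than updated through the subexp model, scaled so the result is always a valid, odd, non-zero probability. The helper name below is invented for illustration:

    // A raw 7-bit value r (0..127) becomes one of 1, 3, 5, ..., 255.
    static int mv_prob_from_raw7(int r)
    {
        return (r << 1) | 1;
    }
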
@@ -1210,11 +1214,11 @@ static void loopfilter_sb(AVCodecContext *avctx, struct VP9Filter *lflvl,
     uint8_t (*uv_masks)[8][4] = lflvl->mask[s->ss_h | s->ss_v];
     int p;
 
-    // FIXME in how far can we interleave the v/h loopfilter calls? E.g.
-    // if you think of them as acting on a 8x8 block max, we can interleave
-    // each v/h within the single x loop, but that only works if we work on
-    // 8 pixel blocks, and we won't always do that (we want at least 16px
-    // to use SSE2 optimizations, perhaps 32 for AVX2)
+    /* FIXME: In how far can we interleave the v/h loopfilter calls? E.g.
+     * if you think of them as acting on a 8x8 block max, we can interleave
+     * each v/h within the single x loop, but that only works if we work on
+     * 8 pixel blocks, and we won't always do that (we want at least 16px
+     * to use SSE2 optimizations, perhaps 32 for AVX2) */
     filter_plane_cols(s, col, 0, 0, lflvl->level, lflvl->mask[0][0], dst, ls_y);
     filter_plane_rows(s, row, 0, 0, lflvl->level, lflvl->mask[0][1], dst, ls_y);
@@ -1485,14 +1489,12 @@ FF_ENABLE_DEPRECATION_WARNINGS
                                   yoff2, uvoff2, BL_64X64);
                 }
             }
-            if (s->pass != 2) {
+            if (s->pass != 2)
                 memcpy(&s->c_b[tile_col], &s->c, sizeof(s->c));
-            }
         }
 
-        if (s->pass == 1) {
+        if (s->pass == 1)
             continue;
-        }
 
         // backup pre-loopfilter reconstruction data for intra
         // prediction of next row of sb64s

...
@@ -33,10 +33,10 @@
 static const uint8_t bwh_tab[2][N_BS_SIZES][2] = {
     {
         { 16, 16 }, { 16, 8 }, { 8, 16 }, { 8, 8 }, { 8, 4 }, { 4, 8 },
         { 4, 4 }, { 4, 2 }, { 2, 4 }, { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 },
     }, {
         { 8, 8 }, { 8, 4 }, { 4, 8 }, { 4, 4 }, { 4, 2 }, { 2, 4 },
         { 2, 2 }, { 2, 1 }, { 1, 2 }, { 1, 1 }, { 1, 1 }, { 1, 1 }, { 1, 1 },
     }
 };
@@ -96,7 +96,7 @@ static void decode_mode(AVCodecContext *avctx)
     };
     static const uint8_t max_tx_for_bl_bp[N_BS_SIZES] = {
         TX_32X32, TX_32X32, TX_32X32, TX_32X32, TX_16X16, TX_16X16,
         TX_16X16, TX_8X8, TX_8X8, TX_8X8, TX_4X4, TX_4X4, TX_4X4
     };
     VP9Context *s = avctx->priv_data;
     VP9Block *b = s->b;
@@ -231,33 +231,45 @@ static void decode_mode(AVCodecContext *avctx)
             // FIXME the memory storage intermediates here aren't really
             // necessary, they're just there to make the code slightly
             // simpler for now
-            b->mode[0] = a[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
-                                 ff_vp9_default_kf_ymode_probs[a[0]][l[0]]);
+            b->mode[0] =
+            a[0]       = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
+                                          ff_vp9_default_kf_ymode_probs[a[0]][l[0]]);
             if (b->bs != BS_8x4) {
                 b->mode[1] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                  ff_vp9_default_kf_ymode_probs[a[1]][b->mode[0]]);
-                l[0] = a[1] = b->mode[1];
+                l[0] =
+                a[1] = b->mode[1];
             } else {
-                l[0] = a[1] = b->mode[1] = b->mode[0];
+                l[0] =
+                a[1] =
+                b->mode[1] = b->mode[0];
             }
             if (b->bs != BS_4x8) {
-                b->mode[2] = a[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
-                                     ff_vp9_default_kf_ymode_probs[a[0]][l[1]]);
+                b->mode[2] =
+                a[0]       = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
+                                              ff_vp9_default_kf_ymode_probs[a[0]][l[1]]);
                 if (b->bs != BS_8x4) {
                     b->mode[3] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                      ff_vp9_default_kf_ymode_probs[a[1]][b->mode[2]]);
-                    l[1] = a[1] = b->mode[3];
+                    l[1] =
+                    a[1] = b->mode[3];
                 } else {
-                    l[1] = a[1] = b->mode[3] = b->mode[2];
+                    l[1] =
+                    a[1] =
+                    b->mode[3] = b->mode[2];
                 }
             } else {
                 b->mode[2] = b->mode[0];
-                l[1] = a[1] = b->mode[3] = b->mode[1];
+                l[1] =
+                a[1] =
+                b->mode[3] = b->mode[1];
             }
         } else {
             b->mode[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                              ff_vp9_default_kf_ymode_probs[*a][*l]);
-            b->mode[3] = b->mode[2] = b->mode[1] = b->mode[0];
+            b->mode[3] =
+            b->mode[2] =
+            b->mode[1] = b->mode[0];
             // FIXME this can probably be optimized
             memset(a, b->mode[0], bwh_tab[0][b->bs][0]);
             memset(l, b->mode[0], bwh_tab[0][b->bs][1]);
@@ -300,7 +312,9 @@ static void decode_mode(AVCodecContext *avctx)
             b->mode[0] = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
                                           s->prob.p.y_mode[sz]);
-            b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0];
+            b->mode[1] =
+            b->mode[2] =
+            b->mode[3] = b->mode[0];
             s->counts.y_mode[sz][b->mode[3]]++;
         }
         b->uvmode = vp8_rac_get_tree(&s->c, ff_vp9_intramode_tree,
@@ -349,8 +363,8 @@ static void decode_mode(AVCodecContext *avctx)
                 } else {
                     c = (!s->above_intra_ctx[col] &&
                          s->above_ref_ctx[col] == s->s.h.fixcompref) ^
                         (!s->left_intra_ctx[row7] &&
                          s->left_ref_ctx[row & 7] == s->s.h.fixcompref);
                 }
             } else {
                 c = s->above_comp_ctx[col] ? 3 :
@@ -369,7 +383,7 @@ static void decode_mode(AVCodecContext *avctx)
         // read actual references
         // FIXME probably cache a few variables here to prevent repetitive
         // memory accesses below
-        if (b->comp) /* two references */ {
+        if (b->comp) { /* two references */
             int fix_idx = s->s.h.signbias[s->s.h.fixcompref], var_idx = !fix_idx, c, bit;
 
             b->ref[fix_idx] = s->s.h.fixcompref;
@@ -537,7 +551,7 @@ static void decode_mode(AVCodecContext *avctx)
                         c = 4 * (s->above_ref_ctx[col] == 1);
                     } else {
                         c = 2 * (s->left_ref_ctx[row7] == 1) +
                             2 * (s->above_ref_ctx[col] == 1);
                     }
                 } else {
                     if (s->above_intra_ctx[col] ||
@@ -570,7 +584,10 @@ static void decode_mode(AVCodecContext *avctx)
     if (b->bs <= BS_8x8) {
         if (s->s.h.segmentation.enabled && s->s.h.segmentation.feat[b->seg_id].skip_enabled) {
-            b->mode[0] = b->mode[1] = b->mode[2] = b->mode[3] = ZEROMV;
+            b->mode[0] =
+            b->mode[1] =
+            b->mode[2] =
+            b->mode[3] = ZEROMV;
         } else {
             static const uint8_t off[10] = {
                 3, 0, 0, 1, 0, 0, 0, 0, 0, 0
@@ -583,7 +600,9 @@ static void decode_mode(AVCodecContext *avctx)
             b->mode[0] = vp8_rac_get_tree(&s->c, ff_vp9_inter_mode_tree,
                                           s->prob.p.mv_mode[c]);
-            b->mode[1] = b->mode[2] = b->mode[3] = b->mode[0];
+            b->mode[1] =
+            b->mode[2] =
+            b->mode[3] = b->mode[0];
             s->counts.mv_mode[c][b->mode[0] - 10]++;
         }
     }
@@ -810,23 +829,23 @@ decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
         if (!val)
             break;
 skip_eob:
         if (!vp56_rac_get_prob_branchy(c, tp[1])) { // zero
             cnt[band][nnz][0]++;
             if (!--band_left)
                 band_left = band_counts[++band];
             cache[scan[i]] = 0;
             nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
             tp = p[band][nnz];
             if (++i == n_coeffs)
                 break; //invalid input; blocks should end with EOB
             goto skip_eob;
         }
 
         rc = scan[i];
         if (!vp56_rac_get_prob_branchy(c, tp[2])) { // one
             cnt[band][nnz][1]++;
             val = 1;
             cache[rc] = 1;
         } else {
             // fill in p[3-10] (model fill) - only once per frame for each pos
@@ -838,16 +857,16 @@ decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
                 if (!vp56_rac_get_prob_branchy(c, tp[4])) {
                     cache[rc] = val = 2;
                 } else {
                     val = 3 + vp56_rac_get_prob(c, tp[5]);
                     cache[rc] = 3;
                 }
             } else if (!vp56_rac_get_prob_branchy(c, tp[6])) { // cat1/2
                 cache[rc] = 4;
                 if (!vp56_rac_get_prob_branchy(c, tp[7])) {
-                    val = 5 + vp56_rac_get_prob(c, 159);
+                    val = vp56_rac_get_prob(c, 159) + 5;
                 } else {
-                    val = 7 + (vp56_rac_get_prob(c, 165) << 1);
+                    val  = (vp56_rac_get_prob(c, 165) << 1) + 7;
                     val += vp56_rac_get_prob(c, 145);
                 }
             } else { // cat 3-6
                 cache[rc] = 5;
@@ -863,11 +882,11 @@ decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
                         val += vp56_rac_get_prob(c, 135);
                     }
                 } else if (!vp56_rac_get_prob_branchy(c, tp[10])) {
-                    val = 35 + (vp56_rac_get_prob(c, 180) << 4);
+                    val  = (vp56_rac_get_prob(c, 180) << 4) + 35;
                     val += (vp56_rac_get_prob(c, 157) << 3);
                     val += (vp56_rac_get_prob(c, 141) << 2);
                     val += (vp56_rac_get_prob(c, 134) << 1);
                     val += vp56_rac_get_prob(c, 130);
                 } else {
                     val = 67;
                     if (!is8bitsperpixel) {
@@ -878,20 +897,20 @@ decode_coeffs_b_generic(VP56RangeCoder *c, int16_t *coef, int n_coeffs,
                         val += (vp56_rac_get_prob(c, 255) << 15);
                         val += (vp56_rac_get_prob(c, 255) << 14);
                     }
                     val += (vp56_rac_get_prob(c, 254) << 13);
                     val += (vp56_rac_get_prob(c, 254) << 12);
                     val += (vp56_rac_get_prob(c, 254) << 11);
                     val += (vp56_rac_get_prob(c, 252) << 10);
                     val += (vp56_rac_get_prob(c, 249) << 9);
                     val += (vp56_rac_get_prob(c, 243) << 8);
                     val += (vp56_rac_get_prob(c, 230) << 7);
                     val += (vp56_rac_get_prob(c, 196) << 6);
                     val += (vp56_rac_get_prob(c, 177) << 5);
                     val += (vp56_rac_get_prob(c, 153) << 4);
                     val += (vp56_rac_get_prob(c, 140) << 3);
                     val += (vp56_rac_get_prob(c, 133) << 2);
                     val += (vp56_rac_get_prob(c, 130) << 1);
                     val += vp56_rac_get_prob(c, 129);
                 }
             }
         }
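
For orientation, the coefficient tokens decoded above form a ladder of categories, each a base value plus a fixed number of extra probability-coded bits. A summary of the ranges visible in this function (cat3 and cat4 fall in the elided part of the hunk and follow the same pattern):

    // one:  val = 1
    // two/three/four: val = 2, or 3 + one extra bit -> 3..4
    // cat1: base  5 + 1 extra bit   ->  5..6
    // cat2: base  7 + 2 extra bits  ->  7..10
    // cat3: base 11 + 3 extra bits  -> 11..18
    // cat4: base 19 + 4 extra bits  -> 19..34
    // cat5: base 35 + 5 extra bits  -> 35..66
    // cat6: base 67 + 14 extra bits (two more high bits at 16 bpp)
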
@@ -966,7 +985,7 @@ static av_always_inline int decode_coeffs(AVCodecContext *avctx, int is8bitsperpixel)
     int w4 = bwh_tab[1][b->bs][0] << 1, h4 = bwh_tab[1][b->bs][1] << 1;
     int end_x = FFMIN(2 * (s->cols - col), w4);
     int end_y = FFMIN(2 * (s->rows - row), h4);
-    int n, pl, x, y, res;
+    int n, pl, x, y, ret;
     int16_t (*qmul)[2] = s->s.h.segmentation.feat[b->seg_id].qmul;
     int tx = 4 * s->s.h.lossless + b->tx;
     const int16_t * const *yscans = ff_vp9_scans[tx];
@@ -999,16 +1018,16 @@ static av_always_inline int decode_coeffs(AVCodecContext *avctx, int is8bitsperpixel)
     for (n = 0, y = 0; y < end_y; y += step) { \
         for (x = 0; x < end_x; x += step, n += step * step) { \
             enum TxfmType txtp = ff_vp9_intra_txfm_type[b->mode[mode_index]]; \
-            res = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
+            ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
                   (s, s->block + 16 * n * bytesperpixel, 16 * step * step, \
                    c, e, p, a[x] + l[y], yscans[txtp], \
                    ynbs[txtp], y_band_counts, qmul[0]); \
-            a[x] = l[y] = !!res; \
-            total_coeff |= !!res; \
+            a[x] = l[y] = !!ret; \
+            total_coeff |= !!ret; \
             if (step >= 4) { \
-                AV_WN16A(&s->eob[n], res); \
+                AV_WN16A(&s->eob[n], ret); \
             } else { \
-                s->eob[n] = res; \
+                s->eob[n] = ret; \
             } \
         } \
     }
@@ -1073,16 +1092,16 @@ static av_always_inline int decode_coeffs(AVCodecContext *avctx, int is8bitsperpixel)
 #define DECODE_UV_COEF_LOOP(step, v) \
     for (n = 0, y = 0; y < end_y; y += step) { \
         for (x = 0; x < end_x; x += step, n += step * step) { \
-            res = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
+            ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
                   (s, s->uvblock[pl] + 16 * n * bytesperpixel, \
                    16 * step * step, c, e, p, a[x] + l[y], \
                    uvscan, uvnb, uv_band_counts, qmul[1]); \
-            a[x] = l[y] = !!res; \
-            total_coeff |= !!res; \
+            a[x] = l[y] = !!ret; \
+            total_coeff |= !!ret; \
             if (step >= 4) { \
-                AV_WN16A(&s->uveob[pl][n], res); \
+                AV_WN16A(&s->uveob[pl][n], ret); \
             } else { \
-                s->uveob[pl][n] = res; \
+                s->uveob[pl][n] = ret; \
             } \
         } \
     }
@@ -1144,26 +1163,26 @@ static av_always_inline int check_intra_mode(VP9Context *s, int mode, uint8_t **
     int have_right = x < w - 1;
     int bpp = s->s.h.bpp;
     static const uint8_t mode_conv[10][2 /* have_left */][2 /* have_top */] = {
         [VERT_PRED]            = { { DC_127_PRED, VERT_PRED },
                                    { DC_127_PRED, VERT_PRED } },
         [HOR_PRED]             = { { DC_129_PRED, DC_129_PRED },
                                    { HOR_PRED, HOR_PRED } },
         [DC_PRED]              = { { DC_128_PRED, TOP_DC_PRED },
                                    { LEFT_DC_PRED, DC_PRED } },
         [DIAG_DOWN_LEFT_PRED]  = { { DC_127_PRED, DIAG_DOWN_LEFT_PRED },
                                    { DC_127_PRED, DIAG_DOWN_LEFT_PRED } },
         [DIAG_DOWN_RIGHT_PRED] = { { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED },
                                    { DIAG_DOWN_RIGHT_PRED, DIAG_DOWN_RIGHT_PRED } },
         [VERT_RIGHT_PRED]      = { { VERT_RIGHT_PRED, VERT_RIGHT_PRED },
                                    { VERT_RIGHT_PRED, VERT_RIGHT_PRED } },
         [HOR_DOWN_PRED]        = { { HOR_DOWN_PRED, HOR_DOWN_PRED },
                                    { HOR_DOWN_PRED, HOR_DOWN_PRED } },
         [VERT_LEFT_PRED]       = { { DC_127_PRED, VERT_LEFT_PRED },
                                    { DC_127_PRED, VERT_LEFT_PRED } },
         [HOR_UP_PRED]          = { { DC_129_PRED, DC_129_PRED },
                                    { HOR_UP_PRED, HOR_UP_PRED } },
         [TM_VP8_PRED]          = { { DC_129_PRED, VERT_PRED },
                                    { HOR_PRED, TM_VP8_PRED } },
     };
     static const struct {
         uint8_t needs_left:1;
@@ -1176,12 +1195,16 @@ static av_always_inline int check_intra_mode(VP9Context *s, int mode, uint8_t **
         [HOR_PRED]             = { .needs_left = 1 },
         [DC_PRED]              = { .needs_top = 1, .needs_left = 1 },
         [DIAG_DOWN_LEFT_PRED]  = { .needs_top = 1, .needs_topright = 1 },
-        [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
-        [VERT_RIGHT_PRED]      = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
-        [HOR_DOWN_PRED]        = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
+        [DIAG_DOWN_RIGHT_PRED] = { .needs_left = 1, .needs_top = 1,
+                                   .needs_topleft = 1 },
+        [VERT_RIGHT_PRED]      = { .needs_left = 1, .needs_top = 1,
+                                   .needs_topleft = 1 },
+        [HOR_DOWN_PRED]        = { .needs_left = 1, .needs_top = 1,
+                                   .needs_topleft = 1 },
         [VERT_LEFT_PRED]       = { .needs_top = 1, .needs_topright = 1 },
         [HOR_UP_PRED]          = { .needs_left = 1, .invert_left = 1 },
-        [TM_VP8_PRED]          = { .needs_left = 1, .needs_top = 1, .needs_topleft = 1 },
+        [TM_VP8_PRED]          = { .needs_left = 1, .needs_top = 1,
+                                   .needs_topleft = 1 },
         [LEFT_DC_PRED]         = { .needs_left = 1 },
         [TOP_DC_PRED]          = { .needs_top = 1 },
         [DC_128_PRED]          = { 0 },
@@ -1353,7 +1376,7 @@ static av_always_inline void intra_recon(AVCodecContext *avctx, ptrdiff_t y_off,
     }
 
     // U/V
-    w4 >>= s->ss_h;
+    w4    >>= s->ss_h;
     end_x >>= s->ss_h;
     end_y >>= s->ss_v;
     step = 1 << (b->uvtx * 2);
@@ -1679,6 +1702,7 @@ static av_always_inline void inter_recon(AVCodecContext *avctx, int bytesperpixel)
             inter_pred_16bpp(avctx);
         }
     }
+
     if (!b->skip) {
         /* mostly copied intra_recon() */
@@ -1808,8 +1832,8 @@ static av_always_inline void mask_edges(uint8_t (*mask)[8][4], int ss_h, int ss_v,
     if (!skip_inter) {
         int mask_id = (tx == TX_8X8);
-        static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
         int l2 = tx + ss_h - 1, step1d;
+        static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
         int m_row = m_col & masks[l2];
 
         // at odd UV col/row edges tx16/tx32 loopfilter edges, force
@@ -1874,10 +1898,12 @@ void ff_vp9_decode_block(AVCodecContext *avctx, int row, int col,
     s->row7 = row & 7;
     s->col = col;
     s->col7 = col & 7;
+
     s->min_mv.x = -(128 + col * 64);
     s->min_mv.y = -(128 + row * 64);
     s->max_mv.x = 128 + (s->cols - col - w4) * 64;
     s->max_mv.y = 128 + (s->rows - row - h4) * 64;
+
     if (s->pass < 2) {
         b->bs = bs;
         b->bl = bl;

...
@@ -23,9 +23,9 @@
 #include "vp9data.h"
 
 const int8_t ff_vp9_partition_tree[3][2] = {
     { -PARTITION_NONE, 1 },             // '0'
     { -PARTITION_H, 2 },                // '10'
     { -PARTITION_V, -PARTITION_SPLIT }, // '110', '111'
 };
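
These int8_t tables (here and throughout this file) encode binary trees: a positive entry is the index of the next node, a negative entry is the negated leaf symbol, and the quoted bit strings in the comments are the paths. A sketch of the traversal, mirroring what vp8_rac_get_tree() does with range-coded bits (a plain bit-reader callback stands in here for illustration):

    static int decode_tree_symbol(const int8_t (*tree)[2],
                                  int (*get_bit)(void *ctx), void *ctx)
    {
        int i = 0;
        do {
            i = tree[i][get_bit(ctx)]; // 0 = first branch, 1 = second branch
        } while (i > 0);               // positive entries are node indices
        return -i;                     // negative entries are -symbol
    }
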
 const uint8_t ff_vp9_default_kf_partition_probs[4][4][3] = {
@@ -54,24 +54,24 @@ const uint8_t ff_vp9_default_kf_partition_probs[4][4][3] = {
 const int8_t ff_vp9_segmentation_tree[7][2] = {
     { 1, 2 },
     { 3, 4 },
     { 5, 6 },
     { -0, -1 }, // '00x'
     { -2, -3 }, // '01x'
     { -4, -5 }, // '10x'
     { -6, -7 }, // '11x'
 };
 
 const int8_t ff_vp9_intramode_tree[9][2] = {
     { -DC_PRED, 1 },                             // '0'
     { -TM_VP8_PRED, 2 },                         // '10'
     { -VERT_PRED, 3 },                           // '110'
     { 4, 6 },
     { -HOR_PRED, 5 },                            // '11100'
     { -DIAG_DOWN_RIGHT_PRED, -VERT_RIGHT_PRED }, // '11101x'
     { -DIAG_DOWN_LEFT_PRED, 7 },                 // '11110'
     { -VERT_LEFT_PRED, 8 },                      // '111110'
     { -HOR_DOWN_PRED, -HOR_UP_PRED },            // '111111x'
 };
 
 const uint8_t ff_vp9_default_kf_ymode_probs[10][10][9] = {
@@ -202,14 +202,14 @@ const uint8_t ff_vp9_default_kf_uvmode_probs[10][9] = {
 };
 
 const int8_t ff_vp9_inter_mode_tree[3][2] = {
     { -ZEROMV, 1 },      // '0'
     { -NEARESTMV, 2 },   // '10'
     { -NEARMV, -NEWMV }, // '11x'
 };
 
 const int8_t ff_vp9_filter_tree[2][2] = {
     { -0, 1 },  // '0'
     { -1, -2 }, // '1x'
 };
 
 const enum FilterMode ff_vp9_filter_lut[3] = {
@@ -1445,13 +1445,13 @@ const ProbContext ff_vp9_default_probs = {
         { 34, 3, },
         { 149, 144, },
     }, { /* mv_mode */
-        { 2, 173, 34},  // 0 = both zero mv
-        { 7, 145, 85},  // 1 = one zero mv + one a predicted mv
-        { 7, 166, 63},  // 2 = two predicted mvs
-        { 7, 94, 66},   // 3 = one predicted/zero and one new mv
-        { 8, 64, 46},   // 4 = two new mvs
-        { 17, 81, 31},  // 5 = one intra neighbour + x
-        { 25, 29, 30},  // 6 = two intra neighbours
+        { 2, 173, 34 },  // 0 = both zero mv
+        { 7, 145, 85 },  // 1 = one zero mv + one a predicted mv
+        { 7, 166, 63 },  // 2 = two predicted mvs
+        { 7, 94, 66 },   // 3 = one predicted/zero and one new mv
+        { 8, 64, 46 },   // 4 = two new mvs
+        { 17, 81, 31 },  // 5 = one intra neighbor + x
+        { 25, 29, 30 },  // 6 = two intra neighbors
     }, { /* intra */
         9, 102, 187, 225
     }, { /* comp */
@@ -1481,7 +1481,7 @@ const ProbContext ff_vp9_default_probs = {
         128, /* sign */
         { 224, 144, 192, 168, 192, 176, 192, 198, 198, 245 }, /* class */
         216, /* class0 */
-        { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240}, /* bits */
+        { 136, 140, 148, 160, 176, 192, 224, 234, 234, 240 }, /* bits */
         { /* class0_fp */
             { 128, 128, 64 },
             { 96, 112, 64 }
@@ -2212,26 +2212,26 @@ const uint8_t ff_vp9_default_coef_probs[4][2][2][6][6][3] = {
 };
 
 const int8_t ff_vp9_mv_joint_tree[3][2] = {
     { -MV_JOINT_ZERO, 1 },         // '0'
     { -MV_JOINT_H, 2 },            // '10'
     { -MV_JOINT_V, -MV_JOINT_HV }, // '11x'
 };
 
 const int8_t ff_vp9_mv_class_tree[10][2] = {
     { -0, 1 },   // '0'
     { -1, 2 },   // '10'
     { 3, 4 },
     { -2, -3 },  // '110x'
     { 5, 6 },
     { -4, -5 },  // '1110x'
     { -6, 7 },   // '11110'
     { 8, 9 },
     { -7, -8 },  // '111110x'
     { -9, -10 }, // '111111x'
 };
 
 const int8_t ff_vp9_mv_fp_tree[3][2] = {
     { -0, 1 },  // '0'
     { -1, 2 },  // '10'
     { -2, -3 }, // '11x'
 };
@@ -37,32 +37,32 @@ static void find_ref_mvs(VP9Context *s,
                          VP56mv *pmv, int ref, int z, int idx, int sb)
 {
     static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
-        [BS_64x64] = {{  3, -1 }, { -1,  3 }, {  4, -1 }, { -1,  4 },
-                      { -1, -1 }, {  0, -1 }, { -1,  0 }, {  6, -1 }},
-        [BS_64x32] = {{  0, -1 }, { -1,  0 }, {  4, -1 }, { -1,  2 },
-                      { -1, -1 }, {  0, -3 }, { -3,  0 }, {  2, -1 }},
-        [BS_32x64] = {{ -1,  0 }, {  0, -1 }, { -1,  4 }, {  2, -1 },
-                      { -1, -1 }, { -3,  0 }, {  0, -3 }, { -1,  2 }},
-        [BS_32x32] = {{  1, -1 }, { -1,  1 }, {  2, -1 }, { -1,  2 },
-                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
-        [BS_32x16] = {{  0, -1 }, { -1,  0 }, {  2, -1 }, { -1, -1 },
-                      { -1,  1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
-        [BS_16x32] = {{ -1,  0 }, {  0, -1 }, { -1,  2 }, { -1, -1 },
-                      {  1, -1 }, { -3,  0 }, {  0, -3 }, { -3, -3 }},
-        [BS_16x16] = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1,  1 },
-                      { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 }},
-        [BS_16x8]  = {{  0, -1 }, { -1,  0 }, {  1, -1 }, { -1, -1 },
-                      {  0, -2 }, { -2,  0 }, { -2, -1 }, { -1, -2 }},
-        [BS_8x16]  = {{ -1,  0 }, {  0, -1 }, { -1,  1 }, { -1, -1 },
-                      { -2,  0 }, {  0, -2 }, { -1, -2 }, { -2, -1 }},
-        [BS_8x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
-                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
-        [BS_8x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
-                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
-        [BS_4x8]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
-                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
-        [BS_4x4]   = {{  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
-                      { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 }},
+        [BS_64x64] = { {  3, -1 }, { -1,  3 }, {  4, -1 }, { -1,  4 },
+                       { -1, -1 }, {  0, -1 }, { -1,  0 }, {  6, -1 } },
+        [BS_64x32] = { {  0, -1 }, { -1,  0 }, {  4, -1 }, { -1,  2 },
+                       { -1, -1 }, {  0, -3 }, { -3,  0 }, {  2, -1 } },
+        [BS_32x64] = { { -1,  0 }, {  0, -1 }, { -1,  4 }, {  2, -1 },
+                       { -1, -1 }, { -3,  0 }, {  0, -3 }, { -1,  2 } },
+        [BS_32x32] = { {  1, -1 }, { -1,  1 }, {  2, -1 }, { -1,  2 },
+                       { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 } },
+        [BS_32x16] = { {  0, -1 }, { -1,  0 }, {  2, -1 }, { -1, -1 },
+                       { -1,  1 }, {  0, -3 }, { -3,  0 }, { -3, -3 } },
+        [BS_16x32] = { { -1,  0 }, {  0, -1 }, { -1,  2 }, { -1, -1 },
+                       {  1, -1 }, { -3,  0 }, {  0, -3 }, { -3, -3 } },
+        [BS_16x16] = { {  0, -1 }, { -1,  0 }, {  1, -1 }, { -1,  1 },
+                       { -1, -1 }, {  0, -3 }, { -3,  0 }, { -3, -3 } },
+        [BS_16x8]  = { {  0, -1 }, { -1,  0 }, {  1, -1 }, { -1, -1 },
+                       {  0, -2 }, { -2,  0 }, { -2, -1 }, { -1, -2 } },
+        [BS_8x16]  = { { -1,  0 }, {  0, -1 }, { -1,  1 }, { -1, -1 },
+                       { -2,  0 }, {  0, -2 }, { -1, -2 }, { -2, -1 } },
+        [BS_8x8]   = { {  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
+                       { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
+        [BS_8x4]   = { {  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
+                       { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
+        [BS_4x8]   = { {  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
+                       { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
+        [BS_4x4]   = { {  0, -1 }, { -1,  0 }, { -1, -1 }, {  0, -2 },
+                       { -2,  0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
     };
     VP9Block *b = s->b;
     int row = s->row, col = s->col, row7 = s->row7;
@@ -71,18 +71,18 @@ static void find_ref_mvs(VP9Context *s,
     uint32_t mem = INVALID_MV, mem_sub8x8 = INVALID_MV;
     int i;
 
 #define RETURN_DIRECT_MV(mv)                    \
     do {                                        \
         uint32_t m = AV_RN32A(&mv);             \
         if (!idx) {                             \
             AV_WN32A(pmv, m);                   \
             return;                             \
         } else if (mem == INVALID_MV) {         \
             mem = m;                            \
         } else if (m != mem) {                  \
             AV_WN32A(pmv, m);                   \
             return;                             \
         }                                       \
     } while (0)
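
RETURN_DIRECT_MV() implements "return the idx-th distinct candidate": with idx == 0 the first candidate wins immediately; with idx == 1 the first candidate is remembered in mem and the next differing one is returned. The same logic as a hypothetical standalone helper (name invented for illustration):

    // Returns 1 and stores the result when the scan is done, 0 to continue.
    static int pick_distinct_mv(uint32_t m, int idx, uint32_t *mem, uint32_t *out)
    {
        if (!idx) {                // idx 0: the first candidate wins
            *out = m;
            return 1;
        }
        if (*mem == INVALID_MV)    // remember candidate #0 ...
            *mem = m;
        else if (m != *mem) {      // ... then return the first one that differs
            *out = m;
            return 1;
        }
        return 0;
    }
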
     if (sb >= 0) {

@@ -94,79 +94,77 @@ static void find_ref_mvs(VP9Context *s,
         RETURN_DIRECT_MV(b->mv[0][z]);
     }
 #define RETURN_MV(mv)                                               \
     do {                                                            \
         if (sb > 0) {                                               \
             VP56mv tmp;                                             \
             uint32_t m;                                             \
             av_assert2(idx == 1);                                   \
             av_assert2(mem != INVALID_MV);                          \
             if (mem_sub8x8 == INVALID_MV) {                         \
                 clamp_mv(&tmp, &mv, s);                             \
                 m = AV_RN32A(&tmp);                                 \
                 if (m != mem) {                                     \
                     AV_WN32A(pmv, m);                               \
                     return;                                         \
                 }                                                   \
                 mem_sub8x8 = AV_RN32A(&mv);                         \
             } else if (mem_sub8x8 != AV_RN32A(&mv)) {               \
                 clamp_mv(&tmp, &mv, s);                             \
                 m = AV_RN32A(&tmp);                                 \
                 if (m != mem) {                                     \
                     AV_WN32A(pmv, m);                               \
                 } else {                                            \
                     /* BUG I'm pretty sure this isn't the intention */ \
                     AV_WN32A(pmv, 0);                               \
                 }                                                   \
                 return;                                             \
             }                                                       \
         } else {                                                    \
             uint32_t m = AV_RN32A(&mv);                             \
             if (!idx) {                                             \
                 clamp_mv(pmv, &mv, s);                              \
                 return;                                             \
             } else if (mem == INVALID_MV) {                         \
                 mem = m;                                            \
             } else if (m != mem) {                                  \
                 clamp_mv(pmv, &mv, s);                              \
                 return;                                             \
             }                                                       \
         }                                                           \
     } while (0)
         if (row > 0) {
             struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[(row - 1) * s->sb_cols * 8 + col];
 
-            if (mv->ref[0] == ref) {
+            if (mv->ref[0] == ref)
                 RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]);
-            } else if (mv->ref[1] == ref) {
+            else if (mv->ref[1] == ref)
                 RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]);
-            }
         }
 
         if (col > s->tile_col_start) {
             struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[row * s->sb_cols * 8 + col - 1];
 
-            if (mv->ref[0] == ref) {
+            if (mv->ref[0] == ref)
                 RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]);
-            } else if (mv->ref[1] == ref) {
+            else if (mv->ref[1] == ref)
                 RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][1]);
-            }
         }
 
         i = 2;
     } else {
         i = 0;
     }
-    // previously coded MVs in this neighbourhood, using same reference frame
+    // previously coded MVs in this neighborhood, using same reference frame
     for (; i < 8; i++) {
         int c = p[i][0] + col, r = p[i][1] + row;
 
-        if (c >= s->tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
+        if (c >= s->tile_col_start && c < s->cols &&
+            r >= 0 && r < s->rows) {
             struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];
 
-            if (mv->ref[0] == ref) {
+            if (mv->ref[0] == ref)
                 RETURN_MV(mv->mv[0]);
-            } else if (mv->ref[1] == ref) {
+            else if (mv->ref[1] == ref)
                 RETURN_MV(mv->mv[1]);
-            }
         }
     }
@@ -176,33 +174,32 @@ static void find_ref_mvs(VP9Context *s,
         if (!s->s.frames[REF_FRAME_MVPAIR].uses_2pass)
             ff_thread_await_progress(&s->s.frames[REF_FRAME_MVPAIR].tf, row >> 3, 0);
 
-        if (mv->ref[0] == ref) {
+        if (mv->ref[0] == ref)
             RETURN_MV(mv->mv[0]);
-        } else if (mv->ref[1] == ref) {
+        else if (mv->ref[1] == ref)
             RETURN_MV(mv->mv[1]);
-        }
     }
 #define RETURN_SCALE_MV(mv, scale)              \
     do {                                        \
         if (scale) {                            \
             VP56mv mv_temp = { -mv.x, -mv.y };  \
             RETURN_MV(mv_temp);                 \
         } else {                                \
             RETURN_MV(mv);                      \
         }                                       \
     } while (0)
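
RETURN_SCALE_MV() borrows a candidate from a different reference frame: when the two references carry opposite sign bias (they lie on opposite temporal sides of the current frame), the vector is mirrored so its direction stays meaningful. As a sketch (helper name invented for illustration):

    static VP56mv scale_candidate_mv(VP56mv mv, int bias_differs)
    {
        if (bias_differs) { // references point in opposite temporal directions
            mv.x = -mv.x;
            mv.y = -mv.y;
        }
        return mv;
    }
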
-    // previously coded MVs in this neighbourhood, using different reference frame
+    // previously coded MVs in this neighborhood, using different reference frame
     for (i = 0; i < 8; i++) {
         int c = p[i][0] + col, r = p[i][1] + row;
 
         if (c >= s->tile_col_start && c < s->cols && r >= 0 && r < s->rows) {
             struct VP9mvrefPair *mv = &s->s.frames[CUR_FRAME].mv[r * s->sb_cols * 8 + c];
 
-            if (mv->ref[0] != ref && mv->ref[0] >= 0) {
-                RETURN_SCALE_MV(mv->mv[0], s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
-            }
+            if (mv->ref[0] != ref && mv->ref[0] >= 0)
+                RETURN_SCALE_MV(mv->mv[0],
+                                s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
             if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
                 // BUG - libvpx has this condition regardless of whether
                 // we used the first ref MV and pre-scaling
@@ -217,9 +214,8 @@ static void find_ref_mvs(VP9Context *s,
         struct VP9mvrefPair *mv = &s->s.frames[REF_FRAME_MVPAIR].mv[row * s->sb_cols * 8 + col];
 
         // no need to await_progress, because we already did that above
-        if (mv->ref[0] != ref && mv->ref[0] >= 0) {
+        if (mv->ref[0] != ref && mv->ref[0] >= 0)
             RETURN_SCALE_MV(mv->mv[0], s->s.h.signbias[mv->ref[0]] != s->s.h.signbias[ref]);
-        }
         if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
             // BUG - libvpx has this condition regardless of whether
             // we used the first ref MV and pre-scaling
@@ -252,8 +248,9 @@ static av_always_inline int read_mv_component(VP9Context *s, int idx, int hp)
             s->counts.mv_comp[idx].bits[m][bit]++;
         }
         n <<= 3;
-        bit = vp8_rac_get_tree(&s->c, ff_vp9_mv_fp_tree, s->prob.p.mv_comp[idx].fp);
-        n |= bit << 1;
+        bit = vp8_rac_get_tree(&s->c, ff_vp9_mv_fp_tree,
+                               s->prob.p.mv_comp[idx].fp);
+        n  |= bit << 1;
         s->counts.mv_comp[idx].fp[bit]++;
         if (hp) {
             bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].hp);
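
The shifts in this hunk assemble the component magnitude in eighth-pel units: integer bits on top, the two fractional (quarter-pel) bits from ff_vp9_mv_fp_tree below them, and the high-precision (eighth-pel) bit last. Schematically (helper name invented for illustration):

    // n = (integer_part << 3) | (fp << 1) | hp
    static int assemble_mv_magnitude(int integer_part, int fp, int hp)
    {
        return (integer_part << 3) // whole-pel bits
             | (fp << 1)           // quarter-pel fraction, 0..3
             | hp;                 // eighth-pel bit
    }
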
@@ -302,7 +299,8 @@ void ff_vp9_fill_mv(VP9Context *s, VP56mv *mv, int mode, int sb)
                  mode == NEWMV ? -1 : sb);
     // FIXME maybe move this code into find_ref_mvs()
     if ((mode == NEWMV || sb == -1) &&
-        !(hp = s->s.h.highprecisionmvs && abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
+        !(hp = s->s.h.highprecisionmvs &&
+          abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
         if (mv[0].y & 1) {
             if (mv[0].y < 0)
                 mv[0].y++;
@@ -332,7 +330,8 @@ void ff_vp9_fill_mv(VP9Context *s, VP56mv *mv, int mode, int sb)
         find_ref_mvs(s, &mv[1], b->ref[1], 1, mode == NEARMV,
                      mode == NEWMV ? -1 : sb);
         if ((mode == NEWMV || sb == -1) &&
-            !(hp = s->s.h.highprecisionmvs && abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
+            !(hp = s->s.h.highprecisionmvs &&
+              abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
             if (mv[1].y & 1) {
                 if (mv[1].y < 0)
                     mv[1].y++;

...
@@ -76,33 +76,36 @@ void ff_vp9_adapt_probs(VP9Context *s)
     // skip flag
     for (i = 0; i < 3; i++)
-        adapt_prob(&p->skip[i], s->counts.skip[i][0], s->counts.skip[i][1], 20, 128);
+        adapt_prob(&p->skip[i], s->counts.skip[i][0],
+                   s->counts.skip[i][1], 20, 128);
 
     // intra/inter flag
     for (i = 0; i < 4; i++)
-        adapt_prob(&p->intra[i], s->counts.intra[i][0], s->counts.intra[i][1], 20, 128);
+        adapt_prob(&p->intra[i], s->counts.intra[i][0],
+                   s->counts.intra[i][1], 20, 128);
 
     // comppred flag
     if (s->s.h.comppredmode == PRED_SWITCHABLE) {
         for (i = 0; i < 5; i++)
-            adapt_prob(&p->comp[i], s->counts.comp[i][0], s->counts.comp[i][1], 20, 128);
+            adapt_prob(&p->comp[i], s->counts.comp[i][0],
+                       s->counts.comp[i][1], 20, 128);
     }
 
     // reference frames
     if (s->s.h.comppredmode != PRED_SINGLEREF) {
         for (i = 0; i < 5; i++)
             adapt_prob(&p->comp_ref[i], s->counts.comp_ref[i][0],
                        s->counts.comp_ref[i][1], 20, 128);
     }
 
     if (s->s.h.comppredmode != PRED_COMPREF) {
         for (i = 0; i < 5; i++) {
             uint8_t *pp = p->single_ref[i];
             unsigned (*c)[2] = s->counts.single_ref[i];
 
             adapt_prob(&pp[0], c[0][0], c[0][1], 20, 128);
             adapt_prob(&pp[1], c[1][0], c[1][1], 20, 128);
         }
     }
 
     // block partitioning
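
All the adapt_prob() calls in this file share one shape: nudge the stored probability toward the empirical frequency of the counted events, weighting the nudge by how much data was seen, capped at max_count. A sketch of that rule under those assumptions (a paraphrase of the usual libvpx-style adaptation, not code quoted from this commit):

    static void adapt_prob(uint8_t *p, unsigned ct0, unsigned ct1,
                           int max_count, int update_factor)
    {
        unsigned ct = ct0 + ct1;
        int old, emp, factor;

        if (!ct)
            return;              // nothing observed: keep the prior
        old = *p;
        // empirical probability of the '0' event, clipped to [1, 255]
        emp = av_clip(((int64_t)ct0 * 256 + (ct >> 1)) / ct, 1, 255);
        // adaptation weight grows with the sample count, capped at max_count
        factor = update_factor * FFMIN(ct, (unsigned)max_count) / max_count;
        *p = old + (((emp - old) * factor + 128) >> 8);
    }
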
@@ -118,16 +121,17 @@ void ff_vp9_adapt_probs(VP9Context *s)
     // tx size
     if (s->s.h.txfmmode == TX_SWITCHABLE) {
         for (i = 0; i < 2; i++) {
             unsigned *c16 = s->counts.tx16p[i], *c32 = s->counts.tx32p[i];
 
-            adapt_prob(&p->tx8p[i], s->counts.tx8p[i][0], s->counts.tx8p[i][1], 20, 128);
+            adapt_prob(&p->tx8p[i], s->counts.tx8p[i][0],
+                       s->counts.tx8p[i][1], 20, 128);
             adapt_prob(&p->tx16p[i][0], c16[0], c16[1] + c16[2], 20, 128);
             adapt_prob(&p->tx16p[i][1], c16[1], c16[2], 20, 128);
             adapt_prob(&p->tx32p[i][0], c32[0], c32[1] + c32[2] + c32[3], 20, 128);
             adapt_prob(&p->tx32p[i][1], c32[1], c32[2] + c32[3], 20, 128);
             adapt_prob(&p->tx32p[i][2], c32[2], c32[3], 20, 128);
         }
     }
 
     // interpolation filter
@@ -169,9 +173,10 @@ void ff_vp9_adapt_probs(VP9Context *s)
         adapt_prob(&p->mv_comp[i].sign, s->counts.mv_comp[i].sign[0],
                    s->counts.mv_comp[i].sign[1], 20, 128);
 
         pp = p->mv_comp[i].classes;
         c = s->counts.mv_comp[i].classes;
-        sum = c[1] + c[2] + c[3] + c[4] + c[5] + c[6] + c[7] + c[8] + c[9] + c[10];
+        sum = c[1] + c[2] + c[3] + c[4] + c[5] +
+              c[6] + c[7] + c[8] + c[9] + c[10];
         adapt_prob(&pp[0], c[0], sum, 20, 128);
         sum -= c[1];
         adapt_prob(&pp[1], c[1], sum, 20, 128);
@@ -196,19 +201,20 @@ void ff_vp9_adapt_probs(VP9Context *s)
         for (j = 0; j < 2; j++) {
             pp = p->mv_comp[i].class0_fp[j];
             c = s->counts.mv_comp[i].class0_fp[j];
 
             adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
             adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
             adapt_prob(&pp[2], c[2], c[3], 20, 128);
         }
 
         pp = p->mv_comp[i].fp;
         c = s->counts.mv_comp[i].fp;
 
         adapt_prob(&pp[0], c[0], c[1] + c[2] + c[3], 20, 128);
         adapt_prob(&pp[1], c[1], c[2] + c[3], 20, 128);
         adapt_prob(&pp[2], c[2], c[3], 20, 128);
 
         if (s->s.h.highprecisionmvs) {
-            adapt_prob(&p->mv_comp[i].class0_hp, s->counts.mv_comp[i].class0_hp[0],
+            adapt_prob(&p->mv_comp[i].class0_hp,
+                       s->counts.mv_comp[i].class0_hp[0],
                        s->counts.mv_comp[i].class0_hp[1], 20, 128);
             adapt_prob(&p->mv_comp[i].hp, s->counts.mv_comp[i].hp[0],
                        s->counts.mv_comp[i].hp[1], 20, 128);
@@ -226,12 +232,13 @@ void ff_vp9_adapt_probs(VP9Context *s)
             adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
             sum -= c[VERT_PRED];
             adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
             s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
             sum -= s2;
             adapt_prob(&pp[3], s2, sum, 20, 128);
             s2 -= c[HOR_PRED];
             adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
-            adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
+            adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED],
+                       20, 128);
             sum -= c[DIAG_DOWN_LEFT_PRED];
             adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
             sum -= c[VERT_LEFT_PRED];
@@ -250,12 +257,13 @@ void ff_vp9_adapt_probs(VP9Context *s)
         adapt_prob(&pp[1], c[TM_VP8_PRED], sum, 20, 128);
         sum -= c[VERT_PRED];
         adapt_prob(&pp[2], c[VERT_PRED], sum, 20, 128);
         s2 = c[HOR_PRED] + c[DIAG_DOWN_RIGHT_PRED] + c[VERT_RIGHT_PRED];
         sum -= s2;
         adapt_prob(&pp[3], s2, sum, 20, 128);
         s2 -= c[HOR_PRED];
         adapt_prob(&pp[4], c[HOR_PRED], s2, 20, 128);
-        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED], 20, 128);
+        adapt_prob(&pp[5], c[DIAG_DOWN_RIGHT_PRED], c[VERT_RIGHT_PRED],
+                   20, 128);
         sum -= c[DIAG_DOWN_LEFT_PRED];
        adapt_prob(&pp[6], c[DIAG_DOWN_LEFT_PRED], sum, 20, 128);
         sum -= c[VERT_LEFT_PRED];

...