#define vp10_cost_bit(prob, bit) vp10_cost_zero((bit) ? 256 - (prob) \
                                                      : (prob))
+// Cost of coding an n-bit literal, using 128 (i.e. 50%) probability
+// for each bit.
+#define vp10_cost_literal(n) ((n) * (1 << VP9_PROB_COST_SHIFT))
+
static INLINE unsigned int cost_branch256(const unsigned int ct[2],
                                          vpx_prob p) {
  return ct[0] * vp10_cost_zero(p) + ct[1] * vp10_cost_one(p);
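
As a sanity check on the macro's scale, here is a minimal standalone sketch
(not libvpx code) that models the ideal cost table as -log2(p/256) in fixed
point with PROB_COST_SHIFT fractional bits; VP9_PROB_COST_SHIFT == 9 is
assumed. A bit coded at probability 128 (50%) costs exactly one whole bit,
i.e. 1 << VP9_PROB_COST_SHIFT, so an n-bit literal costs n times that, which
is what vp10_cost_literal(n) returns.

#include <math.h>
#include <stdio.h>

#define PROB_COST_SHIFT 9 /* assumed value of VP9_PROB_COST_SHIFT */

/* Ideal cost of coding a zero with P(zero) = prob / 256, in units of
 * 1 / (1 << PROB_COST_SHIFT) bits -- the model behind the cost tables. */
static int ideal_cost_zero(int prob) {
  return (int)lrint(-log2(prob / 256.0) * (1 << PROB_COST_SHIFT));
}

int main(void) {
  int n;
  for (n = 1; n <= 8; ++n) {
    /* At prob == 128 both sides agree: n << PROB_COST_SHIFT. */
    printf("%d-bit literal: %d (vp10_cost_literal would give %d)\n", n,
           n * ideal_cost_zero(128), n * (1 << PROB_COST_SHIFT));
  }
  return 0;
}
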
          best_rd_nowedge < 3 * ref_best_rd) {
        mbmi->use_wedge_interinter = 1;
-       rs = (1 + get_wedge_bits_lookup[bsize]) * 256 +
+       rs = vp10_cost_literal(1 + get_wedge_bits_lookup[bsize]) +
             vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
        wedge_types = (1 << get_wedge_bits_lookup[bsize]);
        if (have_newmv_in_inter_mode(this_mode)) {
        }
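
To make the rate change concrete, a small illustrative program (wedge_bits = 4
is a made-up value; the real one comes from get_wedge_bits_lookup[bsize], and
VP9_PROB_COST_SHIFT == 9 is assumed): the wedge index plus sign take 1 + 4 = 5
raw bits, and the old expression charged them at 256 apiece -- only half a bit
each on the 9-bit cost scale -- while vp10_cost_literal charges the full
1 << VP9_PROB_COST_SHIFT per bit, on top of the entropy-coded enable flag
from vp10_cost_bit().

#include <stdio.h>

#define PROB_COST_SHIFT 9 /* assumed VP9_PROB_COST_SHIFT */
#define cost_literal(n) ((n) * (1 << PROB_COST_SHIFT)) /* mirrors vp10_cost_literal */

int main(void) {
  const int wedge_bits = 4; /* illustrative stand-in for get_wedge_bits_lookup[bsize] */
  printf("old rate: %d, new rate: %d\n",
         (1 + wedge_bits) * 256,       /* 1280: undercounts the literal bits */
         cost_literal(1 + wedge_bits)  /* 2560: five full bits */);
  return 0;
}
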
        mbmi->interinter_wedge_sign = best_wedge_index & 1;
        mbmi->interinter_wedge_index = best_wedge_index >> 1;
-       mbmi->interinter_wedge_sign = best_wedge_index & 1;
        vp10_build_wedge_inter_predictor_from_buf(xd, bsize,
                                                  0, 0, mi_row, mi_col,
                                                  preds0, strides,
      tmp_rd = VPXMIN(best_rd_wedge, best_rd_nowedge);
      if (mbmi->use_wedge_interinter)
-       *compmode_wedge_cost = (1 + get_wedge_bits_lookup[bsize]) * 256 +
+       *compmode_wedge_cost =
+           vp10_cost_literal(1 + get_wedge_bits_lookup[bsize]) +
            vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
      else
        *compmode_wedge_cost =
        mbmi->use_wedge_interintra = 1;
        wedge_types = (1 << get_wedge_bits_lookup[bsize]);
-       rwedge = get_wedge_bits_lookup[bsize] * 256 +
+       rwedge = vp10_cost_literal(get_wedge_bits_lookup[bsize]) +
                 vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
        for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
          mbmi->interintra_wedge_index = wedge_index;
      *compmode_interintra_cost += vp10_cost_bit(
          cm->fc->wedge_interintra_prob[bsize], mbmi->use_wedge_interintra);
      if (mbmi->use_wedge_interintra) {
-       *compmode_interintra_cost += get_wedge_bits_lookup[bsize] * 256;
+       *compmode_interintra_cost +=
+           vp10_cost_literal(get_wedge_bits_lookup[bsize]);
      }
    }
  } else if (is_interintra_allowed(mbmi)) {