Cost wedge sign/index properly in rdopt.
author    Geza Lore <gezalore@gmail.com>
          Wed, 11 May 2016 12:40:05 +0000 (13:40 +0100)
committer Debargha Mukherjee <debargha@google.com>
          Wed, 11 May 2016 18:59:10 +0000 (11:59 -0700)
Lowres improves by about 0.1%

lowres: -2.164 BDRATE

Change-Id: I393bbb92700bfbb8763ace424f4edc2d672a74b4

vp10/encoder/cost.h
vp10/encoder/rdopt.c
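Background for the change below (not part of the patch): the encoder's rate values are expressed in units of 1/512 of a bit, since the cost tables are scaled by VP9_PROB_COST_SHIFT = 9 (vpx_dsp/prob.h). The old code charged 256 units per raw wedge bit, i.e. only half a bit, which under-costed the wedge sign and index. A minimal standalone sketch of the arithmetic, assuming only that VP9_PROB_COST_SHIFT is 9:

/* Standalone illustration of the cost-unit fix; mirrors the new
 * vp10_cost_literal() macro added in cost.h below. */
#include <assert.h>

#define VP9_PROB_COST_SHIFT 9  /* as in vpx_dsp/prob.h */
#define vp10_cost_literal(n) ((n) * (1 << VP9_PROB_COST_SHIFT))

int main(void) {
  const int wedge_bits = 4;  /* e.g. a 4-bit wedge index */
  /* Sign bit + index bits, each coded with p = 128 (50%): one full bit
   * each, i.e. 512 units per bit. */
  assert(vp10_cost_literal(1 + wedge_bits) == (1 + wedge_bits) * 512);
  /* The old expression charged only 256 units per bit, half the true rate. */
  assert((1 + wedge_bits) * 256 == vp10_cost_literal(1 + wedge_bits) / 2);
  return 0;
}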

diff --git a/vp10/encoder/cost.h b/vp10/encoder/cost.h
index 431e0c4a5982d76b7edddaaeb35a911480988230..bfd0be08d64f397c7b0c4b95ea3494cf583c4df8 100644
--- a/vp10/encoder/cost.h
+++ b/vp10/encoder/cost.h
@@ -33,6 +33,10 @@ extern const uint16_t vp10_prob_cost[256];
 #define vp10_cost_bit(prob, bit) vp10_cost_zero((bit) ? 256 - (prob) \
                                                     : (prob))
 
+// Cost of coding an n bit literal, using 128 (i.e. 50%) probability
+// for each bit.
+#define vp10_cost_literal(n) ((n) * (1 << VP9_PROB_COST_SHIFT))
+
 static INLINE unsigned int cost_branch256(const unsigned int ct[2],
                                           vpx_prob p) {
   return ct[0] * vp10_cost_zero(p) + ct[1] * vp10_cost_one(p);
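As a usage note (illustrative only; wedge_interinter_signal_cost is a hypothetical helper, not part of the patch), the rdopt.c hunks below combine the new macro with the entropy-coded "use wedge" flag roughly as follows:

static INLINE int wedge_interinter_signal_cost(vpx_prob use_wedge_prob,
                                               int wedge_index_bits) {
  /* Entropy-coded flag that a wedge is used, then the sign and index
   * sent as a (1 + wedge_index_bits)-bit literal at 50% per bit. */
  return vp10_cost_bit(use_wedge_prob, 1) +
         vp10_cost_literal(1 + wedge_index_bits);
}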
diff --git a/vp10/encoder/rdopt.c b/vp10/encoder/rdopt.c
index 6d7c1a8fe964db447b585abc1f4ed9fc7b0f1558..c111b56bc2262d5d8524d87ea82884dbabaa15d6 100644
--- a/vp10/encoder/rdopt.c
+++ b/vp10/encoder/rdopt.c
@@ -7049,7 +7049,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
         best_rd_nowedge < 3 * ref_best_rd) {
 
       mbmi->use_wedge_interinter = 1;
-      rs = (1 + get_wedge_bits_lookup[bsize]) * 256 +
+      rs = vp10_cost_literal(1 + get_wedge_bits_lookup[bsize]) +
           vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
       wedge_types = (1 << get_wedge_bits_lookup[bsize]);
       if (have_newmv_in_inter_mode(this_mode)) {
@@ -7188,7 +7188,6 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
         }
         mbmi->interinter_wedge_sign = best_wedge_index & 1;
         mbmi->interinter_wedge_index = best_wedge_index >> 1;
-        mbmi->interinter_wedge_sign = best_wedge_index & 1;
         vp10_build_wedge_inter_predictor_from_buf(xd, bsize,
                                                   0, 0, mi_row, mi_col,
                                                   preds0, strides,
@@ -7217,7 +7216,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
     tmp_rd = VPXMIN(best_rd_wedge, best_rd_nowedge);
 
     if (mbmi->use_wedge_interinter)
-      *compmode_wedge_cost = (1 + get_wedge_bits_lookup[bsize]) * 256 +
+      *compmode_wedge_cost =
+          vp10_cost_literal(1 + get_wedge_bits_lookup[bsize]) +
           vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
     else
       *compmode_wedge_cost =
@@ -7303,7 +7303,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
 
         mbmi->use_wedge_interintra = 1;
         wedge_types = (1 << get_wedge_bits_lookup[bsize]);
-        rwedge = get_wedge_bits_lookup[bsize] * 256 +
+        rwedge = vp10_cost_literal(get_wedge_bits_lookup[bsize]) +
             vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
         for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
           mbmi->interintra_wedge_index = wedge_index;
@@ -7389,7 +7389,8 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
       *compmode_interintra_cost += vp10_cost_bit(
           cm->fc->wedge_interintra_prob[bsize], mbmi->use_wedge_interintra);
       if (mbmi->use_wedge_interintra) {
-        *compmode_interintra_cost += get_wedge_bits_lookup[bsize] * 256;
+        *compmode_interintra_cost +=
+            vp10_cost_literal(get_wedge_bits_lookup[bsize]);
       }
     }
   } else if (is_interintra_allowed(mbmi)) {