granicus.if.org Git - libvpx/commitdiff
Vectorize motion vector probability models
author     Jingning Han <jingning@google.com>
           Thu, 18 Feb 2016 19:57:44 +0000 (11:57 -0800)
committer  Jingning Han <jingning@google.com>
           Sat, 20 Feb 2016 00:20:41 +0000 (16:00 -0800)
This commit converts the scalar motion vector probability model into a
vector of per-context models, one per NMV_CONTEXTS entry, to allow more
precise probability estimation later.

Change-Id: I7008d047ecc1b9577aa8442b4db2df312be869dc
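
For illustration, a small self-contained sketch of the idea behind the change:
the frame context keeps an array of NMV_CONTEXTS motion vector models instead
of a single one, and a context function (mirroring the new vp10_nmv_ctx() added
in vp10/common/mvref_common.h below) selects which model a block uses. The
structs, the REF_CAT_LEVEL value, and main() are stripped-down stand-ins for
illustration only, not libvpx code.

    #include <stdio.h>
    #include <stdlib.h>

    #define NMV_CONTEXTS 2
    #define REF_CAT_LEVEL 160   /* assumed value, for illustration only */

    typedef struct { int row, col; } MV;
    typedef struct { MV this_mv, pred_mv; int weight; } CANDIDATE_MV;
    typedef struct { unsigned char joints[4]; } nmv_context;  /* stand-in model */

    typedef struct {
      nmv_context nmvc[NMV_CONTEXTS];   /* was: nmv_context nmvc; */
    } frame_context;

    /* Mirrors the new vp10_nmv_ctx(): pick model 1 when the top reference-MV
     * candidate is strong and its residual against the predicted MV is small,
     * otherwise fall back to model 0. */
    static int nmv_ctx(int ref_mv_count, const CANDIDATE_MV *stack) {
      if (ref_mv_count > 0 && stack[0].weight > REF_CAT_LEVEL &&
          abs(stack[0].this_mv.row - stack[0].pred_mv.row) < 8 &&
          abs(stack[0].this_mv.col - stack[0].pred_mv.col) < 8)
        return 1;
      return 0;
    }

    int main(void) {
      frame_context fc = { .nmvc = { { {1, 2, 3, 4} }, { {5, 6, 7, 8} } } };
      CANDIDATE_MV stack[1] = { { { 12, -4 }, { 10, -3 }, 200 } };
      int ctx = nmv_ctx(1, stack);   /* selects model 1 here */
      printf("selected nmv context %d, first joint prob %d\n",
             ctx, fc.nmvc[ctx].joints[0]);
      return 0;
    }

The decoder and encoder then index cm->fc->nmvc[] and counts->mv[] with the
selected context instead of using a single global model, as the hunks below show.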

12 files changed:
vp10/common/entropymode.h
vp10/common/entropymv.c
vp10/common/enums.h
vp10/common/mvref_common.h
vp10/common/thread_common.c
vp10/decoder/decodeframe.c
vp10/decoder/decodemv.c
vp10/encoder/bitstream.c
vp10/encoder/encodemv.c
vp10/encoder/encodemv.h
vp10/encoder/rd.c
vp10/encoder/rdopt.c

diff --git a/vp10/common/entropymode.h b/vp10/common/entropymode.h
index d581a08cd697a188b39a2a78f7b95af77e73a6b3..05918ee00f4a95468bfb1a21d5dc0679f89cea67 100644
--- a/vp10/common/entropymode.h
+++ b/vp10/common/entropymode.h
@@ -93,7 +93,11 @@ typedef struct frame_contexts {
   vpx_prob txfm_partition_prob[TXFM_PARTITION_CONTEXTS];
 #endif
   vpx_prob skip_probs[SKIP_CONTEXTS];
+#if CONFIG_REF_MV
+  nmv_context nmvc[NMV_CONTEXTS];
+#else
   nmv_context nmvc;
+#endif
   int initialized;
 #if CONFIG_EXT_TX
   vpx_prob inter_ext_tx_prob[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES - 1];
@@ -150,7 +154,11 @@ typedef struct FRAME_COUNTS {
   unsigned int txfm_partition[TXFM_PARTITION_CONTEXTS][2];
 #endif
   unsigned int skip[SKIP_CONTEXTS][2];
+#if CONFIG_REF_MV
+  nmv_context_counts mv[NMV_CONTEXTS];
+#else
   nmv_context_counts mv;
+#endif
 #if CONFIG_EXT_TX
   unsigned int inter_ext_tx[EXT_TX_SETS_INTER][EXT_TX_SIZES][TX_TYPES];
   unsigned int intra_ext_tx[EXT_TX_SETS_INTRA][EXT_TX_SIZES][INTRA_MODES]
diff --git a/vp10/common/entropymv.c b/vp10/common/entropymv.c
index a9946ee15228924d89d384c75deb79cb628f8834..5be979759731bf2ea0ab6e1d90465e97f6f0f239 100644
--- a/vp10/common/entropymv.c
+++ b/vp10/common/entropymv.c
@@ -185,7 +185,45 @@ void vp10_inc_mv(const MV *mv, nmv_context_counts *counts, const int usehp) {
 
 void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
   int i, j;
+#if CONFIG_REF_MV
+  int idx;
+  for (idx = 0; idx < NMV_CONTEXTS; ++idx) {
+    nmv_context *fc = &cm->fc->nmvc[idx];
+    const nmv_context *pre_fc =
+        &cm->frame_contexts[cm->frame_context_idx].nmvc[idx];
+    const nmv_context_counts *counts = &cm->counts.mv[idx];
 
+    vpx_tree_merge_probs(vp10_mv_joint_tree, pre_fc->joints, counts->joints,
+                         fc->joints);
+
+    for (i = 0; i < 2; ++i) {
+      nmv_component *comp = &fc->comps[i];
+      const nmv_component *pre_comp = &pre_fc->comps[i];
+      const nmv_component_counts *c = &counts->comps[i];
+
+      comp->sign = mode_mv_merge_probs(pre_comp->sign, c->sign);
+      vpx_tree_merge_probs(vp10_mv_class_tree, pre_comp->classes, c->classes,
+                           comp->classes);
+      vpx_tree_merge_probs(vp10_mv_class0_tree, pre_comp->class0, c->class0,
+                           comp->class0);
+
+      for (j = 0; j < MV_OFFSET_BITS; ++j)
+        comp->bits[j] = mode_mv_merge_probs(pre_comp->bits[j], c->bits[j]);
+
+      for (j = 0; j < CLASS0_SIZE; ++j)
+        vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->class0_fp[j],
+                             c->class0_fp[j], comp->class0_fp[j]);
+
+      vpx_tree_merge_probs(vp10_mv_fp_tree, pre_comp->fp, c->fp, comp->fp);
+
+      if (allow_hp) {
+        comp->class0_hp = mode_mv_merge_probs(pre_comp->class0_hp,
+                                              c->class0_hp);
+        comp->hp = mode_mv_merge_probs(pre_comp->hp, c->hp);
+      }
+    }
+  }
+#else
   nmv_context *fc = &cm->fc->nmvc;
   const nmv_context *pre_fc = &cm->frame_contexts[cm->frame_context_idx].nmvc;
   const nmv_context_counts *counts = &cm->counts.mv;
@@ -218,8 +256,15 @@ void vp10_adapt_mv_probs(VP10_COMMON *cm, int allow_hp) {
       comp->hp = mode_mv_merge_probs(pre_comp->hp, c->hp);
     }
   }
+#endif
 }
 
 void vp10_init_mv_probs(VP10_COMMON *cm) {
+#if CONFIG_REF_MV
+  int i;
+  for (i = 0; i < NMV_CONTEXTS; ++i)
+    cm->fc->nmvc[i] = default_nmv_context;
+#else
   cm->fc->nmvc = default_nmv_context;
+#endif
 }
diff --git a/vp10/common/enums.h b/vp10/common/enums.h
index af6ef3657a1bf49f2c3d8f69415924ac886d9acd..e1f316855094bdf0a6105789b27fff0dc0bcb570 100644
--- a/vp10/common/enums.h
+++ b/vp10/common/enums.h
@@ -227,6 +227,8 @@ typedef enum {
 #define SKIP_CONTEXTS 3
 
 #if CONFIG_REF_MV
+#define NMV_CONTEXTS 2
+
 #define NEWMV_MODE_CONTEXTS  7
 #define ZEROMV_MODE_CONTEXTS 2
 #define REFMV_MODE_CONTEXTS  9
diff --git a/vp10/common/mvref_common.h b/vp10/common/mvref_common.h
index 62d85da0092dc29c824ab2485baca929605b1d00..b3a8bebe3119775768d8e95eec5dd14f40c1dbe9 100644
--- a/vp10/common/mvref_common.h
+++ b/vp10/common/mvref_common.h
@@ -228,6 +228,22 @@ static INLINE void lower_mv_precision(MV *mv, int allow_hp) {
 }
 
 #if CONFIG_REF_MV
+static INLINE int vp10_nmv_ctx(const uint8_t ref_mv_count,
+                               const CANDIDATE_MV *ref_mv_stack) {
+#if CONFIG_EXT_INTER
+  return 0;
+#endif
+  if (ref_mv_stack[0].weight > REF_CAT_LEVEL &&
+      ref_mv_count > 0) {
+    if (abs(ref_mv_stack[0].this_mv.as_mv.row -
+            ref_mv_stack[0].pred_mv.as_mv.row) < 8 &&
+        abs(ref_mv_stack[0].this_mv.as_mv.col -
+            ref_mv_stack[0].pred_mv.as_mv.col) < 8)
+      return 1;
+  }
+  return 0;
+}
+
 static INLINE int8_t vp10_ref_frame_type(const MV_REFERENCE_FRAME *const rf) {
   if (rf[1] > INTRA_FRAME)
     return rf[0] + ALTREF_FRAME;
diff --git a/vp10/common/thread_common.c b/vp10/common/thread_common.c
index 6e959edbd351df7988c7d7051c3a780d52334436..7f04a097e0bece201b77cc2f9804f6725281aa12 100644
--- a/vp10/common/thread_common.c
+++ b/vp10/common/thread_common.c
@@ -447,6 +447,39 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
     for (j = 0; j < 2; j++)
       cm->counts.skip[i][j] += counts->skip[i][j];
 
+#if CONFIG_REF_MV
+  for (m = 0; m < NMV_CONTEXTS; ++m) {
+    for (i = 0; i < MV_JOINTS; i++)
+      cm->counts.mv[m].joints[i] += counts->mv[m].joints[i];
+
+    for (k = 0; k < 2; k++) {
+      nmv_component_counts *comps = &cm->counts.mv[m].comps[k];
+      nmv_component_counts *comps_t = &counts->mv[m].comps[k];
+
+      for (i = 0; i < 2; i++) {
+        comps->sign[i] += comps_t->sign[i];
+        comps->class0_hp[i] += comps_t->class0_hp[i];
+        comps->hp[i] += comps_t->hp[i];
+      }
+
+      for (i = 0; i < MV_CLASSES; i++)
+        comps->classes[i] += comps_t->classes[i];
+
+      for (i = 0; i < CLASS0_SIZE; i++) {
+        comps->class0[i] += comps_t->class0[i];
+        for (j = 0; j < MV_FP_SIZE; j++)
+          comps->class0_fp[i][j] += comps_t->class0_fp[i][j];
+      }
+
+      for (i = 0; i < MV_OFFSET_BITS; i++)
+        for (j = 0; j < 2; j++)
+          comps->bits[i][j] += comps_t->bits[i][j];
+
+      for (i = 0; i < MV_FP_SIZE; i++)
+        comps->fp[i] += comps_t->fp[i];
+    }
+  }
+#else
   for (i = 0; i < MV_JOINTS; i++)
     cm->counts.mv.joints[i] += counts->mv.joints[i];
 
@@ -476,6 +509,7 @@ void vp10_accumulate_frame_counts(VP10_COMMON *cm, FRAME_COUNTS *counts,
     for (i = 0; i < MV_FP_SIZE; i++)
       comps->fp[i] += comps_t->fp[i];
   }
+#endif
 
 #if CONFIG_EXT_TX
   for (i = 0; i < EXT_TX_SIZES; i++) {
diff --git a/vp10/decoder/decodeframe.c b/vp10/decoder/decodeframe.c
index a003d7ac40f73fecd02155532068477a949879b0..a976356c04ff042587f6ddca1b7e6c32035f7619 100644
--- a/vp10/decoder/decodeframe.c
+++ b/vp10/decoder/decodeframe.c
@@ -3566,7 +3566,9 @@ static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
         for (i = 0; i < INTRA_MODES - 1; ++i)
           vp10_diff_update_prob(&r, &cm->kf_y_prob[k][j][i]);
   } else {
+#if !CONFIG_REF_MV
     nmv_context *const nmvc = &fc->nmvc;
+#endif
 
     read_inter_mode_probs(fc, &r);
 
@@ -3593,7 +3595,12 @@ static int read_compressed_header(VP10Decoder *pbi, const uint8_t *data,
       for (i = 0; i < INTRA_MODES - 1; ++i)
         vp10_diff_update_prob(&r, &fc->y_mode_prob[j][i]);
 
+#if CONFIG_REF_MV
+    for (i = 0; i < NMV_CONTEXTS; ++i)
+      read_mv_probs(&fc->nmvc[i], cm->allow_high_precision_mv, &r);
+#else
     read_mv_probs(nmvc, cm->allow_high_precision_mv, &r);
+#endif
     read_ext_tx_probs(fc, &r);
 #if CONFIG_SUPERTX
     if (!xd->lossless[0])
@@ -3647,7 +3654,14 @@ static void debug_check_frame_counts(const VP10_COMMON *const cm) {
                  sizeof(cm->counts.comp_ref)));
   assert(!memcmp(&cm->counts.tx, &zero_counts.tx, sizeof(cm->counts.tx)));
   assert(!memcmp(cm->counts.skip, zero_counts.skip, sizeof(cm->counts.skip)));
+#if CONFIG_REF_MV
+  assert(!memcmp(&cm->counts.mv[0], &zero_counts.mv[0],
+                 sizeof(cm->counts.mv[0])));
+  assert(!memcmp(&cm->counts.mv[1], &zero_counts.mv[1],
+                 sizeof(cm->counts.mv[0])));
+#else
   assert(!memcmp(&cm->counts.mv, &zero_counts.mv, sizeof(cm->counts.mv)));
+#endif
   assert(!memcmp(cm->counts.inter_ext_tx, zero_counts.inter_ext_tx,
                  sizeof(cm->counts.inter_ext_tx)));
   assert(!memcmp(cm->counts.intra_ext_tx, zero_counts.intra_ext_tx,
diff --git a/vp10/decoder/decodemv.c b/vp10/decoder/decodemv.c
index f924a6d7bf24a96b574055f7ea16935cb6935966..7a8b47f1a67c82f399c15bf4a546a192be57ac4e 100644
--- a/vp10/decoder/decodemv.c
+++ b/vp10/decoder/decodemv.c
@@ -912,10 +912,21 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
 #endif  // CONFIG_EXT_INTER
     case NEWMV: {
       FRAME_COUNTS *counts = xd->counts;
+#if !CONFIG_REF_MV
       nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
+#endif
       for (i = 0; i < 1 + is_compound; ++i) {
+#if CONFIG_REF_MV
+        int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
+                                   xd->ref_mv_stack[mbmi->ref_frame[i]]);
+        nmv_context_counts *const mv_counts =
+            counts ? &counts->mv[nmv_ctx] : NULL;
+        read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc->nmvc[nmv_ctx],
+                mv_counts, allow_hp);
+#else
         read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc->nmvc, mv_counts,
                 allow_hp);
+#endif
         ret = ret && is_mv_valid(&mv[i].as_mv);
 
 #if CONFIG_REF_MV
@@ -963,11 +974,23 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
 #if CONFIG_EXT_INTER
     case NEW_NEWMV: {
       FRAME_COUNTS *counts = xd->counts;
+#if !CONFIG_REF_MV
       nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
+#endif
       assert(is_compound);
       for (i = 0; i < 2; ++i) {
+#if CONFIG_REF_MV
+        int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[i]],
+                                   xd->ref_mv_stack[mbmi->ref_frame[i]]);
+        nmv_context_counts *const mv_counts =
+            counts ? &counts->mv[nmv_ctx] : NULL;
+        read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv,
+                &cm->fc->nmvc[nmv_ctx], mv_counts,
+                allow_hp);
+#else
         read_mv(r, &mv[i].as_mv, &ref_mv[i].as_mv, &cm->fc->nmvc, mv_counts,
                 allow_hp);
+#endif
         ret = ret && is_mv_valid(&mv[i].as_mv);
       }
       break;
@@ -992,40 +1015,83 @@ static INLINE int assign_mv(VP10_COMMON *cm, MACROBLOCKD *xd,
     }
     case NEW_NEARESTMV: {
       FRAME_COUNTS *counts = xd->counts;
+#if CONFIG_REF_MV
+      int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
+                                 xd->ref_mv_stack[mbmi->ref_frame[0]]);
+      nmv_context_counts *const mv_counts =
+          counts ? &counts->mv[nmv_ctx] : NULL;
+      read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv,
+              &cm->fc->nmvc[nmv_ctx], mv_counts,
+              allow_hp);
+#else
       nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
-      assert(is_compound);
       read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, &cm->fc->nmvc, mv_counts,
               allow_hp);
+#endif
+      assert(is_compound);
       ret = ret && is_mv_valid(&mv[0].as_mv);
       mv[1].as_int = nearest_mv[1].as_int;
       break;
     }
     case NEAREST_NEWMV: {
       FRAME_COUNTS *counts = xd->counts;
+#if CONFIG_REF_MV
+      int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
+                                 xd->ref_mv_stack[mbmi->ref_frame[1]]);
+      nmv_context_counts *const mv_counts =
+          counts ? &counts->mv[nmv_ctx] : NULL;
+      mv[0].as_int = nearest_mv[0].as_int;
+      read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv,
+              &cm->fc->nmvc[nmv_ctx], mv_counts,
+              allow_hp);
+#else
       nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
-      assert(is_compound);
       mv[0].as_int = nearest_mv[0].as_int;
       read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv, &cm->fc->nmvc, mv_counts,
               allow_hp);
+#endif
+      assert(is_compound);
       ret = ret && is_mv_valid(&mv[1].as_mv);
       break;
     }
     case NEAR_NEWMV: {
       FRAME_COUNTS *counts = xd->counts;
+#if CONFIG_REF_MV
+      int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[1]],
+                                 xd->ref_mv_stack[mbmi->ref_frame[1]]);
+      nmv_context_counts *const mv_counts =
+          counts ? &counts->mv[nmv_ctx] : NULL;
+      mv[0].as_int = near_mv[0].as_int;
+      read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv,
+              &cm->fc->nmvc[nmv_ctx], mv_counts,
+              allow_hp);
+#else
       nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
-      assert(is_compound);
       mv[0].as_int = near_mv[0].as_int;
       read_mv(r, &mv[1].as_mv, &ref_mv[1].as_mv, &cm->fc->nmvc, mv_counts,
               allow_hp);
+#endif
+      assert(is_compound);
+
       ret = ret && is_mv_valid(&mv[1].as_mv);
       break;
     }
     case NEW_NEARMV: {
       FRAME_COUNTS *counts = xd->counts;
+#if CONFIG_REF_MV
+      int nmv_ctx = vp10_nmv_ctx(xd->ref_mv_count[mbmi->ref_frame[0]],
+                                 xd->ref_mv_stack[mbmi->ref_frame[0]]);
+      nmv_context_counts *const mv_counts =
+          counts ? &counts->mv[nmv_ctx] : NULL;
+      read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv,
+              &cm->fc->nmvc[nmv_ctx], mv_counts,
+              allow_hp);
+#else
       nmv_context_counts *const mv_counts = counts ? &counts->mv : NULL;
-      assert(is_compound);
       read_mv(r, &mv[0].as_mv, &ref_mv[0].as_mv, &cm->fc->nmvc, mv_counts,
               allow_hp);
+#endif
+      assert(is_compound);
       ret = ret && is_mv_valid(&mv[0].as_mv);
       mv[1].as_int = near_mv[1].as_int;
       break;
@@ -1342,6 +1408,10 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
       }
     }
 
+#if CONFIG_REF_MV
+    mbmi->pred_mv[0].as_int = mi->bmi[3].pred_mv[0].as_int;
+    mbmi->pred_mv[1].as_int = mi->bmi[3].pred_mv[1].as_int;
+#endif
     mi->mbmi.mode = b_mode;
 
     mbmi->mv[0].as_int = mi->bmi[3].as_mv[0].as_int;
diff --git a/vp10/encoder/bitstream.c b/vp10/encoder/bitstream.c
index 930f73fb964f47fc305ff251650c150d1d5bda61..f06b96086a53c9233bc22a2137235eb204ef1c69 100644
--- a/vp10/encoder/bitstream.c
+++ b/vp10/encoder/bitstream.c
@@ -882,7 +882,9 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
 #endif
                                 vpx_writer *w) {
   VP10_COMMON *const cm = &cpi->common;
+#if !CONFIG_REF_MV
   const nmv_context *nmvc = &cm->fc->nmvc;
+#endif
   const MACROBLOCK *x = &cpi->td.mb;
   const MACROBLOCKD *xd = &x->e_mbd;
   const struct segmentation *const seg = &cm->seg;
@@ -1070,20 +1072,39 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
 #else
           if (b_mode == NEWMV) {
 #endif  // CONFIG_EXT_INTER
-            for (ref = 0; ref < 1 + is_compound; ++ref)
+            for (ref = 0; ref < 1 + is_compound; ++ref) {
+#if CONFIG_REF_MV
+              int nmv_ctx =
+                  vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
+              const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
+#endif
               vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[ref].as_mv,
 #if CONFIG_EXT_INTER
                              &mi->bmi[j].ref_mv[ref].as_mv,
 #else
                              &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv,
 #endif  // CONFIG_EXT_INTER
-                            nmvc, allow_hp);
+                             nmvc, allow_hp);
+            }
           }
 #if CONFIG_EXT_INTER
           else if (b_mode == NEAREST_NEWMV || b_mode == NEAR_NEWMV) {
+#if CONFIG_REF_MV
+            int nmv_ctx =
+                vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+                             mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+            const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
+#endif
             vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[1].as_mv,
                            &mi->bmi[j].ref_mv[1].as_mv, nmvc, allow_hp);
           } else if (b_mode == NEW_NEARESTMV || b_mode == NEW_NEARMV) {
+#if CONFIG_REF_MV
+            int nmv_ctx =
+                vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+                             mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+            const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
+#endif
             vp10_encode_mv(cpi, w, &mi->bmi[j].as_mv[0].as_mv,
                            &mi->bmi[j].ref_mv[0].as_mv, nmvc, allow_hp);
           }
@@ -1096,9 +1117,14 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
 #else
       if (mode == NEWMV) {
 #endif  // CONFIG_EXT_INTER
-        for (ref = 0; ref < 1 + is_compound; ++ref)
+        for (ref = 0; ref < 1 + is_compound; ++ref) {
+#if CONFIG_REF_MV
+              int nmv_ctx =
+                  vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[ref]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[ref]]);
+              const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
+#endif
 #if CONFIG_EXT_INTER
-        {
           if (mode == NEWFROMNEARMV)
             vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
                            &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][1].as_mv,
@@ -1108,13 +1134,25 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
           vp10_encode_mv(cpi, w, &mbmi->mv[ref].as_mv,
                         &mbmi_ext->ref_mvs[mbmi->ref_frame[ref]][0].as_mv, nmvc,
                         allow_hp);
-#if CONFIG_EXT_INTER
         }
+#if CONFIG_EXT_INTER
       } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
+#if CONFIG_REF_MV
+            int nmv_ctx =
+                vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+                             mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+            const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
+#endif
         vp10_encode_mv(cpi, w, &mbmi->mv[1].as_mv,
                        &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv, nmvc,
                        allow_hp);
       } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
+#if CONFIG_REF_MV
+            int nmv_ctx =
+                vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+                             mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+            const nmv_context *nmvc = &cm->fc->nmvc[nmv_ctx];
+#endif
         vp10_encode_mv(cpi, w, &mbmi->mv[0].as_mv,
                        &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv, nmvc,
                        allow_hp);
@@ -2449,7 +2487,11 @@ static size_t write_compressed_header(VP10_COMP *cpi, uint8_t *data) {
                        counts->y_mode[i], INTRA_MODES, &header_bc);
 
     vp10_write_nmv_probs(cm, cm->allow_high_precision_mv, &header_bc,
-                        &counts->mv);
+#if CONFIG_REF_MV
+                         counts->mv);
+#else
+                         &counts->mv);
+#endif
     update_ext_tx_probs(cm, &header_bc);
 #if CONFIG_SUPERTX
     if (!xd->lossless[0])
diff --git a/vp10/encoder/encodemv.c b/vp10/encoder/encodemv.c
index 4124c4ac28c2c83e64318e0af6bf71b756d0d6f1..61429aaa2c18bb36d3ddb9fefd2ff71e2dc8d92e 100644
--- a/vp10/encoder/encodemv.c
+++ b/vp10/encoder/encodemv.c
@@ -157,9 +157,49 @@ static void write_mv_update(const vpx_tree_index *tree,
 }
 
 void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vpx_writer *w,
-                         nmv_context_counts *const counts) {
+                          nmv_context_counts *const nmv_counts) {
   int i, j;
+#if CONFIG_REF_MV
+  int nmv_ctx = 0;
+  for (nmv_ctx = 0; nmv_ctx < NMV_CONTEXTS; ++nmv_ctx) {
+    nmv_context *const mvc = &cm->fc->nmvc[nmv_ctx];
+    nmv_context_counts *const counts = &nmv_counts[nmv_ctx];
+    write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints,
+                    MV_JOINTS, w);
+
+    for (i = 0; i < 2; ++i) {
+      nmv_component *comp = &mvc->comps[i];
+      nmv_component_counts *comp_counts = &counts->comps[i];
+
+      update_mv(w, comp_counts->sign, &comp->sign, MV_UPDATE_PROB);
+      write_mv_update(vp10_mv_class_tree, comp->classes, comp_counts->classes,
+                      MV_CLASSES, w);
+      write_mv_update(vp10_mv_class0_tree, comp->class0, comp_counts->class0,
+                      CLASS0_SIZE, w);
+      for (j = 0; j < MV_OFFSET_BITS; ++j)
+        update_mv(w, comp_counts->bits[j], &comp->bits[j], MV_UPDATE_PROB);
+    }
+
+    for (i = 0; i < 2; ++i) {
+      for (j = 0; j < CLASS0_SIZE; ++j)
+        write_mv_update(vp10_mv_fp_tree, mvc->comps[i].class0_fp[j],
+                        counts->comps[i].class0_fp[j], MV_FP_SIZE, w);
+
+      write_mv_update(vp10_mv_fp_tree, mvc->comps[i].fp, counts->comps[i].fp,
+                      MV_FP_SIZE, w);
+    }
+
+    if (usehp) {
+      for (i = 0; i < 2; ++i) {
+        update_mv(w, counts->comps[i].class0_hp, &mvc->comps[i].class0_hp,
+                  MV_UPDATE_PROB);
+        update_mv(w, counts->comps[i].hp, &mvc->comps[i].hp, MV_UPDATE_PROB);
+      }
+    }
+  }
+#else
   nmv_context *const mvc = &cm->fc->nmvc;
+  nmv_context_counts *const counts = nmv_counts;
 
   write_mv_update(vp10_mv_joint_tree, mvc->joints, counts->joints, MV_JOINTS, w);
 
@@ -192,6 +232,7 @@ void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vpx_writer *w,
       update_mv(w, counts->comps[i].hp, &mvc->comps[i].hp, MV_UPDATE_PROB);
     }
   }
+#endif
 }
 
 void vp10_encode_mv(VP10_COMP* cpi, vpx_writer* w,
@@ -227,27 +268,45 @@ void vp10_build_nmv_cost_table(int *mvjoint, int *mvcost[2],
 #if CONFIG_EXT_INTER
 static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
                     const int_mv mvs[2],
-                    nmv_context_counts *counts) {
+                    nmv_context_counts *nmv_counts) {
   int i;
   PREDICTION_MODE mode = mbmi->mode;
   int mv_idx = (mode == NEWFROMNEARMV);
+#if !CONFIG_REF_MV
+  nmv_context_counts *counts = nmv_counts;
+#endif
 
   if (mode == NEWMV || mode == NEWFROMNEARMV || mode == NEW_NEWMV) {
     for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
       const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][mv_idx].as_mv;
       const MV diff = {mvs[i].as_mv.row - ref->row,
                        mvs[i].as_mv.col - ref->col};
+#if CONFIG_REF_MV
+    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
+    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+#endif
       vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
     }
   } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
     const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[1]][0].as_mv;
     const MV diff = {mvs[1].as_mv.row - ref->row,
                      mvs[1].as_mv.col - ref->col};
+#if CONFIG_REF_MV
+    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+#endif
     vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
   } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
     const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[0]][0].as_mv;
     const MV diff = {mvs[0].as_mv.row - ref->row,
                      mvs[0].as_mv.col - ref->col};
+#if CONFIG_REF_MV
+    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+#endif
     vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
   }
 }
@@ -255,36 +314,67 @@ static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
 static void inc_mvs_sub8x8(const MODE_INFO *mi,
                            int block,
                            const int_mv mvs[2],
-                           nmv_context_counts *counts) {
+#if CONFIG_REF_MV
+                           const MB_MODE_INFO_EXT *mbmi_ext,
+#endif
+                           nmv_context_counts *nmv_counts) {
   int i;
   PREDICTION_MODE mode = mi->bmi[block].as_mode;
+#if CONFIG_REF_MV
+  const MB_MODE_INFO *mbmi = &mi->mbmi;
+#else
+  nmv_context_counts *counts = nmv_counts;
+#endif
 
   if (mode == NEWMV || mode == NEWFROMNEARMV || mode == NEW_NEWMV) {
     for (i = 0; i < 1 + has_second_ref(&mi->mbmi); ++i) {
       const MV *ref = &mi->bmi[block].ref_mv[i].as_mv;
       const MV diff = {mvs[i].as_mv.row - ref->row,
                        mvs[i].as_mv.col - ref->col};
+#if CONFIG_REF_MV
+    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
+    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+#endif
       vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
     }
   } else if (mode == NEAREST_NEWMV || mode == NEAR_NEWMV) {
     const MV *ref = &mi->bmi[block].ref_mv[1].as_mv;
     const MV diff = {mvs[1].as_mv.row - ref->row,
                      mvs[1].as_mv.col - ref->col};
+#if CONFIG_REF_MV
+    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[1]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[1]]);
+    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+#endif
     vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
   } else if (mode == NEW_NEARESTMV || mode == NEW_NEARMV) {
     const MV *ref = &mi->bmi[block].ref_mv[0].as_mv;
     const MV diff = {mvs[0].as_mv.row - ref->row,
                      mvs[0].as_mv.col - ref->col};
+#if CONFIG_REF_MV
+    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[0]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[0]]);
+    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+#endif
     vp10_inc_mv(&diff, counts, vp10_use_mv_hp(ref));
   }
 }
 #else
 static void inc_mvs(const MB_MODE_INFO *mbmi, const MB_MODE_INFO_EXT *mbmi_ext,
                     const int_mv mvs[2],
-                    nmv_context_counts *counts) {
+                    nmv_context_counts *nmv_counts) {
   int i;
+#if !CONFIG_REF_MV
+  nmv_context_counts *counts = nmv_counts;
+#endif
 
   for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
+#if CONFIG_REF_MV
+    int nmv_ctx = vp10_nmv_ctx(mbmi_ext->ref_mv_count[mbmi->ref_frame[i]],
+                               mbmi_ext->ref_mv_stack[mbmi->ref_frame[i]]);
+    nmv_context_counts *counts = &nmv_counts[nmv_ctx];
+#endif
     const MV *ref = &mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_mv;
     const MV diff = {mvs[i].as_mv.row - ref->row,
                      mvs[i].as_mv.col - ref->col};
@@ -310,10 +400,21 @@ void vp10_update_mv_count(ThreadData *td) {
 
 #if CONFIG_EXT_INTER
         if (have_newmv_in_inter_mode(mi->bmi[i].as_mode))
-          inc_mvs_sub8x8(mi, i, mi->bmi[i].as_mv, &td->counts->mv);
+          inc_mvs_sub8x8(mi, i, mi->bmi[i].as_mv,
+#if CONFIG_REF_MV
+                         mbmi_ext,
+                         td->counts->mv);
+#else
+                         &td->counts->mv);
+#endif
 #else
         if (mi->bmi[i].as_mode == NEWMV)
-          inc_mvs(mbmi, mbmi_ext, mi->bmi[i].as_mv, &td->counts->mv);
+          inc_mvs(mbmi, mbmi_ext, mi->bmi[i].as_mv,
+#if CONFIG_REF_MV
+                  td->counts->mv);
+#else
+                  &td->counts->mv);
+#endif
 #endif  // CONFIG_EXT_INTER
       }
     }
@@ -323,7 +424,12 @@ void vp10_update_mv_count(ThreadData *td) {
 #else
     if (mbmi->mode == NEWMV)
 #endif  // CONFIG_EXT_INTER
-      inc_mvs(mbmi, mbmi_ext, mbmi->mv, &td->counts->mv);
+      inc_mvs(mbmi, mbmi_ext, mbmi->mv,
+#if CONFIG_REF_MV
+              td->counts->mv);
+#else
+              &td->counts->mv);
+#endif
   }
 }
 
diff --git a/vp10/encoder/encodemv.h b/vp10/encoder/encodemv.h
index 006f6d7c71ccd1c65368fd9418a539bf75d59e5d..c753d349f8c3b9f7211bcefdaf645203bcb96645 100644
--- a/vp10/encoder/encodemv.h
+++ b/vp10/encoder/encodemv.h
@@ -21,7 +21,7 @@ extern "C" {
 void vp10_entropy_mv_init(void);
 
 void vp10_write_nmv_probs(VP10_COMMON *cm, int usehp, vpx_writer *w,
-                         nmv_context_counts *const counts);
+                          nmv_context_counts *const counts);
 
 void vp10_encode_mv(VP10_COMP *cpi, vpx_writer* w, const MV* mv, const MV* ref,
                    const nmv_context* mvctx, int usehp);
diff --git a/vp10/encoder/rd.c b/vp10/encoder/rd.c
index bf73064ba48d2092237caa4d0d5b8d699e06d80b..b75f849e719449fd68237973f4558f5c1a2aad88 100644
--- a/vp10/encoder/rd.c
+++ b/vp10/encoder/rd.c
@@ -361,10 +361,20 @@ void vp10_initialize_rd_consts(VP10_COMP *cpi) {
   fill_mode_costs(cpi);
 
   if (!frame_is_intra_only(cm)) {
+#if CONFIG_REF_MV
+    int nmv_ctx = 0;
+    vp10_build_nmv_cost_table(x->nmvjointcost,
+                             cm->allow_high_precision_mv ? x->nmvcost_hp
+                                                         : x->nmvcost,
+                             &cm->fc->nmvc[nmv_ctx],
+                             cm->allow_high_precision_mv);
+#else
     vp10_build_nmv_cost_table(x->nmvjointcost,
                              cm->allow_high_precision_mv ? x->nmvcost_hp
                                                          : x->nmvcost,
                              &cm->fc->nmvc, cm->allow_high_precision_mv);
+#endif
+
 #if CONFIG_REF_MV
     for (i = 0; i < NEWMV_MODE_CONTEXTS; ++i) {
       cpi->newmv_mode_cost[i][0] = vp10_cost_bit(cm->fc->newmv_prob[i], 0);
diff --git a/vp10/encoder/rdopt.c b/vp10/encoder/rdopt.c
index 8f6c4c73ad34c6ab75be9f0cc38fa3a3488383a5..c6879cf2091086fa51a0d6f0ae8468aa4ba55873 100644
--- a/vp10/encoder/rdopt.c
+++ b/vp10/encoder/rdopt.c
@@ -5277,13 +5277,6 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
     if (mv_check_bounds(x, &cur_mv[i].as_mv))
       return INT64_MAX;
     mbmi->mv[i].as_int = cur_mv[i].as_int;
-
-#if CONFIG_REF_MV
-    if (this_mode != NEWMV)
-      mbmi->pred_mv[i].as_int = mbmi->mv[i].as_int;
-    else
-      mbmi->pred_mv[i].as_int = mbmi_ext->ref_mvs[refs[i]][0].as_int;
-#endif
   }
 
 #if CONFIG_REF_MV
@@ -7182,6 +7175,15 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
   *mbmi = best_mbmode;
   x->skip |= best_skip2;
 
+#if CONFIG_REF_MV
+  for (i = 0; i < 1 + has_second_ref(mbmi); ++i) {
+    if (mbmi->mode != NEWMV)
+      mbmi->pred_mv[i].as_int = mbmi->mv[i].as_int;
+    else
+      mbmi->pred_mv[i].as_int = mbmi_ext->ref_mvs[mbmi->ref_frame[i]][0].as_int;
+  }
+#endif
+
   for (i = 0; i < REFERENCE_MODES; ++i) {
     if (best_pred_rd[i] == INT64_MAX)
       best_pred_diff[i] = INT_MIN;
@@ -8108,6 +8110,10 @@ void vp10_rd_pick_inter_mode_sub8x8(struct VP10_COMP *cpi,
 
     mbmi->mv[0].as_int = xd->mi[0]->bmi[3].as_mv[0].as_int;
     mbmi->mv[1].as_int = xd->mi[0]->bmi[3].as_mv[1].as_int;
+#if CONFIG_REF_MV
+    mbmi->pred_mv[0].as_int = xd->mi[0]->bmi[3].pred_mv[0].as_int;
+    mbmi->pred_mv[1].as_int = xd->mi[0]->bmi[3].pred_mv[1].as_int;
+#endif
   }
 
   for (i = 0; i < REFERENCE_MODES; ++i) {