granicus.if.org Git - libvpx/commitdiff
Refactoring and uv fix for wedge
authorDebargha Mukherjee <debargha@google.com>
Mon, 2 May 2016 20:35:02 +0000 (13:35 -0700)
committerDebargha Mukherjee <debargha@google.com>
Tue, 3 May 2016 15:02:08 +0000 (08:02 -0700)
lowres: -1.72%

Change-Id: I4c883097caac72fab8e01945454579891617145e

vp10/common/blockd.h
vp10/common/reconinter.c
vp10/common/reconinter.h
vp10/decoder/decodeframe.c
vp10/decoder/decodemv.c
vp10/encoder/bitstream.c
vp10/encoder/encodeframe.c
vp10/encoder/rdopt.c
vp10/encoder/speed_features.c
vp10/encoder/speed_features.h

index 377d19921d5c91ecc4ffcbe1f104a1614c46c7b7..bc2c417fb2a20ec3b3d16d36c335ee523411d1eb 100644 (file)
@@ -211,7 +211,6 @@ typedef struct {
 
 #if CONFIG_EXT_INTER
   INTERINTRA_MODE interintra_mode;
-  INTERINTRA_MODE interintra_uv_mode;
   // TODO(debargha): Consolidate these flags
   int use_wedge_interintra;
   int interintra_wedge_index;
index 69857eac7d69546d516309a7eb5e8d447959695c..c0a1256cb16fe4ff0b2a8c606ef5f17f6a357f5a 100644 (file)
@@ -226,13 +226,26 @@ static const int *get_wedge_params_lookup[BLOCK_SIZES] = {
 #endif  // CONFIG_EXT_PARTITION
 };
 
-static const uint8_t *get_wedge_mask_inplace(const int *a,
+static const int *get_wedge_params(int wedge_index,
+                                   BLOCK_SIZE sb_type) {
+  const int *a = NULL;
+  if (wedge_index != WEDGE_NONE) {
+    return get_wedge_params_lookup[sb_type] + WEDGE_PARMS * wedge_index;
+  }
+  return a;
+}
+
+static const uint8_t *get_wedge_mask_inplace(int wedge_index,
                                              int neg,
-                                             int h, int w) {
+                                             BLOCK_SIZE sb_type) {
   const uint8_t *master;
-  const int woff = (a[3] * w) >> 2;
-  const int hoff = (a[4] * h) >> 2;
+  const int bh = 4 << b_height_log2_lookup[sb_type];
+  const int bw = 4 << b_width_log2_lookup[sb_type];
+  const int *a = get_wedge_params(wedge_index, sb_type);
+  int woff, hoff;
   if (!a) return NULL;
+  woff = (a[3] * bw) >> 2;
+  hoff = (a[4] * bh) >> 2;
   master = (a[0] ?
             wedge_mask_obl[neg][a[1]][a[2]] :
             wedge_mask_str[neg][a[1]]) +
@@ -241,129 +254,200 @@ static const uint8_t *get_wedge_mask_inplace(const int *a,
   return master;
 }
 
-static const int *get_wedge_params(int wedge_index,
-                                   BLOCK_SIZE sb_type) {
-  const int *a = NULL;
-  if (wedge_index != WEDGE_NONE) {
-    return get_wedge_params_lookup[sb_type] + WEDGE_PARMS * wedge_index;
-  }
-  return a;
-}
-
 const uint8_t *vp10_get_soft_mask(int wedge_index,
                                   int wedge_sign,
                                   BLOCK_SIZE sb_type,
-                                  int h, int w) {
-  const int *a = get_wedge_params(wedge_index, sb_type);
-  return get_wedge_mask_inplace(a, wedge_sign, h, w);
+                                  int wedge_offset_x,
+                                  int wedge_offset_y) {
+  const uint8_t *mask =
+      get_wedge_mask_inplace(wedge_index, wedge_sign, sb_type);
+  if (mask)
+    mask -= (wedge_offset_x + wedge_offset_y * MASK_MASTER_STRIDE);
+  return mask;
 }
 
-#if CONFIG_SUPERTX
-const uint8_t *get_soft_mask_extend(int wedge_index,
-                                    int wedge_sign,
-                                    int plane,
-                                    BLOCK_SIZE sb_type,
-                                    int wedge_offset_y,
-                                    int wedge_offset_x) {
-  int subh = (plane ? 2 : 4) << b_height_log2_lookup[sb_type];
-  int subw = (plane ? 2 : 4) << b_width_log2_lookup[sb_type];
-  const int *a = get_wedge_params(wedge_index, sb_type);
-  if (a) {
-    const uint8_t *mask = get_wedge_mask_inplace(a, wedge_sign, subh, subw);
-    mask -= (wedge_offset_x + wedge_offset_y * MASK_MASTER_STRIDE);
-    return mask;
+static void build_masked_compound(uint8_t *dst, int dst_stride,
+                                  uint8_t *dst1, int dst1_stride,
+                                  uint8_t *dst2, int dst2_stride,
+                                  const uint8_t *mask,
+                                  int h, int w, int subh, int subw) {
+  int i, j;
+  if (subw == 0 && subh == 0) {
+    for (i = 0; i < h; ++i)
+      for (j = 0; j < w; ++j) {
+        int m = mask[i * MASK_MASTER_STRIDE + j];
+        dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+                                   dst2[i * dst2_stride + j] *
+                                   ((1 << WEDGE_WEIGHT_BITS) - m) +
+                                   (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+            WEDGE_WEIGHT_BITS;
+      }
+  } else if (subw == 1 && subh == 1) {
+    for (i = 0; i < h; ++i)
+      for (j = 0; j < w; ++j) {
+        int m = (mask[(2 * i) * MASK_MASTER_STRIDE + (2 * j)] +
+                 mask[(2 * i + 1) * MASK_MASTER_STRIDE + (2 * j)] +
+                 mask[(2 * i) * MASK_MASTER_STRIDE + (2 * j + 1)] +
+                 mask[(2 * i + 1) * MASK_MASTER_STRIDE + (2 * j + 1)] + 2) >> 2;
+        dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+                                   dst2[i * dst2_stride + j] *
+                                   ((1 << WEDGE_WEIGHT_BITS) - m) +
+                                   (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+            WEDGE_WEIGHT_BITS;
+      }
+  } else if (subw == 1 && subh == 0) {
+    for (i = 0; i < h; ++i)
+      for (j = 0; j < w; ++j) {
+        int m = (mask[i * MASK_MASTER_STRIDE + (2 * j)] +
+                 mask[i * MASK_MASTER_STRIDE + (2 * j + 1)] + 1) >> 1;
+        dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+                                   dst2[i * dst2_stride + j] *
+                                   ((1 << WEDGE_WEIGHT_BITS) - m) +
+                                   (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+            WEDGE_WEIGHT_BITS;
+      }
   } else {
-    return NULL;
+    for (i = 0; i < h; ++i)
+      for (j = 0; j < w; ++j) {
+        int m = (mask[(2 * i) * MASK_MASTER_STRIDE + j] +
+                 mask[(2 * i + 1) * MASK_MASTER_STRIDE + j] + 1) >> 1;
+        dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+                                   dst2[i * dst2_stride + j] *
+                                   ((1 << WEDGE_WEIGHT_BITS) - m) +
+                                   (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+            WEDGE_WEIGHT_BITS;
+      }
   }
 }
 
-static void build_masked_compound_extend(uint8_t *dst, int dst_stride,
-                                         uint8_t *dst2, int dst2_stride,
-                                         int plane,
-                                         int wedge_index,
-                                         int wedge_sign,
-                                         BLOCK_SIZE sb_type,
-                                         int wedge_offset_y, int wedge_offset_x,
-                                         int h, int w) {
+#if CONFIG_VP9_HIGHBITDEPTH
+static void build_masked_compound_highbd(uint8_t *dst_8, int dst_stride,
+                                         uint8_t *dst1_8, int dst1_stride,
+                                         uint8_t *dst2_8, int dst2_stride,
+                                         const uint8_t *mask,
+                                         int h, int w, int subh, int subw) {
   int i, j;
-  const uint8_t *mask = get_soft_mask_extend(
-     wedge_index, wedge_sign, plane, sb_type, wedge_offset_y, wedge_offset_x);
-  for (i = 0; i < h; ++i)
-    for (j = 0; j < w; ++j) {
-      int m = mask[i * MASK_MASTER_STRIDE + j];
-      dst[i * dst_stride + j] = (dst[i * dst_stride + j] * m +
-                                 dst2[i * dst2_stride + j] *
-                                 ((1 << WEDGE_WEIGHT_BITS) - m) +
-                                 (1 << (WEDGE_WEIGHT_BITS - 1))) >>
-                                 WEDGE_WEIGHT_BITS;
-    }
+  uint16_t *dst = CONVERT_TO_SHORTPTR(dst_8);
+  uint16_t *dst1 = CONVERT_TO_SHORTPTR(dst1_8);
+  uint16_t *dst2 = CONVERT_TO_SHORTPTR(dst2_8);
+  if (subw == 0 && subh == 0) {
+    for (i = 0; i < h; ++i)
+      for (j = 0; j < w; ++j) {
+        int m = mask[i * MASK_MASTER_STRIDE + j];
+        dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+                                   dst2[i * dst2_stride + j] *
+                                   ((1 << WEDGE_WEIGHT_BITS) - m) +
+                                   (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+            WEDGE_WEIGHT_BITS;
+      }
+  } else if (subw == 1 && subh == 1) {
+    for (i = 0; i < h; ++i)
+      for (j = 0; j < w; ++j) {
+        int m = (mask[(2 * i) * MASK_MASTER_STRIDE + (2 * j)] +
+                 mask[(2 * i + 1) * MASK_MASTER_STRIDE + (2 * j)] +
+                 mask[(2 * i) * MASK_MASTER_STRIDE + (2 * j + 1)] +
+                 mask[(2 * i + 1) * MASK_MASTER_STRIDE + (2 * j + 1)] + 2) >> 2;
+        dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+                                   dst2[i * dst2_stride + j] *
+                                   ((1 << WEDGE_WEIGHT_BITS) - m) +
+                                   (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+            WEDGE_WEIGHT_BITS;
+      }
+  } else if (subw == 1 && subh == 0) {
+    for (i = 0; i < h; ++i)
+      for (j = 0; j < w; ++j) {
+        int m = (mask[i * MASK_MASTER_STRIDE + (2 * j)] +
+                 mask[i * MASK_MASTER_STRIDE + (2 * j + 1)] + 1) >> 1;
+        dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+                                   dst2[i * dst2_stride + j] *
+                                   ((1 << WEDGE_WEIGHT_BITS) - m) +
+                                   (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+            WEDGE_WEIGHT_BITS;
+      }
+  } else {
+    for (i = 0; i < h; ++i)
+      for (j = 0; j < w; ++j) {
+        int m = (mask[(2 * i) * MASK_MASTER_STRIDE + j] +
+                 mask[(2 * i + 1) * MASK_MASTER_STRIDE + j] + 1) >> 1;
+        dst[i * dst_stride + j] = (dst1[i * dst1_stride + j] * m +
+                                   dst2[i * dst2_stride + j] *
+                                   ((1 << WEDGE_WEIGHT_BITS) - m) +
+                                   (1 << (WEDGE_WEIGHT_BITS - 1))) >>
+            WEDGE_WEIGHT_BITS;
+      }
+  }
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_SUPERTX
+static void build_masked_compound_wedge_extend(
+    uint8_t *dst, int dst_stride,
+    uint8_t *dst2, int dst2_stride,
+    int wedge_index,
+    int wedge_sign,
+    BLOCK_SIZE sb_type,
+    int wedge_offset_x, int wedge_offset_y,
+    int h, int w) {
+  const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
+  const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
+  const uint8_t *mask = vp10_get_soft_mask(
+     wedge_index, wedge_sign, sb_type, wedge_offset_x, wedge_offset_y);
+  build_masked_compound(dst, dst_stride,
+                        dst, dst_stride, dst2, dst2_stride, mask,
+                        h, w, subh, subw);
 }
 
 #if CONFIG_VP9_HIGHBITDEPTH
-static void build_masked_compound_extend_highbd(
+static void build_masked_compound_wedge_extend_highbd(
     uint8_t *dst_8, int dst_stride,
-    uint8_t *dst2_8, int dst2_stride, int plane,
-    int wedge_index, int wedge_sign, BLOCK_SIZE sb_type,
-    int wedge_offset_y, int wedge_offset_x,
+    uint8_t *dst2_8, int dst2_stride,
+    int wedge_index, int wedge_sign,
+    BLOCK_SIZE sb_type,
+    int wedge_offset_x, int wedge_offset_y,
     int h, int w) {
-  int i, j;
-  const uint8_t *mask = get_soft_mask_extend(
-      wedge_index, wedge_sign, plane, sb_type, wedge_offset_y, wedge_offset_x);
-  uint16_t *dst = CONVERT_TO_SHORTPTR(dst_8);
-  uint16_t *dst2 = CONVERT_TO_SHORTPTR(dst2_8);
-  for (i = 0; i < h; ++i)
-    for (j = 0; j < w; ++j) {
-      int m = mask[i * MASK_MASTER_STRIDE + j];
-      dst[i * dst_stride + j] = (dst[i * dst_stride + j] * m +
-                                 dst2[i * dst2_stride + j] *
-                                 ((1 << WEDGE_WEIGHT_BITS) - m) +
-                                 (1 << (WEDGE_WEIGHT_BITS - 1))) >>
-                                 WEDGE_WEIGHT_BITS;
-    }
+  const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
+  const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
+  const uint8_t *mask = vp10_get_soft_mask(
+      wedge_index, wedge_sign, sb_type, wedge_offset_x, wedge_offset_y);
+  build_masked_compound_highbd(dst_8, dst_stride,
+                               dst_8, dst_stride, dst2_8, dst2_stride, mask,
+                               h, w, subh, subw);
 }
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 
 #else   // CONFIG_SUPERTX
 
-static void build_masked_compound(uint8_t *dst, int dst_stride,
-                                  uint8_t *dst2, int dst2_stride,
-                                  int wedge_index, int wedge_sign,
-                                  BLOCK_SIZE sb_type,
-                                  int h, int w) {
-  int i, j;
+static void build_masked_compound_wedge(uint8_t *dst, int dst_stride,
+                                        uint8_t *dst2, int dst2_stride,
+                                        int wedge_index, int wedge_sign,
+                                        BLOCK_SIZE sb_type,
+                                        int h, int w) {
+  // Derive subsampling from h and w passed in. May be refactored to
+  // pass in subsampling factors directly.
+  const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
+  const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
   const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
-                                           sb_type, h, w);
-  for (i = 0; i < h; ++i)
-    for (j = 0; j < w; ++j) {
-      int m = mask[i * MASK_MASTER_STRIDE + j];
-      dst[i * dst_stride + j] = (dst[i * dst_stride + j] * m +
-                                 dst2[i * dst2_stride + j] *
-                                 ((1 << WEDGE_WEIGHT_BITS) - m) +
-                                 (1 << (WEDGE_WEIGHT_BITS - 1))) >>
-                                 WEDGE_WEIGHT_BITS;
-    }
+                                           sb_type, 0, 0);
+  build_masked_compound(dst, dst_stride,
+                        dst, dst_stride, dst2, dst2_stride, mask,
+                        h, w, subh, subw);
 }
 
 #if CONFIG_VP9_HIGHBITDEPTH
-static void build_masked_compound_highbd(uint8_t *dst_8, int dst_stride,
-                                         uint8_t *dst2_8, int dst2_stride,
-                                         int wedge_index, int wedge_sign,
-                                         BLOCK_SIZE sb_type,
-                                         int h, int w) {
-  int i, j;
+static void build_masked_compound_wedge_highbd(uint8_t *dst_8, int dst_stride,
+                                               uint8_t *dst2_8, int dst2_stride,
+                                               int wedge_index, int wedge_sign,
+                                               BLOCK_SIZE sb_type,
+                                               int h, int w) {
+  // Derive subsampling from h and w passed in. May be refactored to
+  // pass in subsampling factors directly.
+  const int subh = (2 << b_height_log2_lookup[sb_type]) == h;
+  const int subw = (2 << b_width_log2_lookup[sb_type]) == w;
   const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
-                                           sb_type, h, w);
-  uint16_t *dst = CONVERT_TO_SHORTPTR(dst_8);
-  uint16_t *dst2 = CONVERT_TO_SHORTPTR(dst2_8);
-  for (i = 0; i < h; ++i)
-    for (j = 0; j < w; ++j) {
-      int m = mask[i * MASK_MASTER_STRIDE + j];
-      dst[i * dst_stride + j] = (dst[i * dst_stride + j] * m +
-                                 dst2[i * dst2_stride + j] *
-                                 ((1 << WEDGE_WEIGHT_BITS) - m) +
-                                 (1 << (WEDGE_WEIGHT_BITS - 1))) >>
-                                 WEDGE_WEIGHT_BITS;
-    }
+                                           sb_type, 0, 0);
+  build_masked_compound_highbd(dst_8, dst_stride,
+                               dst_8, dst_stride, dst2_8, dst2_stride, mask,
+                               h, w, subh, subw);
 }
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 #endif  // CONFIG_SUPERTX
@@ -380,7 +464,7 @@ void vp10_make_masked_inter_predictor(
     const INTERP_FILTER interp_filter,
     int xs, int ys,
 #if CONFIG_SUPERTX
-    int plane, int wedge_offset_x, int wedge_offset_y,
+    int wedge_offset_x, int wedge_offset_y,
 #endif  // CONFIG_SUPERTX
     const MACROBLOCKD *xd) {
   const MODE_INFO *mi = xd->mi[0];
@@ -394,28 +478,28 @@ void vp10_make_masked_inter_predictor(
                             interp_filter, xs, ys, xd);
 #if CONFIG_SUPERTX
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
-    build_masked_compound_extend_highbd(
-        dst, dst_stride, tmp_dst, MAX_SB_SIZE, plane,
+    build_masked_compound_wedge_extend_highbd(
+        dst, dst_stride, tmp_dst, MAX_SB_SIZE,
         mi->mbmi.interinter_wedge_index,
         mi->mbmi.interinter_wedge_sign,
-        mi->mbmi.sb_type,
-        wedge_offset_y, wedge_offset_x, h, w);
+        mi->mbmi.sb_type, plane,
+        wedge_offset_x, wedge_offset_y, h, w);
   else
-    build_masked_compound_extend(
-        dst, dst_stride, tmp_dst, MAX_SB_SIZE, plane,
+    build_masked_compound_wedge_extend(
+        dst, dst_stride, tmp_dst, MAX_SB_SIZE,
         mi->mbmi.interinter_wedge_index,
         mi->mbmi.interinter_wedge_sign,
-        mi->mbmi.sb_type,
-        wedge_offset_y, wedge_offset_x, h, w);
+        mi->mbmi.sb_type, plane,
+        wedge_offset_x, wedge_offset_y, h, w);
 #else
   if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
-    build_masked_compound_highbd(
+    build_masked_compound_wedge_highbd(
         dst, dst_stride, tmp_dst, MAX_SB_SIZE,
         mi->mbmi.interinter_wedge_index,
         mi->mbmi.interinter_wedge_sign,
         mi->mbmi.sb_type, h, w);
   else
-    build_masked_compound(
+    build_masked_compound_wedge(
         dst, dst_stride, tmp_dst, MAX_SB_SIZE,
         mi->mbmi.interinter_wedge_index,
         mi->mbmi.interinter_wedge_sign,
@@ -427,14 +511,14 @@ void vp10_make_masked_inter_predictor(
                             subpel_x, subpel_y, sf, w, h, 0,
                             interp_filter, xs, ys, xd);
 #if CONFIG_SUPERTX
-  build_masked_compound_extend(
-      dst, dst_stride, tmp_dst, MAX_SB_SIZE, plane,
+  build_masked_compound_wedge_extend(
+      dst, dst_stride, tmp_dst, MAX_SB_SIZE,
       mi->mbmi.interinter_wedge_index,
       mi->mbmi.interinter_wedge_sign,
       mi->mbmi.sb_type,
-      wedge_offset_y, wedge_offset_x, h, w);
+      wedge_offset_x, wedge_offset_y, h, w);
 #else
-  build_masked_compound(
+  build_masked_compound_wedge(
       dst, dst_stride, tmp_dst, MAX_SB_SIZE,
       mi->mbmi.interinter_wedge_index,
       mi->mbmi.interinter_wedge_sign,
@@ -558,7 +642,7 @@ void build_inter_predictors(MACROBLOCKD *xd, int plane,
           subpel_x, subpel_y, sf, w, h,
           interp_filter, xs, ys,
 #if CONFIG_SUPERTX
-          plane, wedge_offset_x, wedge_offset_y,
+          wedge_offset_x, wedge_offset_y,
 #endif  // CONFIG_SUPERTX
           xd);
     else
@@ -810,10 +894,11 @@ static void generate_1dmask(int length, uint8_t *mask, int plane) {
 void vp10_build_masked_inter_predictor_complex(
     MACROBLOCKD *xd,
     uint8_t *dst, int dst_stride, uint8_t *dst2, int dst2_stride,
-    const struct macroblockd_plane *pd, int mi_row, int mi_col,
+    int mi_row, int mi_col,
     int mi_row_ori, int mi_col_ori, BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
     PARTITION_TYPE partition, int plane) {
   int i, j;
+  const struct macroblockd_plane *pd = &xd->plane[plane];
   uint8_t mask[MAX_TX_SIZE];
   int top_w = 4 << b_width_log2_lookup[top_bsize];
   int top_h = 4 << b_height_log2_lookup[top_bsize];
@@ -1002,8 +1087,8 @@ void vp10_build_inter_predictors_sb_sub8x8_extend(
                            block, bw, bh,
                            0, 0, bw, bh,
 #if CONFIG_EXT_INTER
-                           wedge_offset_x >> (xd->plane[plane].subsampling_x),
-                           wedge_offset_y >> (xd->plane[plane].subsampling_y),
+                           wedge_offset_x,
+                           wedge_offset_y,
 #endif  // CONFIG_SUPERTX
                            mi_x, mi_y);
   }
@@ -1053,8 +1138,8 @@ void vp10_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
 #endif  // CONFIG_OBMC
                y * 2 + x, bw, bh, 4 * x, 4 * y, 4, 4,
 #if CONFIG_EXT_INTER
-               wedge_offset_x >> (xd->plane[plane].subsampling_x),
-               wedge_offset_y >> (xd->plane[plane].subsampling_y),
+               wedge_offset_x,
+               wedge_offset_y,
 #endif  // CONFIG_EXT_INTER
                mi_x, mi_y);
     } else {
@@ -1065,8 +1150,8 @@ void vp10_build_inter_predictors_sb_extend(MACROBLOCKD *xd,
 #endif  // CONFIG_OBMC
           0, bw, bh, 0, 0, bw, bh,
 #if CONFIG_EXT_INTER
-          wedge_offset_x >> (xd->plane[plane].subsampling_x),
-          wedge_offset_y >> (xd->plane[plane].subsampling_y),
+          wedge_offset_x,
+          wedge_offset_y,
 #endif  // CONFIG_EXT_INTER
           mi_x, mi_y);
     }
@@ -1561,22 +1646,22 @@ void vp10_build_prediction_by_left_preds(VP10_COMMON *cm,
 #if CONFIG_EXT_INTER
 #if CONFIG_EXT_PARTITION
 static const int ii_weights1d[MAX_SB_SIZE] = {
-  128, 127, 125, 124, 123, 122, 120, 119,
-  118, 117, 116, 115, 113, 112, 111, 110,
-  109, 108, 107, 106, 105, 104, 103, 103,
-  102, 101, 100,  99,  98,  97,  97,  96,
-  95,  94,  94,  93,  92,  91,  91,  90,
-  89,  89,  88,  87,  87,  86,  86,  85,
-  84,  84,  83,  83,  82,  82,  81,  81,
-  80,  80,  79,  79,  78,  78,  77,  77,
-  76,  76,  75,  75,  75,  74,  74,  73,
-  73,  73,  72,  72,  72,  71,  71,  70,
-  70,  70,  69,  69,  69,  69,  68,  68,
-  68,  67,  67,  67,  67,  66,  66,  66,
-  66,  65,  65,  65,  65,  64,  64,  64,
-  64,  63,  63,  63,  63,  63,  62,  62,
-  62,  62,  62,  61,  61,  61,  61,  61,
-  61,  60,  60,  60,  60,  60,  60,  60,
+  102, 100,  97,  95,  92,  90,  88,  86,
+  84,  82,  80,  78,  76,  74,  73,  71,
+  69,  68,  67,  65,  64,  62,  61,  60,
+  59,  58,  57,  55,  54,  53,  52,  52,
+  51,  50,  49,  48,  47,  47,  46,  45,
+  45,  44,  43,  43,  42,  41,  41,  40,
+  40,  39,  39,  38,  38,  38,  37,  37,
+  36,  36,  36,  35,  35,  35,  34,  34,
+  34,  33,  33,  33,  33,  32,  32,  32,
+  32,  32,  31,  31,  31,  31,  31,  30,
+  30,  30,  30,  30,  30,  30,  29,  29,
+  29,  29,  29,  29,  29,  29,  28,  28,
+  28,  28,  28,  28,  28,  28,  28,  28,
+  28,  28,  27,  27,  27,  27,  27,  27,
+  27,  27,  27,  27,  27,  27,  27,  27,
+  27,  27,  27,  27,  27,  27,  27,  27,
 };
 static int ii_size_scales[BLOCK_SIZES] = {
   32, 16, 16, 16, 8, 8, 8, 4, 4, 4, 2, 2, 2, 1, 1, 1
@@ -1618,18 +1703,15 @@ static void combine_interintra(INTERINTRA_MODE mode,
   int i, j;
 
   if (use_wedge_interintra) {
-    if (is_interinter_wedge_used(bsize)) {
+    if (is_interintra_wedge_used(bsize)) {
       const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
-                                               bsize, bh, bw);
-      for (i = 0; i < bh; ++i) {
-        for (j = 0; j < bw; ++j) {
-          int m = mask[i * MASK_MASTER_STRIDE + j];
-          comppred[i * compstride + j] =
-              (intrapred[i * intrastride + j] * m +
-               interpred[i * interstride + j] * ((1 << WEDGE_WEIGHT_BITS) - m) +
-               (1 << (WEDGE_WEIGHT_BITS - 1))) >> WEDGE_WEIGHT_BITS;
-        }
-      }
+                                               bsize, 0, 0);
+      const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
+      const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
+      build_masked_compound(comppred, compstride,
+                            intrapred, intrastride,
+                            interpred, interstride, mask,
+                            bh, bw, subh, subw);
     }
     return;
   }
@@ -1752,18 +1834,15 @@ static void combine_interintra_highbd(INTERINTRA_MODE mode,
   (void) bd;
 
   if (use_wedge_interintra) {
-    if (is_interinter_wedge_used(bsize)) {
+    if (is_interintra_wedge_used(bsize)) {
       const uint8_t *mask = vp10_get_soft_mask(wedge_index, wedge_sign,
-                                               bsize, bh, bw);
-      for (i = 0; i < bh; ++i) {
-        for (j = 0; j < bw; ++j) {
-          int m = mask[i * MASK_MASTER_STRIDE + j];
-          comppred[i * compstride + j] =
-              (intrapred[i * intrastride + j] * m +
-               interpred[i * interstride + j] * ((1 << WEDGE_WEIGHT_BITS) - m) +
-               (1 << (WEDGE_WEIGHT_BITS - 1))) >> WEDGE_WEIGHT_BITS;
-        }
-      }
+                                               bsize, 0, 0);
+      const int subh = 2 * num_4x4_blocks_high_lookup[bsize] == bh;
+      const int subw = 2 * num_4x4_blocks_wide_lookup[bsize] == bw;
+      build_masked_compound(comppred8, compstride,
+                            intrapred8, intrastride,
+                            interpred8, interstride, mask,
+                            bh, bw, subh, subw);
     }
     return;
   }
@@ -2213,42 +2292,42 @@ static void build_wedge_inter_predictor_from_buf(MACROBLOCKD *xd, int plane,
 #if CONFIG_SUPERTX
 #if CONFIG_VP9_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH) {
-        build_masked_compound_extend_highbd(
-            dst, dst_buf->stride, tmp_dst, MAX_SB_SIZE, plane,
+        build_masked_compound_wedge_extend_highbd(
+            dst, dst_buf->stride, tmp_dst, MAX_SB_SIZE,
             mi->mbmi.interinter_wedge_index,
             mi->mbmi.interinter_wedge_sign,
             mi->mbmi.sb_type,
-            wedge_offset_y, wedge_offset_x, h, w);
+            wedge_offset_x, wedge_offset_y, h, w);
       } else {
-        build_masked_compound_extend(
-            dst, dst_buf->stride, tmp_dst, MAX_SB_SIZE, plane,
+        build_masked_compound_wedge_extend(
+            dst, dst_buf->stride, tmp_dst, MAX_SB_SIZE,
             mi->mbmi.interinter_wedge_index,
             mi->mbmi.interinter_wedge_sign,
             mi->mbmi.sb_type,
-            wedge_offset_y, wedge_offset_x, h, w);
+            wedge_offset_x, wedge_offset_y, h, w);
       }
 #else
-      build_masked_compound_extend(dst, dst_buf->stride, tmp_dst,
-                                   MAX_SB_SIZE, plane,
-                                   mi->mbmi.interinter_wedge_index,
-                                   mi->mbmi.interinter_wedge_sign,
-                                   mi->mbmi.sb_type,
-                                   wedge_offset_y, wedge_offset_x, h, w);
+      build_masked_compound_wedge_extend(dst, dst_buf->stride,
+                                         tmp_dst, MAX_SB_SIZE,
+                                         mi->mbmi.interinter_wedge_index,
+                                         mi->mbmi.interinter_wedge_sign,
+                                         mi->mbmi.sb_type,
+                                         wedge_offset_x, wedge_offset_y, h, w);
 #endif  // CONFIG_VP9_HIGHBITDEPTH
 #else   // CONFIG_SUPERTX
 #if CONFIG_VP9_HIGHBITDEPTH
       if (xd->cur_buf->flags & YV12_FLAG_HIGHBITDEPTH)
-        build_masked_compound_highbd(dst, dst_buf->stride, tmp_dst,
-                                     MAX_SB_SIZE,
-                                     mi->mbmi.interinter_wedge_index,
-                                     mi->mbmi.interinter_wedge_sign,
-                                     mi->mbmi.sb_type, h, w);
+        build_masked_compound_wedge_highbd(dst, dst_buf->stride, tmp_dst,
+                                           MAX_SB_SIZE,
+                                           mi->mbmi.interinter_wedge_index,
+                                           mi->mbmi.interinter_wedge_sign,
+                                           mi->mbmi.sb_type, h, w);
       else
 #endif  // CONFIG_VP9_HIGHBITDEPTH
-        build_masked_compound(dst, dst_buf->stride, tmp_dst, MAX_SB_SIZE,
-                              mi->mbmi.interinter_wedge_index,
-                              mi->mbmi.interinter_wedge_sign,
-                              mi->mbmi.sb_type, h, w);
+        build_masked_compound_wedge(dst, dst_buf->stride, tmp_dst, MAX_SB_SIZE,
+                                    mi->mbmi.interinter_wedge_index,
+                                    mi->mbmi.interinter_wedge_sign,
+                                    mi->mbmi.sb_type, h, w);
 #endif  // CONFIG_SUPERTX
     } else {
 #if CONFIG_VP9_HIGHBITDEPTH
index c5b455ef7a0bbe88ab3f80ab5699b2da8c1d81c5..c5e2c3a17e02b0ee5605791799c005a50914e3ec 100644 (file)
@@ -155,7 +155,7 @@ void vp10_make_masked_inter_predictor(
     const INTERP_FILTER interp_filter,
     int xs, int ys,
 #if CONFIG_SUPERTX
-    int plane, int wedge_offset_x, int wedge_offset_y,
+    int wedge_offset_x, int wedge_offset_y,
 #endif  // CONFIG_SUPERTX
     const MACROBLOCKD *xd);
 #endif  // CONFIG_EXT_INTER
@@ -274,8 +274,8 @@ struct macroblockd_plane;
 void vp10_build_masked_inter_predictor_complex(
     MACROBLOCKD *xd,
     uint8_t *dst, int dst_stride, uint8_t *dst2, int dst2_stride,
-    const struct macroblockd_plane *pd, int mi_row, int mi_col,
-    int mi_row_ori, int mi_col_ori, BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
+    int mi_row, int mi_col, int mi_row_ori, int mi_col_ori,
+    BLOCK_SIZE bsize, BLOCK_SIZE top_bsize,
     PARTITION_TYPE partition, int plane);
 #endif  // CONFIG_SUPERTX
 
@@ -412,7 +412,8 @@ void vp10_init_wedge_masks();
 const uint8_t *vp10_get_soft_mask(int wedge_index,
                                   int wedge_sign,
                                   BLOCK_SIZE sb_type,
-                                  int h, int w);
+                                  int wedge_offset_x,
+                                  int wedge_offset_y);
 
 void vp10_build_interintra_predictors(MACROBLOCKD *xd,
                                       uint8_t *ypred,
index 2d6c82b63811882cfec2b0b07a274f60f5cd7947..32c6d574e68b30554639515db4f7710d3866bc79 100644 (file)
@@ -479,10 +479,9 @@ static void extend_and_predict_highbd(const uint8_t *buf_ptr1,
                                       int subpel_x, int subpel_y,
                                       const INTERP_FILTER interp_filter,
                                       const struct scale_factors *sf,
-#if CONFIG_EXT_INTER && CONFIG_SUPERTX
-                                      int plane,
+#if CONFIG_EXT_INTER
                                       int wedge_offset_x, int wedge_offset_y,
-#endif  // CONFIG_EXT_INTER && CONFIG_SUPERTX
+#endif  // CONFIG_EXT_INTER
                                       MACROBLOCKD *xd,
                                       int w, int h, int ref, int xs, int ys) {
   DECLARE_ALIGNED(16, uint16_t,
@@ -505,9 +504,7 @@ static void extend_and_predict_highbd(const uint8_t *buf_ptr1,
         buf_ptr, b_w, dst, dst_buf_stride,
         subpel_x, subpel_y, sf, w, h,
         interp_filter, xs, ys,
-#if CONFIG_SUPERTX
-        plane, wedge_offset_x, wedge_offset_y,
-#endif  // CONFIG_SUPERTX
+        wedge_offset_x, wedge_offset_y,
         xd);
   else
 #endif  // CONFIG_EXT_INTER
@@ -526,10 +523,9 @@ static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
                                int subpel_x, int subpel_y,
                                const INTERP_FILTER interp_filter,
                                const struct scale_factors *sf,
-#if CONFIG_EXT_INTER && CONFIG_SUPERTX
-                               int plane,
+#if CONFIG_EXT_INTER
                                int wedge_offset_x, int wedge_offset_y,
-#endif  // CONFIG_EXT_INTER && CONFIG_SUPERTX
+#endif  // CONFIG_EXT_INTER
                                MACROBLOCKD *xd,
                                int w, int h, int ref, int xs, int ys) {
   DECLARE_ALIGNED(16, uint8_t,
@@ -546,9 +542,7 @@ static void extend_and_predict(const uint8_t *buf_ptr1, int pre_buf_stride,
         buf_ptr, b_w, dst, dst_buf_stride,
         subpel_x, subpel_y, sf, w, h,
         interp_filter, xs, ys,
-#if CONFIG_SUPERTX
-        plane, wedge_offset_x, wedge_offset_y,
-#endif  // CONFIG_SUPERTX
+        wedge_offset_x, wedge_offset_y,
         xd);
   else
 #endif  // CONFIG_EXT_INTER
@@ -565,9 +559,9 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi,
 #endif  // CONFIG_OBMC
                                        int bw, int bh,
                                        int x, int y, int w, int h,
-#if CONFIG_EXT_INTER && CONFIG_SUPERTX
+#if CONFIG_EXT_INTER
                                        int wedge_offset_x, int wedge_offset_y,
-#endif  // CONFIG_EXT_INTER && CONFIG_SUPERTX
+#endif  // CONFIG_EXT_INTER
                                        int mi_x, int mi_y,
                                        const INTERP_FILTER interp_filter,
                                        const struct scale_factors *sf,
@@ -722,9 +716,9 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi,
                                 dst, dst_buf->stride,
                                 subpel_x, subpel_y,
                                 interp_filter, sf,
-#if CONFIG_EXT_INTER && CONFIG_SUPERTX
-                                plane, wedge_offset_x, wedge_offset_y,
-#endif  // CONFIG_EXT_INTER && CONFIG_SUPERTX
+#if CONFIG_EXT_INTER
+                                wedge_offset_x, wedge_offset_y,
+#endif  // CONFIG_EXT_INTER
                                 xd, w, h, ref, xs, ys);
 #else
       extend_and_predict(buf_ptr1, buf_stride, x0, y0, b_w, b_h,
@@ -732,9 +726,9 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi,
                          dst, dst_buf->stride,
                          subpel_x, subpel_y,
                          interp_filter, sf,
-#if CONFIG_EXT_INTER && CONFIG_SUPERTX
-                         plane, wedge_offset_x, wedge_offset_y,
-#endif  // CONFIG_EXT_INTER && CONFIG_SUPERTX
+#if CONFIG_EXT_INTER
+                         wedge_offset_x, wedge_offset_y,
+#endif  // CONFIG_EXT_INTER
                          xd, w, h, ref, xs, ys);
 #endif  // CONFIG_VP9_HIGHBITDEPTH
       return;
@@ -750,25 +744,18 @@ static void dec_build_inter_predictors(VP10Decoder *const pbi,
   }
 #if CONFIG_EXT_INTER
   if (ref && is_interinter_wedge_used(mi->mbmi.sb_type) &&
-      mi->mbmi.use_wedge_interinter) {
+      mi->mbmi.use_wedge_interinter)
     vp10_make_masked_inter_predictor(
         buf_ptr, buf_stride, dst, dst_buf->stride,
         subpel_x, subpel_y, sf, w, h,
         interp_filter, xs, ys,
-#if CONFIG_SUPERTX
-        plane, wedge_offset_x, wedge_offset_y,
-#endif  // CONFIG_SUPERTX
+        wedge_offset_x, wedge_offset_y,
         xd);
-  } else {
+  else
+#endif  // CONFIG_EXT_INTER
     vp10_make_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride,
                               subpel_x, subpel_y, sf, w, h, ref,
                               interp_filter, xs, ys, xd);
-  }
-#else
-  vp10_make_inter_predictor(buf_ptr, buf_stride, dst, dst_buf->stride,
-                            subpel_x, subpel_y, sf, w, h, ref,
-                            interp_filter, xs, ys, xd);
-#endif  // CONFIG_EXT_INTER
 }
 
 static void dec_build_inter_predictors_sb_extend(
@@ -828,8 +815,8 @@ static void dec_build_inter_predictors_sb_extend(
                 n4w_x4, n4h_x4,
                 4 * x, 4 * y, pw, ph,
 #if CONFIG_EXT_INTER
-                wedge_offset_x >> (pd->subsampling_x),
-                wedge_offset_y >> (pd->subsampling_y),
+                wedge_offset_x,
+                wedge_offset_y,
 #endif  // CONFIG_EXT_INTER
                 mi_x, mi_y,
                 interp_filter, sf, pre_buf, dst_buf,
@@ -846,8 +833,8 @@ static void dec_build_inter_predictors_sb_extend(
             n4w_x4, n4h_x4,
             0, 0, n4w_x4, n4h_x4,
 #if CONFIG_EXT_INTER
-            wedge_offset_x >> (pd->subsampling_x),
-            wedge_offset_y >> (pd->subsampling_y),
+            wedge_offset_x,
+            wedge_offset_y,
 #endif  // CONFIG_EXT_INTER
             mi_x, mi_y,
             interp_filter, sf, pre_buf, dst_buf,
@@ -919,8 +906,8 @@ static void dec_build_inter_predictors_sb_sub8x8_extend(
                                  n4w_x4, n4h_x4,
                                  0, 0, n4w_x4, n4h_x4,
 #if CONFIG_EXT_INTER
-                                 wedge_offset_x >> (pd->subsampling_x),
-                                 wedge_offset_y >> (pd->subsampling_y),
+                                 wedge_offset_x,
+                                 wedge_offset_y,
 #endif  // CONFIG_EXT_INTER
                                  mi_x, mi_y,
                                  interp_filter, sf, pre_buf, dst_buf,
@@ -1386,7 +1373,6 @@ static void dec_predict_sb_complex(VP10Decoder *const pbi,
         vp10_build_masked_inter_predictor_complex(xd,
                                                   dst_buf[0], dst_stride[0],
                                                   dst_buf1[0], dst_stride1[0],
-                                                  &xd->plane[0],
                                                   mi_row, mi_col,
                                                   mi_row_top, mi_col_top,
                                                   bsize, top_bsize,
@@ -1427,7 +1413,7 @@ static void dec_predict_sb_complex(VP10Decoder *const pbi,
             xd->plane[i].dst.stride = dst_stride[i];
             vp10_build_masked_inter_predictor_complex(
                 xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
-                &xd->plane[i], mi_row, mi_col, mi_row_top, mi_col_top,
+                mi_row, mi_col, mi_row_top, mi_col_top,
                 bsize, top_bsize, PARTITION_HORZ, i);
           }
         }
@@ -1457,7 +1443,6 @@ static void dec_predict_sb_complex(VP10Decoder *const pbi,
         vp10_build_masked_inter_predictor_complex(xd,
                                                   dst_buf[0], dst_stride[0],
                                                   dst_buf1[0], dst_stride1[0],
-                                                  &xd->plane[0],
                                                   mi_row, mi_col,
                                                   mi_row_top, mi_col_top,
                                                   bsize, top_bsize,
@@ -1494,7 +1479,7 @@ static void dec_predict_sb_complex(VP10Decoder *const pbi,
             xd->plane[i].dst.stride = dst_stride[i];
             vp10_build_masked_inter_predictor_complex(
                 xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
-                &xd->plane[i], mi_row, mi_col, mi_row_top, mi_col_top,
+                mi_row, mi_col, mi_row_top, mi_col_top,
                 bsize, top_bsize, PARTITION_VERT, i);
           }
         }
@@ -1549,7 +1534,6 @@ static void dec_predict_sb_complex(VP10Decoder *const pbi,
                                                       dst_buf[i], dst_stride[i],
                                                       dst_buf1[i],
                                                       dst_stride1[i],
-                                                      &xd->plane[i],
                                                       mi_row, mi_col,
                                                       mi_row_top, mi_col_top,
                                                       bsize, top_bsize,
@@ -1560,7 +1544,6 @@ static void dec_predict_sb_complex(VP10Decoder *const pbi,
                                                         dst_stride2[i],
                                                         dst_buf3[i],
                                                         dst_stride3[i],
-                                                        &xd->plane[i],
                                                         mi_row, mi_col,
                                                         mi_row_top, mi_col_top,
                                                         bsize, top_bsize,
@@ -1570,7 +1553,6 @@ static void dec_predict_sb_complex(VP10Decoder *const pbi,
                                                         dst_stride[i],
                                                         dst_buf2[i],
                                                         dst_stride2[i],
-                                                        &xd->plane[i],
                                                         mi_row, mi_col,
                                                         mi_row_top, mi_col_top,
                                                         bsize, top_bsize,
@@ -1582,7 +1564,6 @@ static void dec_predict_sb_complex(VP10Decoder *const pbi,
                                                       dst_stride[i],
                                                       dst_buf2[i],
                                                       dst_stride2[i],
-                                                      &xd->plane[i],
                                                       mi_row, mi_col,
                                                       mi_row_top, mi_col_top,
                                                       bsize, top_bsize,
@@ -1620,23 +1601,21 @@ static void dec_predict_sb_complex(VP10Decoder *const pbi,
         xd->plane[i].dst.buf = dst_buf[i];
         xd->plane[i].dst.stride = dst_stride[i];
         vp10_build_masked_inter_predictor_complex(xd,
-                                                 dst_buf[i], dst_stride[i],
-                                                 dst_buf1[i], dst_stride1[i],
-                                                 &xd->plane[i],
-                                                 mi_row, mi_col,
-                                                 mi_row_top, mi_col_top,
-                                                 bsize, top_bsize,
-                                                 PARTITION_VERT, i);
+                                                  dst_buf[i], dst_stride[i],
+                                                  dst_buf1[i], dst_stride1[i],
+                                                  mi_row, mi_col,
+                                                  mi_row_top, mi_col_top,
+                                                  bsize, top_bsize,
+                                                  PARTITION_VERT, i);
       }
       for (i = 0; i < MAX_MB_PLANE; i++) {
         vp10_build_masked_inter_predictor_complex(xd,
-                                                 dst_buf[i], dst_stride[i],
-                                                 dst_buf2[i], dst_stride2[i],
-                                                 &xd->plane[i],
-                                                 mi_row, mi_col,
-                                                 mi_row_top, mi_col_top,
-                                                 bsize, top_bsize,
-                                                 PARTITION_HORZ, i);
+                                                  dst_buf[i], dst_stride[i],
+                                                  dst_buf2[i], dst_stride2[i],
+                                                  mi_row, mi_col,
+                                                  mi_row_top, mi_col_top,
+                                                  bsize, top_bsize,
+                                                  PARTITION_HORZ, i);
       }
       break;
     case PARTITION_VERT_A:
@@ -1670,23 +1649,21 @@ static void dec_predict_sb_complex(VP10Decoder *const pbi,
         xd->plane[i].dst.buf = dst_buf[i];
         xd->plane[i].dst.stride = dst_stride[i];
         vp10_build_masked_inter_predictor_complex(xd,
-                                                 dst_buf[i], dst_stride[i],
-                                                 dst_buf1[i], dst_stride1[i],
-                                                 &xd->plane[i],
-                                                 mi_row, mi_col,
-                                                 mi_row_top, mi_col_top,
-                                                 bsize, top_bsize,
-                                                 PARTITION_HORZ, i);
+                                                  dst_buf[i], dst_stride[i],
+                                                  dst_buf1[i], dst_stride1[i],
+                                                  mi_row, mi_col,
+                                                  mi_row_top, mi_col_top,
+                                                  bsize, top_bsize,
+                                                  PARTITION_HORZ, i);
       }
       for (i = 0; i < MAX_MB_PLANE; i++) {
         vp10_build_masked_inter_predictor_complex(xd,
-                                                 dst_buf[i], dst_stride[i],
-                                                 dst_buf2[i], dst_stride2[i],
-                                                 &xd->plane[i],
-                                                 mi_row, mi_col,
-                                                 mi_row_top, mi_col_top,
-                                                 bsize, top_bsize,
-                                                 PARTITION_VERT, i);
+                                                  dst_buf[i], dst_stride[i],
+                                                  dst_buf2[i], dst_stride2[i],
+                                                  mi_row, mi_col,
+                                                  mi_row_top, mi_col_top,
+                                                  bsize, top_bsize,
+                                                  PARTITION_VERT, i);
       }
       break;
     case PARTITION_HORZ_B:
@@ -1717,25 +1694,23 @@ static void dec_predict_sb_complex(VP10Decoder *const pbi,
         xd->plane[i].dst.buf = dst_buf1[i];
         xd->plane[i].dst.stride = dst_stride1[i];
         vp10_build_masked_inter_predictor_complex(xd,
-                                                 dst_buf1[i], dst_stride1[i],
-                                                 dst_buf2[i], dst_stride2[i],
-                                                 &xd->plane[i],
-                                                 mi_row, mi_col,
-                                                 mi_row_top, mi_col_top,
-                                                 bsize, top_bsize,
-                                                 PARTITION_VERT, i);
+                                                  dst_buf1[i], dst_stride1[i],
+                                                  dst_buf2[i], dst_stride2[i],
+                                                  mi_row, mi_col,
+                                                  mi_row_top, mi_col_top,
+                                                  bsize, top_bsize,
+                                                  PARTITION_VERT, i);
       }
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf[i];
         xd->plane[i].dst.stride = dst_stride[i];
         vp10_build_masked_inter_predictor_complex(xd,
-                                                 dst_buf[i], dst_stride[i],
-                                                 dst_buf1[i], dst_stride1[i],
-                                                 &xd->plane[i],
-                                                 mi_row, mi_col,
-                                                 mi_row_top, mi_col_top,
-                                                 bsize, top_bsize,
-                                                 PARTITION_HORZ, i);
+                                                  dst_buf[i], dst_stride[i],
+                                                  dst_buf1[i], dst_stride1[i],
+                                                  mi_row, mi_col,
+                                                  mi_row_top, mi_col_top,
+                                                  bsize, top_bsize,
+                                                  PARTITION_HORZ, i);
       }
       break;
     case PARTITION_VERT_B:
@@ -1766,25 +1741,23 @@ static void dec_predict_sb_complex(VP10Decoder *const pbi,
         xd->plane[i].dst.buf = dst_buf1[i];
         xd->plane[i].dst.stride = dst_stride1[i];
         vp10_build_masked_inter_predictor_complex(xd,
-                                                 dst_buf1[i], dst_stride1[i],
-                                                 dst_buf2[i], dst_stride2[i],
-                                                 &xd->plane[i],
-                                                 mi_row, mi_col,
-                                                 mi_row_top, mi_col_top,
-                                                 bsize, top_bsize,
-                                                 PARTITION_HORZ, i);
+                                                  dst_buf1[i], dst_stride1[i],
+                                                  dst_buf2[i], dst_stride2[i],
+                                                  mi_row, mi_col,
+                                                  mi_row_top, mi_col_top,
+                                                  bsize, top_bsize,
+                                                  PARTITION_HORZ, i);
       }
       for (i = 0; i < MAX_MB_PLANE; i++) {
         xd->plane[i].dst.buf = dst_buf[i];
         xd->plane[i].dst.stride = dst_stride[i];
         vp10_build_masked_inter_predictor_complex(xd,
-                                                 dst_buf[i], dst_stride[i],
-                                                 dst_buf1[i], dst_stride1[i],
-                                                 &xd->plane[i],
-                                                 mi_row, mi_col,
-                                                 mi_row_top, mi_col_top,
-                                                 bsize, top_bsize,
-                                                 PARTITION_VERT, i);
+                                                  dst_buf[i], dst_stride[i],
+                                                  dst_buf1[i], dst_stride1[i],
+                                                  mi_row, mi_col,
+                                                  mi_row_top, mi_col_top,
+                                                  bsize, top_bsize,
+                                                  PARTITION_VERT, i);
       }
       break;
 #endif  // CONFIG_EXT_PARTITION_TYPES
index 9b6aacb214fdcfbe6e76f31c023a1012f899704c..9b20804ef3d83cc292216da32a324309d919087b 100644 (file)
@@ -1551,7 +1551,6 @@ static void read_inter_block_mode_info(VP10Decoder *const pbi,
           read_interintra_mode(cm, xd, r, bsize_group);
       mbmi->ref_frame[1] = INTRA_FRAME;
       mbmi->interintra_mode = interintra_mode;
-      mbmi->interintra_uv_mode = interintra_mode;
 #if CONFIG_EXT_INTRA
       mbmi->ext_intra_mode_info.use_ext_intra_mode[0] = 0;
       mbmi->ext_intra_mode_info.use_ext_intra_mode[1] = 0;
index c8fb2caf0a3514fd5cd507649a63769aea02e63e..15fea988da34439716081d89b80eb31aceb95356 100644 (file)
@@ -1310,7 +1310,6 @@ static void pack_inter_mode_mvs(VP10_COMP *cpi, const MODE_INFO *mi,
         write_interintra_mode(
             w, mbmi->interintra_mode,
             cm->fc->interintra_mode_prob[bsize_group]);
-        assert(mbmi->interintra_mode == mbmi->interintra_uv_mode);
         if (is_interintra_wedge_used(bsize)) {
           vp10_write(w, mbmi->use_wedge_interintra,
                      cm->fc->wedge_interintra_prob[bsize]);
index 30508d4b5ab52b942d17b247795754d7d6d1dc98..6711d46b95733ed1c48abbeead98470ce2b8d05a 100644 (file)
@@ -5489,7 +5489,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
         vp10_build_masked_inter_predictor_complex(xd,
                                                   dst_buf[0], dst_stride[0],
                                                   dst_buf1[0], dst_stride1[0],
-                                                  &xd->plane[0],
                                                   mi_row, mi_col,
                                                   mi_row_top, mi_col_top,
                                                   bsize, top_bsize,
@@ -5529,7 +5528,7 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
             xd->plane[i].dst.stride = dst_stride[i];
             vp10_build_masked_inter_predictor_complex(
                 xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
-                &xd->plane[i], mi_row, mi_col, mi_row_top, mi_col_top,
+                mi_row, mi_col, mi_row_top, mi_col_top,
                 bsize, top_bsize, PARTITION_HORZ, i);
           }
         }
@@ -5561,7 +5560,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
         vp10_build_masked_inter_predictor_complex(xd,
                                                   dst_buf[0], dst_stride[0],
                                                   dst_buf1[0], dst_stride1[0],
-                                                  &xd->plane[0],
                                                   mi_row, mi_col,
                                                   mi_row_top, mi_col_top,
                                                   bsize, top_bsize,
@@ -5600,7 +5598,7 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
             xd->plane[i].dst.stride = dst_stride[i];
             vp10_build_masked_inter_predictor_complex(
                 xd, dst_buf[i], dst_stride[i], dst_buf1[i], dst_stride1[i],
-                &xd->plane[i], mi_row, mi_col, mi_row_top, mi_col_top,
+                mi_row, mi_col, mi_row_top, mi_col_top,
                 bsize, top_bsize, PARTITION_VERT, i);
           }
         }
@@ -5665,7 +5663,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
                                                       dst_stride[i],
                                                       dst_buf1[i],
                                                       dst_stride1[i],
-                                                      &xd->plane[i],
                                                       mi_row, mi_col,
                                                       mi_row_top, mi_col_top,
                                                       bsize, top_bsize,
@@ -5676,7 +5673,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
                                                         dst_stride2[i],
                                                         dst_buf3[i],
                                                         dst_stride3[i],
-                                                        &xd->plane[i],
                                                         mi_row, mi_col,
                                                         mi_row_top, mi_col_top,
                                                         bsize, top_bsize,
@@ -5684,13 +5680,12 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
               vp10_build_masked_inter_predictor_complex(xd,
                                                         dst_buf[i],
                                                         dst_stride[i],
-                                                       dst_buf2[i],
-                                                       dst_stride2[i],
-                                                       &xd->plane[i],
-                                                       mi_row, mi_col,
-                                                       mi_row_top, mi_col_top,
-                                                       bsize, top_bsize,
-                                                       PARTITION_HORZ, i);
+                                                        dst_buf2[i],
+                                                        dst_stride2[i],
+                                                        mi_row, mi_col,
+                                                        mi_row_top, mi_col_top,
+                                                        bsize, top_bsize,
+                                                        PARTITION_HORZ, i);
             }
           } else if (mi_row + hbs < cm->mi_rows && mi_col < cm->mi_cols) {
             vp10_build_masked_inter_predictor_complex(xd,
@@ -5698,7 +5693,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
                                                       dst_stride[i],
                                                       dst_buf2[i],
                                                       dst_stride2[i],
-                                                      &xd->plane[i],
                                                       mi_row, mi_col,
                                                       mi_row_top, mi_col_top,
                                                       bsize, top_bsize,
@@ -5739,7 +5733,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
         vp10_build_masked_inter_predictor_complex(xd,
                                                   dst_buf[i], dst_stride[i],
                                                   dst_buf1[i], dst_stride1[i],
-                                                  &xd->plane[i],
                                                   mi_row, mi_col,
                                                   mi_row_top, mi_col_top,
                                                   bsize, top_bsize,
@@ -5749,7 +5742,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
         vp10_build_masked_inter_predictor_complex(xd,
                                                   dst_buf[i], dst_stride[i],
                                                   dst_buf2[i], dst_stride2[i],
-                                                  &xd->plane[i],
                                                   mi_row, mi_col,
                                                   mi_row_top, mi_col_top,
                                                   bsize, top_bsize,
@@ -5790,7 +5782,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
         vp10_build_masked_inter_predictor_complex(xd,
                                                   dst_buf[i], dst_stride[i],
                                                   dst_buf1[i], dst_stride1[i],
-                                                  &xd->plane[i],
                                                   mi_row, mi_col,
                                                   mi_row_top, mi_col_top,
                                                   bsize, top_bsize,
@@ -5800,7 +5791,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
         vp10_build_masked_inter_predictor_complex(xd,
                                                   dst_buf[i], dst_stride[i],
                                                   dst_buf2[i], dst_stride2[i],
-                                                  &xd->plane[i],
                                                   mi_row, mi_col,
                                                   mi_row_top, mi_col_top,
                                                   bsize, top_bsize,
@@ -5840,7 +5830,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
         vp10_build_masked_inter_predictor_complex(xd,
                                                   dst_buf1[i], dst_stride1[i],
                                                   dst_buf2[i], dst_stride2[i],
-                                                  &xd->plane[i],
                                                   mi_row, mi_col,
                                                   mi_row_top, mi_col_top,
                                                   bsize, top_bsize,
@@ -5852,7 +5841,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
         vp10_build_masked_inter_predictor_complex(xd,
                                                   dst_buf[i], dst_stride[i],
                                                   dst_buf1[i], dst_stride1[i],
-                                                  &xd->plane[i],
                                                   mi_row, mi_col,
                                                   mi_row_top, mi_col_top,
                                                   bsize, top_bsize,
@@ -5893,7 +5881,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
         vp10_build_masked_inter_predictor_complex(xd,
                                                   dst_buf1[i], dst_stride1[i],
                                                   dst_buf2[i], dst_stride2[i],
-                                                  &xd->plane[i],
                                                   mi_row, mi_col,
                                                   mi_row_top, mi_col_top,
                                                   bsize, top_bsize,
@@ -5905,7 +5892,6 @@ static void predict_sb_complex(VP10_COMP *cpi, ThreadData *td,
         vp10_build_masked_inter_predictor_complex(xd,
                                                   dst_buf[i], dst_stride[i],
                                                   dst_buf1[i], dst_stride1[i],
-                                                  &xd->plane[i],
                                                   mi_row, mi_col,
                                                   mi_row_top, mi_col_top,
                                                   bsize, top_bsize,
index f1d203de5547d3b20045362838275a8a599626a8..532b3345fe5e457ab6ddb4539797fab86c1e1fae 100644 (file)
@@ -6118,11 +6118,9 @@ static void do_masked_motion_search_indexed(VP10_COMP *cpi, MACROBLOCK *x,
   MACROBLOCKD *xd = &x->e_mbd;
   MB_MODE_INFO *mbmi = &xd->mi[0]->mbmi;
   BLOCK_SIZE sb_type = mbmi->sb_type;
-  int w = (4 << b_width_log2_lookup[sb_type]);
-  int h = (4 << b_height_log2_lookup[sb_type]);
   const uint8_t *mask;
   const int mask_stride = MASK_MASTER_STRIDE;
-  mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type, h, w);
+  mask = vp10_get_soft_mask(wedge_index, wedge_sign, sb_type, 0, 0);
 
   if (which == 0 || which == 2)
     do_masked_motion_search(cpi, x, mask, mask_stride, bsize,
@@ -6131,7 +6129,7 @@ static void do_masked_motion_search_indexed(VP10_COMP *cpi, MACROBLOCK *x,
 
   if (which == 1 || which == 2) {
     // get the negative mask
-    mask = vp10_get_soft_mask(wedge_index, !wedge_sign, sb_type, h, w);
+    mask = vp10_get_soft_mask(wedge_index, !wedge_sign, sb_type, 0, 0);
     do_masked_motion_search(cpi, x, mask, mask_stride, bsize,
                             mi_row, mi_col, &tmp_mv[1], &rate_mv[1],
                             1, mv_idx[1]);
@@ -6784,136 +6782,143 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
     int wedge_types;
     int tmp_skip_txfm_sb;
     int64_t tmp_skip_sse_sb;
+
     rs = vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 0);
     vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
     model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
                     &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
     rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
     best_rd_nowedge = rd;
-    mbmi->use_wedge_interinter = 1;
-    rs = (1 + get_wedge_bits_lookup[bsize]) * 256 +
-        vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
-    wedge_types = (1 << get_wedge_bits_lookup[bsize]);
-    if (have_newmv_in_inter_mode(this_mode)) {
-      int_mv tmp_mv[2];
-      int rate_mvs[2], tmp_rate_mv = 0;
-      uint8_t pred0[2 * MAX_SB_SQUARE * 3];
-      uint8_t pred1[2 * MAX_SB_SQUARE * 3];
-      uint8_t *preds0[3] = {pred0,
-                            pred0 + 2 * MAX_SB_SQUARE,
-                            pred0 + 4 * MAX_SB_SQUARE};
-      uint8_t *preds1[3] = {pred1,
-                            pred1 + 2 * MAX_SB_SQUARE,
-                            pred1 + 4 * MAX_SB_SQUARE};
-      int strides[3] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
-      vp10_build_inter_predictors_for_planes_single_buf(
-          xd, bsize, mi_row, mi_col, 0, preds0, strides);
-      vp10_build_inter_predictors_for_planes_single_buf(
-          xd, bsize, mi_row, mi_col, 1, preds1, strides);
-
-      for (wedge_index = 0; wedge_index < 2 * wedge_types; ++wedge_index) {
-        mbmi->interinter_wedge_index = wedge_index >> 1;
-        mbmi->interinter_wedge_sign = wedge_index & 1;
-        vp10_build_wedge_inter_predictor_from_buf(xd, bsize, mi_row, mi_col,
-                                                  preds0, strides,
-                                                  preds1, strides);
-        model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
-                        &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
-        rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
-        if (rd < best_rd_wedge) {
-          best_wedge_index = wedge_index;
-          best_rd_wedge = rd;
+    mbmi->use_wedge_interinter = 0;
+
+    // Disable wedge search if source variance is small
+    if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh) {
+
+      mbmi->use_wedge_interinter = 1;
+      rs = (1 + get_wedge_bits_lookup[bsize]) * 256 +
+          vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
+      wedge_types = (1 << get_wedge_bits_lookup[bsize]);
+      if (have_newmv_in_inter_mode(this_mode)) {
+        int_mv tmp_mv[2];
+        int rate_mvs[2], tmp_rate_mv = 0;
+        uint8_t pred0[2 * MAX_SB_SQUARE * 3];
+        uint8_t pred1[2 * MAX_SB_SQUARE * 3];
+        uint8_t *preds0[3] = {pred0,
+          pred0 + 2 * MAX_SB_SQUARE,
+          pred0 + 4 * MAX_SB_SQUARE};
+        uint8_t *preds1[3] = {pred1,
+          pred1 + 2 * MAX_SB_SQUARE,
+          pred1 + 4 * MAX_SB_SQUARE};
+        int strides[3] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
+        vp10_build_inter_predictors_for_planes_single_buf(
+            xd, bsize, mi_row, mi_col, 0, preds0, strides);
+        vp10_build_inter_predictors_for_planes_single_buf(
+            xd, bsize, mi_row, mi_col, 1, preds1, strides);
+
+        for (wedge_index = 0; wedge_index < 2 * wedge_types; ++wedge_index) {
+          mbmi->interinter_wedge_index = wedge_index >> 1;
+          mbmi->interinter_wedge_sign = wedge_index & 1;
+          vp10_build_wedge_inter_predictor_from_buf(xd, bsize, mi_row, mi_col,
+                                                    preds0, strides,
+                                                    preds1, strides);
+          model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
+                          &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
+          rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
+          if (rd < best_rd_wedge) {
+            best_wedge_index = wedge_index;
+            best_rd_wedge = rd;
+          }
         }
-      }
-      mbmi->interinter_wedge_index = best_wedge_index >> 1;
-      mbmi->interinter_wedge_sign = best_wedge_index & 1;
-      if (this_mode == NEW_NEWMV) {
-        int mv_idxs[2] = {0, 0};
-        do_masked_motion_search_indexed(cpi, x,
-                                        mbmi->interinter_wedge_index,
-                                        mbmi->interinter_wedge_sign,
-                                        bsize, mi_row, mi_col, tmp_mv, rate_mvs,
-                                        mv_idxs, 2);
-        tmp_rate_mv = rate_mvs[0] + rate_mvs[1];
-        mbmi->mv[0].as_int = tmp_mv[0].as_int;
-        mbmi->mv[1].as_int = tmp_mv[1].as_int;
-      } else if (this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV) {
-        int mv_idxs[2] = {0, 0};
-        do_masked_motion_search_indexed(cpi, x,
-                                        mbmi->interinter_wedge_index,
-                                        mbmi->interinter_wedge_sign,
-                                        bsize, mi_row, mi_col, tmp_mv, rate_mvs,
-                                        mv_idxs, 0);
-        tmp_rate_mv = rate_mvs[0];
-        mbmi->mv[0].as_int = tmp_mv[0].as_int;
-      } else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
-        int mv_idxs[2] = {0, 0};
-        do_masked_motion_search_indexed(cpi, x,
-                                        mbmi->interinter_wedge_index,
-                                        mbmi->interinter_wedge_sign,
-                                        bsize, mi_row, mi_col, tmp_mv, rate_mvs,
-                                        mv_idxs, 1);
-        tmp_rate_mv = rate_mvs[1];
-        mbmi->mv[1].as_int = tmp_mv[1].as_int;
-      }
-      vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
-      model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
-                      &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
-      rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
-      if (rd < best_rd_wedge) {
-        best_rd_wedge = rd;
-      } else {
-        mbmi->mv[0].as_int = cur_mv[0].as_int;
-        mbmi->mv[1].as_int = cur_mv[1].as_int;
-        tmp_rate_mv = rate_mv;
-      }
-      if (best_rd_wedge < best_rd_nowedge) {
-        mbmi->use_wedge_interinter = 1;
         mbmi->interinter_wedge_index = best_wedge_index >> 1;
         mbmi->interinter_wedge_sign = best_wedge_index & 1;
-        xd->mi[0]->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
-        xd->mi[0]->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
-        *rate2 += tmp_rate_mv - rate_mv;
-        rate_mv = tmp_rate_mv;
-      } else {
-        mbmi->use_wedge_interinter = 0;
-        mbmi->mv[0].as_int = cur_mv[0].as_int;
-        mbmi->mv[1].as_int = cur_mv[1].as_int;
-      }
-    } else {
-      uint8_t pred0[2 * MAX_SB_SQUARE * 3];
-      uint8_t pred1[2 * MAX_SB_SQUARE * 3];
-      uint8_t *preds0[3] = {pred0,
-                            pred0 + 2 * MAX_SB_SQUARE,
-                            pred0 + 4 * MAX_SB_SQUARE};
-      uint8_t *preds1[3] = {pred1,
-                            pred1 + 2 * MAX_SB_SQUARE,
-                            pred1 + 4 * MAX_SB_SQUARE};
-      int strides[3] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
-      vp10_build_inter_predictors_for_planes_single_buf(
-          xd, bsize, mi_row, mi_col, 0, preds0, strides);
-      vp10_build_inter_predictors_for_planes_single_buf(
-          xd, bsize, mi_row, mi_col, 1, preds1, strides);
-      for (wedge_index = 0; wedge_index < 2 * wedge_types; ++wedge_index) {
-        mbmi->interinter_wedge_index = wedge_index >> 1;
-        mbmi->interinter_wedge_sign = wedge_index & 1;
-        vp10_build_wedge_inter_predictor_from_buf(xd, bsize, mi_row, mi_col,
-                                                  preds0, strides,
-                                                  preds1, strides);
+        if (this_mode == NEW_NEWMV) {
+          int mv_idxs[2] = {0, 0};
+          do_masked_motion_search_indexed(cpi, x,
+                                          mbmi->interinter_wedge_index,
+                                          mbmi->interinter_wedge_sign,
+                                          bsize, mi_row, mi_col, tmp_mv, rate_mvs,
+                                          mv_idxs, 2);
+          tmp_rate_mv = rate_mvs[0] + rate_mvs[1];
+          mbmi->mv[0].as_int = tmp_mv[0].as_int;
+          mbmi->mv[1].as_int = tmp_mv[1].as_int;
+        } else if (this_mode == NEW_NEARESTMV || this_mode == NEW_NEARMV) {
+          int mv_idxs[2] = {0, 0};
+          do_masked_motion_search_indexed(cpi, x,
+                                          mbmi->interinter_wedge_index,
+                                          mbmi->interinter_wedge_sign,
+                                          bsize, mi_row, mi_col, tmp_mv, rate_mvs,
+                                          mv_idxs, 0);
+          tmp_rate_mv = rate_mvs[0];
+          mbmi->mv[0].as_int = tmp_mv[0].as_int;
+        } else if (this_mode == NEAREST_NEWMV || this_mode == NEAR_NEWMV) {
+          int mv_idxs[2] = {0, 0};
+          do_masked_motion_search_indexed(cpi, x,
+                                          mbmi->interinter_wedge_index,
+                                          mbmi->interinter_wedge_sign,
+                                          bsize, mi_row, mi_col, tmp_mv, rate_mvs,
+                                          mv_idxs, 1);
+          tmp_rate_mv = rate_mvs[1];
+          mbmi->mv[1].as_int = tmp_mv[1].as_int;
+        }
+        vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
         model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
                         &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
-        rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
+        rd = RDCOST(x->rdmult, x->rddiv, rs + tmp_rate_mv + rate_sum, dist_sum);
         if (rd < best_rd_wedge) {
-          best_wedge_index = wedge_index;
           best_rd_wedge = rd;
+        } else {
+          mbmi->mv[0].as_int = cur_mv[0].as_int;
+          mbmi->mv[1].as_int = cur_mv[1].as_int;
+          tmp_rate_mv = rate_mv;
+        }
+        if (best_rd_wedge < best_rd_nowedge) {
+          mbmi->use_wedge_interinter = 1;
+          mbmi->interinter_wedge_index = best_wedge_index >> 1;
+          mbmi->interinter_wedge_sign = best_wedge_index & 1;
+          xd->mi[0]->bmi[0].as_mv[0].as_int = mbmi->mv[0].as_int;
+          xd->mi[0]->bmi[0].as_mv[1].as_int = mbmi->mv[1].as_int;
+          *rate2 += tmp_rate_mv - rate_mv;
+          rate_mv = tmp_rate_mv;
+        } else {
+          mbmi->use_wedge_interinter = 0;
+          mbmi->mv[0].as_int = cur_mv[0].as_int;
+          mbmi->mv[1].as_int = cur_mv[1].as_int;
         }
-      }
-      if (best_rd_wedge < best_rd_nowedge) {
-        mbmi->use_wedge_interinter = 1;
-        mbmi->interinter_wedge_index = best_wedge_index >> 1;
-        mbmi->interinter_wedge_sign = best_wedge_index & 1;
       } else {
-        mbmi->use_wedge_interinter = 0;
+        uint8_t pred0[2 * MAX_SB_SQUARE * 3];
+        uint8_t pred1[2 * MAX_SB_SQUARE * 3];
+        uint8_t *preds0[3] = {pred0,
+          pred0 + 2 * MAX_SB_SQUARE,
+          pred0 + 4 * MAX_SB_SQUARE};
+        uint8_t *preds1[3] = {pred1,
+          pred1 + 2 * MAX_SB_SQUARE,
+          pred1 + 4 * MAX_SB_SQUARE};
+        int strides[3] = {MAX_SB_SIZE, MAX_SB_SIZE, MAX_SB_SIZE};
+        vp10_build_inter_predictors_for_planes_single_buf(
+            xd, bsize, mi_row, mi_col, 0, preds0, strides);
+        vp10_build_inter_predictors_for_planes_single_buf(
+            xd, bsize, mi_row, mi_col, 1, preds1, strides);
+        for (wedge_index = 0; wedge_index < 2 * wedge_types; ++wedge_index) {
+          mbmi->interinter_wedge_index = wedge_index >> 1;
+          mbmi->interinter_wedge_sign = wedge_index & 1;
+          vp10_build_wedge_inter_predictor_from_buf(xd, bsize, mi_row, mi_col,
+                                                    preds0, strides,
+                                                    preds1, strides);
+          model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
+                          &tmp_skip_txfm_sb, &tmp_skip_sse_sb);
+          rd = RDCOST(x->rdmult, x->rddiv, rs + rate_mv + rate_sum, dist_sum);
+          if (rd < best_rd_wedge) {
+            best_wedge_index = wedge_index;
+            best_rd_wedge = rd;
+          }
+        }
+        if (best_rd_wedge < best_rd_nowedge) {
+          mbmi->use_wedge_interinter = 1;
+          mbmi->interinter_wedge_index = best_wedge_index >> 1;
+          mbmi->interinter_wedge_sign = best_wedge_index & 1;
+        } else {
+          mbmi->use_wedge_interinter = 0;
+        }
       }
     }
     if (ref_best_rd < INT64_MAX &&
@@ -6922,6 +6927,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
 
     pred_exists = 0;
     tmp_rd = VPXMIN(best_rd_wedge, best_rd_nowedge);
+
     if (mbmi->use_wedge_interinter)
       *compmode_wedge_cost = (1 + get_wedge_bits_lookup[bsize]) * 256 +
           vp10_cost_bit(cm->fc->wedge_interinter_prob[bsize], 1);
@@ -6940,8 +6946,6 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
     int64_t best_interintra_rd_nowedge = INT64_MAX;
     int64_t best_interintra_rd_wedge = INT64_MAX;
     int rwedge;
-    int bw = 4 << b_width_log2_lookup[mbmi->sb_type],
-        bh = 4 << b_height_log2_lookup[mbmi->sb_type];
     int_mv tmp_mv;
     int tmp_rate_mv = 0;
     DECLARE_ALIGNED(16, uint8_t,
@@ -6962,10 +6966,10 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
     vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
     restore_dst_buf(xd, orig_dst, orig_dst_stride);
     mbmi->ref_frame[1] = INTRA_FRAME;
+    mbmi->use_wedge_interintra = 0;
 
     for (j = 0; j < INTERINTRA_MODES; ++j) {
       mbmi->interintra_mode = (INTERINTRA_MODE)j;
-      mbmi->interintra_uv_mode = (INTERINTRA_MODE)j;
       rmode = interintra_mode_cost[mbmi->interintra_mode];
       vp10_build_intra_predictors_for_interintra(
           xd, bsize, 0, intrapred, MAX_SB_SIZE);
@@ -6990,7 +6994,6 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
       }
     }
     mbmi->interintra_mode = best_interintra_mode;
-    mbmi->interintra_uv_mode = best_interintra_mode;
     if (ref_best_rd < INT64_MAX &&
         best_interintra_rd > 2 * ref_best_rd) {
       return INT64_MAX;
@@ -7019,69 +7022,76 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
                   rmode + rate_mv + rwedge + rate_sum, dist_sum);
       best_interintra_rd_nowedge = rd;
 
-      mbmi->use_wedge_interintra = 1;
-      wedge_types = (1 << get_wedge_bits_lookup[bsize]);
-      rwedge = get_wedge_bits_lookup[bsize] * 256 +
-          vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
-      for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
-        mbmi->interintra_wedge_index = wedge_index;
-        mbmi->interintra_wedge_sign = 0;
-        vp10_combine_interintra(xd, bsize, 0,
-                                tmp_buf, MAX_SB_SIZE,
-                                intrapred, MAX_SB_SIZE);
-        vp10_combine_interintra(xd, bsize, 1,
-                                tmp_buf + MAX_SB_SQUARE, MAX_SB_SIZE,
-                                intrapred + MAX_SB_SQUARE, MAX_SB_SIZE);
-        vp10_combine_interintra(xd, bsize, 2,
-                                tmp_buf + 2 * MAX_SB_SQUARE, MAX_SB_SIZE,
-                                intrapred + 2 * MAX_SB_SQUARE, MAX_SB_SIZE);
-        model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
-                        &skip_txfm_sb, &skip_sse_sb);
-        rd = RDCOST(x->rdmult, x->rddiv,
-                    rmode + rate_mv + rwedge + rate_sum, dist_sum);
-        if (rd < best_interintra_rd_wedge) {
-          best_interintra_rd_wedge = rd;
-          best_wedge_index = wedge_index;
+      // Disable wedge search if source variance is small
+      if (x->source_variance > cpi->sf.disable_wedge_search_var_thresh) {
+
+        mbmi->use_wedge_interintra = 1;
+        wedge_types = (1 << get_wedge_bits_lookup[bsize]);
+        rwedge = get_wedge_bits_lookup[bsize] * 256 +
+            vp10_cost_bit(cm->fc->wedge_interintra_prob[bsize], 1);
+        for (wedge_index = 0; wedge_index < wedge_types; ++wedge_index) {
+          mbmi->interintra_wedge_index = wedge_index;
+          mbmi->interintra_wedge_sign = 0;
+          vp10_combine_interintra(xd, bsize, 0,
+                                  tmp_buf, MAX_SB_SIZE,
+                                  intrapred, MAX_SB_SIZE);
+          vp10_combine_interintra(xd, bsize, 1,
+                                  tmp_buf + MAX_SB_SQUARE, MAX_SB_SIZE,
+                                  intrapred + MAX_SB_SQUARE, MAX_SB_SIZE);
+          vp10_combine_interintra(xd, bsize, 2,
+                                  tmp_buf + 2 * MAX_SB_SQUARE, MAX_SB_SIZE,
+                                  intrapred + 2 * MAX_SB_SQUARE, MAX_SB_SIZE);
+          model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
+                          &skip_txfm_sb, &skip_sse_sb);
+          rd = RDCOST(x->rdmult, x->rddiv,
+                      rmode + rate_mv + rwedge + rate_sum, dist_sum);
+          if (rd < best_interintra_rd_wedge) {
+            best_interintra_rd_wedge = rd;
+            best_wedge_index = wedge_index;
+          }
         }
-      }
-      // Refine motion vector.
-      if (have_newmv_in_inter_mode(this_mode)) {
-        // get negative of mask
-        const uint8_t* mask = vp10_get_soft_mask(
-            best_wedge_index, 1, bsize, bh, bw);
-        mbmi->interintra_wedge_index = best_wedge_index;
-        mbmi->interintra_wedge_sign = 0;
-        do_masked_motion_search(cpi, x, mask, MASK_MASTER_STRIDE, bsize,
-                                mi_row, mi_col, &tmp_mv, &tmp_rate_mv,
-                                0, mv_idx);
-        mbmi->mv[0].as_int = tmp_mv.as_int;
-        vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
-        model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
-                        &skip_txfm_sb, &skip_sse_sb);
-        rd = RDCOST(x->rdmult, x->rddiv,
-                    rmode + tmp_rate_mv + rwedge + rate_sum, dist_sum);
-        if (rd < best_interintra_rd_wedge) {
-          best_interintra_rd_wedge = rd;
+        // Refine motion vector.
+        if (have_newmv_in_inter_mode(this_mode)) {
+          // get negative of mask
+          const uint8_t* mask = vp10_get_soft_mask(
+              best_wedge_index, 1, bsize, 0, 0);
+          mbmi->interintra_wedge_index = best_wedge_index;
+          mbmi->interintra_wedge_sign = 0;
+          do_masked_motion_search(cpi, x, mask, MASK_MASTER_STRIDE, bsize,
+                                  mi_row, mi_col, &tmp_mv, &tmp_rate_mv,
+                                  0, mv_idx);
+          mbmi->mv[0].as_int = tmp_mv.as_int;
+          vp10_build_inter_predictors_sb(xd, mi_row, mi_col, bsize);
+          model_rd_for_sb(cpi, bsize, x, xd, &rate_sum, &dist_sum,
+                          &skip_txfm_sb, &skip_sse_sb);
+          rd = RDCOST(x->rdmult, x->rddiv,
+                      rmode + tmp_rate_mv + rwedge + rate_sum, dist_sum);
+          if (rd < best_interintra_rd_wedge) {
+            best_interintra_rd_wedge = rd;
+          } else {
+            tmp_mv.as_int = cur_mv[0].as_int;
+            tmp_rate_mv = rate_mv;
+          }
         } else {
           tmp_mv.as_int = cur_mv[0].as_int;
           tmp_rate_mv = rate_mv;
         }
-      } else {
-        tmp_mv.as_int = cur_mv[0].as_int;
-        tmp_rate_mv = rate_mv;
-      }
-      if (best_interintra_rd_wedge < best_interintra_rd_nowedge) {
-        mbmi->use_wedge_interintra = 1;
-        mbmi->interintra_wedge_index = best_wedge_index;
-        mbmi->interintra_wedge_sign = 0;
-        best_interintra_rd = best_interintra_rd_wedge;
-        mbmi->mv[0].as_int = tmp_mv.as_int;
-        *rate2 += tmp_rate_mv - rate_mv;
-        rate_mv = tmp_rate_mv;
+        if (best_interintra_rd_wedge < best_interintra_rd_nowedge) {
+          mbmi->use_wedge_interintra = 1;
+          mbmi->interintra_wedge_index = best_wedge_index;
+          mbmi->interintra_wedge_sign = 0;
+          best_interintra_rd = best_interintra_rd_wedge;
+          mbmi->mv[0].as_int = tmp_mv.as_int;
+          *rate2 += tmp_rate_mv - rate_mv;
+          rate_mv = tmp_rate_mv;
+        } else {
+          mbmi->use_wedge_interintra = 0;
+          best_interintra_rd = best_interintra_rd_nowedge;
+          mbmi->mv[0].as_int = cur_mv[0].as_int;
+        }
       } else {
         mbmi->use_wedge_interintra = 0;
         best_interintra_rd = best_interintra_rd_nowedge;
-        mbmi->mv[0].as_int = cur_mv[0].as_int;
       }
     }
 
@@ -7099,7 +7109,7 @@ static int64_t handle_inter_mode(VP10_COMP *cpi, MACROBLOCK *x,
     }
   } else if (is_interintra_allowed(mbmi)) {
     *compmode_interintra_cost =
-      vp10_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 0);
+        vp10_cost_bit(cm->fc->interintra_prob[size_group_lookup[bsize]], 0);
   }
 
 #if CONFIG_EXT_INTERP
@@ -8207,7 +8217,6 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
 
 #if CONFIG_EXT_INTER
     mbmi->interintra_mode = (PREDICTION_MODE)(DC_PRED - 1);
-    mbmi->interintra_uv_mode = (PREDICTION_MODE)(DC_PRED - 1);
 #endif  // CONFIG_EXT_INTER
 
     if (ref_frame == INTRA_FRAME) {
@@ -8376,7 +8385,6 @@ void vp10_rd_pick_inter_mode_sb(VP10_COMP *cpi,
         if (best_single_inter_ref != ref_frame)
           continue;
         mbmi->interintra_mode = best_intra_mode;
-        mbmi->interintra_uv_mode = best_intra_mode;
 #if CONFIG_EXT_INTRA
         // TODO(debargha|geza.lore):
         // Should we use ext_intra modes for interintra?
index b3304a7e185515de7a7ed2f5f2a3110f24cada91..913e0452ee7ebfffecfef8f50f4d041f7878e8dd 100644 (file)
@@ -163,6 +163,9 @@ static void set_good_speed_feature(VP10_COMP *cpi, VP10_COMMON *cm,
     // Use transform domain distortion.
     // Note var-tx expt always uses pixel domain distortion.
     sf->use_transform_domain_distortion = 1;
+#if CONFIG_EXT_INTER
+    sf->disable_wedge_search_var_thresh = 100;
+#endif  // CONFIG_EXT_INTER
   }
 
   if (speed >= 2) {
@@ -278,6 +281,9 @@ static void set_rt_speed_feature(VP10_COMP *cpi, SPEED_FEATURES *sf,
   sf->allow_exhaustive_searches = 0;
   sf->exhaustive_searches_thresh = INT_MAX;
   sf->use_upsampled_references = 0;
+#if CONFIG_EXT_INTER
+  sf->disable_wedge_search_var_thresh = 100;
+#endif  // CONFIG_EXT_INTER
 
   // Use transform domain distortion computation
   // Note var-tx expt always uses pixel domain distortion.
@@ -509,6 +515,9 @@ void vp10_set_speed_features_framesize_independent(VP10_COMP *cpi) {
 #else
   sf->use_upsampled_references = 1;
 #endif
+#if CONFIG_EXT_INTER
+  sf->disable_wedge_search_var_thresh = 0;
+#endif  // CONFIG_EXT_INTER
 
   for (i = 0; i < TX_SIZES; i++) {
     sf->intra_y_mode_mask[i] = INTRA_ALL;
index 9f4e3a12a6c8488331db5664928921e47064ccc2..6cee748133e7e6f6b664e482562a79dda1c3f0c6 100644 (file)
@@ -399,6 +399,11 @@ typedef struct SPEED_FEATURES {
   // Choose a very large value (UINT_MAX) to use 8-tap always
   unsigned int disable_filter_search_var_thresh;
 
+#if CONFIG_EXT_INTER
+  // A source variance threshold below which wedge search is disabled
+  unsigned int disable_wedge_search_var_thresh;
+#endif  // CONFIG_EXT_INTER
+
   // These bit masks allow you to enable or disable intra modes for each
   // transform size separately.
   int intra_y_mode_mask[TX_SIZES];