granicus.if.org Git - libvpx/commitdiff
Adds masked variance and sad functions for wedge
author Debargha Mukherjee <debargha@google.com>
Tue, 1 Mar 2016 00:08:07 +0000 (16:08 -0800)
committer Debargha Mukherjee <debargha@google.com>
Wed, 2 Mar 2016 01:28:56 +0000 (17:28 -0800)
Adds masked variance and sad functions needed for wedge
prediction modes to come.

Change-Id: I25b231bbc345e6a494316abb0a7d5cd5586a3a54
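
Note for reviewers (a sketch derived from the vpx_dsp/sad.c hunk below, using its own symbols a, b and m for the source block, reference block and mask): the new C reference kernels weight each per-pixel absolute difference by a 6-bit mask value in [0, 64], so the masked SAD is a rounded, normalized weighted sum,

\mathrm{masked\_sad}(a,b,m) = \left\lfloor \frac{\sum_{y,x} m[y,x]\,\lvert a[y,x] - b[y,x]\rvert + 31}{64} \right\rfloor

which corresponds to the `(sad + 31) >> 6` rounding in the code.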

test/masked_sad_test.cc [new file with mode: 0644]
test/masked_variance_test.cc [new file with mode: 0644]
test/test.mk
vpx_dsp/sad.c
vpx_dsp/variance.c
vpx_dsp/vpx_dsp.mk
vpx_dsp/vpx_dsp_rtcd_defs.pl
vpx_dsp/vpx_filter.h
vpx_dsp/x86/masked_sad_intrin_ssse3.c [new file with mode: 0644]
vpx_dsp/x86/masked_variance_intrin_ssse3.c [new file with mode: 0644]

diff --git a/test/masked_sad_test.cc b/test/masked_sad_test.cc
new file mode 100644 (file)
index 0000000..c09104c
--- /dev/null
@@ -0,0 +1,209 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "vpx/vpx_integer.h"
+
+using libvpx_test::ACMRandom;
+
+namespace {
+const int number_of_iterations = 500;
+
+typedef unsigned int (*MaskedSADFunc)(const uint8_t *a, int a_stride,
+                                      const uint8_t *b, int b_stride,
+                                      const uint8_t *m, int m_stride);
+typedef std::tr1::tuple<MaskedSADFunc, MaskedSADFunc> MaskedSADParam;
+
+class MaskedSADTest : public ::testing::TestWithParam<MaskedSADParam> {
+ public:
+  virtual ~MaskedSADTest() {}
+  virtual void SetUp() {
+    maskedSAD_op_   = GET_PARAM(0);
+    ref_maskedSAD_op_ = GET_PARAM(1);
+  }
+
+  virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+  MaskedSADFunc maskedSAD_op_;
+  MaskedSADFunc ref_maskedSAD_op_;
+};
+
+TEST_P(MaskedSADTest, OperationCheck) {
+  unsigned int ref_ret, ret;
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, uint8_t,  src_ptr[4096]);
+  DECLARE_ALIGNED(16, uint8_t,  ref_ptr[4096]);
+  DECLARE_ALIGNED(16, uint8_t,  msk_ptr[4096]);
+  int err_count = 0;
+  int first_failure = -1;
+  int src_stride = 64;
+  int ref_stride = 64;
+  int msk_stride = 64;
+  for (int i = 0; i < number_of_iterations; ++i) {
+    for (int j = 0; j < 4096; j++) {
+      src_ptr[j] = rnd.Rand8();
+      ref_ptr[j] = rnd.Rand8();
+      msk_ptr[j] = ((rnd.Rand8()&0x7f) > 64) ? rnd.Rand8()&0x3f : 64;
+      assert(msk_ptr[j] <= 64);
+    }
+
+    ref_ret = ref_maskedSAD_op_(src_ptr, src_stride, ref_ptr, ref_stride,
+                                msk_ptr, msk_stride);
+    ASM_REGISTER_STATE_CHECK(ret = maskedSAD_op_(src_ptr, src_stride,
+                                                 ref_ptr, ref_stride,
+                                                 msk_ptr, msk_stride));
+    if (ret != ref_ret) {
+      err_count++;
+      if (first_failure == -1)
+        first_failure = i;
+    }
+  }
+  EXPECT_EQ(0, err_count)
+    << "Error: Masked SAD Test, C output doesn't match SSSE3 output. "
+    << "First failed at test case " << first_failure;
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef unsigned int (*HighbdMaskedSADFunc)(const uint8_t *a, int a_stride,
+                                            const uint8_t *b, int b_stride,
+                                            const uint8_t *m, int m_stride);
+typedef std::tr1::tuple<HighbdMaskedSADFunc, HighbdMaskedSADFunc>
+    HighbdMaskedSADParam;
+
+class HighbdMaskedSADTest : public ::testing::
+        TestWithParam<HighbdMaskedSADParam> {
+ public:
+  virtual ~HighbdMaskedSADTest() {}
+  virtual void SetUp() {
+    maskedSAD_op_   = GET_PARAM(0);
+    ref_maskedSAD_op_ = GET_PARAM(1);
+  }
+
+  virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+  HighbdMaskedSADFunc maskedSAD_op_;
+  HighbdMaskedSADFunc ref_maskedSAD_op_;
+};
+
+TEST_P(HighbdMaskedSADTest, OperationCheck) {
+  unsigned int ref_ret, ret;
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, uint16_t,  src_ptr[4096]);
+  DECLARE_ALIGNED(16, uint16_t,  ref_ptr[4096]);
+  DECLARE_ALIGNED(16, uint8_t,  msk_ptr[4096]);
+  uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
+  uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
+  int err_count = 0;
+  int first_failure = -1;
+  int src_stride = 64;
+  int ref_stride = 64;
+  int msk_stride = 64;
+  for (int i = 0; i < number_of_iterations; ++i) {
+    for (int j = 0; j < 4096; j++) {
+      src_ptr[j] = rnd.Rand16()&0xfff;
+      ref_ptr[j] = rnd.Rand16()&0xfff;
+      msk_ptr[j] = ((rnd.Rand8()&0x7f) > 64) ? rnd.Rand8()&0x3f : 64;
+    }
+
+    ref_ret = ref_maskedSAD_op_(src8_ptr, src_stride, ref8_ptr, ref_stride,
+                                msk_ptr, msk_stride);
+    ASM_REGISTER_STATE_CHECK(ret = maskedSAD_op_(src8_ptr, src_stride,
+                                                 ref8_ptr, ref_stride,
+                                                 msk_ptr, msk_stride));
+    if (ret != ref_ret) {
+      err_count++;
+      if (first_failure == -1)
+        first_failure = i;
+    }
+  }
+  EXPECT_EQ(0, err_count)
+    << "Error: High BD Masked SAD Test, C output doesn't match SSSE3 output. "
+    << "First failed at test case " << first_failure;
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+using std::tr1::make_tuple;
+
+#if HAVE_SSSE3
+INSTANTIATE_TEST_CASE_P(
+  SSSE3_C_COMPARE, MaskedSADTest,
+  ::testing::Values(
+    make_tuple(&vpx_masked_sad64x64_ssse3,
+               &vpx_masked_sad64x64_c),
+    make_tuple(&vpx_masked_sad64x32_ssse3,
+               &vpx_masked_sad64x32_c),
+    make_tuple(&vpx_masked_sad32x64_ssse3,
+               &vpx_masked_sad32x64_c),
+    make_tuple(&vpx_masked_sad32x32_ssse3,
+               &vpx_masked_sad32x32_c),
+    make_tuple(&vpx_masked_sad32x16_ssse3,
+               &vpx_masked_sad32x16_c),
+    make_tuple(&vpx_masked_sad16x32_ssse3,
+               &vpx_masked_sad16x32_c),
+    make_tuple(&vpx_masked_sad16x16_ssse3,
+               &vpx_masked_sad16x16_c),
+    make_tuple(&vpx_masked_sad16x8_ssse3,
+               &vpx_masked_sad16x8_c),
+    make_tuple(&vpx_masked_sad8x16_ssse3,
+               &vpx_masked_sad8x16_c),
+    make_tuple(&vpx_masked_sad8x8_ssse3,
+               &vpx_masked_sad8x8_c),
+    make_tuple(&vpx_masked_sad8x4_ssse3,
+               &vpx_masked_sad8x4_c),
+    make_tuple(&vpx_masked_sad4x8_ssse3,
+               &vpx_masked_sad4x8_c),
+    make_tuple(&vpx_masked_sad4x4_ssse3,
+               &vpx_masked_sad4x4_c)));
+#if CONFIG_VP9_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(
+  SSSE3_C_COMPARE, HighbdMaskedSADTest,
+  ::testing::Values(
+    make_tuple(&vp9_highbd_masked_sad64x64_ssse3,
+               &vp9_highbd_masked_sad64x64_c),
+    make_tuple(&vp9_highbd_masked_sad64x32_ssse3,
+               &vp9_highbd_masked_sad64x32_c),
+    make_tuple(&vp9_highbd_masked_sad32x64_ssse3,
+               &vp9_highbd_masked_sad32x64_c),
+    make_tuple(&vp9_highbd_masked_sad32x32_ssse3,
+               &vp9_highbd_masked_sad32x32_c),
+    make_tuple(&vp9_highbd_masked_sad32x16_ssse3,
+               &vp9_highbd_masked_sad32x16_c),
+    make_tuple(&vp9_highbd_masked_sad16x32_ssse3,
+               &vp9_highbd_masked_sad16x32_c),
+    make_tuple(&vp9_highbd_masked_sad16x16_ssse3,
+               &vp9_highbd_masked_sad16x16_c),
+    make_tuple(&vp9_highbd_masked_sad16x8_ssse3,
+               &vp9_highbd_masked_sad16x8_c),
+    make_tuple(&vp9_highbd_masked_sad8x16_ssse3,
+               &vp9_highbd_masked_sad8x16_c),
+    make_tuple(&vp9_highbd_masked_sad8x8_ssse3,
+               &vp9_highbd_masked_sad8x8_c),
+    make_tuple(&vp9_highbd_masked_sad8x4_ssse3,
+               &vp9_highbd_masked_sad8x4_c),
+    make_tuple(&vp9_highbd_masked_sad4x8_ssse3,
+               &vp9_highbd_masked_sad4x8_c),
+    make_tuple(&vp9_highbd_masked_sad4x4_ssse3,
+               &vp9_highbd_masked_sad4x4_c)));
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // HAVE_SSSE3
+}  // namespace
diff --git a/test/masked_variance_test.cc b/test/masked_variance_test.cc
new file mode 100644 (file)
index 0000000..fc37759
--- /dev/null
@@ -0,0 +1,752 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <math.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/googletest/src/include/gtest/gtest.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
+#include "test/register_state_check.h"
+#include "test/util.h"
+
+#include "./vpx_config.h"
+#include "./vpx_dsp_rtcd.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_dsp/vpx_filter.h"
+
+#define MAX_SIZE 64
+
+using libvpx_test::ACMRandom;
+
+namespace {
+const int number_of_iterations = 500;
+
+typedef unsigned int (*MaskedVarianceFunc)(const uint8_t *a, int a_stride,
+                                           const uint8_t *b, int b_stride,
+                                           const uint8_t *m, int m_stride,
+                                           unsigned int *sse);
+
+typedef std::tr1::tuple<MaskedVarianceFunc,
+                        MaskedVarianceFunc> MaskedVarianceParam;
+
+class MaskedVarianceTest :
+  public ::testing::TestWithParam<MaskedVarianceParam> {
+ public:
+  virtual ~MaskedVarianceTest() {}
+  virtual void SetUp() {
+    opt_func_ = GET_PARAM(0);
+    ref_func_ = GET_PARAM(1);
+  }
+
+  virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+  MaskedVarianceFunc opt_func_;
+  MaskedVarianceFunc ref_func_;
+};
+
+TEST_P(MaskedVarianceTest, OperationCheck) {
+  unsigned int ref_ret, opt_ret;
+  unsigned int ref_sse, opt_sse;
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, uint8_t,  src_ptr[MAX_SIZE*MAX_SIZE]);
+  DECLARE_ALIGNED(16, uint8_t,  ref_ptr[MAX_SIZE*MAX_SIZE]);
+  DECLARE_ALIGNED(16, uint8_t,  msk_ptr[MAX_SIZE*MAX_SIZE]);
+  int err_count = 0;
+  int first_failure = -1;
+  int src_stride = MAX_SIZE;
+  int ref_stride = MAX_SIZE;
+  int msk_stride = MAX_SIZE;
+
+  for (int i = 0; i < number_of_iterations; ++i) {
+    for (int j = 0; j < MAX_SIZE*MAX_SIZE; j++) {
+      src_ptr[j] = rnd.Rand8();
+      ref_ptr[j] = rnd.Rand8();
+      msk_ptr[j] = rnd(65);
+    }
+
+    ref_ret = ref_func_(src_ptr, src_stride,
+                        ref_ptr, ref_stride,
+                        msk_ptr, msk_stride,
+                        &ref_sse);
+    ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src_ptr, src_stride,
+                                                 ref_ptr, ref_stride,
+                                                 msk_ptr, msk_stride,
+                                                 &opt_sse));
+
+    if (opt_ret != ref_ret || opt_sse != ref_sse) {
+      err_count++;
+      if (first_failure == -1)
+        first_failure = i;
+    }
+  }
+
+  EXPECT_EQ(0, err_count)
+  << "Error: Masked Variance Test OperationCheck,"
+  << "C output doesn't match SSSE3 output. "
+  << "First failed at test case " << first_failure;
+}
+
+TEST_P(MaskedVarianceTest, ExtremeValues) {
+  unsigned int ref_ret, opt_ret;
+  unsigned int ref_sse, opt_sse;
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, uint8_t,  src_ptr[MAX_SIZE*MAX_SIZE]);
+  DECLARE_ALIGNED(16, uint8_t,  ref_ptr[MAX_SIZE*MAX_SIZE]);
+  DECLARE_ALIGNED(16, uint8_t,  msk_ptr[MAX_SIZE*MAX_SIZE]);
+  int err_count = 0;
+  int first_failure = -1;
+  int src_stride = MAX_SIZE;
+  int ref_stride = MAX_SIZE;
+  int msk_stride = MAX_SIZE;
+
+  for (int i = 0; i < 8; ++i) {
+    memset(src_ptr, (i & 0x1) ? 255 : 0, MAX_SIZE*MAX_SIZE);
+    memset(ref_ptr, (i & 0x2) ? 255 : 0, MAX_SIZE*MAX_SIZE);
+    memset(msk_ptr, (i & 0x4) ?  64 : 0, MAX_SIZE*MAX_SIZE);
+
+    ref_ret = ref_func_(src_ptr, src_stride,
+                        ref_ptr, ref_stride,
+                        msk_ptr, msk_stride,
+                        &ref_sse);
+    ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src_ptr, src_stride,
+                                                 ref_ptr, ref_stride,
+                                                 msk_ptr, msk_stride,
+                                                 &opt_sse));
+
+    if (opt_ret != ref_ret || opt_sse != ref_sse) {
+      err_count++;
+      if (first_failure == -1)
+        first_failure = i;
+    }
+  }
+
+  EXPECT_EQ(0, err_count)
+  << "Error: Masked Variance Test ExtremeValues,"
+  << "C output doesn't match SSSE3 output. "
+  << "First failed at test case " << first_failure;
+}
+
+typedef unsigned int (*MaskedSubPixelVarianceFunc)(
+    const uint8_t *a, int a_stride,
+    int xoffset, int  yoffset,
+    const uint8_t *b, int b_stride,
+    const uint8_t *m, int m_stride,
+    unsigned int *sse);
+
+typedef std::tr1::tuple<MaskedSubPixelVarianceFunc,
+                        MaskedSubPixelVarianceFunc> MaskedSubPixelVarianceParam;
+
+class MaskedSubPixelVarianceTest :
+  public ::testing::TestWithParam<MaskedSubPixelVarianceParam> {
+ public:
+  virtual ~MaskedSubPixelVarianceTest() {}
+  virtual void SetUp() {
+    opt_func_ = GET_PARAM(0);
+    ref_func_ = GET_PARAM(1);
+  }
+
+  virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+  MaskedSubPixelVarianceFunc opt_func_;
+  MaskedSubPixelVarianceFunc ref_func_;
+};
+
+TEST_P(MaskedSubPixelVarianceTest, OperationCheck) {
+  unsigned int ref_ret, opt_ret;
+  unsigned int ref_sse, opt_sse;
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, uint8_t,  src_ptr[(MAX_SIZE+1)*(MAX_SIZE+1)]);
+  DECLARE_ALIGNED(16, uint8_t,  ref_ptr[(MAX_SIZE+1)*(MAX_SIZE+1)]);
+  DECLARE_ALIGNED(16, uint8_t,  msk_ptr[(MAX_SIZE+1)*(MAX_SIZE+1)]);
+  int err_count = 0;
+  int first_failure = -1;
+  int src_stride = (MAX_SIZE+1);
+  int ref_stride = (MAX_SIZE+1);
+  int msk_stride = (MAX_SIZE+1);
+  int xoffset;
+  int yoffset;
+
+  for (int i = 0; i < number_of_iterations; ++i) {
+    int xoffsets[] = {0, 4, rnd(BIL_SUBPEL_SHIFTS)};
+    int yoffsets[] = {0, 4, rnd(BIL_SUBPEL_SHIFTS)};
+    for (int j = 0; j < (MAX_SIZE+1)*(MAX_SIZE+1); j++) {
+      src_ptr[j] = rnd.Rand8();
+      ref_ptr[j] = rnd.Rand8();
+      msk_ptr[j] = rnd(65);
+    }
+    for (int k = 0; k < 3; k++) {
+      xoffset = xoffsets[k];
+      for (int l = 0; l < 3; l++) {
+        xoffset = xoffsets[k];
+        yoffset = yoffsets[l];
+
+        ref_ret = ref_func_(src_ptr, src_stride,
+                            xoffset, yoffset,
+                            ref_ptr, ref_stride,
+                            msk_ptr, msk_stride,
+                            &ref_sse);
+        ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src_ptr, src_stride,
+                                                     xoffset, yoffset,
+                                                     ref_ptr, ref_stride,
+                                                     msk_ptr, msk_stride,
+                                                     &opt_sse));
+
+        if (opt_ret != ref_ret || opt_sse != ref_sse) {
+          err_count++;
+          if (first_failure == -1)
+            first_failure = i;
+        }
+      }
+    }
+  }
+
+  EXPECT_EQ(0, err_count)
+    << "Error: Masked Sub Pixel Variance Test OperationCheck,"
+    << "C output doesn't match SSSE3 output. "
+    << "First failed at test case " << first_failure;
+}
+
+TEST_P(MaskedSubPixelVarianceTest, ExtremeValues) {
+  unsigned int ref_ret, opt_ret;
+  unsigned int ref_sse, opt_sse;
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, uint8_t,  src_ptr[(MAX_SIZE+1)*(MAX_SIZE+1)]);
+  DECLARE_ALIGNED(16, uint8_t,  ref_ptr[(MAX_SIZE+1)*(MAX_SIZE+1)]);
+  DECLARE_ALIGNED(16, uint8_t,  msk_ptr[(MAX_SIZE+1)*(MAX_SIZE+1)]);
+  int first_failure_x = -1;
+  int first_failure_y = -1;
+  int err_count = 0;
+  int first_failure = -1;
+  int src_stride = (MAX_SIZE+1);
+  int ref_stride = (MAX_SIZE+1);
+  int msk_stride = (MAX_SIZE+1);
+
+  for (int xoffset = 0 ; xoffset < BIL_SUBPEL_SHIFTS ; xoffset++) {
+    for (int yoffset = 0 ; yoffset < BIL_SUBPEL_SHIFTS ; yoffset++) {
+      for (int i = 0; i < 8; ++i) {
+        memset(src_ptr, (i & 0x1) ? 255 : 0, (MAX_SIZE+1)*(MAX_SIZE+1));
+        memset(ref_ptr, (i & 0x2) ? 255 : 0, (MAX_SIZE+1)*(MAX_SIZE+1));
+        memset(msk_ptr, (i & 0x4) ?  64 : 0, (MAX_SIZE+1)*(MAX_SIZE+1));
+
+        ref_ret = ref_func_(src_ptr, src_stride,
+                            xoffset, yoffset,
+                            ref_ptr, ref_stride,
+                            msk_ptr, msk_stride,
+                            &ref_sse);
+        ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src_ptr, src_stride,
+                                                     xoffset, yoffset,
+                                                     ref_ptr, ref_stride,
+                                                     msk_ptr, msk_stride,
+                                                     &opt_sse));
+
+        if (opt_ret != ref_ret || opt_sse != ref_sse) {
+          err_count++;
+          if (first_failure == -1) {
+            first_failure = i;
+            first_failure_x = xoffset;
+            first_failure_y = yoffset;
+          }
+        }
+      }
+    }
+  }
+
+  EXPECT_EQ(0, err_count)
+  << "Error: Masked Variance Test ExtremeValues,"
+  << "C output doesn't match SSSE3 output. "
+  << "First failed at test case " << first_failure
+  << " x_offset = " << first_failure_x
+  << " y_offset = " << first_failure_y;
+}
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef std::tr1::tuple<MaskedVarianceFunc,
+                        MaskedVarianceFunc,
+                        vpx_bit_depth_t> HighbdMaskedVarianceParam;
+
+class HighbdMaskedVarianceTest :
+  public ::testing::TestWithParam<HighbdMaskedVarianceParam> {
+ public:
+  virtual ~HighbdMaskedVarianceTest() {}
+  virtual void SetUp() {
+    opt_func_ = GET_PARAM(0);
+    ref_func_ = GET_PARAM(1);
+    bit_depth_ = GET_PARAM(2);
+  }
+
+  virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+  MaskedVarianceFunc opt_func_;
+  MaskedVarianceFunc ref_func_;
+  vpx_bit_depth_t bit_depth_;
+};
+
+TEST_P(HighbdMaskedVarianceTest, OperationCheck) {
+  unsigned int ref_ret, opt_ret;
+  unsigned int ref_sse, opt_sse;
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_SIZE*MAX_SIZE]);
+  DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_SIZE*MAX_SIZE]);
+  DECLARE_ALIGNED(16, uint8_t,  msk_ptr[MAX_SIZE*MAX_SIZE]);
+  uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
+  uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
+  int err_count = 0;
+  int first_failure = -1;
+  int src_stride = MAX_SIZE;
+  int ref_stride = MAX_SIZE;
+  int msk_stride = MAX_SIZE;
+
+  for (int i = 0; i < number_of_iterations; ++i) {
+    for (int j = 0; j < MAX_SIZE*MAX_SIZE; j++) {
+      src_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
+      ref_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
+      msk_ptr[j] = rnd(65);
+    }
+
+    ref_ret = ref_func_(src8_ptr, src_stride,
+                        ref8_ptr, ref_stride,
+                        msk_ptr, msk_stride,
+                        &ref_sse);
+    ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src8_ptr, src_stride,
+                                                 ref8_ptr, ref_stride,
+                                                 msk_ptr, msk_stride,
+                                                 &opt_sse));
+
+    if (opt_ret != ref_ret || opt_sse != ref_sse) {
+      err_count++;
+      if (first_failure == -1)
+        first_failure = i;
+    }
+  }
+
+  EXPECT_EQ(0, err_count)
+  << "Error: Masked Variance Test OperationCheck,"
+  << "C output doesn't match SSSE3 output. "
+  << "First failed at test case " << first_failure;
+}
+
+TEST_P(HighbdMaskedVarianceTest, ExtremeValues) {
+  unsigned int ref_ret, opt_ret;
+  unsigned int ref_sse, opt_sse;
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, uint16_t, src_ptr[MAX_SIZE*MAX_SIZE]);
+  DECLARE_ALIGNED(16, uint16_t, ref_ptr[MAX_SIZE*MAX_SIZE]);
+  DECLARE_ALIGNED(16, uint8_t,  msk_ptr[MAX_SIZE*MAX_SIZE]);
+  uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
+  uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
+  int err_count = 0;
+  int first_failure = -1;
+  int src_stride = MAX_SIZE;
+  int ref_stride = MAX_SIZE;
+  int msk_stride = MAX_SIZE;
+
+  for (int i = 0; i < 8; ++i) {
+    vpx_memset16(src_ptr, (i & 0x1) ? ((1 << bit_depth_) - 1) : 0,
+                 MAX_SIZE*MAX_SIZE);
+    vpx_memset16(ref_ptr, (i & 0x2) ? ((1 << bit_depth_) - 1) : 0,
+                 MAX_SIZE*MAX_SIZE);
+    memset(msk_ptr, (i & 0x4) ?  64 : 0, MAX_SIZE*MAX_SIZE);
+
+    ref_ret = ref_func_(src8_ptr, src_stride,
+                        ref8_ptr, ref_stride,
+                        msk_ptr, msk_stride,
+                        &ref_sse);
+    ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src8_ptr, src_stride,
+                                                 ref8_ptr, ref_stride,
+                                                 msk_ptr, msk_stride,
+                                                 &opt_sse));
+
+    if (opt_ret != ref_ret || opt_sse != ref_sse) {
+      err_count++;
+      if (first_failure == -1)
+        first_failure = i;
+    }
+  }
+
+  EXPECT_EQ(0, err_count)
+  << "Error: Masked Variance Test ExtremeValues,"
+  << "C output doesn't match SSSE3 output. "
+  << "First failed at test case " << first_failure;
+}
+
+typedef std::tr1::tuple<MaskedSubPixelVarianceFunc,
+                        MaskedSubPixelVarianceFunc,
+                        vpx_bit_depth_t> HighbdMaskedSubPixelVarianceParam;
+
+class HighbdMaskedSubPixelVarianceTest :
+  public ::testing::TestWithParam<HighbdMaskedSubPixelVarianceParam> {
+ public:
+  virtual ~HighbdMaskedSubPixelVarianceTest() {}
+  virtual void SetUp() {
+    opt_func_ = GET_PARAM(0);
+    ref_func_ = GET_PARAM(1);
+    bit_depth_ = GET_PARAM(2);
+  }
+
+  virtual void TearDown() { libvpx_test::ClearSystemState(); }
+
+ protected:
+  MaskedSubPixelVarianceFunc opt_func_;
+  MaskedSubPixelVarianceFunc ref_func_;
+  vpx_bit_depth_t bit_depth_;
+};
+
+TEST_P(HighbdMaskedSubPixelVarianceTest, OperationCheck) {
+  unsigned int ref_ret, opt_ret;
+  unsigned int ref_sse, opt_sse;
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_SIZE+1)*(MAX_SIZE+1)]);
+  DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_SIZE+1)*(MAX_SIZE+1)]);
+  DECLARE_ALIGNED(16, uint8_t,  msk_ptr[(MAX_SIZE+1)*(MAX_SIZE+1)]);
+  uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
+  uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
+  int err_count = 0;
+  int first_failure = -1;
+  int first_failure_x = -1;
+  int first_failure_y = -1;
+  int src_stride = (MAX_SIZE+1);
+  int ref_stride = (MAX_SIZE+1);
+  int msk_stride = (MAX_SIZE+1);
+  int xoffset, yoffset;
+
+  for (int i = 0; i < number_of_iterations; ++i) {
+    for (xoffset = 0; xoffset < BIL_SUBPEL_SHIFTS; xoffset++) {
+      for (yoffset = 0; yoffset < BIL_SUBPEL_SHIFTS; yoffset++) {
+        for (int j = 0; j < (MAX_SIZE+1)*(MAX_SIZE+1); j++) {
+          src_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
+          ref_ptr[j] = rnd.Rand16() & ((1 << bit_depth_) - 1);
+          msk_ptr[j] = rnd(65);
+        }
+
+        ref_ret = ref_func_(src8_ptr, src_stride,
+                            xoffset, yoffset,
+                            ref8_ptr, ref_stride,
+                            msk_ptr, msk_stride,
+                            &ref_sse);
+        ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src8_ptr, src_stride,
+                                                     xoffset, yoffset,
+                                                     ref8_ptr, ref_stride,
+                                                     msk_ptr, msk_stride,
+                                                     &opt_sse));
+
+        if (opt_ret != ref_ret || opt_sse != ref_sse) {
+          err_count++;
+          if (first_failure == -1) {
+            first_failure = i;
+            first_failure_x = xoffset;
+            first_failure_y = yoffset;
+          }
+        }
+      }
+    }
+  }
+
+  EXPECT_EQ(0, err_count)
+    << "Error: Masked Sub Pixel Variance Test OperationCheck,"
+    << "C output doesn't match SSSE3 output. "
+    << "First failed at test case " << first_failure
+    << " x_offset = " << first_failure_x
+    << " y_offset = " << first_failure_y;
+}
+
+TEST_P(HighbdMaskedSubPixelVarianceTest, ExtremeValues) {
+  unsigned int ref_ret, opt_ret;
+  unsigned int ref_sse, opt_sse;
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, uint16_t, src_ptr[(MAX_SIZE+1)*(MAX_SIZE+1)]);
+  DECLARE_ALIGNED(16, uint16_t, ref_ptr[(MAX_SIZE+1)*(MAX_SIZE+1)]);
+  DECLARE_ALIGNED(16, uint8_t,  msk_ptr[(MAX_SIZE+1)*(MAX_SIZE+1)]);
+  uint8_t* src8_ptr = CONVERT_TO_BYTEPTR(src_ptr);
+  uint8_t* ref8_ptr = CONVERT_TO_BYTEPTR(ref_ptr);
+  int first_failure_x = -1;
+  int first_failure_y = -1;
+  int err_count = 0;
+  int first_failure = -1;
+  int src_stride = (MAX_SIZE+1);
+  int ref_stride = (MAX_SIZE+1);
+  int msk_stride = (MAX_SIZE+1);
+
+  for (int xoffset = 0 ; xoffset < BIL_SUBPEL_SHIFTS ; xoffset++) {
+    for (int yoffset = 0 ; yoffset < BIL_SUBPEL_SHIFTS ; yoffset++) {
+      for (int i = 0; i < 8; ++i) {
+        vpx_memset16(src_ptr, (i & 0x1) ? ((1 << bit_depth_) - 1) : 0,
+                     (MAX_SIZE+1)*(MAX_SIZE+1));
+        vpx_memset16(ref_ptr, (i & 0x2) ? ((1 << bit_depth_) - 1) : 0,
+                     (MAX_SIZE+1)*(MAX_SIZE+1));
+        memset(msk_ptr, (i & 0x4) ?   64 : 0, (MAX_SIZE+1)*(MAX_SIZE+1));
+
+        ref_ret = ref_func_(src8_ptr, src_stride,
+                            xoffset, yoffset,
+                            ref8_ptr, ref_stride,
+                            msk_ptr, msk_stride,
+                            &ref_sse);
+        ASM_REGISTER_STATE_CHECK(opt_ret = opt_func_(src8_ptr, src_stride,
+                                                     xoffset, yoffset,
+                                                     ref8_ptr, ref_stride,
+                                                     msk_ptr, msk_stride,
+                                                     &opt_sse));
+
+        if (opt_ret != ref_ret || opt_sse != ref_sse) {
+          err_count++;
+          if (first_failure == -1) {
+            first_failure = i;
+            first_failure_x = xoffset;
+            first_failure_y = yoffset;
+          }
+        }
+      }
+    }
+  }
+
+  EXPECT_EQ(0, err_count)
+  << "Error: Masked Variance Test ExtremeValues,"
+  << "C output doesn't match SSSE3 output. "
+  << "First failed at test case " << first_failure
+  << " x_offset = " << first_failure_x
+  << " y_offset = " << first_failure_y;
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+using std::tr1::make_tuple;
+
+#if HAVE_SSSE3
+INSTANTIATE_TEST_CASE_P(
+  SSSE3_C_COMPARE, MaskedVarianceTest,
+  ::testing::Values(
+    make_tuple(&vpx_masked_variance64x64_ssse3,
+               &vpx_masked_variance64x64_c),
+    make_tuple(&vpx_masked_variance64x32_ssse3,
+               &vpx_masked_variance64x32_c),
+    make_tuple(&vpx_masked_variance32x64_ssse3,
+               &vpx_masked_variance32x64_c),
+    make_tuple(&vpx_masked_variance32x32_ssse3,
+               &vpx_masked_variance32x32_c),
+    make_tuple(&vpx_masked_variance32x16_ssse3,
+               &vpx_masked_variance32x16_c),
+    make_tuple(&vpx_masked_variance16x32_ssse3,
+               &vpx_masked_variance16x32_c),
+    make_tuple(&vpx_masked_variance16x16_ssse3,
+               &vpx_masked_variance16x16_c),
+    make_tuple(&vpx_masked_variance16x8_ssse3,
+               &vpx_masked_variance16x8_c),
+    make_tuple(&vpx_masked_variance8x16_ssse3,
+               &vpx_masked_variance8x16_c),
+    make_tuple(&vpx_masked_variance8x8_ssse3,
+               &vpx_masked_variance8x8_c),
+    make_tuple(&vpx_masked_variance8x4_ssse3,
+               &vpx_masked_variance8x4_c),
+    make_tuple(&vpx_masked_variance4x8_ssse3,
+               &vpx_masked_variance4x8_c),
+    make_tuple(&vpx_masked_variance4x4_ssse3,
+               &vpx_masked_variance4x4_c)));
+
+INSTANTIATE_TEST_CASE_P(
+  SSSE3_C_COMPARE, MaskedSubPixelVarianceTest,
+  ::testing::Values(
+    make_tuple(&vpx_masked_sub_pixel_variance64x64_ssse3,
+              &vpx_masked_sub_pixel_variance64x64_c),
+    make_tuple(&vpx_masked_sub_pixel_variance64x32_ssse3,
+              &vpx_masked_sub_pixel_variance64x32_c),
+    make_tuple(&vpx_masked_sub_pixel_variance32x64_ssse3,
+              &vpx_masked_sub_pixel_variance32x64_c),
+    make_tuple(&vpx_masked_sub_pixel_variance32x32_ssse3,
+              &vpx_masked_sub_pixel_variance32x32_c),
+    make_tuple(&vpx_masked_sub_pixel_variance32x16_ssse3,
+              &vpx_masked_sub_pixel_variance32x16_c),
+    make_tuple(&vpx_masked_sub_pixel_variance16x32_ssse3,
+              &vpx_masked_sub_pixel_variance16x32_c),
+    make_tuple(&vpx_masked_sub_pixel_variance16x16_ssse3,
+              &vpx_masked_sub_pixel_variance16x16_c),
+    make_tuple(&vpx_masked_sub_pixel_variance16x8_ssse3,
+              &vpx_masked_sub_pixel_variance16x8_c),
+    make_tuple(&vpx_masked_sub_pixel_variance8x16_ssse3,
+              &vpx_masked_sub_pixel_variance8x16_c),
+    make_tuple(&vpx_masked_sub_pixel_variance8x8_ssse3,
+              &vpx_masked_sub_pixel_variance8x8_c),
+    make_tuple(&vpx_masked_sub_pixel_variance8x4_ssse3,
+              &vpx_masked_sub_pixel_variance8x4_c),
+    make_tuple(&vpx_masked_sub_pixel_variance4x8_ssse3,
+              &vpx_masked_sub_pixel_variance4x8_c),
+    make_tuple(&vpx_masked_sub_pixel_variance4x4_ssse3,
+              &vpx_masked_sub_pixel_variance4x4_c)));
+
+#if CONFIG_VP9_HIGHBITDEPTH
+INSTANTIATE_TEST_CASE_P(
+  SSSE3_C_COMPARE, HighbdMaskedVarianceTest,
+  ::testing::Values(
+    make_tuple(&vp9_highbd_masked_variance64x64_ssse3,
+               &vp9_highbd_masked_variance64x64_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_variance64x32_ssse3,
+               &vp9_highbd_masked_variance64x32_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_variance32x64_ssse3,
+               &vp9_highbd_masked_variance32x64_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_variance32x32_ssse3,
+               &vp9_highbd_masked_variance32x32_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_variance32x16_ssse3,
+               &vp9_highbd_masked_variance32x16_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_variance16x32_ssse3,
+               &vp9_highbd_masked_variance16x32_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_variance16x16_ssse3,
+               &vp9_highbd_masked_variance16x16_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_variance16x8_ssse3,
+               &vp9_highbd_masked_variance16x8_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_variance8x16_ssse3,
+               &vp9_highbd_masked_variance8x16_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_variance8x8_ssse3,
+               &vp9_highbd_masked_variance8x8_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_variance8x4_ssse3,
+               &vp9_highbd_masked_variance8x4_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_variance4x8_ssse3,
+               &vp9_highbd_masked_variance4x8_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_variance4x4_ssse3,
+               &vp9_highbd_masked_variance4x4_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_10_masked_variance64x64_ssse3,
+               &vp9_highbd_10_masked_variance64x64_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_variance64x32_ssse3,
+               &vp9_highbd_10_masked_variance64x32_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_variance32x64_ssse3,
+               &vp9_highbd_10_masked_variance32x64_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_variance32x32_ssse3,
+               &vp9_highbd_10_masked_variance32x32_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_variance32x16_ssse3,
+               &vp9_highbd_10_masked_variance32x16_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_variance16x32_ssse3,
+               &vp9_highbd_10_masked_variance16x32_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_variance16x16_ssse3,
+               &vp9_highbd_10_masked_variance16x16_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_variance16x8_ssse3,
+               &vp9_highbd_10_masked_variance16x8_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_variance8x16_ssse3,
+               &vp9_highbd_10_masked_variance8x16_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_variance8x8_ssse3,
+               &vp9_highbd_10_masked_variance8x8_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_variance8x4_ssse3,
+               &vp9_highbd_10_masked_variance8x4_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_variance4x8_ssse3,
+               &vp9_highbd_10_masked_variance4x8_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_variance4x4_ssse3,
+               &vp9_highbd_10_masked_variance4x4_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_12_masked_variance64x64_ssse3,
+               &vp9_highbd_12_masked_variance64x64_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_variance64x32_ssse3,
+               &vp9_highbd_12_masked_variance64x32_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_variance32x64_ssse3,
+               &vp9_highbd_12_masked_variance32x64_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_variance32x32_ssse3,
+               &vp9_highbd_12_masked_variance32x32_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_variance32x16_ssse3,
+               &vp9_highbd_12_masked_variance32x16_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_variance16x32_ssse3,
+               &vp9_highbd_12_masked_variance16x32_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_variance16x16_ssse3,
+               &vp9_highbd_12_masked_variance16x16_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_variance16x8_ssse3,
+               &vp9_highbd_12_masked_variance16x8_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_variance8x16_ssse3,
+               &vp9_highbd_12_masked_variance8x16_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_variance8x8_ssse3,
+               &vp9_highbd_12_masked_variance8x8_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_variance8x4_ssse3,
+               &vp9_highbd_12_masked_variance8x4_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_variance4x8_ssse3,
+               &vp9_highbd_12_masked_variance4x8_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_variance4x4_ssse3,
+               &vp9_highbd_12_masked_variance4x4_c, VPX_BITS_12)));
+
+INSTANTIATE_TEST_CASE_P(
+  SSSE3_C_COMPARE, HighbdMaskedSubPixelVarianceTest,
+  ::testing::Values(
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance64x64_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance64x64_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance64x32_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance64x32_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance32x64_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance32x64_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance32x32_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance32x32_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance32x16_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance32x16_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance16x32_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance16x32_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance16x16_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance16x16_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance16x8_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance16x8_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance8x16_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance8x16_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance8x8_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance8x8_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance8x4_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance8x4_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance4x8_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance4x8_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_masked_sub_pixel_variance4x4_ssse3,
+               &vp9_highbd_masked_sub_pixel_variance4x4_c, VPX_BITS_8),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance64x64_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance64x64_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance64x32_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance64x32_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance32x64_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance32x64_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance32x32_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance32x32_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance32x16_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance32x16_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance16x32_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance16x32_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance16x16_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance16x16_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance16x8_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance16x8_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance8x16_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance8x16_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance8x8_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance8x8_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance8x4_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance8x4_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance4x8_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance4x8_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_10_masked_sub_pixel_variance4x4_ssse3,
+               &vp9_highbd_10_masked_sub_pixel_variance4x4_c, VPX_BITS_10),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance64x64_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance64x64_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance64x32_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance64x32_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance32x64_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance32x64_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance32x32_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance32x32_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance32x16_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance32x16_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance16x32_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance16x32_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance16x16_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance16x16_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance16x8_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance16x8_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance8x16_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance8x16_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance8x8_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance8x8_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance8x4_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance8x4_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance4x8_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance4x8_c, VPX_BITS_12),
+    make_tuple(&vp9_highbd_12_masked_sub_pixel_variance4x4_ssse3,
+               &vp9_highbd_12_masked_sub_pixel_variance4x4_c, VPX_BITS_12)));
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+#endif  // HAVE_SSSE3
+}  // namespace
diff --git a/test/test.mk b/test/test.mk
index a73ebd9862a88ca4c95a65deb507a6aac7f8a2e4..95dfa169fdc6827c88c0e32fcf36df898a72d895 100644 (file)
@@ -168,6 +168,11 @@ LIBVPX_TEST_SRCS-$(CONFIG_VP10_ENCODER) += vp10_dct_test.cc
 LIBVPX_TEST_SRCS-$(CONFIG_ANS)          += vp10_ans_test.cc
 
 LIBVPX_TEST_SRCS-$(CONFIG_VP10_ENCODER) += sum_squares_test.cc
+
+ifeq ($(CONFIG_EXT_INTER),yes)
+LIBVPX_TEST_SRCS-$(HAVE_SSSE3) += masked_variance_test.cc
+LIBVPX_TEST_SRCS-$(HAVE_SSSE3) += masked_sad_test.cc
+endif
 endif # VP10
 
 ## Multi-codec / unconditional whitebox tests.
diff --git a/vpx_dsp/sad.c b/vpx_dsp/sad.c
index c0c3ff99645cb80752f746c54460ea991340fab8..204cedee17bf71ea4eb4dce740347e9b1b95e447 100644 (file)
@@ -316,3 +316,105 @@ highbd_sadMxNxK(4, 4, 8)
 highbd_sadMxNx4D(4, 4)
 
 #endif  // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP10 && CONFIG_EXT_INTER
+static INLINE unsigned int masked_sad(const uint8_t *a, int a_stride,
+                                      const uint8_t *b, int b_stride,
+                                      const uint8_t *m, int m_stride,
+                                      int width, int height) {
+  int y, x;
+  unsigned int sad = 0;
+
+  for (y = 0; y < height; y++) {
+    for (x = 0; x < width; x++)
+      sad += m[x] * abs(a[x] - b[x]);
+
+    a += a_stride;
+    b += b_stride;
+    m += m_stride;
+  }
+  sad = (sad + 31) >> 6;
+
+  return sad;
+}
+
+#define MASKSADMxN(m, n) \
+unsigned int vpx_masked_sad##m##x##n##_c(const uint8_t *src, int src_stride, \
+                                         const uint8_t *ref, int ref_stride, \
+                                         const uint8_t *msk, int msk_stride) { \
+  return masked_sad(src, src_stride, ref, ref_stride, msk, msk_stride, m, n); \
+}
+
+#if CONFIG_EXT_PARTITION
+MASKSADMxN(128, 128)
+MASKSADMxN(128, 64)
+MASKSADMxN(64, 128)
+#endif  // CONFIG_EXT_PARTITION
+MASKSADMxN(64, 64)
+MASKSADMxN(64, 32)
+MASKSADMxN(32, 64)
+MASKSADMxN(32, 32)
+MASKSADMxN(32, 16)
+MASKSADMxN(16, 32)
+MASKSADMxN(16, 16)
+MASKSADMxN(16, 8)
+MASKSADMxN(8, 16)
+MASKSADMxN(8, 8)
+MASKSADMxN(8, 4)
+MASKSADMxN(4, 8)
+MASKSADMxN(4, 4)
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static INLINE unsigned int highbd_masked_sad(const uint8_t *a8, int a_stride,
+                                             const uint8_t *b8, int b_stride,
+                                             const uint8_t *m, int m_stride,
+                                             int width, int height) {
+  int y, x;
+  unsigned int sad = 0;
+  const uint16_t *a = CONVERT_TO_SHORTPTR(a8);
+  const uint16_t *b = CONVERT_TO_SHORTPTR(b8);
+
+  for (y = 0; y < height; y++) {
+    for (x = 0; x < width; x++)
+      sad += m[x] * abs(a[x] - b[x]);
+
+    a += a_stride;
+    b += b_stride;
+    m += m_stride;
+  }
+  sad = (sad + 31) >> 6;
+
+  return sad;
+}
+
+#define HIGHBD_MASKSADMXN(m, n) \
+unsigned int vpx_highbd_masked_sad##m##x##n##_c(const uint8_t *src, \
+                                                int src_stride, \
+                                                const uint8_t *ref, \
+                                                int ref_stride, \
+                                                const uint8_t *msk, \
+                                                int msk_stride) { \
+  return highbd_masked_sad(src, src_stride, ref, ref_stride, \
+                           msk, msk_stride, m, n); \
+}
+
+#if CONFIG_EXT_PARTITION
+HIGHBD_MASKSADMXN(128, 128)
+HIGHBD_MASKSADMXN(128, 64)
+HIGHBD_MASKSADMXN(64, 128)
+#endif  // CONFIG_EXT_PARTITION
+HIGHBD_MASKSADMXN(64, 64)
+HIGHBD_MASKSADMXN(64, 32)
+HIGHBD_MASKSADMXN(32, 64)
+HIGHBD_MASKSADMXN(32, 32)
+HIGHBD_MASKSADMXN(32, 16)
+HIGHBD_MASKSADMXN(16, 32)
+HIGHBD_MASKSADMXN(16, 16)
+HIGHBD_MASKSADMXN(16, 8)
+HIGHBD_MASKSADMXN(8, 16)
+HIGHBD_MASKSADMXN(8, 8)
+HIGHBD_MASKSADMXN(8, 4)
+HIGHBD_MASKSADMXN(4, 8)
+HIGHBD_MASKSADMXN(4, 4)
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_VP10 && CONFIG_EXT_INTER
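
Note: the vpx_dsp/variance.c hunk that follows adds the matching masked variance kernels. As a rough sketch of the normalization they apply (same 6-bit mask convention as above, W and H being the block width and height), the weighted sum is scaled by 1/64 and the weighted sum of squares by 1/4096 = 64^2, both with round-to-nearest offsets (+31 and +2047), before the usual variance formula:

\mathrm{sum} = \frac{\sum_{y,x} m\,(a - b)}{64}, \qquad \mathrm{sse} = \frac{\sum_{y,x} \big(m\,(a - b)\big)^2}{4096}, \qquad \mathrm{variance} = \mathrm{sse} - \frac{\mathrm{sum}^2}{W \cdot H}

The 10-bit and 12-bit high-bit-depth variants additionally scale sum down by 2 and 4 bits and sse by 4 and 8 bits, respectively, via ROUND_POWER_OF_TWO.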
diff --git a/vpx_dsp/variance.c b/vpx_dsp/variance.c
index 3b6c419744356be0b1b896d045b8cbe867d8a777..14d7f991feff90ffd8c866869f932ed4f4ffaf73 100644 (file)
@@ -15,8 +15,9 @@
 #include "vpx/vpx_integer.h"
 
 #include "vpx_dsp/variance.h"
+#include "vpx_dsp/vpx_filter.h"
 
-static const uint8_t bilinear_filters[8][2] = {
+const uint8_t vpx_bilinear_filters[BIL_SUBPEL_SHIFTS][2] = {
   { 128,   0  },
   { 112,  16  },
   {  96,  32  },
@@ -175,9 +176,9 @@ uint32_t vpx_sub_pixel_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
   uint8_t temp2[H * W]; \
 \
   var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
-                                    bilinear_filters[xoffset]); \
+                                    vpx_bilinear_filters[xoffset]); \
   var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                     bilinear_filters[yoffset]); \
+                                     vpx_bilinear_filters[yoffset]); \
 \
   return vpx_variance##W##x##H##_c(temp2, W, b, b_stride, sse); \
 }
@@ -195,9 +196,9 @@ uint32_t vpx_sub_pixel_avg_variance##W##x##H##_c(const uint8_t *a, \
   DECLARE_ALIGNED(16, uint8_t, temp3[H * W]); \
 \
   var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
-                                    bilinear_filters[xoffset]); \
+                                    vpx_bilinear_filters[xoffset]); \
   var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                     bilinear_filters[yoffset]); \
+                                     vpx_bilinear_filters[yoffset]); \
 \
   vpx_comp_avg_pred(temp3, second_pred, W, H, temp2, W); \
 \
@@ -500,9 +501,9 @@ uint32_t vpx_highbd_8_sub_pixel_variance##W##x##H##_c( \
   uint16_t temp2[H * W]; \
 \
   highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
-                                           W, bilinear_filters[xoffset]); \
+                                           W, vpx_bilinear_filters[xoffset]); \
   highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                            bilinear_filters[yoffset]); \
+                                            vpx_bilinear_filters[yoffset]); \
 \
   return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, dst, \
                                           dst_stride, sse); \
@@ -517,9 +518,9 @@ uint32_t vpx_highbd_10_sub_pixel_variance##W##x##H##_c( \
   uint16_t temp2[H * W]; \
 \
   highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
-                                           W, bilinear_filters[xoffset]); \
+                                           W, vpx_bilinear_filters[xoffset]); \
   highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                            bilinear_filters[yoffset]); \
+                                            vpx_bilinear_filters[yoffset]); \
 \
   return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
                                              W, dst, dst_stride, sse); \
@@ -534,9 +535,9 @@ uint32_t vpx_highbd_12_sub_pixel_variance##W##x##H##_c( \
   uint16_t temp2[H * W]; \
 \
   highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
-                                           W, bilinear_filters[xoffset]); \
+                                           W, vpx_bilinear_filters[xoffset]); \
   highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                            bilinear_filters[yoffset]); \
+                                            vpx_bilinear_filters[yoffset]); \
 \
   return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
                                              W, dst, dst_stride, sse); \
@@ -554,9 +555,9 @@ uint32_t vpx_highbd_8_sub_pixel_avg_variance##W##x##H##_c( \
   DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
 \
   highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
-                                           W, bilinear_filters[xoffset]); \
+                                           W, vpx_bilinear_filters[xoffset]); \
   highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                            bilinear_filters[yoffset]); \
+                                            vpx_bilinear_filters[yoffset]); \
 \
   vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \
                            CONVERT_TO_BYTEPTR(temp2), W); \
@@ -576,9 +577,9 @@ uint32_t vpx_highbd_10_sub_pixel_avg_variance##W##x##H##_c( \
   DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
 \
   highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
-                                           W, bilinear_filters[xoffset]); \
+                                           W, vpx_bilinear_filters[xoffset]); \
   highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                            bilinear_filters[yoffset]); \
+                                            vpx_bilinear_filters[yoffset]); \
 \
   vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \
                            CONVERT_TO_BYTEPTR(temp2), W); \
@@ -598,9 +599,9 @@ uint32_t vpx_highbd_12_sub_pixel_avg_variance##W##x##H##_c( \
   DECLARE_ALIGNED(16, uint16_t, temp3[H * W]); \
 \
   highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, \
-                                           W, bilinear_filters[xoffset]); \
+                                           W, vpx_bilinear_filters[xoffset]); \
   highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
-                                            bilinear_filters[yoffset]); \
+                                            vpx_bilinear_filters[yoffset]); \
 \
   vpx_highbd_comp_avg_pred(temp3, second_pred, W, H, \
                            CONVERT_TO_BYTEPTR(temp2), W); \
@@ -654,3 +655,323 @@ void vpx_highbd_comp_avg_pred(uint16_t *comp_pred, const uint8_t *pred8,
   }
 }
 #endif  // CONFIG_VP9_HIGHBITDEPTH
+
+#if CONFIG_VP10 && CONFIG_EXT_INTER
+void masked_variance(const uint8_t *a, int  a_stride,
+                     const uint8_t *b, int  b_stride,
+                     const uint8_t *m, int  m_stride,
+                     int  w, int  h, unsigned int *sse, int *sum) {
+  int i, j;
+
+  int64_t sum64 = 0;
+  uint64_t sse64 = 0;
+
+  for (i = 0; i < h; i++) {
+    for (j = 0; j < w; j++) {
+      const int diff = (a[j] - b[j]) * (m[j]);
+      sum64 += diff;
+      sse64 += diff * diff;
+    }
+
+    a += a_stride;
+    b += b_stride;
+    m += m_stride;
+  }
+  *sum = (sum64 >= 0) ? ((sum64 + 31) >> 6) : -((-sum64 + 31) >> 6);
+  *sse = (sse64 + 2047) >> 12;
+}
+
+#define MASK_VAR(W, H) \
+unsigned int vpx_masked_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
+                                              const uint8_t *b, int b_stride, \
+                                              const uint8_t *m, int m_stride, \
+                                              unsigned int *sse) { \
+  int sum; \
+  masked_variance(a, a_stride, b, b_stride, m, m_stride, W, H, sse, &sum); \
+  return *sse - (((int64_t)sum * sum) / (W * H)); \
+}
+
+#define MASK_SUBPIX_VAR(W, H) \
+unsigned int vpx_masked_sub_pixel_variance##W##x##H##_c( \
+  const uint8_t *src, int  src_stride, \
+  int xoffset, int  yoffset, \
+  const uint8_t *dst, int dst_stride, \
+  const uint8_t *msk, int msk_stride, \
+  unsigned int *sse) { \
+  uint16_t fdata3[(H + 1) * W]; \
+  uint8_t temp2[H * W]; \
+\
+  var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, H + 1, W, \
+                                    vpx_bilinear_filters[xoffset]); \
+  var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+                                     vpx_bilinear_filters[yoffset]); \
+\
+  return vpx_masked_variance##W##x##H##_c(temp2, W, dst, dst_stride, \
+                                          msk, msk_stride, sse); \
+}
+
+MASK_VAR(4, 4)
+MASK_SUBPIX_VAR(4, 4)
+
+MASK_VAR(4, 8)
+MASK_SUBPIX_VAR(4, 8)
+
+MASK_VAR(8, 4)
+MASK_SUBPIX_VAR(8, 4)
+
+MASK_VAR(8, 8)
+MASK_SUBPIX_VAR(8, 8)
+
+MASK_VAR(8, 16)
+MASK_SUBPIX_VAR(8, 16)
+
+MASK_VAR(16, 8)
+MASK_SUBPIX_VAR(16, 8)
+
+MASK_VAR(16, 16)
+MASK_SUBPIX_VAR(16, 16)
+
+MASK_VAR(16, 32)
+MASK_SUBPIX_VAR(16, 32)
+
+MASK_VAR(32, 16)
+MASK_SUBPIX_VAR(32, 16)
+
+MASK_VAR(32, 32)
+MASK_SUBPIX_VAR(32, 32)
+
+MASK_VAR(32, 64)
+MASK_SUBPIX_VAR(32, 64)
+
+MASK_VAR(64, 32)
+MASK_SUBPIX_VAR(64, 32)
+
+MASK_VAR(64, 64)
+MASK_SUBPIX_VAR(64, 64)
+
+#if CONFIG_EXT_PARTITION
+MASK_VAR(64, 128)
+MASK_SUBPIX_VAR(64, 128)
+
+MASK_VAR(128, 64)
+MASK_SUBPIX_VAR(128, 64)
+
+MASK_VAR(128, 128)
+MASK_SUBPIX_VAR(128, 128)
+#endif  // CONFIG_EXT_PARTITION
+
+#if CONFIG_VP9_HIGHBITDEPTH
+void highbd_masked_variance64(const uint8_t *a8, int  a_stride,
+                              const uint8_t *b8, int  b_stride,
+                              const uint8_t *m, int  m_stride,
+                              int  w, int  h,
+                              uint64_t *sse64, int *sum) {
+  int i, j;
+  uint16_t *a = CONVERT_TO_SHORTPTR(a8);
+  uint16_t *b = CONVERT_TO_SHORTPTR(b8);
+
+  int64_t sum64 = 0;
+  *sse64 = 0;
+
+  for (i = 0; i < h; i++) {
+    for (j = 0; j < w; j++) {
+      const int diff = (a[j] - b[j]) * (m[j]);
+      sum64 += diff;
+      *sse64 += (int64_t)diff * diff;
+    }
+
+    a += a_stride;
+    b += b_stride;
+    m += m_stride;
+  }
+  *sum = (sum64 >= 0) ? ((sum64 + 31) >> 6) : -((-sum64 + 31) >> 6);
+  *sse64 = (*sse64 + 2047) >> 12;
+}
+
+void highbd_masked_variance(const uint8_t *a8, int  a_stride,
+                            const uint8_t *b8, int  b_stride,
+                            const uint8_t *m, int  m_stride,
+                            int  w, int  h,
+                            unsigned int *sse, int *sum) {
+  uint64_t sse64;
+  highbd_masked_variance64(a8, a_stride, b8, b_stride, m, m_stride,
+                           w, h, &sse64, sum);
+  *sse = (unsigned int)sse64;
+}
+
+void highbd_10_masked_variance(const uint8_t *a8, int  a_stride,
+                               const uint8_t *b8, int  b_stride,
+                               const uint8_t *m, int  m_stride,
+                               int  w, int  h,
+                               unsigned int *sse, int *sum) {
+  uint64_t sse64;
+  highbd_masked_variance64(a8, a_stride, b8, b_stride, m, m_stride,
+                           w, h, &sse64, sum);
+  *sum = ROUND_POWER_OF_TWO(*sum, 2);
+  *sse = (unsigned int)ROUND_POWER_OF_TWO(sse64, 4);
+}
+
+void highbd_12_masked_variance(const uint8_t *a8, int  a_stride,
+                               const uint8_t *b8, int  b_stride,
+                               const uint8_t *m, int  m_stride,
+                               int  w, int  h,
+                               unsigned int *sse, int *sum) {
+  uint64_t sse64;
+  highbd_masked_variance64(a8, a_stride, b8, b_stride, m, m_stride,
+                           w, h, &sse64, sum);
+  *sum = ROUND_POWER_OF_TWO(*sum, 4);
+  *sse = (unsigned int)ROUND_POWER_OF_TWO(sse64, 8);
+}
+
+#define HIGHBD_MASK_VAR(W, H) \
+unsigned int vpx_highbd_masked_variance##W##x##H##_c(const uint8_t *a, \
+                                                     int a_stride, \
+                                                     const uint8_t *b, \
+                                                     int b_stride, \
+                                                     const uint8_t *m, \
+                                                     int m_stride, \
+                                                     unsigned int *sse) { \
+  int sum; \
+  highbd_masked_variance(a, a_stride, b, b_stride, m, m_stride, \
+                         W, H, sse, &sum); \
+  return *sse - (((int64_t)sum * sum) / (W * H)); \
+} \
+\
+unsigned int vpx_highbd_10_masked_variance##W##x##H##_c(const uint8_t *a, \
+                                                        int a_stride, \
+                                                        const uint8_t *b, \
+                                                        int b_stride, \
+                                                        const uint8_t *m, \
+                                                        int m_stride, \
+                                                        unsigned int *sse) { \
+  int sum; \
+  highbd_10_masked_variance(a, a_stride, b, b_stride, m, m_stride, \
+                            W, H, sse, &sum); \
+  return *sse - (((int64_t)sum * sum) / (W * H)); \
+} \
+\
+unsigned int vpx_highbd_12_masked_variance##W##x##H##_c(const uint8_t *a, \
+                                                        int a_stride, \
+                                                        const uint8_t *b, \
+                                                        int b_stride, \
+                                                        const uint8_t *m, \
+                                                        int m_stride, \
+                                                        unsigned int *sse) { \
+  int sum; \
+  highbd_12_masked_variance(a, a_stride, b, b_stride, m, m_stride, \
+                            W, H, sse, &sum); \
+  return *sse - (((int64_t)sum * sum) / (W * H)); \
+}
+
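+// Sub-pixel masked variance: bilinearly filter the source to the requested
+// (xoffset, yoffset) sub-pel position in two passes, then take the masked
+// variance of the filtered block against dst.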
+#define HIGHBD_MASK_SUBPIX_VAR(W, H) \
+unsigned int vpx_highbd_masked_sub_pixel_variance##W##x##H##_c( \
+  const uint8_t *src, int  src_stride, \
+  int xoffset, int  yoffset, \
+  const uint8_t *dst, int dst_stride, \
+  const uint8_t *msk, int msk_stride, \
+  unsigned int *sse) { \
+  uint16_t fdata3[(H + 1) * W]; \
+  uint16_t temp2[H * W]; \
+\
+  highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, \
+                                           H + 1, W, \
+                                           vpx_bilinear_filters[xoffset]); \
+  highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+                                            vpx_bilinear_filters[yoffset]); \
+\
+  return vpx_highbd_masked_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
+                                                 W, dst, dst_stride, \
+                                                 msk, msk_stride, sse); \
+} \
+\
+unsigned int vpx_highbd_10_masked_sub_pixel_variance##W##x##H##_c( \
+  const uint8_t *src, int  src_stride, \
+  int xoffset, int  yoffset, \
+  const uint8_t *dst, int dst_stride, \
+  const uint8_t *msk, int msk_stride, \
+  unsigned int *sse) { \
+  uint16_t fdata3[(H + 1) * W]; \
+  uint16_t temp2[H * W]; \
+\
+  highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, \
+                                           H + 1, W, \
+                                           vpx_bilinear_filters[xoffset]); \
+  highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+                                            vpx_bilinear_filters[yoffset]); \
+\
+  return vpx_highbd_10_masked_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
+                                                    W, dst, dst_stride, \
+                                                    msk, msk_stride, sse); \
+} \
+\
+unsigned int vpx_highbd_12_masked_sub_pixel_variance##W##x##H##_c( \
+  const uint8_t *src, int  src_stride, \
+  int xoffset, int  yoffset, \
+  const uint8_t *dst, int dst_stride, \
+  const uint8_t *msk, int msk_stride, \
+  unsigned int *sse) { \
+  uint16_t fdata3[(H + 1) * W]; \
+  uint16_t temp2[H * W]; \
+\
+  highbd_var_filter_block2d_bil_first_pass(src, fdata3, src_stride, 1, \
+                                           H + 1, W, \
+                                           vpx_bilinear_filters[xoffset]); \
+  highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W, \
+                                            vpx_bilinear_filters[yoffset]); \
+\
+  return vpx_highbd_12_masked_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), \
+                                                    W, dst, dst_stride, \
+                                                    msk, msk_stride, sse); \
+}
+
+HIGHBD_MASK_VAR(4, 4)
+HIGHBD_MASK_SUBPIX_VAR(4, 4)
+
+HIGHBD_MASK_VAR(4, 8)
+HIGHBD_MASK_SUBPIX_VAR(4, 8)
+
+HIGHBD_MASK_VAR(8, 4)
+HIGHBD_MASK_SUBPIX_VAR(8, 4)
+
+HIGHBD_MASK_VAR(8, 8)
+HIGHBD_MASK_SUBPIX_VAR(8, 8)
+
+HIGHBD_MASK_VAR(8, 16)
+HIGHBD_MASK_SUBPIX_VAR(8, 16)
+
+HIGHBD_MASK_VAR(16, 8)
+HIGHBD_MASK_SUBPIX_VAR(16, 8)
+
+HIGHBD_MASK_VAR(16, 16)
+HIGHBD_MASK_SUBPIX_VAR(16, 16)
+
+HIGHBD_MASK_VAR(16, 32)
+HIGHBD_MASK_SUBPIX_VAR(16, 32)
+
+HIGHBD_MASK_VAR(32, 16)
+HIGHBD_MASK_SUBPIX_VAR(32, 16)
+
+HIGHBD_MASK_VAR(32, 32)
+HIGHBD_MASK_SUBPIX_VAR(32, 32)
+
+HIGHBD_MASK_VAR(32, 64)
+HIGHBD_MASK_SUBPIX_VAR(32, 64)
+
+HIGHBD_MASK_VAR(64, 32)
+HIGHBD_MASK_SUBPIX_VAR(64, 32)
+
+HIGHBD_MASK_VAR(64, 64)
+HIGHBD_MASK_SUBPIX_VAR(64, 64)
+
+#if CONFIG_EXT_PARTITION
+HIGHBD_MASK_VAR(64, 128)
+HIGHBD_MASK_SUBPIX_VAR(64, 128)
+
+HIGHBD_MASK_VAR(128, 64)
+HIGHBD_MASK_SUBPIX_VAR(128, 64)
+
+HIGHBD_MASK_VAR(128, 128)
+HIGHBD_MASK_SUBPIX_VAR(128, 128)
+#endif  // CONFIG_EXT_PARTITION
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+#endif  // CONFIG_VP10 && CONFIG_EXT_INTER
index dbb41aaea2682a3a0c318900d95b75930cbc4ae0..fc7060f4841dd2777f6b4291e3ffc899be2538da 100644 (file)
@@ -293,6 +293,13 @@ DSP_SRCS-$(HAVE_SSE4_1) += x86/sad_sse4.asm
 DSP_SRCS-$(HAVE_AVX2)   += x86/sad4d_avx2.c
 DSP_SRCS-$(HAVE_AVX2)   += x86/sad_avx2.c
 
+ifeq ($(CONFIG_VP10_ENCODER),yes)
+ifeq ($(CONFIG_EXT_INTER),yes)
+DSP_SRCS-$(HAVE_SSSE3)  += x86/masked_sad_intrin_ssse3.c
+DSP_SRCS-$(HAVE_SSSE3)  += x86/masked_variance_intrin_ssse3.c
+endif  #CONFIG_EXT_INTER
+endif  #CONFIG_VP10_ENCODER
+
 ifeq ($(CONFIG_USE_X86INC),yes)
 DSP_SRCS-$(HAVE_SSE)    += x86/sad4d_sse2.asm
 DSP_SRCS-$(HAVE_SSE)    += x86/sad_sse2.asm
index 8d1afdfac1e4255072861e689c5b98950814f094..fdfd20c20bf9bc79387b0ef21e3e5c7f172a31cd 100644 (file)
@@ -1464,6 +1464,154 @@ add_proto qw/unsigned int vpx_get4x4sse_cs/, "const unsigned char *src_ptr, int
 
 add_proto qw/void vpx_comp_avg_pred/, "uint8_t *comp_pred, const uint8_t *pred, int width, int height, const uint8_t *ref, int ref_stride";
 
+if (vpx_config("CONFIG_EXT_INTER") eq "yes") {
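+  # Masked (wedge) variance, sub-pixel variance and SAD kernels. SSSE3
+  # versions are registered for the standard block sizes; the 128-pixel
+  # EXT_PARTITION sizes below fall back to the C implementations.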
+  add_proto qw/unsigned int vpx_masked_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance32x16 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance16x32 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance64x32 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance32x64 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance32x32 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance64x64 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance16x16 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance16x8 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance8x16 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance8x8 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance8x4 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance4x8 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_variance4x4 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance64x64 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance32x64 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance64x32 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance32x16 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance16x32 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance32x32 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance16x16 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance8x16 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance16x8 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance8x8 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance8x4 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance4x8 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+  specialize qw/vpx_masked_sub_pixel_variance4x4 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad64x64 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad32x64 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad64x32 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad32x16 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad16x32 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad32x32 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad16x16 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad16x8 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad8x16 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad8x8 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad8x4 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad4x8 ssse3/;
+
+  add_proto qw/unsigned int vpx_masked_sad4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+  specialize qw/vpx_masked_sad4x4 ssse3/;
+
+  if (vpx_config("CONFIG_EXT_PARTITION") eq "yes") {
+    add_proto qw/unsigned int vpx_masked_variance128x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_masked_variance128x128/;
+
+    add_proto qw/unsigned int vpx_masked_variance128x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_masked_variance128x64/;
+
+    add_proto qw/unsigned int vpx_masked_variance64x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_masked_variance64x128/;
+
+    add_proto qw/unsigned int vpx_masked_sub_pixel_variance128x128/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_masked_sub_pixel_variance128x128/;
+
+    add_proto qw/unsigned int vpx_masked_sub_pixel_variance128x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_masked_sub_pixel_variance128x64/;
+
+    add_proto qw/unsigned int vpx_masked_sub_pixel_variance64x128/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_masked_sub_pixel_variance64x128/;
+
+    add_proto qw/unsigned int vpx_masked_sad128x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_masked_sad128x128/;
+
+    add_proto qw/unsigned int vpx_masked_sad128x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_masked_sad128x64/;
+
+    add_proto qw/unsigned int vpx_masked_sad64x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_masked_sad64x128/;
+  }
+}
+
 if (vpx_config("CONFIG_AFFINE_MOTION") eq "yes") {
   add_proto qw/void vpx_upsampled_pred/, "uint8_t *comp_pred, int width, int height, const uint8_t *ref, int ref_stride";
     specialize qw/vpx_upsampled_pred sse2/;
@@ -1700,6 +1848,346 @@ if (vpx_config("CONFIG_VP9_HIGHBITDEPTH") eq "yes") {
   add_proto qw/unsigned int vpx_highbd_12_mse8x8/, "const uint8_t *src_ptr, int  source_stride, const uint8_t *ref_ptr, int  recon_stride, unsigned int *sse";
   specialize qw/vpx_highbd_12_mse8x8 sse2/;
 
+  if (vpx_config("CONFIG_EXT_INTER") eq "yes") {
+    add_proto qw/unsigned int vpx_highbd_masked_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance32x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance16x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance64x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance32x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance32x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance64x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance16x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance16x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance8x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance8x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance8x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance4x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_variance4x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance32x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance16x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance64x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance32x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance32x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance64x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance16x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance16x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance8x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance8x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance8x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance4x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_variance4x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance32x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance16x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance64x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance32x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance32x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance64x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance16x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance16x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance8x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance8x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance8x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance4x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_variance4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_variance4x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance64x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance64x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance32x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance32x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance32x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance16x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance16x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance16x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance8x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance8x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance8x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance4x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_masked_sub_pixel_variance4x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance64x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance64x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance32x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance32x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance32x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance16x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance16x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance16x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance8x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance8x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance8x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance4x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_10_masked_sub_pixel_variance4x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance64x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance64x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance64x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance64x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance32x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance32x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance32x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance32x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance32x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance32x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance16x32/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance16x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance16x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance16x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance16x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance16x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance8x16/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance8x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance8x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance8x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance8x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance8x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance4x8/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance4x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance4x4/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+    specialize qw/vpx_highbd_12_masked_sub_pixel_variance4x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad64x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad64x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad32x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad32x64 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad64x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad64x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad32x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad32x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad16x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad16x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad32x32/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad32x32 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad16x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad16x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad16x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad16x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad8x16/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad8x16 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad8x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad8x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad8x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad8x4 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad4x8/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad4x8 ssse3/;
+
+    add_proto qw/unsigned int vpx_highbd_masked_sad4x4/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+    specialize qw/vpx_highbd_masked_sad4x4 ssse3/;
+
+    if (vpx_config("CONFIG_EXT_PARTITION") eq "yes") {
+      add_proto qw/unsigned int vpx_highbd_masked_variance128x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_masked_variance128x128/;
+
+      add_proto qw/unsigned int vpx_highbd_masked_variance128x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_masked_variance128x64/;
+
+      add_proto qw/unsigned int vpx_highbd_masked_variance64x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_masked_variance64x128/;
+
+      add_proto qw/unsigned int vpx_highbd_10_masked_variance128x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_10_masked_variance128x128/;
+
+      add_proto qw/unsigned int vpx_highbd_10_masked_variance128x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_10_masked_variance128x64/;
+
+      add_proto qw/unsigned int vpx_highbd_10_masked_variance64x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_10_masked_variance64x128/;
+
+      add_proto qw/unsigned int vpx_highbd_12_masked_variance128x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_12_masked_variance128x128/;
+
+      add_proto qw/unsigned int vpx_highbd_12_masked_variance128x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_12_masked_variance128x64/;
+
+      add_proto qw/unsigned int vpx_highbd_12_masked_variance64x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_12_masked_variance64x128/;
+
+      add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance128x128/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_masked_sub_pixel_variance128x128/;
+
+      add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance128x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_masked_sub_pixel_variance128x64/;
+
+      add_proto qw/unsigned int vpx_highbd_masked_sub_pixel_variance64x128/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_masked_sub_pixel_variance64x128/;
+
+      add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance128x128/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_10_masked_sub_pixel_variance128x128/;
+
+      add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance128x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_10_masked_sub_pixel_variance128x64/;
+
+      add_proto qw/unsigned int vpx_highbd_10_masked_sub_pixel_variance64x128/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_10_masked_sub_pixel_variance64x128/;
+
+      add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance128x128/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_12_masked_sub_pixel_variance128x128/;
+
+      add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance128x64/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_12_masked_sub_pixel_variance128x64/;
+
+      add_proto qw/unsigned int vpx_highbd_12_masked_sub_pixel_variance64x128/, "const uint8_t *src_ptr, int source_stride, int xoffset, int  yoffset, const uint8_t *ref_ptr, int ref_stride, const uint8_t *m, int m_stride, unsigned int *sse";
+      specialize qw/vpx_highbd_12_masked_sub_pixel_variance64x128/;
+
+      add_proto qw/unsigned int vpx_highbd_masked_sad128x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int  ref_stride, const uint8_t *mask, int mask_stride";
+      specialize qw/vpx_highbd_masked_sad128x128/;
+
+      add_proto qw/unsigned int vpx_highbd_masked_sad128x64/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+      specialize qw/vpx_highbd_masked_sad128x64/;
+
+      add_proto qw/unsigned int vpx_highbd_masked_sad64x128/, "const uint8_t *src_ptr, int source_stride, const uint8_t *ref_ptr, int ref_stride, const uint8_t *mask, int mask_stride";
+      specialize qw/vpx_highbd_masked_sad64x128/;
+    }
+  }
+
   add_proto qw/void vpx_highbd_comp_avg_pred/, "uint16_t *comp_pred, const uint8_t *pred8, int width, int height, const uint8_t *ref8, int ref_stride";
 
   #
index 2617febf3b3d506bf77b90df47f58ed30b09766c..e049f74564890fabdcf2b033a22a6d97c727ad7f 100644 (file)
@@ -27,6 +27,10 @@ extern "C" {
 
 typedef int16_t InterpKernel[SUBPEL_TAPS];
 
+#define BIL_SUBPEL_BITS    3
+#define BIL_SUBPEL_SHIFTS  (1 << BIL_SUBPEL_BITS)
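+// 2-tap bilinear filters used by the sub-pixel variance code, one pair of
+// taps for each of the BIL_SUBPEL_SHIFTS sub-pel offsets.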
+extern const uint8_t vpx_bilinear_filters[BIL_SUBPEL_SHIFTS][2];
+
 #ifdef __cplusplus
 }  // extern "C"
 #endif
diff --git a/vpx_dsp/x86/masked_sad_intrin_ssse3.c b/vpx_dsp/x86/masked_sad_intrin_ssse3.c
new file mode 100644 (file)
index 0000000..384f89b
--- /dev/null
@@ -0,0 +1,367 @@
+/*
+ *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+#include <emmintrin.h>
+#include <tmmintrin.h>
+
+#include "vpx_ports/mem.h"
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+
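+// Gather two 8-pixel rows (or, below, four 4-pixel rows) into one 128-bit
+// register so that narrow blocks can be processed with full-width SIMD ops.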
+static INLINE __m128i width8_load_2rows(const uint8_t *ptr, int stride) {
+  __m128i temp1 = _mm_loadl_epi64((const __m128i*)ptr);
+  __m128i temp2 = _mm_loadl_epi64((const __m128i*)(ptr + stride));
+  return _mm_unpacklo_epi64(temp1, temp2);
+}
+
+static INLINE __m128i width4_load_4rows(const uint8_t *ptr, int stride) {
+  __m128i temp1 = _mm_cvtsi32_si128(*(const uint32_t*)ptr);
+  __m128i temp2 = _mm_cvtsi32_si128(*(const uint32_t*)(ptr + stride));
+  __m128i temp3 = _mm_unpacklo_epi32(temp1, temp2);
+  temp1 = _mm_cvtsi32_si128(*(const uint32_t*)(ptr + stride * 2));
+  temp2 = _mm_cvtsi32_si128(*(const uint32_t*)(ptr + stride * 3));
+  temp1 = _mm_unpacklo_epi32(temp1, temp2);
+  return _mm_unpacklo_epi64(temp3, temp1);
+}
+
+static INLINE unsigned int masked_sad_ssse3(const uint8_t *a_ptr, int a_stride,
+                                            const uint8_t *b_ptr, int b_stride,
+                                            const uint8_t *m_ptr, int m_stride,
+                                            int width, int height);
+
+static INLINE unsigned int masked_sad8xh_ssse3(const uint8_t *a_ptr,
+                                               int a_stride,
+                                               const uint8_t *b_ptr,
+                                               int b_stride,
+                                               const uint8_t *m_ptr,
+                                               int m_stride,
+                                               int height);
+
+static INLINE unsigned int masked_sad4xh_ssse3(const uint8_t *a_ptr,
+                                               int a_stride,
+                                               const uint8_t *b_ptr,
+                                               int b_stride,
+                                               const uint8_t *m_ptr,
+                                               int m_stride,
+                                               int height);
+
+#define MASKSADMXN_SSSE3(m, n) \
+unsigned int vpx_masked_sad##m##x##n##_ssse3(const uint8_t *src, \
+                                             int src_stride, \
+                                             const uint8_t *ref, \
+                                             int ref_stride, \
+                                             const uint8_t *msk, \
+                                             int msk_stride) { \
+  return masked_sad_ssse3(src, src_stride, ref, ref_stride, msk, msk_stride, \
+                          m, n); \
+}
+
+MASKSADMXN_SSSE3(64, 64)
+MASKSADMXN_SSSE3(64, 32)
+MASKSADMXN_SSSE3(32, 64)
+MASKSADMXN_SSSE3(32, 32)
+MASKSADMXN_SSSE3(32, 16)
+MASKSADMXN_SSSE3(16, 32)
+MASKSADMXN_SSSE3(16, 16)
+MASKSADMXN_SSSE3(16, 8)
+
+#define MASKSAD8XN_SSSE3(n) \
+unsigned int vpx_masked_sad8x##n##_ssse3(const uint8_t *src, \
+                                         int src_stride, \
+                                         const uint8_t *ref, \
+                                         int ref_stride, \
+                                         const uint8_t *msk, \
+                                         int msk_stride) { \
+  return masked_sad8xh_ssse3(src, src_stride, ref, ref_stride, msk, \
+                             msk_stride, n); \
+}
+
+MASKSAD8XN_SSSE3(16)
+MASKSAD8XN_SSSE3(8)
+MASKSAD8XN_SSSE3(4)
+
+#define MASKSAD4XN_SSSE3(n) \
+unsigned int vpx_masked_sad4x##n##_ssse3(const uint8_t *src, int src_stride, \
+                                         const uint8_t *ref, int ref_stride, \
+                                         const uint8_t *msk, int msk_stride) { \
+  return masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, msk, \
+                             msk_stride, n); \
+}
+
+MASKSAD4XN_SSSE3(8)
+MASKSAD4XN_SSSE3(4)
+
+// For width a multiple of 16
+// Assumes values in m are <=64 and w = 16, 32, or 64
+static INLINE unsigned int masked_sad_ssse3(const uint8_t *a_ptr, int a_stride,
+                                            const uint8_t *b_ptr, int b_stride,
+                                            const uint8_t *m_ptr, int m_stride,
+                                            int width, int height) {
+  int y, x;
+  __m128i a, b, m, temp1, temp2;
+  __m128i res = _mm_setzero_si128();
+  __m128i one = _mm_set1_epi16(1);
+  // For each row
+  for (y = 0; y < height; y++) {
+    // Covering the full width
+    for (x = 0; x < width; x += 16) {
+      // Load a, b, m in xmm registers
+      a = _mm_loadu_si128((const __m128i*)(a_ptr + x));
+      b = _mm_loadu_si128((const __m128i*)(b_ptr + x));
+      m = _mm_loadu_si128((const __m128i*)(m_ptr + x));
+
+      // Calculate the difference between a & b
+      temp1 = _mm_subs_epu8(a, b);
+      temp2 = _mm_subs_epu8(b, a);
+      temp1 = _mm_or_si128(temp1, temp2);
+
+      // Multiply by m and add together
+      temp2 = _mm_maddubs_epi16(temp1, m);
+      // Pad out row result to 32 bit integers & add to running total
+      res = _mm_add_epi32(res, _mm_madd_epi16(temp2, one));
+    }
+    // Move onto the next row
+    a_ptr += a_stride;
+    b_ptr += b_stride;
+    m_ptr += m_stride;
+  }
+  res = _mm_hadd_epi32(res, _mm_setzero_si128());
+  res = _mm_hadd_epi32(res, _mm_setzero_si128());
+  // sad = (sad + 31) >> 6;
+  return (_mm_cvtsi128_si32(res) + 31) >> 6;
+}
+
+static INLINE unsigned int masked_sad8xh_ssse3(const uint8_t *a_ptr,
+                                               int a_stride,
+                                               const uint8_t *b_ptr,
+                                               int b_stride,
+                                               const uint8_t *m_ptr,
+                                               int m_stride,
+                                               int height) {
+  int y;
+  __m128i a, b, m, temp1, temp2, row_res;
+  __m128i res = _mm_setzero_si128();
+  __m128i one = _mm_set1_epi16(1);
+  // Add the masked SAD for 2 rows at a time
+  for (y = 0; y < height; y += 2) {
+    // Load a, b, m in xmm registers
+    a = width8_load_2rows(a_ptr, a_stride);
+    b = width8_load_2rows(b_ptr, b_stride);
+    m = width8_load_2rows(m_ptr, m_stride);
+
+    // Calculate the difference between a & b
+    temp1 = _mm_subs_epu8(a, b);
+    temp2 = _mm_subs_epu8(b, a);
+    temp1 = _mm_or_si128(temp1, temp2);
+
+    // Multiply by m and add together
+    row_res = _mm_maddubs_epi16(temp1, m);
+
+    // Pad out row result to 32 bit integers & add to running total
+    res = _mm_add_epi32(res, _mm_madd_epi16(row_res, one));
+
+    // Move onto the next rows
+    a_ptr += a_stride * 2;
+    b_ptr += b_stride * 2;
+    m_ptr += m_stride * 2;
+  }
+  res = _mm_hadd_epi32(res, _mm_setzero_si128());
+  res = _mm_hadd_epi32(res, _mm_setzero_si128());
+  // sad = (sad + 31) >> 6;
+  return (_mm_cvtsi128_si32(res) + 31) >> 6;
+}
+
+static INLINE unsigned int masked_sad4xh_ssse3(const uint8_t *a_ptr,
+                                               int a_stride,
+                                               const uint8_t *b_ptr,
+                                               int b_stride,
+                                               const uint8_t *m_ptr,
+                                               int m_stride,
+                                               int height) {
+  int y;
+  __m128i a, b, m, temp1, temp2, row_res;
+  __m128i res = _mm_setzero_si128();
+  __m128i one = _mm_set1_epi16(1);
+  // Add the masked SAD for 4 rows at a time
+  for (y = 0; y < height; y += 4) {
+    // Load a, b, m in xmm registers
+    a = width4_load_4rows(a_ptr, a_stride);
+    b = width4_load_4rows(b_ptr, b_stride);
+    m = width4_load_4rows(m_ptr, m_stride);
+
+    // Calculate the difference between a & b
+    temp1 = _mm_subs_epu8(a, b);
+    temp2 = _mm_subs_epu8(b, a);
+    temp1 = _mm_or_si128(temp1, temp2);
+
+    // Multiply by m and add together
+    row_res = _mm_maddubs_epi16(temp1, m);
+
+    // Pad out row result to 32 bit integers & add to running total
+    res = _mm_add_epi32(res, _mm_madd_epi16(row_res, one));
+
+    // Move onto the next rows
+    a_ptr += a_stride * 4;
+    b_ptr += b_stride * 4;
+    m_ptr += m_stride * 4;
+  }
+  // Horizontally add the running total
+  res = _mm_hadd_epi32(res, _mm_setzero_si128());
+  res = _mm_hadd_epi32(res, _mm_setzero_si128());
+  // sad = (sad + 31) >> 6;
+  return (_mm_cvtsi128_si32(res) + 31) >> 6;
+}
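For reference, a minimal plain-C sketch of what the three SSSE3 kernels above compute (the function name is illustrative, not part of this change; it assumes the same vpx_integer.h types the file already uses):

static unsigned int masked_sad_c_model(const uint8_t *a, int a_stride,
                                       const uint8_t *b, int b_stride,
                                       const uint8_t *m, int m_stride,
                                       int width, int height) {
  unsigned int sad = 0;
  int x, y;
  for (y = 0; y < height; y++) {
    for (x = 0; x < width; x++) {
      // Weight each absolute difference by the 0..64 mask value
      const int d = a[x] - b[x];
      sad += (unsigned int)(m[x] * (d < 0 ? -d : d));
    }
    a += a_stride;
    b += b_stride;
    m += m_stride;
  }
  // Same rounding as above: divide the mask-weighted total by 64,
  // rounding to nearest, i.e. (sad + 31) >> 6.
  return (sad + 31) >> 6;
}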
+
+#if CONFIG_VP9_HIGHBITDEPTH
+static INLINE __m128i highbd_width4_load_2rows(const uint16_t *ptr,
+                                               int stride) {
+  __m128i temp1 = _mm_loadl_epi64((const __m128i*)ptr);
+  __m128i temp2 = _mm_loadl_epi64((const __m128i*)(ptr + stride));
+  return _mm_unpacklo_epi64(temp1, temp2);
+}
+
+static INLINE unsigned int highbd_masked_sad_ssse3(const uint8_t *a8_ptr,
+                                                   int a_stride,
+                                                   const uint8_t *b8_ptr,
+                                                   int b_stride,
+                                                   const uint8_t *m_ptr,
+                                                   int m_stride,
+                                                   int width, int height);
+
+static INLINE unsigned int highbd_masked_sad4xh_ssse3(const uint8_t *a8_ptr,
+                                                      int a_stride,
+                                                      const uint8_t *b8_ptr,
+                                                      int b_stride,
+                                                      const uint8_t *m_ptr,
+                                                      int m_stride,
+                                                      int height);
+
+#define HIGHBD_MASKSADMXN_SSSE3(m, n) \
+unsigned int vpx_highbd_masked_sad##m##x##n##_ssse3(const uint8_t *src, \
+                                                    int src_stride, \
+                                                    const uint8_t *ref, \
+                                                    int ref_stride, \
+                                                    const uint8_t *msk, \
+                                                    int msk_stride) { \
+  return highbd_masked_sad_ssse3(src, src_stride, ref, ref_stride, msk, \
+                                 msk_stride, m, n); \
+}
+
+HIGHBD_MASKSADMXN_SSSE3(64, 64)
+HIGHBD_MASKSADMXN_SSSE3(64, 32)
+HIGHBD_MASKSADMXN_SSSE3(32, 64)
+HIGHBD_MASKSADMXN_SSSE3(32, 32)
+HIGHBD_MASKSADMXN_SSSE3(32, 16)
+HIGHBD_MASKSADMXN_SSSE3(16, 32)
+HIGHBD_MASKSADMXN_SSSE3(16, 16)
+HIGHBD_MASKSADMXN_SSSE3(16, 8)
+HIGHBD_MASKSADMXN_SSSE3(8, 16)
+HIGHBD_MASKSADMXN_SSSE3(8, 8)
+HIGHBD_MASKSADMXN_SSSE3(8, 4)
+
+#define HIGHBD_MASKSAD4XN_SSSE3(n) \
+unsigned int vpx_highbd_masked_sad4x##n##_ssse3(const uint8_t *src, \
+                                                int src_stride, \
+                                                const uint8_t *ref, \
+                                                int ref_stride, \
+                                                const uint8_t *msk, \
+                                                int msk_stride) { \
+  return highbd_masked_sad4xh_ssse3(src, src_stride, ref, ref_stride, msk, \
+                                    msk_stride, n); \
+}
+
+HIGHBD_MASKSAD4XN_SSSE3(8)
+HIGHBD_MASKSAD4XN_SSSE3(4)
+
+// For width a multiple of 8
+// Assumes values in m are <=64
+static INLINE unsigned int highbd_masked_sad_ssse3(const uint8_t *a8_ptr,
+                                                   int a_stride,
+                                                   const uint8_t *b8_ptr,
+                                                   int b_stride,
+                                                   const uint8_t *m_ptr,
+                                                   int m_stride,
+                                                   int width, int height) {
+  int y, x;
+  __m128i a, b, m, temp1, temp2;
+  const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8_ptr);
+  const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8_ptr);
+  __m128i res = _mm_setzero_si128();
+  // For each row
+  for (y = 0; y < height; y++) {
+    // Covering the full width
+    for (x = 0; x < width; x += 8) {
+      // Load a, b, m in xmm registers
+      a = _mm_loadu_si128((const __m128i*)(a_ptr + x));
+      b = _mm_loadu_si128((const __m128i*)(b_ptr + x));
+      m = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i*)(m_ptr + x)),
+                            _mm_setzero_si128());
+
+      // Calculate the difference between a & b
+      temp1 = _mm_subs_epu16(a, b);
+      temp2 = _mm_subs_epu16(b, a);
+      temp1 = _mm_or_si128(temp1, temp2);
+
+      // Multiply by m, add pairs together and accumulate into running total
+      res = _mm_add_epi32(res, _mm_madd_epi16(temp1, m));
+    }
+    // Move onto the next row
+    a_ptr += a_stride;
+    b_ptr += b_stride;
+    m_ptr += m_stride;
+  }
+  res = _mm_hadd_epi32(res, _mm_setzero_si128());
+  res = _mm_hadd_epi32(res, _mm_setzero_si128());
+  // sad = (sad + 31) >> 6;
+  return (_mm_cvtsi128_si32(res) + 31) >> 6;
+}
+
+static INLINE unsigned int highbd_masked_sad4xh_ssse3(const uint8_t *a8_ptr,
+                                                      int a_stride,
+                                                      const uint8_t *b8_ptr,
+                                                      int b_stride,
+                                                      const uint8_t *m_ptr,
+                                                      int m_stride,
+                                                      int height) {
+  int y;
+  __m128i a, b, m, temp1, temp2;
+  const uint16_t *a_ptr = CONVERT_TO_SHORTPTR(a8_ptr);
+  const uint16_t *b_ptr = CONVERT_TO_SHORTPTR(b8_ptr);
+  __m128i res = _mm_setzero_si128();
+  // Add the masked SAD for 2 rows at a time
+  for (y = 0; y < height; y += 2) {
+    // Load a, b, m in xmm registers
+    a = highbd_width4_load_2rows(a_ptr, a_stride);
+    b = highbd_width4_load_2rows(b_ptr, b_stride);
+    temp1 = _mm_loadl_epi64((const __m128i*)m_ptr);
+    temp2 = _mm_loadl_epi64((const __m128i*)(m_ptr + m_stride));
+    m = _mm_unpacklo_epi8(_mm_unpacklo_epi32(temp1, temp2),
+                          _mm_setzero_si128());
+
+    // Calculate the difference between a & b
+    temp1 = _mm_subs_epu16(a, b);
+    temp2 = _mm_subs_epu16(b, a);
+    temp1 = _mm_or_si128(temp1, temp2);
+
+    // Multiply by m and add together
+    res = _mm_add_epi32(res, _mm_madd_epi16(temp1, m));
+
+    // Move onto the next rows
+    a_ptr += a_stride * 2;
+    b_ptr += b_stride * 2;
+    m_ptr += m_stride * 2;
+  }
+  res = _mm_hadd_epi32(res, _mm_setzero_si128());
+  res = _mm_hadd_epi32(res, _mm_setzero_si128());
+  // sad = (sad + 31) >> 6;
+  return (_mm_cvtsi128_si32(res) + 31) >> 6;
+}
+#endif  // CONFIG_VP9_HIGHBITDEPTH
diff --git a/vpx_dsp/x86/masked_variance_intrin_ssse3.c b/vpx_dsp/x86/masked_variance_intrin_ssse3.c
new file mode 100644 (file)
index 0000000..96af421
--- /dev/null
@@ -0,0 +1,2096 @@
+/*
+ *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <assert.h>
+#include <stdlib.h>
+#include <emmintrin.h>
+#include <tmmintrin.h>
+
+#include "./vpx_config.h"
+#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
+#include "vpx_dsp/vpx_filter.h"
+
+// Assumes mask values are <= 64
+
+// Log 2 of powers of 2 as an expression
+#define LOG2_P2(n)  ((n) ==   1 ? 0 :       \
+                     (n) ==   2 ? 1 :       \
+                     (n) ==   4 ? 2 :       \
+                     (n) ==   8 ? 3 :       \
+                     (n) ==  16 ? 4 :       \
+                     (n) ==  32 ? 5 :       \
+                     (n) ==  64 ? 6 :       \
+                     (n) == 128 ? 7 :  -1)
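A worked example of how this macro is used by the variance code below (an illustration, not part of the change):

/* For a 16x32 block, LOG2_P2(16) + LOG2_P2(32) == 4 + 5 == 9, so
 *   (sum * sum) >> (LOG2_P2(16) + LOG2_P2(32))
 * equals (sum * sum) / (16 * 32) exactly, which is how the functions
 * below form sum^2 / (w * h) with a shift instead of a division. */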
+
+/*****************************************************************************
+ * n*16 Wide versions
+ *****************************************************************************/
+
+static INLINE unsigned int masked_variancewxh_ssse3(
+    const uint8_t *a, int  a_stride,
+    const uint8_t *b, int  b_stride,
+    const uint8_t *m, int  m_stride,
+    int w, int  h,
+    unsigned int *sse) {
+  int ii, jj;
+
+  const __m128i v_zero = _mm_setzero_si128();
+
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+
+  assert((w % 16) == 0);
+
+  for (ii = 0; ii < h; ii++) {
+    for (jj = 0 ; jj < w ; jj += 16) {
+      // Load inputs - 8 bits
+      const __m128i v_a_b = _mm_loadu_si128((const __m128i*)(a+jj));
+      const __m128i v_b_b = _mm_loadu_si128((const __m128i*)(b+jj));
+      const __m128i v_m_b = _mm_loadu_si128((const __m128i*)(m+jj));
+
+      // Unpack to 16 bits - still containing max 8 bits
+      const __m128i v_a0_w = _mm_unpacklo_epi8(v_a_b, v_zero);
+      const __m128i v_b0_w = _mm_unpacklo_epi8(v_b_b, v_zero);
+      const __m128i v_m0_w = _mm_unpacklo_epi8(v_m_b, v_zero);
+      const __m128i v_a1_w = _mm_unpackhi_epi8(v_a_b, v_zero);
+      const __m128i v_b1_w = _mm_unpackhi_epi8(v_b_b, v_zero);
+      const __m128i v_m1_w = _mm_unpackhi_epi8(v_m_b, v_zero);
+
+      // Difference: [-255, 255]
+      const __m128i v_d0_w = _mm_sub_epi16(v_a0_w, v_b0_w);
+      const __m128i v_d1_w = _mm_sub_epi16(v_a1_w, v_b1_w);
+
+      // Error - [-255, 255] * [0, 64] = [0xc040, 0x3fc0] => fits in 15 bits
+      const __m128i v_e0_w = _mm_mullo_epi16(v_d0_w, v_m0_w);
+      const __m128i v_e0_d = _mm_madd_epi16(v_d0_w, v_m0_w);
+      const __m128i v_e1_w = _mm_mullo_epi16(v_d1_w, v_m1_w);
+      const __m128i v_e1_d = _mm_madd_epi16(v_d1_w, v_m1_w);
+
+      // Squared error - using madd it's max (15 bits * 15 bits) * 2 = 31 bits
+      const __m128i v_se0_d = _mm_madd_epi16(v_e0_w, v_e0_w);
+      const __m128i v_se1_d = _mm_madd_epi16(v_e1_w, v_e1_w);
+
+      // Sum of v_se{0,1}_d - 31 bits + 31 bits = 32 bits
+      const __m128i v_se_d = _mm_add_epi32(v_se0_d, v_se1_d);
+
+      // Unpack Squared error to 64 bits
+      const __m128i v_se_lo_q = _mm_unpacklo_epi32(v_se_d, v_zero);
+      const __m128i v_se_hi_q = _mm_unpackhi_epi32(v_se_d, v_zero);
+
+      // Accumulate
+      v_sum_d = _mm_add_epi32(v_sum_d, v_e0_d);
+      v_sum_d = _mm_add_epi32(v_sum_d, v_e1_d);
+      v_sse_q = _mm_add_epi64(v_sse_q, v_se_lo_q);
+      v_sse_q = _mm_add_epi64(v_sse_q, v_se_hi_q);
+    }
+
+    // Move on to next row
+    a += a_stride;
+    b += b_stride;
+    m += m_stride;
+  }
+
+  // Horizontal sum
+  v_sum_d = _mm_hadd_epi32(v_sum_d, v_sum_d);
+  v_sum_d = _mm_hadd_epi32(v_sum_d, v_sum_d);
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_srli_si128(v_sse_q, 8));
+
+  // Round
+  v_sum_d = _mm_sub_epi32(v_sum_d, _mm_cmplt_epi32(v_sum_d, v_zero));
+  v_sum_d = _mm_add_epi32(v_sum_d, _mm_set_epi32(0, 0, 0, 31));
+  v_sum_d = _mm_srai_epi32(v_sum_d, 6);
+
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_set_epi32(0, 0, 0, 2047));
+  v_sse_q = _mm_srli_epi64(v_sse_q, 12);
+
+  // Store the SSE
+  *sse = _mm_cvtsi128_si32(v_sse_q);
+
+  // Compute the variance
+  v_sum_d = _mm_abs_epi32(v_sum_d);
+  v_sum_d = _mm_mul_epu32(v_sum_d, v_sum_d);
+  v_sum_d = _mm_srl_epi64(v_sum_d,
+                          _mm_set_epi32(0, 0, 0, LOG2_P2(w) + LOG2_P2(h)));
+  v_sse_q = _mm_sub_epi64(v_sse_q, v_sum_d);
+
+  return _mm_cvtsi128_si32(v_sse_q);
+}
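A plain-C model of the computation above may help when reading the later block-size variants; this is a sketch with an illustrative name, and its rounding mirrors calc_masked_variance() further down in this file:

static unsigned int masked_variance_c_model(const uint8_t *a, int a_stride,
                                            const uint8_t *b, int b_stride,
                                            const uint8_t *m, int m_stride,
                                            int w, int h, unsigned int *sse) {
  int64_t sum = 0, sse64 = 0;
  int x, y;
  for (y = 0; y < h; y++) {
    for (x = 0; x < w; x++) {
      const int e = m[x] * (a[x] - b[x]);  // mask-weighted difference
      sum += e;
      sse64 += (int64_t)e * e;
    }
    a += a_stride;
    b += b_stride;
    m += m_stride;
  }
  // Renormalise: the mask carries a factor of 64, so the sum is divided
  // by 64 and the squared error by 64 * 64 = 4096, rounded to nearest.
  sum = (sum >= 0) ? ((sum + 31) >> 6) : -((-sum + 31) >> 6);
  *sse = (unsigned int)((sse64 + 2047) >> 12);
  return *sse - (unsigned int)((sum * sum) >> (LOG2_P2(w) + LOG2_P2(h)));
}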
+
+#define MASKED_VARWXH(W, H)                                               \
+unsigned int vpx_masked_variance##W##x##H##_ssse3(                        \
+  const uint8_t *a, int a_stride,                                         \
+  const uint8_t *b, int b_stride,                                         \
+  const uint8_t *m, int m_stride,                                         \
+  unsigned int *sse) {                                                    \
+  return masked_variancewxh_ssse3(a, a_stride,                            \
+                                  b, b_stride,                            \
+                                  m, m_stride,                            \
+                                  W, H, sse);                             \
+}
+
+MASKED_VARWXH(16, 8)
+MASKED_VARWXH(16, 16)
+MASKED_VARWXH(16, 32)
+MASKED_VARWXH(32, 16)
+MASKED_VARWXH(32, 32)
+MASKED_VARWXH(32, 64)
+MASKED_VARWXH(64, 32)
+MASKED_VARWXH(64, 64)
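As a usage sketch, one of the entry points generated by the macro above can be called as follows (the wrapper name and the contiguous 16-pixel stride are assumptions for illustration only):

static unsigned int example_masked_var16x16(const uint8_t src[16 * 16],
                                            const uint8_t ref[16 * 16],
                                            const uint8_t msk[16 * 16]) {
  unsigned int sse;
  // src, ref and msk are contiguous 16x16 blocks, so each stride is 16.
  return vpx_masked_variance16x16_ssse3(src, 16, ref, 16, msk, 16, &sse);
}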
+
+/*****************************************************************************
+ * 8 Wide versions
+ *****************************************************************************/
+
+static INLINE unsigned int masked_variance8xh_ssse3(
+    const uint8_t *a, int  a_stride,
+    const uint8_t *b, int  b_stride,
+    const uint8_t *m, int  m_stride,
+    int  h,
+    unsigned int *sse) {
+  int ii;
+
+  const __m128i v_zero = _mm_setzero_si128();
+
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+
+  for (ii = 0; ii < h; ii++) {
+    // Load inputs - 8 bits
+    const __m128i v_a_b = _mm_loadl_epi64((const __m128i*)a);
+    const __m128i v_b_b = _mm_loadl_epi64((const __m128i*)b);
+    const __m128i v_m_b = _mm_loadl_epi64((const __m128i*)m);
+
+    // Unpack to 16 bits - still containing max 8 bits
+    const __m128i v_a_w = _mm_unpacklo_epi8(v_a_b, v_zero);
+    const __m128i v_b_w = _mm_unpacklo_epi8(v_b_b, v_zero);
+    const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
+
+    // Difference: [-255, 255]
+    const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
+
+    // Error - [-255, 255] * [0, 64] = [0xc040, 0x3fc0] => fits in 15 bits
+    const __m128i v_e_w = _mm_mullo_epi16(v_d_w, v_m_w);
+    const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
+
+    // Squared error - using madd it's max (15 bits * 15 bits) * 2 = 31 bits
+    const __m128i v_se_d = _mm_madd_epi16(v_e_w, v_e_w);
+
+    // Unpack Squared error to 64 bits
+    const __m128i v_se_lo_q = _mm_unpacklo_epi32(v_se_d, v_zero);
+    const __m128i v_se_hi_q = _mm_unpackhi_epi32(v_se_d, v_zero);
+
+    // Accumulate
+    v_sum_d = _mm_add_epi32(v_sum_d, v_e_d);
+    v_sse_q = _mm_add_epi64(v_sse_q, v_se_lo_q);
+    v_sse_q = _mm_add_epi64(v_sse_q, v_se_hi_q);
+
+    // Move on to next row
+    a += a_stride;
+    b += b_stride;
+    m += m_stride;
+  }
+
+  // Horizontal sum
+  v_sum_d = _mm_hadd_epi32(v_sum_d, v_sum_d);
+  v_sum_d = _mm_hadd_epi32(v_sum_d, v_sum_d);
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_srli_si128(v_sse_q, 8));
+
+  // Round
+  v_sum_d = _mm_sub_epi32(v_sum_d, _mm_cmplt_epi32(v_sum_d, v_zero));
+  v_sum_d = _mm_add_epi32(v_sum_d, _mm_set_epi32(0, 0, 0, 31));
+  v_sum_d = _mm_srai_epi32(v_sum_d, 6);
+
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_set_epi32(0, 0, 0, 2047));
+  v_sse_q = _mm_srli_epi64(v_sse_q, 12);
+
+  // Store the SSE
+  *sse = _mm_cvtsi128_si32(v_sse_q);
+
+  // Compute the variance
+  v_sum_d = _mm_abs_epi32(v_sum_d);
+  v_sum_d = _mm_mul_epu32(v_sum_d, v_sum_d);
+  v_sum_d = _mm_srl_epi64(v_sum_d, _mm_set_epi32(0, 0, 0, LOG2_P2(h) + 3));
+  v_sse_q = _mm_sub_epi64(v_sse_q, v_sum_d);
+
+  return _mm_cvtsi128_si32(v_sse_q);
+}
+
+#define MASKED_VAR8XH(H)                                                  \
+unsigned int vpx_masked_variance8x##H##_ssse3(                            \
+  const uint8_t *a, int a_stride,                                         \
+  const uint8_t *b, int b_stride,                                         \
+  const uint8_t *m, int m_stride,                                         \
+  unsigned int *sse) {                                                    \
+  return masked_variance8xh_ssse3(a, a_stride,                            \
+                                  b, b_stride,                            \
+                                  m, m_stride,                            \
+                                  H, sse);                                \
+}
+
+MASKED_VAR8XH(4)
+MASKED_VAR8XH(8)
+MASKED_VAR8XH(16)
+
+/*****************************************************************************
+ * 4 Wide versions
+ *****************************************************************************/
+
+static INLINE unsigned int masked_variance4xh_ssse3(
+    const uint8_t *a, int  a_stride,
+    const uint8_t *b, int  b_stride,
+    const uint8_t *m, int  m_stride,
+    int  h,
+    unsigned int *sse) {
+  int ii;
+
+  const __m128i v_zero = _mm_setzero_si128();
+
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+
+  assert((h % 2) == 0);
+
+  for (ii = 0; ii < h/2; ii++) {
+    // Load 2 input rows - 8 bits
+    const __m128i v_a0_b = _mm_cvtsi32_si128(*(const uint32_t*)a);
+    const __m128i v_b0_b = _mm_cvtsi32_si128(*(const uint32_t*)b);
+    const __m128i v_m0_b = _mm_cvtsi32_si128(*(const uint32_t*)m);
+    const __m128i v_a1_b = _mm_cvtsi32_si128(*(const uint32_t*)(a + a_stride));
+    const __m128i v_b1_b = _mm_cvtsi32_si128(*(const uint32_t*)(b + b_stride));
+    const __m128i v_m1_b = _mm_cvtsi32_si128(*(const uint32_t*)(m + m_stride));
+
+    // Interleave 2 rows into a single register
+    const __m128i v_a_b = _mm_unpacklo_epi32(v_a0_b, v_a1_b);
+    const __m128i v_b_b = _mm_unpacklo_epi32(v_b0_b, v_b1_b);
+    const __m128i v_m_b = _mm_unpacklo_epi32(v_m0_b, v_m1_b);
+
+    // Unpack to 16 bits - still containing max 8 bits
+    const __m128i v_a_w = _mm_unpacklo_epi8(v_a_b, v_zero);
+    const __m128i v_b_w = _mm_unpacklo_epi8(v_b_b, v_zero);
+    const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
+
+    // Difference: [-255, 255]
+    const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
+
+    // Error - [-255, 255] * [0, 64] = [0xc040, 0x3fc0] => fits in 15 bits
+    const __m128i v_e_w = _mm_mullo_epi16(v_d_w, v_m_w);
+    const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
+
+    // Squared error - using madd it's max (15 bits * 15 bits) * 2 = 31 bits
+    const __m128i v_se_d = _mm_madd_epi16(v_e_w, v_e_w);
+
+    // Unpack Squared error to 64 bits
+    const __m128i v_se_lo_q = _mm_unpacklo_epi32(v_se_d, v_zero);
+    const __m128i v_se_hi_q = _mm_unpackhi_epi32(v_se_d, v_zero);
+
+    // Accumulate
+    v_sum_d = _mm_add_epi32(v_sum_d, v_e_d);
+    v_sse_q = _mm_add_epi64(v_sse_q, v_se_lo_q);
+    v_sse_q = _mm_add_epi64(v_sse_q, v_se_hi_q);
+
+    // Move on to the next 2 rows
+    a += a_stride * 2;
+    b += b_stride * 2;
+    m += m_stride * 2;
+  }
+
+  // Horizontal sum
+  v_sum_d = _mm_hadd_epi32(v_sum_d, v_sum_d);
+  v_sum_d = _mm_hadd_epi32(v_sum_d, v_sum_d);
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_srli_si128(v_sse_q, 8));
+
+  // Round
+  v_sum_d = _mm_sub_epi32(v_sum_d, _mm_cmplt_epi32(v_sum_d, v_zero));
+  v_sum_d = _mm_add_epi32(v_sum_d, _mm_set_epi32(0, 0, 0, 31));
+  v_sum_d = _mm_srai_epi32(v_sum_d, 6);
+
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_set_epi32(0, 0, 0, 2047));
+  v_sse_q = _mm_srli_epi64(v_sse_q, 12);
+
+  // Store the SSE
+  *sse = _mm_cvtsi128_si32(v_sse_q);
+
+  // Compute the variance
+  v_sum_d = _mm_abs_epi32(v_sum_d);
+  v_sum_d = _mm_mul_epu32(v_sum_d, v_sum_d);
+  v_sum_d = _mm_srl_epi64(v_sum_d, _mm_set_epi32(0, 0, 0, LOG2_P2(h) + 2));
+  v_sse_q = _mm_sub_epi64(v_sse_q, v_sum_d);
+
+  return _mm_cvtsi128_si32(v_sse_q);
+}
+
+#define MASKED_VAR4XH(H)                                                  \
+unsigned int vpx_masked_variance4x##H##_ssse3(                            \
+  const uint8_t *a, int a_stride,                                         \
+  const uint8_t *b, int b_stride,                                         \
+  const uint8_t *m, int m_stride,                                         \
+  unsigned int *sse) {                                                    \
+  return masked_variance4xh_ssse3(a, a_stride,                            \
+                                  b, b_stride,                            \
+                                  m, m_stride,                            \
+                                  H, sse);                                \
+}
+
+MASKED_VAR4XH(4)
+MASKED_VAR4XH(8)
+
+#if CONFIG_VP9_HIGHBITDEPTH
+
+// Main calculation for n*8 wide blocks
+static INLINE void highbd_masked_variance64_ssse3(
+    const uint16_t *a, int  a_stride,
+    const uint16_t *b, int  b_stride,
+    const uint8_t *m, int  m_stride,
+    int w, int  h,
+    __m128i* v_sum_d, __m128i* v_sse_q) {
+  int ii, jj;
+
+  const __m128i v_zero = _mm_setzero_si128();
+
+  *v_sum_d = _mm_setzero_si128();
+  *v_sse_q = _mm_setzero_si128();
+
+  assert((w % 8) == 0);
+
+  for (ii = 0; ii < h; ii++) {
+    for (jj = 0 ; jj < w ; jj += 8) {
+      // Load inputs - a and b are 16 bits, m is 8 bits
+      const __m128i v_a_w = _mm_loadu_si128((const __m128i*)(a+jj));
+      const __m128i v_b_w = _mm_loadu_si128((const __m128i*)(b+jj));
+      const __m128i v_m_b = _mm_loadl_epi64((const __m128i*)(m+jj));
+
+      // Unpack m to 16 bits - still containing max 8 bits
+      const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
+
+      // Difference: [-4095, 4095]
+      const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
+
+      // Error - [-4095, 4095] * [0, 64] => fits in 19 bits (incl. sign bit)
+      const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
+
+      // Squared error - max (18 bits * 18 bits) = 36 bits (no sign bit)
+      const __m128i v_absd_w = _mm_abs_epi16(v_d_w);
+      const __m128i v_dlo_d = _mm_unpacklo_epi16(v_absd_w, v_zero);
+      const __m128i v_mlo_d = _mm_unpacklo_epi16(v_m_w, v_zero);
+      const __m128i v_elo_d = _mm_madd_epi16(v_dlo_d, v_mlo_d);
+      const __m128i v_dhi_d = _mm_unpackhi_epi16(v_absd_w, v_zero);
+      const __m128i v_mhi_d = _mm_unpackhi_epi16(v_m_w, v_zero);
+      const __m128i v_ehi_d = _mm_madd_epi16(v_dhi_d, v_mhi_d);
+      // Square and sum the errors -> 36bits * 4 = 38bits
+      __m128i v_se0_q, v_se1_q, v_se2_q, v_se3_q, v_se_q, v_elo1_d, v_ehi3_d;
+      v_se0_q = _mm_mul_epu32(v_elo_d, v_elo_d);
+      v_elo1_d = _mm_srli_si128(v_elo_d, 4);
+      v_se1_q = _mm_mul_epu32(v_elo1_d, v_elo1_d);
+      v_se0_q = _mm_add_epi64(v_se0_q, v_se1_q);
+      v_se2_q = _mm_mul_epu32(v_ehi_d, v_ehi_d);
+      v_ehi3_d = _mm_srli_si128(v_ehi_d, 4);
+      v_se3_q = _mm_mul_epu32(v_ehi3_d, v_ehi3_d);
+      v_se1_q = _mm_add_epi64(v_se2_q, v_se3_q);
+      v_se_q = _mm_add_epi64(v_se0_q, v_se1_q);
+
+      // Accumulate
+      *v_sum_d = _mm_add_epi32(*v_sum_d, v_e_d);
+      *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_q);
+    }
+
+    // Move on to next row
+    a += a_stride;
+    b += b_stride;
+    m += m_stride;
+  }
+
+  // Horizontal sum
+  *v_sum_d = _mm_hadd_epi32(*v_sum_d, *v_sum_d);
+  *v_sum_d = _mm_hadd_epi32(*v_sum_d, *v_sum_d);
+  *v_sse_q = _mm_add_epi64(*v_sse_q, _mm_srli_si128(*v_sse_q, 8));
+
+  // Round
+  *v_sum_d = _mm_sub_epi32(*v_sum_d, _mm_cmplt_epi32(*v_sum_d, v_zero));
+  *v_sum_d = _mm_add_epi32(*v_sum_d, _mm_set_epi32(0, 0, 0, 31));
+  *v_sum_d = _mm_srai_epi32(*v_sum_d, 6);
+
+  *v_sse_q = _mm_add_epi64(*v_sse_q, _mm_set_epi32(0, 0, 0, 2047));
+  *v_sse_q = _mm_srli_epi64(*v_sse_q, 12);
+}
+
+// Main calculation for 4 wide blocks
+static INLINE void highbd_masked_variance64_4wide_ssse3(
+    const uint16_t *a, int  a_stride,
+    const uint16_t *b, int  b_stride,
+    const uint8_t *m, int  m_stride,
+    int  h,
+    __m128i* v_sum_d, __m128i* v_sse_q) {
+  int ii;
+
+  const __m128i v_zero = _mm_setzero_si128();
+
+  *v_sum_d = _mm_setzero_si128();
+  *v_sse_q = _mm_setzero_si128();
+
+  assert((h % 2) == 0);
+
+  for (ii = 0; ii < h/2; ii++) {
+    // Load 2 input rows - a and b are 16 bits, m is 8 bits
+    const __m128i v_a0_w = _mm_loadl_epi64((const __m128i*)a);
+    const __m128i v_b0_w = _mm_loadl_epi64((const __m128i*)b);
+    const __m128i v_m0_b = _mm_cvtsi32_si128(*(const uint32_t*)m);
+    const __m128i v_a1_w = _mm_loadl_epi64((const __m128i*)(a + a_stride));
+    const __m128i v_b1_w = _mm_loadl_epi64((const __m128i*)(b + b_stride));
+    const __m128i v_m1_b = _mm_cvtsi32_si128(*(const uint32_t*)(m + m_stride));
+
+    // Interleave 2 rows into a single register
+    const __m128i v_a_w = _mm_unpacklo_epi64(v_a0_w, v_a1_w);
+    const __m128i v_b_w = _mm_unpacklo_epi64(v_b0_w, v_b1_w);
+    const __m128i v_m_b = _mm_unpacklo_epi32(v_m0_b, v_m1_b);
+
+    // Unpack m to 16 bits - still containing max 8 bits
+    const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
+
+    // Difference: [-4095, 4095]
+    const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
+
+    // Error - [-4095, 4095] * [0, 64] => fits in 19 bits (incl. sign bit)
+    const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
+
+    // Squared error - max (18 bits * 18 bits) = 36 bits (no sign bit)
+    const __m128i v_absd_w = _mm_abs_epi16(v_d_w);
+    const __m128i v_dlo_d = _mm_unpacklo_epi16(v_absd_w, v_zero);
+    const __m128i v_mlo_d = _mm_unpacklo_epi16(v_m_w, v_zero);
+    const __m128i v_elo_d = _mm_madd_epi16(v_dlo_d, v_mlo_d);
+    const __m128i v_dhi_d = _mm_unpackhi_epi16(v_absd_w, v_zero);
+    const __m128i v_mhi_d = _mm_unpackhi_epi16(v_m_w, v_zero);
+    const __m128i v_ehi_d = _mm_madd_epi16(v_dhi_d, v_mhi_d);
+    // Square and sum the errors -> 36bits * 4 = 38bits
+    __m128i v_se0_q, v_se1_q, v_se2_q, v_se3_q, v_se_q, v_elo1_d, v_ehi3_d;
+    v_se0_q = _mm_mul_epu32(v_elo_d, v_elo_d);
+    v_elo1_d = _mm_srli_si128(v_elo_d, 4);
+    v_se1_q = _mm_mul_epu32(v_elo1_d, v_elo1_d);
+    v_se0_q = _mm_add_epi64(v_se0_q, v_se1_q);
+    v_se2_q = _mm_mul_epu32(v_ehi_d, v_ehi_d);
+    v_ehi3_d = _mm_srli_si128(v_ehi_d, 4);
+    v_se3_q = _mm_mul_epu32(v_ehi3_d, v_ehi3_d);
+    v_se1_q = _mm_add_epi64(v_se2_q, v_se3_q);
+    v_se_q = _mm_add_epi64(v_se0_q, v_se1_q);
+
+    // Accumulate
+    *v_sum_d = _mm_add_epi32(*v_sum_d, v_e_d);
+    *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_q);
+
+    // Move on to the next 2 rows
+    a += a_stride * 2;
+    b += b_stride * 2;
+    m += m_stride * 2;
+  }
+
+  // Horizontal sum
+  *v_sum_d = _mm_hadd_epi32(*v_sum_d, *v_sum_d);
+  *v_sum_d = _mm_hadd_epi32(*v_sum_d, *v_sum_d);
+  *v_sse_q = _mm_add_epi64(*v_sse_q, _mm_srli_si128(*v_sse_q, 8));
+
+  // Round
+  *v_sum_d = _mm_sub_epi32(*v_sum_d, _mm_cmplt_epi32(*v_sum_d, v_zero));
+  *v_sum_d = _mm_add_epi32(*v_sum_d, _mm_set_epi32(0, 0, 0, 31));
+  *v_sum_d = _mm_srai_epi32(*v_sum_d, 6);
+
+  *v_sse_q = _mm_add_epi64(*v_sse_q, _mm_set_epi32(0, 0, 0, 2047));
+  *v_sse_q = _mm_srli_epi64(*v_sse_q, 12);
+}
+
+static INLINE unsigned int highbd_masked_variancewxh_ssse3(
+    const uint16_t *a, int  a_stride,
+    const uint16_t *b, int  b_stride,
+    const uint8_t *m, int  m_stride,
+    int w, int  h,
+    unsigned int *sse) {
+  __m128i v_sum_d, v_sse_q;
+
+  if (w == 4)
+    highbd_masked_variance64_4wide_ssse3(a, a_stride, b,  b_stride, m, m_stride,
+            h, &v_sum_d, &v_sse_q);
+  else
+    highbd_masked_variance64_ssse3(a, a_stride, b,  b_stride, m, m_stride, w, h,
+            &v_sum_d, &v_sse_q);
+
+  // Store the SSE
+  *sse = _mm_cvtsi128_si32(v_sse_q);
+
+  // Compute the variance
+  v_sum_d = _mm_abs_epi32(v_sum_d);
+  v_sum_d = _mm_mul_epu32(v_sum_d, v_sum_d);
+  v_sum_d = _mm_srl_epi64(v_sum_d,
+                          _mm_set_epi32(0, 0, 0, LOG2_P2(w) + LOG2_P2(h)));
+  v_sse_q = _mm_sub_epi64(v_sse_q, v_sum_d);
+
+  return _mm_cvtsi128_si32(v_sse_q);
+}
+
+static INLINE unsigned int highbd_10_masked_variancewxh_ssse3(
+    const uint16_t *a, int  a_stride,
+    const uint16_t *b, int  b_stride,
+    const uint8_t *m, int  m_stride,
+    int w, int  h,
+    unsigned int *sse) {
+  __m128i v_sum_d, v_sse_q;
+
+  if (w == 4)
+    highbd_masked_variance64_4wide_ssse3(a, a_stride, b,  b_stride, m, m_stride,
+            h, &v_sum_d, &v_sse_q);
+  else
+    highbd_masked_variance64_ssse3(a, a_stride, b,  b_stride, m, m_stride, w, h,
+            &v_sum_d, &v_sse_q);
+
+  // Round sum and sse
+  v_sum_d = _mm_srai_epi32(_mm_add_epi32(v_sum_d,
+          _mm_set_epi32(0, 0, 0, 1 << 1)), 2);
+  v_sse_q = _mm_srli_epi64(_mm_add_epi64(v_sse_q,
+          _mm_set_epi32(0, 0, 0, 1 << 3)), 4);
+
+  // Store the SSE
+  *sse = _mm_cvtsi128_si32(v_sse_q);
+
+  // Compute the variance
+  v_sum_d = _mm_abs_epi32(v_sum_d);
+  v_sum_d = _mm_mul_epu32(v_sum_d, v_sum_d);
+  v_sum_d = _mm_srl_epi64(v_sum_d,
+                          _mm_set_epi32(0, 0, 0, LOG2_P2(w) + LOG2_P2(h)));
+  v_sse_q = _mm_sub_epi64(v_sse_q, v_sum_d);
+
+  return _mm_cvtsi128_si32(v_sse_q);
+}
+
+static INLINE unsigned int highbd_12_masked_variancewxh_ssse3(
+    const uint16_t *a, int  a_stride,
+    const uint16_t *b, int  b_stride,
+    const uint8_t *m, int  m_stride,
+    int w, int  h,
+    unsigned int *sse) {
+  __m128i v_sum_d, v_sse_q;
+
+  if (w == 4)
+    highbd_masked_variance64_4wide_ssse3(a, a_stride, b,  b_stride, m, m_stride,
+            h, &v_sum_d, &v_sse_q);
+  else
+    highbd_masked_variance64_ssse3(a, a_stride, b,  b_stride, m, m_stride, w, h,
+            &v_sum_d, &v_sse_q);
+
+  // Round sum and sse
+  v_sum_d = _mm_srai_epi32(_mm_add_epi32(v_sum_d,
+          _mm_set_epi32(0, 0, 0, 1 << 3)), 4);
+  v_sse_q = _mm_srli_epi64(_mm_add_epi64(v_sse_q,
+          _mm_set_epi32(0, 0, 0, 1 << 7)), 8);
+
+  // Store the SSE
+  *sse = _mm_cvtsi128_si32(v_sse_q);
+
+  // Compute the variance
+  v_sum_d = _mm_abs_epi32(v_sum_d);
+  v_sum_d = _mm_mul_epu32(v_sum_d, v_sum_d);
+  v_sum_d = _mm_srl_epi64(v_sum_d,
+                          _mm_set_epi32(0, 0, 0, LOG2_P2(w) + LOG2_P2(h)));
+  v_sse_q = _mm_sub_epi64(v_sse_q, v_sum_d);
+
+  return _mm_cvtsi128_si32(v_sse_q);
+}
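The 10- and 12-bit wrappers differ from the 8-bit path only in this extra renormalisation; one reading of the constants above (an interpretation of the code, not stated in the change itself):

/* For bit depth bd, pixel differences carry a factor of 2^(bd - 8) relative
 * to 8-bit data, so the sum appears to be scaled back by 2^(bd - 8) and the
 * squared error by 2^(2 * (bd - 8)):
 *   10-bit: sum -> (sum + 2) >> 2,   sse -> (sse + 8)   >> 4
 *   12-bit: sum -> (sum + 8) >> 4,   sse -> (sse + 128) >> 8
 * which matches the rounding constants used in the two functions above. */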
+
+#define HIGHBD_MASKED_VARWXH(W, H)                                             \
+unsigned int vpx_highbd_masked_variance##W##x##H##_ssse3(                      \
+  const uint8_t *a8, int a_stride,                                             \
+  const uint8_t *b8, int b_stride,                                             \
+  const uint8_t *m, int m_stride,                                              \
+  unsigned int *sse) {                                                         \
+  uint16_t *a = CONVERT_TO_SHORTPTR(a8);                                       \
+  uint16_t *b = CONVERT_TO_SHORTPTR(b8);                                       \
+  return highbd_masked_variancewxh_ssse3(a, a_stride,                          \
+                                         b, b_stride,                          \
+                                         m, m_stride,                          \
+                                         W, H, sse);                           \
+}                                                                              \
+                                                                               \
+unsigned int vpx_highbd_10_masked_variance##W##x##H##_ssse3(                   \
+  const uint8_t *a8, int a_stride,                                             \
+  const uint8_t *b8, int b_stride,                                             \
+  const uint8_t *m, int m_stride,                                              \
+  unsigned int *sse) {                                                         \
+  uint16_t *a = CONVERT_TO_SHORTPTR(a8);                                       \
+  uint16_t *b = CONVERT_TO_SHORTPTR(b8);                                       \
+  return highbd_10_masked_variancewxh_ssse3(a, a_stride,                       \
+                                            b, b_stride,                       \
+                                            m, m_stride,                       \
+                                            W, H, sse);                        \
+}                                                                              \
+                                                                               \
+unsigned int vpx_highbd_12_masked_variance##W##x##H##_ssse3(                   \
+  const uint8_t *a8, int a_stride,                                             \
+  const uint8_t *b8, int b_stride,                                             \
+  const uint8_t *m, int m_stride,                                              \
+  unsigned int *sse) {                                                         \
+  uint16_t *a = CONVERT_TO_SHORTPTR(a8);                                       \
+  uint16_t *b = CONVERT_TO_SHORTPTR(b8);                                       \
+  return highbd_12_masked_variancewxh_ssse3(a, a_stride,                       \
+                                            b, b_stride,                       \
+                                            m, m_stride,                       \
+                                            W, H, sse);                        \
+}
+
+HIGHBD_MASKED_VARWXH(4, 4)
+HIGHBD_MASKED_VARWXH(4, 8)
+HIGHBD_MASKED_VARWXH(8, 4)
+HIGHBD_MASKED_VARWXH(8, 8)
+HIGHBD_MASKED_VARWXH(8, 16)
+HIGHBD_MASKED_VARWXH(16, 8)
+HIGHBD_MASKED_VARWXH(16, 16)
+HIGHBD_MASKED_VARWXH(16, 32)
+HIGHBD_MASKED_VARWXH(32, 16)
+HIGHBD_MASKED_VARWXH(32, 32)
+HIGHBD_MASKED_VARWXH(32, 64)
+HIGHBD_MASKED_VARWXH(64, 32)
+HIGHBD_MASKED_VARWXH(64, 64)
+
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+// Sub pixel versions
+//////////////////////////////////////////////////////////////////////////////
+
+typedef __m128i (*filter_fn_t)(__m128i v_a_b, __m128i v_b_b,
+                                    __m128i v_filter_b);
+
+static INLINE __m128i apply_filter8(const __m128i v_a_b, const __m128i v_b_b,
+                                    const __m128i v_filter_b) {
+  (void) v_filter_b;
+  return _mm_avg_epu8(v_a_b, v_b_b);
+}
+
+static INLINE __m128i apply_filter(const __m128i v_a_b, const __m128i v_b_b,
+                                   const __m128i v_filter_b) {
+  const __m128i v_rounding_w = _mm_set1_epi16(1 << (FILTER_BITS - 1));
+  __m128i v_input_lo_b = _mm_unpacklo_epi8(v_a_b, v_b_b);
+  __m128i v_input_hi_b = _mm_unpackhi_epi8(v_a_b, v_b_b);
+  __m128i v_temp0_w = _mm_maddubs_epi16(v_input_lo_b, v_filter_b);
+  __m128i v_temp1_w = _mm_maddubs_epi16(v_input_hi_b, v_filter_b);
+  __m128i v_res_lo_w = _mm_srai_epi16(_mm_add_epi16(v_temp0_w, v_rounding_w),
+                                      FILTER_BITS);
+  __m128i v_res_hi_w = _mm_srai_epi16(_mm_add_epi16(v_temp1_w, v_rounding_w),
+                                      FILTER_BITS);
+  return _mm_packus_epi16(v_res_lo_w, v_res_hi_w);
+}
+
+// Apply the filter to the contents of the lower half of a and b
+static INLINE void apply_filter_lo(const __m128i v_a_lo_b,
+                                   const __m128i v_b_lo_b,
+                                   const __m128i v_filter_b,
+                                   __m128i* v_res_w) {
+  const __m128i v_rounding_w = _mm_set1_epi16(1 << (FILTER_BITS - 1));
+  __m128i v_input_b = _mm_unpacklo_epi8(v_a_lo_b, v_b_lo_b);
+  __m128i v_temp0_w = _mm_maddubs_epi16(v_input_b, v_filter_b);
+  *v_res_w = _mm_srai_epi16(_mm_add_epi16(v_temp0_w, v_rounding_w),
+                              FILTER_BITS);
+}
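A per-pixel plain-C model of the two filter helpers above (illustrative name; FILTER_BITS comes from vpx_dsp/vpx_filter.h, already included at the top of this file):

static uint8_t bilinear_blend_c_model(uint8_t a, uint8_t b, int f0, int f1) {
  // f0 and f1 are the two bilinear taps for the sub-pel offset; they sum to
  // 1 << FILTER_BITS, so the rounded result stays within the 8-bit range.
  return (uint8_t)((f0 * a + f1 * b + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
}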
+
+static void sum_and_sse(const __m128i v_a_b, const __m128i v_b_b,
+                        const __m128i v_m_b, __m128i* v_sum_d,
+                        __m128i* v_sse_q) {
+  const __m128i v_zero = _mm_setzero_si128();
+  // Unpack to 16 bits - still containing max 8 bits
+  const __m128i v_a0_w = _mm_unpacklo_epi8(v_a_b, v_zero);
+  const __m128i v_b0_w = _mm_unpacklo_epi8(v_b_b, v_zero);
+  const __m128i v_m0_w = _mm_unpacklo_epi8(v_m_b, v_zero);
+  const __m128i v_a1_w = _mm_unpackhi_epi8(v_a_b, v_zero);
+  const __m128i v_b1_w = _mm_unpackhi_epi8(v_b_b, v_zero);
+  const __m128i v_m1_w = _mm_unpackhi_epi8(v_m_b, v_zero);
+
+  // Difference: [-255, 255]
+  const __m128i v_d0_w = _mm_sub_epi16(v_a0_w, v_b0_w);
+  const __m128i v_d1_w = _mm_sub_epi16(v_a1_w, v_b1_w);
+
+  // Error - [-255, 255] * [0, 64] = [0xc040, 0x3fc0] => fits in 15 bits
+  const __m128i v_e0_w = _mm_mullo_epi16(v_d0_w, v_m0_w);
+  const __m128i v_e0_d = _mm_madd_epi16(v_d0_w, v_m0_w);
+  const __m128i v_e1_w = _mm_mullo_epi16(v_d1_w, v_m1_w);
+  const __m128i v_e1_d = _mm_madd_epi16(v_d1_w, v_m1_w);
+
+  // Squared error - using madd it's max (15 bits * 15 bits) * 2 = 31 bits
+  const __m128i v_se0_d = _mm_madd_epi16(v_e0_w, v_e0_w);
+  const __m128i v_se1_d = _mm_madd_epi16(v_e1_w, v_e1_w);
+
+  // Sum of v_se{0,1}_d - 31 bits + 31 bits = 32 bits
+  const __m128i v_se_d = _mm_add_epi32(v_se0_d, v_se1_d);
+
+  // Unpack Squared error to 64 bits
+  const __m128i v_se_lo_q = _mm_unpacklo_epi32(v_se_d, v_zero);
+  const __m128i v_se_hi_q = _mm_unpackhi_epi32(v_se_d, v_zero);
+
+  // Accumulate
+  *v_sum_d = _mm_add_epi32(*v_sum_d, v_e0_d);
+  *v_sum_d = _mm_add_epi32(*v_sum_d, v_e1_d);
+  *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_lo_q);
+  *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_hi_q);
+}
+
+static INLINE int calc_masked_variance(__m128i v_sum_d, __m128i v_sse_q,
+                                       unsigned int* sse,
+                                       const int w, const int h) {
+  int sum;
+
+  // Horizontal sum
+  v_sum_d = _mm_hadd_epi32(v_sum_d, v_sum_d);
+  v_sum_d = _mm_hadd_epi32(v_sum_d, v_sum_d);
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_srli_si128(v_sse_q, 8));
+
+  // Round
+  sum = _mm_cvtsi128_si32(v_sum_d);
+  sum = (sum >= 0) ? ((sum + 31) >> 6) : -((-sum + 31) >> 6);
+
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_set_epi32(0, 0, 0, 2047));
+  v_sse_q = _mm_srli_epi64(v_sse_q, 12);
+
+  // Store the SSE
+  *sse = _mm_cvtsi128_si32(v_sse_q);
+
+  // Compute the variance
+  return  *sse - (((int64_t)sum * sum) >> (LOG2_P2(h) + LOG2_P2(w)));
+}
+
+
+// Functions for width (W) >= 16
+unsigned int vpx_masked_subpel_varWxH_xzero(
+        const uint8_t *src, int src_stride, int yoffset,
+        const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int w, int h, filter_fn_t filter_fn) {
+  int i, j;
+  __m128i v_src0_b, v_src1_b, v_res_b, v_dst_b, v_msk_b;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  const __m128i v_filter_b = _mm_set1_epi16((
+        vpx_bilinear_filters[yoffset][1] << 8) +
+        vpx_bilinear_filters[yoffset][0]);
+  assert(yoffset < 8);
+  for (j = 0; j < w; j += 16) {
+    // Load the first row ready
+    v_src0_b = _mm_loadu_si128((const __m128i*)(src + j));
+    // Process 2 rows at a time
+    for (i = 0; i < h; i += 2) {
+      // Load the next row apply the filter
+      v_src1_b = _mm_loadu_si128((const __m128i*)(src + j + src_stride));
+      v_res_b = filter_fn(v_src0_b, v_src1_b, v_filter_b);
+      // Load the dst and msk for the variance calculation
+      v_dst_b = _mm_loadu_si128((const __m128i*)(dst + j));
+      v_msk_b = _mm_loadu_si128((const __m128i*)(msk + j));
+      sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
+
+      // Load the next row apply the filter
+      v_src0_b = _mm_loadu_si128((const __m128i*)(src + j + src_stride * 2));
+      v_res_b = filter_fn(v_src1_b, v_src0_b, v_filter_b);
+      // Load the dst and msk for the variance calculation
+      v_dst_b = _mm_loadu_si128((const __m128i*)(dst + j + dst_stride));
+      v_msk_b = _mm_loadu_si128((const __m128i*)(msk + j + msk_stride));
+      sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
+      // Move onto the next block of rows
+      src += src_stride * 2;
+      dst += dst_stride * 2;
+      msk += msk_stride * 2;
+    }
+    // Reset to the top of the block
+    src -= src_stride * h;
+    dst -= dst_stride * h;
+    msk -= msk_stride * h;
+  }
+  return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
+}
+unsigned int vpx_masked_subpel_varWxH_yzero(
+        const uint8_t *src, int src_stride, int xoffset,
+        const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int w, int h, filter_fn_t filter_fn) {
+  int i, j;
+  __m128i v_src0_b, v_src1_b, v_res_b, v_dst_b, v_msk_b;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  const __m128i v_filter_b = _mm_set1_epi16((
+        vpx_bilinear_filters[xoffset][1] << 8) +
+        vpx_bilinear_filters[xoffset][0]);
+  assert(xoffset < 8);
+  for (i = 0; i < h; i++) {
+    for (j = 0; j < w; j += 16) {
+      // Load this row and one below & apply the filter to them
+      v_src0_b = _mm_loadu_si128((const __m128i*)(src + j));
+      v_src1_b = _mm_loadu_si128((const __m128i*)(src + j + 1));
+      v_res_b = filter_fn(v_src0_b, v_src1_b, v_filter_b);
+
+      // Load the dst and msk for the variance calculation
+      v_dst_b = _mm_loadu_si128((const __m128i*)(dst + j));
+      v_msk_b = _mm_loadu_si128((const __m128i*)(msk + j));
+      sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
+    }
+    src += src_stride;
+    dst += dst_stride;
+    msk += msk_stride;
+  }
+  return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
+}
+unsigned int vpx_masked_subpel_varWxH_xnonzero_ynonzero(
+        const uint8_t *src, int src_stride, int xoffset, int yoffset,
+        const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int w, int h, filter_fn_t xfilter_fn,
+        filter_fn_t yfilter_fn) {
+  int i, j;
+  __m128i v_src0_b, v_src1_b, v_src2_b, v_src3_b;
+  __m128i v_filtered0_b, v_filtered1_b, v_res_b, v_dst_b, v_msk_b;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  const __m128i v_filterx_b = _mm_set1_epi16((
+        vpx_bilinear_filters[xoffset][1] << 8) +
+        vpx_bilinear_filters[xoffset][0]);
+  const __m128i v_filtery_b = _mm_set1_epi16((
+        vpx_bilinear_filters[yoffset][1] << 8) +
+        vpx_bilinear_filters[yoffset][0]);
+  assert(yoffset < 8);
+  assert(xoffset < 8);
+  for (j = 0; j < w; j += 16) {
+    // Load the first row ready
+    v_src0_b = _mm_loadu_si128((const __m128i*)(src + j));
+    v_src1_b = _mm_loadu_si128((const __m128i*)(src + j + 1));
+    v_filtered0_b = xfilter_fn(v_src0_b, v_src1_b, v_filterx_b);
+    // Process 2 rows at a time
+    for (i = 0; i < h; i += 2) {
+      // Load the next row & apply the filter
+      v_src2_b = _mm_loadu_si128((const __m128i*)(src + src_stride + j));
+      v_src3_b = _mm_loadu_si128((const __m128i*)(src + src_stride + j + 1));
+      v_filtered1_b = xfilter_fn(v_src2_b, v_src3_b, v_filterx_b);
+      // Load the dst and msk for the variance calculation
+      v_dst_b = _mm_loadu_si128((const __m128i*)(dst + j));
+      v_msk_b = _mm_loadu_si128((const __m128i*)(msk + j));
+      // Complete the calculation for this row and add it to the running total
+      v_res_b = yfilter_fn(v_filtered0_b, v_filtered1_b, v_filtery_b);
+      sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
+
+      // Load the next row & apply the filter
+      v_src0_b = _mm_loadu_si128((const __m128i*)(src + src_stride * 2 + j));
+      v_src1_b = _mm_loadu_si128((const __m128i*)(src + src_stride * 2 +
+                                                  j + 1));
+      v_filtered0_b = xfilter_fn(v_src0_b, v_src1_b, v_filterx_b);
+      // Load the dst and msk for the variance calculation
+      v_dst_b = _mm_loadu_si128((const __m128i*)(dst + dst_stride + j));
+      v_msk_b = _mm_loadu_si128((const __m128i*)(msk + msk_stride + j));
+      // Complete the calculation for this row and add it to the running total
+      v_res_b = yfilter_fn(v_filtered1_b, v_filtered0_b, v_filtery_b);
+      sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
+      // Move onto the next block of rows
+      src += src_stride * 2;
+      dst += dst_stride * 2;
+      msk += msk_stride * 2;
+    }
+    // Reset to the top of the block
+    src -= src_stride * h;
+    dst -= dst_stride * h;
+    msk -= msk_stride * h;
+  }
+  return calc_masked_variance(v_sum_d, v_sse_q, sse, w, h);
+}
+
+// Note the row load order: xmm[127:96] = row 1, xmm[95:64] = row 2,
+// xmm[63:32] = row 3, xmm[31:0] = row 4
+unsigned int vpx_masked_subpel_var4xH_xzero(
+        const uint8_t *src, int src_stride, int  yoffset,
+        const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int h) {
+  int i;
+  __m128i v_src0_b, v_src1_b, v_src2_b, v_src3_b, v_filtered1_w, v_filtered2_w;
+  __m128i v_dst0_b, v_dst1_b, v_dst2_b, v_dst3_b;
+  __m128i v_msk0_b, v_msk1_b, v_msk2_b, v_msk3_b, v_res_b;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  __m128i v_filter_b = _mm_set1_epi16((
+        vpx_bilinear_filters[yoffset][1] << 8) +
+        vpx_bilinear_filters[yoffset][0]);
+  assert(yoffset < 8);
+  // Load the first row of src data ready
+  v_src0_b = _mm_loadl_epi64((const __m128i*)src);
+  for (i = 0; i < h; i += 4) {
+    // Load the rest of the source data for these rows
+    v_src1_b = _mm_loadl_epi64((const __m128i*)(src + src_stride * 1));
+    v_src1_b = _mm_unpacklo_epi32(v_src1_b, v_src0_b);
+    v_src2_b = _mm_loadl_epi64((const __m128i*)(src + src_stride * 2));
+    v_src3_b = _mm_loadl_epi64((const __m128i*)(src + src_stride * 3));
+    v_src3_b = _mm_unpacklo_epi32(v_src3_b, v_src2_b);
+    v_src0_b = _mm_loadl_epi64((const __m128i*)(src + src_stride * 4));
+    // Load the dst data
+    v_dst0_b = _mm_cvtsi32_si128(*(const uint32_t*)(dst + dst_stride * 0));
+    v_dst1_b = _mm_cvtsi32_si128(*(const uint32_t*)(dst + dst_stride * 1));
+    v_dst0_b = _mm_unpacklo_epi32(v_dst1_b, v_dst0_b);
+    v_dst2_b = _mm_cvtsi32_si128(*(const uint32_t*)(dst + dst_stride * 2));
+    v_dst3_b = _mm_cvtsi32_si128(*(const uint32_t*)(dst + dst_stride * 3));
+    v_dst2_b = _mm_unpacklo_epi32(v_dst3_b, v_dst2_b);
+    v_dst0_b = _mm_unpacklo_epi64(v_dst2_b, v_dst0_b);
+    // Load the mask data
+    v_msk0_b = _mm_cvtsi32_si128(*(const uint32_t*)(msk + msk_stride * 0));
+    v_msk1_b = _mm_cvtsi32_si128(*(const uint32_t*)(msk + msk_stride * 1));
+    v_msk0_b = _mm_unpacklo_epi32(v_msk1_b, v_msk0_b);
+    v_msk2_b = _mm_cvtsi32_si128(*(const uint32_t*)(msk + msk_stride * 2));
+    v_msk3_b = _mm_cvtsi32_si128(*(const uint32_t*)(msk + msk_stride * 3));
+    v_msk2_b = _mm_unpacklo_epi32(v_msk3_b, v_msk2_b);
+    v_msk0_b = _mm_unpacklo_epi64(v_msk2_b, v_msk0_b);
+    // Apply the y filter
+    if (yoffset == 8) {
+      v_src1_b = _mm_unpacklo_epi64(v_src3_b, v_src1_b);
+      v_src2_b = _mm_or_si128(_mm_slli_si128(v_src1_b, 4),
+            _mm_and_si128(v_src0_b, _mm_setr_epi32(-1, 0, 0, 0)));
+      v_res_b = _mm_avg_epu8(v_src1_b, v_src2_b);
+    } else {
+      v_src2_b = _mm_or_si128(_mm_slli_si128(v_src1_b, 4),
+            _mm_and_si128(v_src2_b, _mm_setr_epi32(-1, 0, 0, 0)));
+      apply_filter_lo(v_src1_b, v_src2_b, v_filter_b, &v_filtered1_w);
+      v_src2_b = _mm_or_si128(_mm_slli_si128(v_src3_b, 4),
+            _mm_and_si128(v_src0_b, _mm_setr_epi32(-1, 0, 0, 0)));
+      apply_filter_lo(v_src3_b, v_src2_b, v_filter_b, &v_filtered2_w);
+      v_res_b = _mm_packus_epi16(v_filtered2_w, v_filtered1_w);
+    }
+    // Compute the sum and SSE
+    sum_and_sse(v_res_b, v_dst0_b, v_msk0_b, &v_sum_d, &v_sse_q);
+    // Move onto the next set of rows
+    src += src_stride * 4;
+    dst += dst_stride * 4;
+    msk += msk_stride * 4;
+  }
+  return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
+}
+
+// Note the row load order: xmm[127:64] = row 1, xmm[63:0] = row 2
+unsigned int vpx_masked_subpel_var8xH_xzero(
+        const uint8_t *src, int src_stride, int yoffset,
+        const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int h) {
+  int i;
+  __m128i v_src0_b, v_src1_b, v_filtered0_w, v_filtered1_w, v_res_b;
+  __m128i v_dst_b = _mm_setzero_si128();
+  __m128i v_msk_b = _mm_setzero_si128();
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  __m128i v_filter_b = _mm_set1_epi16((
+        vpx_bilinear_filters[yoffset][1] << 8) +
+        vpx_bilinear_filters[yoffset][0]);
+  assert(yoffset < 8);
+  // Load the first row of src data ready
+  v_src0_b = _mm_loadl_epi64((const __m128i*)src);
+  for (i = 0; i < h; i += 2) {
+    if (yoffset == 8) {
+      // Load the rest of the source data for these rows
+      v_src1_b = _mm_or_si128(
+            _mm_slli_si128(v_src0_b, 8),
+            _mm_loadl_epi64((const __m128i*)(src + src_stride * 1)));
+      v_src0_b = _mm_or_si128(
+            _mm_slli_si128(v_src1_b, 8),
+            _mm_loadl_epi64((const __m128i*)(src + src_stride * 2)));
+      // Apply the y filter
+      v_res_b = _mm_avg_epu8(v_src1_b, v_src0_b);
+    } else {
+      // Load the data and apply the y filter
+      v_src1_b = _mm_loadl_epi64((const __m128i*)(src + src_stride * 1));
+      apply_filter_lo(v_src0_b, v_src1_b, v_filter_b, &v_filtered0_w);
+      v_src0_b = _mm_loadl_epi64((const __m128i*)(src + src_stride * 2));
+      apply_filter_lo(v_src1_b, v_src0_b, v_filter_b, &v_filtered1_w);
+      v_res_b = _mm_packus_epi16(v_filtered1_w, v_filtered0_w);
+    }
+    // Load the dst data
+    v_dst_b = _mm_unpacklo_epi64(
+            _mm_loadl_epi64((const __m128i*)(dst + dst_stride * 1)),
+            _mm_loadl_epi64((const __m128i*)(dst + dst_stride * 0)));
+    // Load the mask data
+    v_msk_b = _mm_unpacklo_epi64(
+            _mm_loadl_epi64((const __m128i*)(msk + msk_stride * 1)),
+            _mm_loadl_epi64((const __m128i*)(msk + msk_stride * 0)));
+    // Compute the sum and SSE
+    sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
+    // Move onto the next set of rows
+    src += src_stride * 2;
+    dst += dst_stride * 2;
+    msk += msk_stride * 2;
+  }
+  return calc_masked_variance(v_sum_d, v_sse_q, sse, 8, h);
+}
+
+// Note the row load order: xmm[127:96] = row 1, xmm[95:64] = row 2,
+// xmm[63:32] = row 3, xmm[31:0] = row 4
+unsigned int vpx_masked_subpel_var4xH_yzero(
+        const uint8_t *src, int src_stride, int xoffset,
+        const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int h) {
+  int i;
+  __m128i v_src0_b, v_src1_b, v_src2_b, v_src3_b, v_filtered0_w, v_filtered2_w;
+  __m128i v_src0_shift_b, v_src1_shift_b, v_src2_shift_b, v_src3_shift_b;
+  __m128i v_dst0_b, v_dst1_b, v_dst2_b, v_dst3_b;
+  __m128i v_msk0_b, v_msk1_b, v_msk2_b, v_msk3_b, v_res_b;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  __m128i v_filter_b = _mm_set1_epi16((
+        vpx_bilinear_filters[xoffset][1] << 8) +
+        vpx_bilinear_filters[xoffset][0]);
+  assert(xoffset < 8);
+  for (i = 0; i < h; i += 4) {
+    // Load the src data
+    v_src0_b = _mm_loadl_epi64((const __m128i*)src);
+    v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
+    v_src1_b = _mm_loadl_epi64((const __m128i*)(src + src_stride * 1));
+    v_src0_b = _mm_unpacklo_epi32(v_src1_b, v_src0_b);
+    v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
+    v_src2_b = _mm_loadl_epi64((const __m128i*)(src + src_stride * 2));
+    v_src0_shift_b = _mm_unpacklo_epi32(v_src1_shift_b, v_src0_shift_b);
+    v_src2_shift_b = _mm_srli_si128(v_src2_b, 1);
+    v_src3_b = _mm_loadl_epi64((const __m128i*)(src + src_stride * 3));
+    v_src2_b = _mm_unpacklo_epi32(v_src3_b, v_src2_b);
+    v_src3_shift_b = _mm_srli_si128(v_src3_b, 1);
+    v_src2_shift_b = _mm_unpacklo_epi32(v_src3_shift_b, v_src2_shift_b);
+    // Load the dst data
+    v_dst0_b = _mm_cvtsi32_si128(*(const uint32_t*)(dst + dst_stride * 0));
+    v_dst1_b = _mm_cvtsi32_si128(*(const uint32_t*)(dst + dst_stride * 1));
+    v_dst0_b = _mm_unpacklo_epi32(v_dst1_b, v_dst0_b);
+    v_dst2_b = _mm_cvtsi32_si128(*(const uint32_t*)(dst + dst_stride * 2));
+    v_dst3_b = _mm_cvtsi32_si128(*(const uint32_t*)(dst + dst_stride * 3));
+    v_dst2_b = _mm_unpacklo_epi32(v_dst3_b, v_dst2_b);
+    v_dst0_b = _mm_unpacklo_epi64(v_dst2_b, v_dst0_b);
+    // Load the mask data
+    v_msk0_b = _mm_cvtsi32_si128(*(const uint32_t*)(msk + msk_stride * 0));
+    v_msk1_b = _mm_cvtsi32_si128(*(const uint32_t*)(msk + msk_stride * 1));
+    v_msk0_b = _mm_unpacklo_epi32(v_msk1_b, v_msk0_b);
+    v_msk2_b = _mm_cvtsi32_si128(*(const uint32_t*)(msk + msk_stride * 2));
+    v_msk3_b = _mm_cvtsi32_si128(*(const uint32_t*)(msk + msk_stride * 3));
+    v_msk2_b = _mm_unpacklo_epi32(v_msk3_b, v_msk2_b);
+    v_msk0_b = _mm_unpacklo_epi64(v_msk2_b, v_msk0_b);
+    // Apply the x filter
+    if (xoffset == 8) {
+      v_src0_b = _mm_unpacklo_epi64(v_src2_b, v_src0_b);
+      v_src0_shift_b = _mm_unpacklo_epi64(v_src2_shift_b, v_src0_shift_b);
+      v_res_b = _mm_avg_epu8(v_src0_b, v_src0_shift_b);
+    } else {
+      apply_filter_lo(v_src0_b, v_src0_shift_b, v_filter_b, &v_filtered0_w);
+      apply_filter_lo(v_src2_b, v_src2_shift_b, v_filter_b, &v_filtered2_w);
+      v_res_b = _mm_packus_epi16(v_filtered2_w, v_filtered0_w);
+    }
+    // Compute the sum and SSE
+    sum_and_sse(v_res_b, v_dst0_b, v_msk0_b, &v_sum_d, &v_sse_q);
+    // Move onto the next set of rows
+    src += src_stride * 4;
+    dst += dst_stride * 4;
+    msk += msk_stride * 4;
+  }
+  return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
+}
+
+unsigned int vpx_masked_subpel_var8xH_yzero(
+        const uint8_t *src, int src_stride, int xoffset,
+        const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int h) {
+  int i;
+  __m128i v_src0_b, v_src1_b, v_filtered0_w, v_filtered1_w;
+  __m128i v_src0_shift_b, v_src1_shift_b, v_res_b, v_dst_b, v_msk_b;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  __m128i v_filter_b = _mm_set1_epi16((
+        vpx_bilinear_filters[xoffset][1] << 8) +
+        vpx_bilinear_filters[xoffset][0]);
+  assert(xoffset < 8);
+  for (i = 0; i < h; i += 2) {
+    // Load the src data
+    v_src0_b = _mm_loadu_si128((const __m128i*)(src));
+    v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
+    v_src1_b = _mm_loadu_si128((const __m128i*)(src + src_stride));
+    v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
+    // Apply the x filter
+    if (xoffset == 8) {
+      v_src1_b = _mm_unpacklo_epi64(v_src0_b, v_src1_b);
+      v_src1_shift_b = _mm_unpacklo_epi64(v_src0_shift_b, v_src1_shift_b);
+      v_res_b = _mm_avg_epu8(v_src1_b, v_src1_shift_b);
+    } else {
+      apply_filter_lo(v_src0_b, v_src0_shift_b, v_filter_b, &v_filtered0_w);
+      apply_filter_lo(v_src1_b, v_src1_shift_b, v_filter_b, &v_filtered1_w);
+      v_res_b = _mm_packus_epi16(v_filtered0_w, v_filtered1_w);
+    }
+    // Load the dst data
+    v_dst_b = _mm_unpacklo_epi64(
+            _mm_loadl_epi64((const __m128i*)(dst + dst_stride * 0)),
+            _mm_loadl_epi64((const __m128i*)(dst + dst_stride * 1)));
+    // Load the mask data
+    v_msk_b = _mm_unpacklo_epi64(
+            _mm_loadl_epi64((const __m128i*)(msk + msk_stride * 0)),
+            _mm_loadl_epi64((const __m128i*)(msk + msk_stride * 1)));
+    // Compute the sum and SSE
+    sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
+    // Move onto the next set of rows
+    src += src_stride * 2;
+    dst += dst_stride * 2;
+    msk += msk_stride * 2;
+  }
+  return calc_masked_variance(v_sum_d, v_sse_q, sse, 8, h);
+}
+
+// Note the order in which the rows are loaded: xmm[127:96] = row 1,
+// xmm[95:64] = row 2, xmm[63:32] = row 3, xmm[31:0] = row 4.
+unsigned int vpx_masked_subpel_var4xH_xnonzero_ynonzero(
+        const uint8_t *src, int src_stride, int xoffset, int  yoffset,
+        const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int h) {
+  int i;
+  __m128i v_src0_b, v_src1_b, v_src2_b, v_src3_b, v_filtered0_w, v_filtered2_w;
+  __m128i v_src0_shift_b, v_src1_shift_b, v_src2_shift_b, v_src3_shift_b;
+  __m128i v_dst0_b, v_dst1_b, v_dst2_b, v_dst3_b, v_temp_b;
+  __m128i v_msk0_b, v_msk1_b, v_msk2_b, v_msk3_b, v_extra_row_b, v_res_b;
+  __m128i v_xres_b[2];
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  __m128i v_filterx_b = _mm_set1_epi16((
+        vpx_bilinear_filters[xoffset][1] << 8) +
+        vpx_bilinear_filters[xoffset][0]);
+  __m128i v_filtery_b = _mm_set1_epi16((
+        vpx_bilinear_filters[yoffset][1] << 8) +
+        vpx_bilinear_filters[yoffset][0]);
+  assert(xoffset < 8);
+  assert(yoffset < 8);
+  for (i = 0; i < h; i += 4) {
+    // Load the src data
+    v_src0_b = _mm_loadl_epi64((const __m128i*)src);
+    v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
+    v_src1_b = _mm_loadl_epi64((const __m128i*)(src + src_stride * 1));
+    v_src0_b = _mm_unpacklo_epi32(v_src1_b, v_src0_b);
+    v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
+    v_src2_b = _mm_loadl_epi64((const __m128i*)(src + src_stride * 2));
+    v_src0_shift_b = _mm_unpacklo_epi32(v_src1_shift_b, v_src0_shift_b);
+    v_src2_shift_b = _mm_srli_si128(v_src2_b, 1);
+    v_src3_b = _mm_loadl_epi64((const __m128i*)(src + src_stride * 3));
+    v_src2_b = _mm_unpacklo_epi32(v_src3_b, v_src2_b);
+    v_src3_shift_b = _mm_srli_si128(v_src3_b, 1);
+    v_src2_shift_b = _mm_unpacklo_epi32(v_src3_shift_b, v_src2_shift_b);
+    // Apply the x filter
+    if (xoffset == 8) {
+      v_src0_b = _mm_unpacklo_epi64(v_src2_b, v_src0_b);
+      v_src0_shift_b = _mm_unpacklo_epi64(v_src2_shift_b, v_src0_shift_b);
+      v_xres_b[i == 0 ? 0 : 1] = _mm_avg_epu8(v_src0_b, v_src0_shift_b);
+    } else {
+      apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
+      apply_filter_lo(v_src2_b, v_src2_shift_b, v_filterx_b, &v_filtered2_w);
+      v_xres_b[i == 0 ? 0 : 1] = _mm_packus_epi16(v_filtered2_w, v_filtered0_w);
+    }
+    // Move onto the next set of rows
+    src += src_stride * 4;
+  }
+  // Load one more row to be used in the y filter
+  v_src0_b = _mm_loadl_epi64((const __m128i*)src);
+  v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
+  // Apply the x filter
+  if (xoffset == 8) {
+    v_extra_row_b = _mm_and_si128(
+            _mm_avg_epu8(v_src0_b, v_src0_shift_b),
+            _mm_setr_epi32(-1, 0, 0, 0));
+  } else {
+    apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
+    v_extra_row_b = _mm_and_si128(
+            _mm_packus_epi16(v_filtered0_w, _mm_setzero_si128()),
+            _mm_setr_epi32(-1, 0, 0, 0));
+  }
+
+  for (i = 0; i < h; i += 4) {
+    if (h == 8 && i == 0) {
+      v_temp_b = _mm_or_si128(_mm_slli_si128(v_xres_b[0], 4),
+                              _mm_srli_si128(v_xres_b[1], 12));
+    } else {
+      v_temp_b = _mm_or_si128(_mm_slli_si128(v_xres_b[i == 0 ? 0 : 1], 4),
+                              v_extra_row_b);
+    }
+    // Apply the y filter
+    if (yoffset == 8) {
+      v_res_b = _mm_avg_epu8(v_xres_b[i == 0 ? 0 : 1], v_temp_b);
+    } else {
+      v_res_b = apply_filter(v_xres_b[i == 0 ? 0 : 1], v_temp_b, v_filtery_b);
+    }
+
+    // Load the dst data
+    v_dst0_b = _mm_cvtsi32_si128(*(const uint32_t*)(dst + dst_stride * 0));
+    v_dst1_b = _mm_cvtsi32_si128(*(const uint32_t*)(dst + dst_stride * 1));
+    v_dst0_b = _mm_unpacklo_epi32(v_dst1_b, v_dst0_b);
+    v_dst2_b = _mm_cvtsi32_si128(*(const uint32_t*)(dst + dst_stride * 2));
+    v_dst3_b = _mm_cvtsi32_si128(*(const uint32_t*)(dst + dst_stride * 3));
+    v_dst2_b = _mm_unpacklo_epi32(v_dst3_b, v_dst2_b);
+    v_dst0_b = _mm_unpacklo_epi64(v_dst2_b, v_dst0_b);
+    // Load the mask data
+    v_msk0_b = _mm_cvtsi32_si128(*(const uint32_t*)(msk + msk_stride * 0));
+    v_msk1_b = _mm_cvtsi32_si128(*(const uint32_t*)(msk + msk_stride * 1));
+    v_msk0_b = _mm_unpacklo_epi32(v_msk1_b, v_msk0_b);
+    v_msk2_b = _mm_cvtsi32_si128(*(const uint32_t*)(msk + msk_stride * 2));
+    v_msk3_b = _mm_cvtsi32_si128(*(const uint32_t*)(msk + msk_stride * 3));
+    v_msk2_b = _mm_unpacklo_epi32(v_msk3_b, v_msk2_b);
+    v_msk0_b = _mm_unpacklo_epi64(v_msk2_b, v_msk0_b);
+    // Compute the sum and SSE
+    sum_and_sse(v_res_b, v_dst0_b, v_msk0_b, &v_sum_d, &v_sse_q);
+    // Move onto the next set of rows
+    dst += dst_stride * 4;
+    msk += msk_stride * 4;
+  }
+  return calc_masked_variance(v_sum_d, v_sse_q, sse, 4, h);
+}
+
+unsigned int vpx_masked_subpel_var8xH_xnonzero_ynonzero(
+        const uint8_t *src, int src_stride, int xoffset, int  yoffset,
+        const uint8_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int h) {
+  int i;
+  __m128i v_src0_b, v_src1_b, v_filtered0_w, v_filtered1_w, v_dst_b, v_msk_b;
+  __m128i v_src0_shift_b, v_src1_shift_b;
+  __m128i v_xres0_b, v_xres1_b, v_res_b, v_temp_b;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  __m128i v_filterx_b = _mm_set1_epi16((
+        vpx_bilinear_filters[xoffset][1] << 8) +
+        vpx_bilinear_filters[xoffset][0]);
+  __m128i v_filtery_b = _mm_set1_epi16((
+        vpx_bilinear_filters[yoffset][1] << 8) +
+        vpx_bilinear_filters[yoffset][0]);
+  assert(xoffset < 8);
+  assert(yoffset < 8);
+
+  // Load the first block of src data
+  v_src0_b = _mm_loadu_si128((const __m128i*)(src));
+  v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
+  v_src1_b = _mm_loadu_si128((const __m128i*)(src + src_stride));
+  v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
+  // Apply the x filter
+  if (xoffset == 8) {
+    v_src1_b = _mm_unpacklo_epi64(v_src0_b, v_src1_b);
+    v_src1_shift_b = _mm_unpacklo_epi64(v_src0_shift_b, v_src1_shift_b);
+    v_xres0_b = _mm_avg_epu8(v_src1_b, v_src1_shift_b);
+  } else {
+    apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
+    apply_filter_lo(v_src1_b, v_src1_shift_b, v_filterx_b, &v_filtered1_w);
+    v_xres0_b = _mm_packus_epi16(v_filtered0_w, v_filtered1_w);
+  }
+  for (i = 0; i < h; i += 4) {
+    // Load the next block of src data
+    v_src0_b = _mm_loadu_si128((const __m128i*)(src + src_stride * 2));
+    v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
+    v_src1_b = _mm_loadu_si128((const __m128i*)(src + src_stride * 3));
+    v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
+    // Apply the x filter
+    if (xoffset == 8) {
+      v_src1_b = _mm_unpacklo_epi64(v_src0_b, v_src1_b);
+      v_src1_shift_b = _mm_unpacklo_epi64(v_src0_shift_b, v_src1_shift_b);
+      v_xres1_b = _mm_avg_epu8(v_src1_b, v_src1_shift_b);
+    } else {
+      apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
+      apply_filter_lo(v_src1_b, v_src1_shift_b, v_filterx_b, &v_filtered1_w);
+      v_xres1_b = _mm_packus_epi16(v_filtered0_w, v_filtered1_w);
+    }
+    // Apply the y filter to the previous block
+    v_temp_b = _mm_or_si128(_mm_srli_si128(v_xres0_b, 8),
+                            _mm_slli_si128(v_xres1_b, 8));
+    if (yoffset == 8) {
+      v_res_b = _mm_avg_epu8(v_xres0_b, v_temp_b);
+    } else {
+      v_res_b = apply_filter(v_xres0_b, v_temp_b, v_filtery_b);
+    }
+    // Load the dst data
+    v_dst_b = _mm_unpacklo_epi64(
+            _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)),
+            _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)));
+    // Load the mask data
+    v_msk_b = _mm_unpacklo_epi64(
+            _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)),
+            _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)));
+    // Compute the sum and SSE
+    sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
+
+    // Load the next block of src data
+    v_src0_b = _mm_loadu_si128((const __m128i*)(src + src_stride * 4));
+    v_src0_shift_b = _mm_srli_si128(v_src0_b, 1);
+    v_src1_b = _mm_loadu_si128((const __m128i*)(src + src_stride * 5));
+    v_src1_shift_b = _mm_srli_si128(v_src1_b, 1);
+    // Apply the x filter
+    if (xoffset == 8) {
+      v_src1_b = _mm_unpacklo_epi64(v_src0_b, v_src1_b);
+      v_src1_shift_b = _mm_unpacklo_epi64(v_src0_shift_b, v_src1_shift_b);
+      v_xres0_b = _mm_avg_epu8(v_src1_b, v_src1_shift_b);
+    } else {
+      apply_filter_lo(v_src0_b, v_src0_shift_b, v_filterx_b, &v_filtered0_w);
+      apply_filter_lo(v_src1_b, v_src1_shift_b, v_filterx_b, &v_filtered1_w);
+      v_xres0_b = _mm_packus_epi16(v_filtered0_w, v_filtered1_w);
+    }
+    // Apply the y filter to the previous block
+    v_temp_b = _mm_or_si128(_mm_srli_si128(v_xres1_b, 8),
+                            _mm_slli_si128(v_xres0_b, 8));
+    if (yoffset == 8) {
+      v_res_b = _mm_avg_epu8(v_xres1_b, v_temp_b);
+    } else {
+      v_res_b = apply_filter(v_xres1_b, v_temp_b, v_filtery_b);
+    }
+    // Load the dst data
+    v_dst_b = _mm_unpacklo_epi64(
+            _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 2)),
+            _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 3)));
+    // Load the mask data
+    v_msk_b = _mm_unpacklo_epi64(
+            _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 2)),
+            _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 3)));
+    // Compute the sum and SSE
+    sum_and_sse(v_res_b, v_dst_b, v_msk_b, &v_sum_d, &v_sse_q);
+    // Move onto the next set of rows
+    src += src_stride * 4;
+    dst += dst_stride * 4;
+    msk += msk_stride * 4;
+  }
+  return calc_masked_variance(v_sum_d, v_sse_q, sse, 8, h);
+}
+
+
+// For W >= 16
+#define MASK_SUBPIX_VAR_LARGE(W, H)                                            \
+unsigned int vpx_masked_sub_pixel_variance##W##x##H##_ssse3(                   \
+        const uint8_t *src, int src_stride,                                    \
+        int xoffset, int  yoffset,                                             \
+        const uint8_t *dst, int dst_stride,                                    \
+        const uint8_t *msk, int msk_stride,                                    \
+        unsigned int *sse) {                                                   \
+  assert(W % 16 == 0);                                                         \
+  if (xoffset == 0) {                                                          \
+    if (yoffset == 0)                                                          \
+      return vpx_masked_variance##W##x##H##_ssse3(src, src_stride,             \
+                                                  dst, dst_stride,             \
+                                                  msk, msk_stride, sse);       \
+    else if (yoffset == 8)                                                     \
+      return vpx_masked_subpel_varWxH_xzero(src, src_stride, 8,                \
+                                            dst, dst_stride, msk, msk_stride,  \
+                                            sse, W, H, apply_filter8);         \
+    else                                                                       \
+      return vpx_masked_subpel_varWxH_xzero(src, src_stride, yoffset,          \
+                                            dst, dst_stride, msk, msk_stride,  \
+                                            sse, W, H, apply_filter);          \
+  } else if (yoffset == 0) {                                                   \
+    if (xoffset == 8)                                                          \
+      return vpx_masked_subpel_varWxH_yzero(src, src_stride, 8,                \
+                                            dst, dst_stride, msk, msk_stride,  \
+                                            sse, W, H, apply_filter8);         \
+    else                                                                       \
+      return vpx_masked_subpel_varWxH_yzero(src, src_stride, xoffset,          \
+                                            dst, dst_stride, msk, msk_stride,  \
+                                            sse, W, H, apply_filter);          \
+  } else if (xoffset == 8) {                                                   \
+    if (yoffset == 8)                                                          \
+      return vpx_masked_subpel_varWxH_xnonzero_ynonzero(src, src_stride,       \
+              8, 8, dst, dst_stride, msk, msk_stride, sse, W, H,               \
+              apply_filter8, apply_filter8);                                   \
+    else                                                                       \
+      return vpx_masked_subpel_varWxH_xnonzero_ynonzero(src, src_stride,       \
+              8, yoffset, dst, dst_stride, msk, msk_stride, sse, W, H,         \
+              apply_filter8, apply_filter);                                    \
+  } else {                                                                     \
+    if (yoffset == 8)                                                          \
+      return vpx_masked_subpel_varWxH_xnonzero_ynonzero(src, src_stride,       \
+              xoffset, 8, dst, dst_stride, msk, msk_stride, sse, W, H,         \
+              apply_filter, apply_filter8);                                    \
+    else                                                                       \
+      return vpx_masked_subpel_varWxH_xnonzero_ynonzero(src, src_stride,       \
+              xoffset, yoffset, dst, dst_stride, msk, msk_stride, sse, W, H,   \
+              apply_filter, apply_filter);                                     \
+  }                                                                            \
+}
+
+// For W < 16
+#define MASK_SUBPIX_VAR_SMALL(W, H)                                            \
+unsigned int vpx_masked_sub_pixel_variance##W##x##H##_ssse3(                   \
+        const uint8_t *src, int src_stride,                                    \
+        int xoffset, int  yoffset,                                             \
+        const uint8_t *dst, int dst_stride,                                    \
+        const uint8_t *msk, int msk_stride,                                    \
+        unsigned int *sse) {                                                   \
+  assert(W == 4 || W == 8);                                                    \
+  if (xoffset == 0 && yoffset == 0)                                            \
+    return vpx_masked_variance##W##x##H##_ssse3(src, src_stride,               \
+                                                dst, dst_stride,               \
+                                                msk, msk_stride, sse);         \
+  else if (xoffset == 0)                                                       \
+    return vpx_masked_subpel_var##W##xH_xzero(src, src_stride, yoffset,        \
+                                              dst, dst_stride,                 \
+                                              msk, msk_stride, sse, H);        \
+  else if (yoffset == 0)                                                       \
+    return vpx_masked_subpel_var##W##xH_yzero(src, src_stride, xoffset,        \
+                                              dst, dst_stride,                 \
+                                              msk, msk_stride, sse, H);        \
+  else                                                                         \
+    return vpx_masked_subpel_var##W##xH_xnonzero_ynonzero(                     \
+          src, src_stride, xoffset, yoffset, dst, dst_stride,                  \
+          msk, msk_stride, sse, H);                                            \
+}
+
+MASK_SUBPIX_VAR_SMALL(4, 4)
+MASK_SUBPIX_VAR_SMALL(4, 8)
+MASK_SUBPIX_VAR_SMALL(8, 4)
+MASK_SUBPIX_VAR_SMALL(8, 8)
+MASK_SUBPIX_VAR_SMALL(8, 16)
+MASK_SUBPIX_VAR_LARGE(16, 8)
+MASK_SUBPIX_VAR_LARGE(16, 16)
+MASK_SUBPIX_VAR_LARGE(16, 32)
+MASK_SUBPIX_VAR_LARGE(32, 16)
+MASK_SUBPIX_VAR_LARGE(32, 32)
+MASK_SUBPIX_VAR_LARGE(32, 64)
+MASK_SUBPIX_VAR_LARGE(64, 32)
+MASK_SUBPIX_VAR_LARGE(64, 64)
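+
+// Example usage (illustrative only; the buffer names, strides and the
+// half-pel offsets below are placeholders, not part of this change; mask
+// weights are expected to lie in [0, 64]):
+//   unsigned int sse;
+//   unsigned int var = vpx_masked_sub_pixel_variance16x16_ssse3(
+//       src, src_stride, 8, 8, ref, ref_stride, mask, mask_stride, &sse);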
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef int (*highbd_calc_masked_var_t)(__m128i v_sum_d, __m128i v_sse_q,
+             unsigned int* sse, const int w, const int h);
+typedef unsigned int (*highbd_variance_fn_t)(
+                      const uint8_t *a8, int a_stride,
+                      const uint8_t *b8, int b_stride,
+                      const uint8_t *m, int m_stride,
+                      unsigned int *sse);
+typedef __m128i (*highbd_filter_fn_t)(__m128i v_a_w, __m128i v_b_w,
+                                    __m128i v_filter_w);
+
+static INLINE __m128i highbd_apply_filter8(const __m128i v_a_w,
+                                           const __m128i v_b_w,
+                                           const __m128i v_filter_w) {
+  (void) v_filter_w;
+  return _mm_avg_epu16(v_a_w, v_b_w);
+}
+
+static INLINE __m128i highbd_apply_filter(const __m128i v_a_w,
+                                          const __m128i v_b_w,
+                                          const __m128i v_filter_w) {
+  const __m128i v_rounding_d = _mm_set1_epi32(1 << (FILTER_BITS - 1));
+  __m128i v_input_lo_w = _mm_unpacklo_epi16(v_a_w, v_b_w);
+  __m128i v_input_hi_w = _mm_unpackhi_epi16(v_a_w, v_b_w);
+  __m128i v_temp0_d = _mm_madd_epi16(v_input_lo_w, v_filter_w);
+  __m128i v_temp1_d = _mm_madd_epi16(v_input_hi_w, v_filter_w);
+  __m128i v_res_lo_d = _mm_srai_epi32(_mm_add_epi32(v_temp0_d, v_rounding_d),
+                                      FILTER_BITS);
+  __m128i v_res_hi_d = _mm_srai_epi32(_mm_add_epi32(v_temp1_d, v_rounding_d),
+                                      FILTER_BITS);
+  return _mm_packs_epi32(v_res_lo_d, v_res_hi_d);
+}
+// Apply the filter to the contents of the lower half of a and b
+static INLINE void highbd_apply_filter_lo(const __m128i v_a_lo_w,
+                                          const __m128i v_b_lo_w,
+                                          const __m128i v_filter_w,
+                                          __m128i* v_res_d) {
+  const __m128i v_rounding_d = _mm_set1_epi32(1 << (FILTER_BITS - 1));
+  __m128i v_input_w = _mm_unpacklo_epi16(v_a_lo_w, v_b_lo_w);
+  __m128i v_temp0_d = _mm_madd_epi16(v_input_w, v_filter_w);
+  *v_res_d = _mm_srai_epi32(_mm_add_epi32(v_temp0_d, v_rounding_d),
+                            FILTER_BITS);
+}
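+
+// A minimal scalar sketch of the bilinear step implemented by the helpers
+// above (illustrative only; it assumes the two filter taps sum to
+// (1 << FILTER_BITS), as with vpx_bilinear_filters[offset][0] and [1]).
+static INLINE uint16_t highbd_bilinear_ref(int a, int b, int f0, int f1) {
+  return (uint16_t)((a * f0 + b * f1 + (1 << (FILTER_BITS - 1))) >>
+                    FILTER_BITS);
+}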
+
+static void highbd_sum_and_sse(const __m128i v_a_w, const __m128i v_b_w,
+                               const __m128i v_m_b, __m128i* v_sum_d,
+                               __m128i* v_sse_q) {
+  const __m128i v_zero = _mm_setzero_si128();
+  const __m128i v_m_w = _mm_unpacklo_epi8(v_m_b, v_zero);
+
+  // Difference: [-4095, 4095] => 13 bits (incl. sign bit)
+  const __m128i v_d_w = _mm_sub_epi16(v_a_w, v_b_w);
+
+  // Error - [-4095, 4095] * [0, 64] & sum pairs => fits in 19 + 1 bits
+  const __m128i v_e_d = _mm_madd_epi16(v_d_w, v_m_w);
+
+  // Squared error - max (18 bits * 18 bits) = 36 bits (no sign bit)
+  const __m128i v_absd_w = _mm_abs_epi16(v_d_w);
+  const __m128i v_dlo_d = _mm_unpacklo_epi16(v_absd_w, v_zero);
+  const __m128i v_mlo_d = _mm_unpacklo_epi16(v_m_w, v_zero);
+  const __m128i v_elo_d = _mm_madd_epi16(v_dlo_d, v_mlo_d);
+  const __m128i v_dhi_d = _mm_unpackhi_epi16(v_absd_w, v_zero);
+  const __m128i v_mhi_d = _mm_unpackhi_epi16(v_m_w, v_zero);
+  const __m128i v_ehi_d = _mm_madd_epi16(v_dhi_d, v_mhi_d);
+  // Square and sum the errors: four 36-bit squares sum to at most 38 bits
+  __m128i v_se0_q, v_se1_q, v_se2_q, v_se3_q, v_se_q, v_elo1_d, v_ehi3_d;
+  v_se0_q = _mm_mul_epu32(v_elo_d, v_elo_d);
+  v_elo1_d = _mm_srli_si128(v_elo_d, 4);
+  v_se1_q = _mm_mul_epu32(v_elo1_d, v_elo1_d);
+  v_se0_q = _mm_add_epi64(v_se0_q, v_se1_q);
+  v_se2_q = _mm_mul_epu32(v_ehi_d, v_ehi_d);
+  v_ehi3_d = _mm_srli_si128(v_ehi_d, 4);
+  v_se3_q = _mm_mul_epu32(v_ehi3_d, v_ehi3_d);
+  v_se1_q = _mm_add_epi64(v_se2_q, v_se3_q);
+  v_se_q = _mm_add_epi64(v_se0_q, v_se1_q);
+
+  // Accumulate
+  *v_sum_d = _mm_add_epi32(*v_sum_d, v_e_d);
+  *v_sse_q = _mm_add_epi64(*v_sse_q, v_se_q);
+}
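+
+// A non-vectorized sketch of the quantities highbd_sum_and_sse() accumulates
+// for each group of 8 pixels (illustrative only; the helper name and the
+// explicit loop are not part of the SIMD path):
+static INLINE void highbd_sum_and_sse_ref(const uint16_t *a,
+                                          const uint16_t *b,
+                                          const uint8_t *m, int n,
+                                          int64_t *sum, uint64_t *sse) {
+  int i;
+  for (i = 0; i < n; ++i) {
+    const int d = a[i] - b[i];           // difference, 13 bits incl. sign
+    const int e = d * m[i];              // mask-weighted diff, m in [0, 64]
+    *sum += e;                           // scaled by 64 (the mask weight)
+    *sse += (uint64_t)((int64_t)e * e);  // scaled by 64 * 64 = 4096
+  }
+}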
+
+static INLINE int highbd_10_calc_masked_variance(__m128i v_sum_d,
+                                                 __m128i v_sse_q,
+                                                 unsigned int* sse,
+                                                 const int w, const int h) {
+  int sum;
+
+  // Horizontal sum
+  v_sum_d = _mm_hadd_epi32(v_sum_d, v_sum_d);
+  v_sum_d = _mm_hadd_epi32(v_sum_d, v_sum_d);
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_srli_si128(v_sse_q, 8));
+
+  // Round
+  sum = _mm_cvtsi128_si32(v_sum_d);
+  sum = (sum >= 0) ? ((sum + 31) >> 6) : -((-sum + 31) >> 6);
+  sum = ROUND_POWER_OF_TWO(sum, 2);
+
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_set_epi32(0, 0, 0, 2047));
+  v_sse_q = _mm_srli_epi64(v_sse_q, 12);
+
+  // Store the SSE
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_set_epi32(0, 0, 0, 0x8));
+  v_sse_q = _mm_srli_epi64(v_sse_q, 4);
+  *sse = _mm_cvtsi128_si32(v_sse_q);
+
+  // Compute the variance
+  return *sse - (((int64_t)sum * sum) >> (LOG2_P2(h) + LOG2_P2(w)));
+}
+static INLINE int highbd_12_calc_masked_variance(__m128i v_sum_d,
+                                                 __m128i v_sse_q,
+                                                 unsigned int* sse,
+                                                 const int w, const int h) {
+  int sum;
+
+  // Horizontal sum
+  v_sum_d = _mm_hadd_epi32(v_sum_d, v_sum_d);
+  v_sum_d = _mm_hadd_epi32(v_sum_d, v_sum_d);
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_srli_si128(v_sse_q, 8));
+
+  // Round
+  sum = _mm_cvtsi128_si32(v_sum_d);
+  sum = (sum >= 0) ? ((sum + 31) >> 6) : -((-sum + 31) >> 6);
+  sum = ROUND_POWER_OF_TWO(sum, 4);
+
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_set_epi32(0, 0, 0, 2047));
+  v_sse_q = _mm_srli_epi64(v_sse_q, 12);
+
+  // Store the SSE
+  v_sse_q = _mm_add_epi64(v_sse_q, _mm_set_epi32(0, 0, 0, 0x80));
+  v_sse_q = _mm_srli_epi64(v_sse_q, 8);
+  *sse = _mm_cvtsi128_si32(v_sse_q);
+
+  // Compute the variance
+  return *sse - (((int64_t)sum * sum) >> (LOG2_P2(h) + LOG2_P2(w)));
+}
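+
+// Scalar sketch of the 10-bit reduction above (illustrative only; it assumes
+// the 64x / 4096x mask scaling produced by highbd_sum_and_sse and mirrors
+// the two rounding steps of highbd_10_calc_masked_variance):
+static INLINE int highbd_10_calc_masked_variance_ref(int64_t sum,
+                                                      uint64_t sse64,
+                                                      unsigned int *sse,
+                                                      const int w,
+                                                      const int h) {
+  // Undo the mask scaling: the sum is 64x and the SSE 4096x too large.
+  int s = (int)((sum >= 0) ? ((sum + 31) >> 6) : -((-sum + 31) >> 6));
+  sse64 = (sse64 + 2047) >> 12;
+  // Undo the 10-bit depth scaling: a further /4 on the sum, /16 on the SSE.
+  s = ROUND_POWER_OF_TWO(s, 2);
+  *sse = (unsigned int)((sse64 + 8) >> 4);
+  // variance = SSE - sum^2 / (w * h).
+  return (int)(*sse - (((int64_t)s * s) >> (LOG2_P2(w) + LOG2_P2(h))));
+}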
+
+
+// High bit depth functions for width (W) >= 8
+unsigned int vpx_highbd_masked_subpel_varWxH_xzero(
+        const uint16_t *src, int src_stride, int  yoffset,
+        const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int w, int h, highbd_filter_fn_t filter_fn,
+        highbd_calc_masked_var_t calc_var) {
+  int i, j;
+  __m128i v_src0_w, v_src1_w, v_res_w, v_dst_w, v_msk_b;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  const __m128i v_filter_w = _mm_set1_epi32((
+        vpx_bilinear_filters[yoffset][1] << 16) +
+        vpx_bilinear_filters[yoffset][0]);
+  assert(yoffset < 8);
+  for (j = 0; j < w; j += 8) {
+    // Pre-load the first row
+    v_src0_w = _mm_loadu_si128((const __m128i*)(src + j));
+    // Process 2 rows at a time
+    for (i = 0; i < h; i += 2) {
+      // Load the next row and apply the filter
+      v_src1_w = _mm_loadu_si128((const __m128i*)(src + j + src_stride));
+      v_res_w = filter_fn(v_src0_w, v_src1_w, v_filter_w);
+      // Load the dst and msk for the variance calculation
+      v_dst_w = _mm_loadu_si128((const __m128i*)(dst + j));
+      v_msk_b = _mm_loadl_epi64((const __m128i*)(msk + j));
+      highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
+
+      // Load the next row and apply the filter
+      v_src0_w = _mm_loadu_si128((const __m128i*)(src + j + src_stride * 2));
+      v_res_w = filter_fn(v_src1_w, v_src0_w, v_filter_w);
+      // Load the dst and msk for the variance calculation
+      v_dst_w = _mm_loadu_si128((const __m128i*)(dst + j + dst_stride));
+      v_msk_b = _mm_loadl_epi64((const __m128i*)(msk + j + msk_stride));
+      highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
+      // Move onto the next block of rows
+      src += src_stride * 2;
+      dst += dst_stride * 2;
+      msk += msk_stride * 2;
+    }
+    // Reset to the top of the block
+    src -= src_stride * h;
+    dst -= dst_stride * h;
+    msk -= msk_stride * h;
+  }
+  return calc_var(v_sum_d, v_sse_q, sse, w, h);
+}
+unsigned int vpx_highbd_masked_subpel_varWxH_yzero(
+        const uint16_t *src, int src_stride, int xoffset,
+        const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int w, int h, highbd_filter_fn_t filter_fn,
+        highbd_calc_masked_var_t calc_var) {
+  int i, j;
+  __m128i v_src0_w, v_src1_w, v_res_w, v_dst_w, v_msk_b;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  const __m128i v_filter_w = _mm_set1_epi32((
+        vpx_bilinear_filters[xoffset][1] << 16) +
+        vpx_bilinear_filters[xoffset][0]);
+  assert(xoffset < 8);
+  for (i = 0; i < h; i++) {
+    for (j = 0; j < w; j += 8) {
+      // Load this row and apply the filter
+      v_src0_w = _mm_loadu_si128((const __m128i*)(src + j));
+      v_src1_w = _mm_loadu_si128((const __m128i*)(src + j + 1));
+      v_res_w = filter_fn(v_src0_w, v_src1_w, v_filter_w);
+
+      // Load the dst and msk for the variance calculation
+      v_dst_w = _mm_loadu_si128((const __m128i*)(dst + j));
+      v_msk_b = _mm_loadl_epi64((const __m128i*)(msk + j));
+      highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
+    }
+    src += src_stride;
+    dst += dst_stride;
+    msk += msk_stride;
+  }
+  return calc_var(v_sum_d, v_sse_q, sse, w, h);
+}
+
+unsigned int vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero(
+        const uint16_t *src, int src_stride, int xoffset, int yoffset,
+        const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int w, int h, highbd_filter_fn_t xfilter_fn,
+        highbd_filter_fn_t yfilter_fn, highbd_calc_masked_var_t calc_var) {
+  int i, j;
+  __m128i v_src0_w, v_src1_w, v_src2_w, v_src3_w;
+  __m128i v_filtered0_w, v_filtered1_w, v_res_w, v_dst_w, v_msk_b;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  const __m128i v_filterx_w = _mm_set1_epi32((
+        vpx_bilinear_filters[xoffset][1] << 16) +
+        vpx_bilinear_filters[xoffset][0]);
+  const __m128i v_filtery_w = _mm_set1_epi32((
+        vpx_bilinear_filters[yoffset][1] << 16) +
+        vpx_bilinear_filters[yoffset][0]);
+  assert(xoffset < 8);
+  assert(yoffset < 8);
+  for (j = 0; j < w; j += 8) {
+    // Pre-load the first row
+    v_src0_w = _mm_loadu_si128((const __m128i*)(src + j));
+    v_src1_w = _mm_loadu_si128((const __m128i*)(src + j + 1));
+    v_filtered0_w = xfilter_fn(v_src0_w, v_src1_w, v_filterx_w);
+    // Process 2 rows at a time
+    for (i = 0; i < h; i += 2) {
+      // Load the next row & apply the filter
+      v_src2_w = _mm_loadu_si128((const __m128i*)(src + src_stride + j));
+      v_src3_w = _mm_loadu_si128((const __m128i*)(src + src_stride + j + 1));
+      v_filtered1_w = xfilter_fn(v_src2_w, v_src3_w, v_filterx_w);
+      // Load the dst and msk for the variance calculation
+      v_dst_w = _mm_loadu_si128((const __m128i*)(dst + j));
+      v_msk_b = _mm_loadl_epi64((const __m128i*)(msk + j));
+      // Complete the calculation for this row and add it to the running total
+      v_res_w = yfilter_fn(v_filtered0_w, v_filtered1_w, v_filtery_w);
+      highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
+
+      // Load the next row & apply the filter
+      v_src0_w = _mm_loadu_si128((const __m128i*)(src + src_stride * 2 + j));
+      v_src1_w = _mm_loadu_si128((const __m128i*)(src + src_stride * 2 +
+                                                  j + 1));
+      v_filtered0_w = xfilter_fn(v_src0_w, v_src1_w, v_filterx_w);
+      // Load the dst and msk for the variance calculation
+      v_dst_w = _mm_loadu_si128((const __m128i*)(dst + dst_stride + j));
+      v_msk_b = _mm_loadl_epi64((const __m128i*)(msk + msk_stride + j));
+      // Complete the calculation for this row and add it to the running total
+      v_res_w = yfilter_fn(v_filtered1_w, v_filtered0_w, v_filtery_w);
+      highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
+      // Move onto the next block of rows
+      src += src_stride * 2;
+      dst += dst_stride * 2;
+      msk += msk_stride * 2;
+    }
+    // Reset to the top of the block
+    src -= src_stride * h;
+    dst -= dst_stride * h;
+    msk -= msk_stride * h;
+  }
+  return calc_var(v_sum_d, v_sse_q, sse, w, h);
+}
+
+// Note the order in which the rows are loaded: xmm[127:64] = row 1,
+// xmm[63:0] = row 2.
+unsigned int vpx_highbd_masked_subpel_var4xH_xzero(
+        const uint16_t *src, int src_stride, int yoffset,
+        const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int h, highbd_calc_masked_var_t calc_var) {
+  int i;
+  __m128i v_src0_w, v_src1_w, v_filtered0_d, v_filtered1_d, v_res_w;
+  __m128i v_dst_w, v_msk_b;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  __m128i v_filter_w = _mm_set1_epi32((
+        vpx_bilinear_filters[yoffset][1] << 16) +
+        vpx_bilinear_filters[yoffset][0]);
+  assert(yoffset < 8);
+  // Pre-load the first row of src data
+  v_src0_w = _mm_loadl_epi64((const __m128i*)src);
+  for (i = 0; i < h; i += 2) {
+    if (yoffset == 8) {
+      // Load the rest of the source data for these rows
+      v_src1_w = _mm_or_si128(
+            _mm_slli_si128(v_src0_w, 8),
+            _mm_loadl_epi64((const __m128i*)(src + src_stride * 1)));
+      v_src0_w = _mm_or_si128(
+            _mm_slli_si128(v_src1_w, 8),
+            _mm_loadl_epi64((const __m128i*)(src + src_stride * 2)));
+      // Apply the y filter
+      v_res_w = _mm_avg_epu16(v_src1_w, v_src0_w);
+    } else {
+      // Load the data and apply the y filter
+      v_src1_w = _mm_loadl_epi64((const __m128i*)(src + src_stride * 1));
+      highbd_apply_filter_lo(v_src0_w, v_src1_w, v_filter_w, &v_filtered0_d);
+      v_src0_w = _mm_loadl_epi64((const __m128i*)(src + src_stride * 2));
+      highbd_apply_filter_lo(v_src1_w, v_src0_w, v_filter_w, &v_filtered1_d);
+      v_res_w = _mm_packs_epi32(v_filtered1_d, v_filtered0_d);
+    }
+    // Load the dst data
+    v_dst_w = _mm_unpacklo_epi64(
+            _mm_loadl_epi64((const __m128i*)(dst + dst_stride * 1)),
+            _mm_loadl_epi64((const __m128i*)(dst + dst_stride * 0)));
+    // Load the mask data
+    v_msk_b = _mm_unpacklo_epi32(
+            _mm_loadl_epi64((const __m128i*)(msk + msk_stride * 1)),
+            _mm_loadl_epi64((const __m128i*)(msk + msk_stride * 0)));
+    // Compute the sum and SSE
+    highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
+    // Move onto the next set of rows
+    src += src_stride * 2;
+    dst += dst_stride * 2;
+    msk += msk_stride * 2;
+  }
+  return calc_var(v_sum_d, v_sse_q, sse, 4, h);
+}
+
+unsigned int vpx_highbd_masked_subpel_var4xH_yzero(
+        const uint16_t *src, int src_stride, int xoffset,
+        const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int h, highbd_calc_masked_var_t calc_var) {
+  int i;
+  __m128i v_src0_w, v_src1_w, v_filtered0_d, v_filtered1_d;
+  __m128i v_src0_shift_w, v_src1_shift_w, v_res_w, v_dst_w, v_msk_b;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  __m128i v_filter_w = _mm_set1_epi32((
+        vpx_bilinear_filters[xoffset][1] << 16) +
+        vpx_bilinear_filters[xoffset][0]);
+  assert(xoffset < 8);
+  for (i = 0; i < h; i += 2) {
+    // Load the src data
+    v_src0_w = _mm_loadu_si128((const __m128i*)(src));
+    v_src0_shift_w = _mm_srli_si128(v_src0_w, 2);
+    v_src1_w = _mm_loadu_si128((const __m128i*)(src + src_stride));
+    v_src1_shift_w = _mm_srli_si128(v_src1_w, 2);
+    // Apply the x filter
+    if (xoffset == 8) {
+      v_src1_w = _mm_unpacklo_epi64(v_src0_w, v_src1_w);
+      v_src1_shift_w = _mm_unpacklo_epi64(v_src0_shift_w, v_src1_shift_w);
+      v_res_w = _mm_avg_epu16(v_src1_w, v_src1_shift_w);
+    } else {
+      highbd_apply_filter_lo(v_src0_w, v_src0_shift_w, v_filter_w,
+                             &v_filtered0_d);
+      highbd_apply_filter_lo(v_src1_w, v_src1_shift_w, v_filter_w,
+                             &v_filtered1_d);
+      v_res_w = _mm_packs_epi32(v_filtered0_d, v_filtered1_d);
+    }
+    // Load the dst data
+    v_dst_w = _mm_unpacklo_epi64(
+            _mm_loadl_epi64((const __m128i*)(dst + dst_stride * 0)),
+            _mm_loadl_epi64((const __m128i*)(dst + dst_stride * 1)));
+    // Load the mask data
+    v_msk_b = _mm_unpacklo_epi32(
+            _mm_loadl_epi64((const __m128i*)(msk + msk_stride * 0)),
+            _mm_loadl_epi64((const __m128i*)(msk + msk_stride * 1)));
+    // Compute the sum and SSE
+    highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
+    // Move onto the next set of rows
+    src += src_stride * 2;
+    dst += dst_stride * 2;
+    msk += msk_stride * 2;
+  }
+  return calc_var(v_sum_d, v_sse_q, sse, 4, h);
+}
+
+unsigned int vpx_highbd_masked_subpel_var4xH_xnonzero_ynonzero(
+        const uint16_t *src, int src_stride, int xoffset, int  yoffset,
+        const uint16_t *dst, int dst_stride, const uint8_t *msk, int msk_stride,
+        unsigned int *sse, int h, highbd_calc_masked_var_t calc_var) {
+  int i;
+  __m128i v_src0_w, v_src1_w, v_filtered0_d, v_filtered1_d, v_dst_w, v_msk_b;
+  __m128i v_src0_shift_w, v_src1_shift_w;
+  __m128i v_xres0_w, v_xres1_w, v_res_w, v_temp_w;
+  __m128i v_sum_d = _mm_setzero_si128();
+  __m128i v_sse_q = _mm_setzero_si128();
+  __m128i v_filterx_w = _mm_set1_epi32((
+        vpx_bilinear_filters[xoffset][1] << 16) +
+        vpx_bilinear_filters[xoffset][0]);
+  __m128i v_filtery_w = _mm_set1_epi32((
+        vpx_bilinear_filters[yoffset][1] << 16) +
+        vpx_bilinear_filters[yoffset][0]);
+  assert(xoffset < 8);
+  assert(yoffset < 8);
+
+  // Load the first block of src data
+  v_src0_w = _mm_loadu_si128((const __m128i*)(src));
+  v_src0_shift_w = _mm_srli_si128(v_src0_w, 2);
+  v_src1_w = _mm_loadu_si128((const __m128i*)(src + src_stride));
+  v_src1_shift_w = _mm_srli_si128(v_src1_w, 2);
+  // Apply the x filter
+  if (xoffset == 8) {
+    v_src1_w = _mm_unpacklo_epi64(v_src0_w, v_src1_w);
+    v_src1_shift_w = _mm_unpacklo_epi64(v_src0_shift_w, v_src1_shift_w);
+    v_xres0_w = _mm_avg_epu16(v_src1_w, v_src1_shift_w);
+  } else {
+    highbd_apply_filter_lo(v_src0_w, v_src0_shift_w, v_filterx_w,
+                           &v_filtered0_d);
+    highbd_apply_filter_lo(v_src1_w, v_src1_shift_w, v_filterx_w,
+                           &v_filtered1_d);
+    v_xres0_w = _mm_packs_epi32(v_filtered0_d, v_filtered1_d);
+  }
+  for (i = 0; i < h; i += 4) {
+    // Load the next block of src data
+    v_src0_w = _mm_loadu_si128((const __m128i*)(src + src_stride * 2));
+    v_src0_shift_w = _mm_srli_si128(v_src0_w, 2);
+    v_src1_w = _mm_loadu_si128((const __m128i*)(src + src_stride * 3));
+    v_src1_shift_w = _mm_srli_si128(v_src1_w, 2);
+    // Apply the x filter
+    if (xoffset == 8) {
+      v_src1_w = _mm_unpacklo_epi64(v_src0_w, v_src1_w);
+      v_src1_shift_w = _mm_unpacklo_epi64(v_src0_shift_w, v_src1_shift_w);
+      v_xres1_w = _mm_avg_epu16(v_src1_w, v_src1_shift_w);
+    } else {
+      highbd_apply_filter_lo(v_src0_w, v_src0_shift_w, v_filterx_w,
+                             &v_filtered0_d);
+      highbd_apply_filter_lo(v_src1_w, v_src1_shift_w, v_filterx_w,
+                             &v_filtered1_d);
+      v_xres1_w = _mm_packs_epi32(v_filtered0_d, v_filtered1_d);
+    }
+    // Apply the y filter to the previous block
+    v_temp_w = _mm_or_si128(_mm_srli_si128(v_xres0_w, 8),
+                            _mm_slli_si128(v_xres1_w, 8));
+    if (yoffset == 8) {
+      v_res_w = _mm_avg_epu16(v_xres0_w, v_temp_w);
+    } else {
+      v_res_w = highbd_apply_filter(v_xres0_w, v_temp_w, v_filtery_w);
+    }
+    // Load the dst data
+    v_dst_w = _mm_unpacklo_epi64(
+            _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 0)),
+            _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 1)));
+    // Load the mask data
+    v_msk_b = _mm_unpacklo_epi32(
+            _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 0)),
+            _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 1)));
+    // Compute the sum and SSE
+    highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
+
+    // Load the next block of src data
+    v_src0_w = _mm_loadu_si128((const __m128i*)(src + src_stride * 4));
+    v_src0_shift_w = _mm_srli_si128(v_src0_w, 2);
+    v_src1_w = _mm_loadu_si128((const __m128i*)(src + src_stride * 5));
+    v_src1_shift_w = _mm_srli_si128(v_src1_w, 2);
+    // Apply the x filter
+    if (xoffset == 8) {
+      v_src1_w = _mm_unpacklo_epi64(v_src0_w, v_src1_w);
+      v_src1_shift_w = _mm_unpacklo_epi64(v_src0_shift_w, v_src1_shift_w);
+      v_xres0_w = _mm_avg_epu16(v_src1_w, v_src1_shift_w);
+    } else {
+      highbd_apply_filter_lo(v_src0_w, v_src0_shift_w, v_filterx_w,
+                             &v_filtered0_d);
+      highbd_apply_filter_lo(v_src1_w, v_src1_shift_w, v_filterx_w,
+                             &v_filtered1_d);
+      v_xres0_w = _mm_packs_epi32(v_filtered0_d, v_filtered1_d);
+    }
+    // Apply the y filter to the previous block
+    v_temp_w = _mm_or_si128(_mm_srli_si128(v_xres1_w, 8),
+                            _mm_slli_si128(v_xres0_w, 8));
+    if (yoffset == 8) {
+      v_res_w = _mm_avg_epu16(v_xres1_w, v_temp_w);
+    } else {
+      v_res_w = highbd_apply_filter(v_xres1_w, v_temp_w, v_filtery_w);
+    }
+    // Load the dst data
+    v_dst_w = _mm_unpacklo_epi64(
+            _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 2)),
+            _mm_loadl_epi64((const __m128i *)(dst + dst_stride * 3)));
+    // Load the mask data
+    v_msk_b = _mm_unpacklo_epi32(
+            _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 2)),
+            _mm_loadl_epi64((const __m128i *)(msk + msk_stride * 3)));
+    // Compute the sum and SSE
+    highbd_sum_and_sse(v_res_w, v_dst_w, v_msk_b, &v_sum_d, &v_sse_q);
+    // Move onto the next set of rows
+    src += src_stride * 4;
+    dst += dst_stride * 4;
+    msk += msk_stride * 4;
+  }
+  return calc_var(v_sum_d, v_sse_q, sse, 4, h);
+}
+
+// For W >= 8
+#define HIGHBD_MASK_SUBPIX_VAR_LARGE(W, H)                                     \
+unsigned int highbd_masked_sub_pixel_variance##W##x##H##_ssse3(                \
+        const uint8_t *src8, int src_stride,                                   \
+        int xoffset, int  yoffset,                                             \
+        const uint8_t *dst8, int dst_stride,                                   \
+        const uint8_t *msk, int msk_stride,                                    \
+        unsigned int *sse,                                                     \
+        highbd_calc_masked_var_t calc_var,                                     \
+        highbd_variance_fn_t full_variance_function) {                         \
+  uint16_t* src = CONVERT_TO_SHORTPTR(src8);                                   \
+  uint16_t* dst = CONVERT_TO_SHORTPTR(dst8);                                   \
+  assert(W % 8 == 0);                                                          \
+  if (xoffset == 0) {                                                          \
+    if (yoffset == 0)                                                          \
+      return full_variance_function(src8, src_stride, dst8, dst_stride,        \
+                                    msk, msk_stride, sse);                     \
+    else if (yoffset == 8)                                                     \
+      return vpx_highbd_masked_subpel_varWxH_xzero(src, src_stride, 8,         \
+                                                   dst, dst_stride,            \
+                                                   msk, msk_stride,            \
+                                                   sse, W, H,                  \
+                                                   highbd_apply_filter8,       \
+                                                   calc_var);                  \
+    else                                                                       \
+      return vpx_highbd_masked_subpel_varWxH_xzero(src, src_stride, yoffset,   \
+                                                   dst, dst_stride,            \
+                                                   msk, msk_stride,            \
+                                                   sse, W, H,                  \
+                                                   highbd_apply_filter,        \
+                                                   calc_var);                  \
+  } else if (yoffset == 0) {                                                   \
+    if (xoffset == 8)                                                          \
+      return vpx_highbd_masked_subpel_varWxH_yzero(src, src_stride, 8,         \
+                                                   dst, dst_stride,            \
+                                                   msk, msk_stride,            \
+                                                   sse, W, H,                  \
+                                                   highbd_apply_filter8,       \
+                                                   calc_var);                  \
+    else                                                                       \
+      return vpx_highbd_masked_subpel_varWxH_yzero(src, src_stride, xoffset,   \
+                                                   dst, dst_stride,            \
+                                                   msk, msk_stride,            \
+                                                   sse, W, H,                  \
+                                                   highbd_apply_filter,        \
+                                                   calc_var);                  \
+  } else if (xoffset == 8) {                                                   \
+    if (yoffset == 8)                                                          \
+      return vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero(                \
+              src, src_stride, 8, 8, dst, dst_stride, msk, msk_stride,         \
+              sse, W, H, highbd_apply_filter8, highbd_apply_filter8, calc_var);\
+    else                                                                       \
+      return vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero(                \
+              src, src_stride, 8, yoffset, dst, dst_stride,                    \
+              msk, msk_stride, sse, W, H, highbd_apply_filter8,                \
+              highbd_apply_filter, calc_var);                                  \
+  } else {                                                                     \
+    if (yoffset == 8)                                                          \
+      return vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero(                \
+              src, src_stride, xoffset, 8, dst, dst_stride, msk, msk_stride,   \
+              sse, W, H, highbd_apply_filter, highbd_apply_filter8, calc_var); \
+    else                                                                       \
+      return vpx_highbd_masked_subpel_varWxH_xnonzero_ynonzero(                \
+              src, src_stride, xoffset, yoffset, dst, dst_stride,              \
+               msk, msk_stride, sse, W, H, highbd_apply_filter,                \
+               highbd_apply_filter, calc_var);                                 \
+  }                                                                            \
+}
+
+// For W < 8
+#define HIGHBD_MASK_SUBPIX_VAR_SMALL(W, H)                                     \
+unsigned int highbd_masked_sub_pixel_variance##W##x##H##_ssse3(                \
+        const uint8_t *src8, int src_stride,                                   \
+        int xoffset, int  yoffset,                                             \
+        const uint8_t *dst8, int dst_stride,                                   \
+        const uint8_t *msk, int msk_stride,                                    \
+        unsigned int *sse,                                                     \
+        highbd_calc_masked_var_t calc_var,                                     \
+        highbd_variance_fn_t full_variance_function) {                         \
+  uint16_t* src = CONVERT_TO_SHORTPTR(src8);                                   \
+  uint16_t* dst = CONVERT_TO_SHORTPTR(dst8);                                   \
+  assert(W == 4);                                                              \
+  if (xoffset == 0 && yoffset == 0)                                            \
+    return full_variance_function(src8, src_stride, dst8, dst_stride,          \
+                                  msk, msk_stride, sse);                       \
+  else if (xoffset == 0)                                                       \
+    return vpx_highbd_masked_subpel_var4xH_xzero(src, src_stride, yoffset,     \
+                                                     dst, dst_stride,          \
+                                                     msk, msk_stride, sse, H,  \
+                                                     calc_var);                \
+  else if (yoffset == 0)                                                       \
+    return vpx_highbd_masked_subpel_var4xH_yzero(src, src_stride, xoffset,     \
+                                                     dst, dst_stride,          \
+                                                     msk, msk_stride, sse, H,  \
+                                                     calc_var);                \
+  else                                                                         \
+    return vpx_highbd_masked_subpel_var4xH_xnonzero_ynonzero(                  \
+          src, src_stride, xoffset, yoffset, dst, dst_stride,                  \
+          msk, msk_stride, sse, H, calc_var);                                  \
+}
+
+#define HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(W, H)                                  \
+unsigned int vpx_highbd_masked_sub_pixel_variance##W##x##H##_ssse3(            \
+        const uint8_t *src8, int src_stride,                                   \
+        int xoffset, int  yoffset,                                             \
+        const uint8_t *dst8, int dst_stride,                                   \
+        const uint8_t *msk, int msk_stride,                                    \
+        unsigned int *sse) {                                                   \
+    return highbd_masked_sub_pixel_variance##W##x##H##_ssse3(src8, src_stride, \
+            xoffset, yoffset, dst8, dst_stride, msk, msk_stride, sse,          \
+            calc_masked_variance,                                              \
+            vpx_highbd_masked_variance##W##x##H##_ssse3);                      \
+}                                                                              \
+unsigned int vpx_highbd_10_masked_sub_pixel_variance##W##x##H##_ssse3(         \
+        const uint8_t *src8, int src_stride,                                   \
+        int xoffset, int  yoffset,                                             \
+        const uint8_t *dst8, int dst_stride,                                   \
+        const uint8_t *msk, int msk_stride,                                    \
+        unsigned int *sse) {                                                   \
+    return highbd_masked_sub_pixel_variance##W##x##H##_ssse3(src8, src_stride, \
+            xoffset, yoffset, dst8, dst_stride, msk, msk_stride, sse,          \
+            highbd_10_calc_masked_variance,                                    \
+            vpx_highbd_10_masked_variance##W##x##H##_ssse3);                   \
+}                                                                              \
+unsigned int vpx_highbd_12_masked_sub_pixel_variance##W##x##H##_ssse3(         \
+        const uint8_t *src8, int src_stride,                                   \
+        int xoffset, int  yoffset,                                             \
+        const uint8_t *dst8, int dst_stride,                                   \
+        const uint8_t *msk, int msk_stride,                                    \
+        unsigned int *sse) {                                                   \
+    return highbd_masked_sub_pixel_variance##W##x##H##_ssse3(src8, src_stride, \
+            xoffset, yoffset, dst8, dst_stride, msk, msk_stride, sse,          \
+            highbd_12_calc_masked_variance,                                    \
+            vpx_highbd_12_masked_variance##W##x##H##_ssse3);                   \
+}                                                                              \
+
+HIGHBD_MASK_SUBPIX_VAR_SMALL(4, 4)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(4, 4)
+HIGHBD_MASK_SUBPIX_VAR_SMALL(4, 8)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(4, 8)
+HIGHBD_MASK_SUBPIX_VAR_LARGE(8, 4)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(8, 4)
+HIGHBD_MASK_SUBPIX_VAR_LARGE(8, 8)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(8, 8)
+HIGHBD_MASK_SUBPIX_VAR_LARGE(8, 16)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(8, 16)
+HIGHBD_MASK_SUBPIX_VAR_LARGE(16, 8)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(16, 8)
+HIGHBD_MASK_SUBPIX_VAR_LARGE(16, 16)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(16, 16)
+HIGHBD_MASK_SUBPIX_VAR_LARGE(16, 32)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(16, 32)
+HIGHBD_MASK_SUBPIX_VAR_LARGE(32, 16)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(32, 16)
+HIGHBD_MASK_SUBPIX_VAR_LARGE(32, 32)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(32, 32)
+HIGHBD_MASK_SUBPIX_VAR_LARGE(32, 64)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(32, 64)
+HIGHBD_MASK_SUBPIX_VAR_LARGE(64, 32)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(64, 32)
+HIGHBD_MASK_SUBPIX_VAR_LARGE(64, 64)
+HIGHBD_MASK_SUBPIX_VAR_WRAPPERS(64, 64)
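+
+// Example usage (illustrative only; pointer names and strides are
+// placeholders): high-bitdepth callers pass CONVERT_TO_BYTEPTR() pointers,
+// which the wrappers convert back with CONVERT_TO_SHORTPTR().
+//   unsigned int sse;
+//   unsigned int var = vpx_highbd_10_masked_sub_pixel_variance16x16_ssse3(
+//       CONVERT_TO_BYTEPTR(src16), src_stride, xoffset, yoffset,
+//       CONVERT_TO_BYTEPTR(ref16), ref_stride, mask, mask_stride, &sse);
+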
+#endif