/*
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <cstdlib>
#include <new>

#include "third_party/googletest/src/include/gtest/gtest.h"

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
#include "vpx_mem/vpx_mem.h"
#include "vpx_ports/mem.h"
// Function-pointer types for the variance-family kernels under test.
// All return the variance (or SSE) of an MxN block; `sse` is an out-param
// receiving the sum of squared errors where present.
typedef unsigned int (*VarianceMxNFunc)(const uint8_t *a, int a_stride,
                                        const uint8_t *b, int b_stride,
                                        unsigned int *sse);
typedef unsigned int (*SubpixVarMxNFunc)(const uint8_t *a, int a_stride,
                                         int xoffset, int yoffset,
                                         const uint8_t *b, int b_stride,
                                         unsigned int *sse);
typedef unsigned int (*SubpixAvgVarMxNFunc)(const uint8_t *a, int a_stride,
                                            int xoffset, int yoffset,
                                            const uint8_t *b, int b_stride,
                                            unsigned int *sse,
                                            const uint8_t *second_pred);
typedef unsigned int (*Get4x4SseFunc)(const uint8_t *a, int a_stride,
                                      const uint8_t *b, int b_stride);
typedef unsigned int (*SumOfSquaresFunction)(const int16_t *src);
45 using ::std::tr1::get;
46 using ::std::tr1::make_tuple;
47 using ::std::tr1::tuple;
48 using libvpx_test::ACMRandom;
50 // Truncate high bit depth results by downshifting (with rounding) by:
51 // 2 * (bit_depth - 8) for sse
52 // (bit_depth - 8) for se
53 static void RoundHighBitDepth(int bit_depth, int64_t *se, uint64_t *sse) {
56 *sse = (*sse + 128) >> 8;
60 *sse = (*sse + 8) >> 4;
// Reference sum-of-squares over a fixed 256-element (16x16) block of
// 16-bit samples; used to validate the optimized vpx_get_mb_ss functions.
static unsigned int mb_ss_ref(const int16_t *src) {
  unsigned int res = 0;
  for (int i = 0; i < 256; ++i) {
    res += src[i] * src[i];
  }
  return res;
}
78 * Our codebase calculates the "diff" value in the variance algorithm by
81 static uint32_t variance_ref(const uint8_t *src, const uint8_t *ref,
82 int l2w, int l2h, int src_stride_coeff,
83 int ref_stride_coeff, uint32_t *sse_ptr,
84 bool use_high_bit_depth_,
85 vpx_bit_depth_t bit_depth) {
88 const int w = 1 << l2w;
89 const int h = 1 << l2h;
90 for (int y = 0; y < h; y++) {
91 for (int x = 0; x < w; x++) {
93 if (!use_high_bit_depth_) {
94 diff = src[w * y * src_stride_coeff + x] -
95 ref[w * y * ref_stride_coeff + x];
98 #if CONFIG_VP9_HIGHBITDEPTH
100 diff = CONVERT_TO_SHORTPTR(src)[w * y * src_stride_coeff + x] -
101 CONVERT_TO_SHORTPTR(ref)[w * y * ref_stride_coeff + x];
104 #endif // CONFIG_VP9_HIGHBITDEPTH
108 RoundHighBitDepth(bit_depth, &se, &sse);
109 *sse_ptr = static_cast<uint32_t>(sse);
110 return static_cast<uint32_t>(sse -
111 ((static_cast<int64_t>(se) * se) >>
115 /* The subpel reference functions differ from the codec version in one aspect:
116 * they calculate the bilinear factors directly instead of using a lookup table
117 * and therefore upshift xoff and yoff by 1. Only every other calculated value
118 * is used so the codec version shrinks the table to save space and maintain
119 * compatibility with vp8.
121 static uint32_t subpel_variance_ref(const uint8_t *ref, const uint8_t *src,
122 int l2w, int l2h, int xoff, int yoff,
124 bool use_high_bit_depth_,
125 vpx_bit_depth_t bit_depth) {
128 const int w = 1 << l2w;
129 const int h = 1 << l2h;
134 for (int y = 0; y < h; y++) {
135 for (int x = 0; x < w; x++) {
136 // Bilinear interpolation at a 16th pel step.
137 if (!use_high_bit_depth_) {
138 const int a1 = ref[(w + 1) * (y + 0) + x + 0];
139 const int a2 = ref[(w + 1) * (y + 0) + x + 1];
140 const int b1 = ref[(w + 1) * (y + 1) + x + 0];
141 const int b2 = ref[(w + 1) * (y + 1) + x + 1];
142 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
143 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
144 const int r = a + (((b - a) * yoff + 8) >> 4);
145 const int diff = r - src[w * y + x];
148 #if CONFIG_VP9_HIGHBITDEPTH
150 uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
151 uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
152 const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
153 const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
154 const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
155 const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
156 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
157 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
158 const int r = a + (((b - a) * yoff + 8) >> 4);
159 const int diff = r - src16[w * y + x];
162 #endif // CONFIG_VP9_HIGHBITDEPTH
166 RoundHighBitDepth(bit_depth, &se, &sse);
167 *sse_ptr = static_cast<uint32_t>(sse);
168 return static_cast<uint32_t>(sse -
169 ((static_cast<int64_t>(se) * se) >>
173 class SumOfSquaresTest : public ::testing::TestWithParam<SumOfSquaresFunction> {
175 SumOfSquaresTest() : func_(GetParam()) {}
177 virtual ~SumOfSquaresTest() {
178 libvpx_test::ClearSystemState();
185 SumOfSquaresFunction func_;
189 void SumOfSquaresTest::ConstTest() {
192 for (int v = 0; v < 256; ++v) {
193 for (int i = 0; i < 256; ++i) {
196 ASM_REGISTER_STATE_CHECK(res = func_(mem));
197 EXPECT_EQ(256u * (v * v), res);
201 void SumOfSquaresTest::RefTest() {
203 for (int i = 0; i < 100; ++i) {
204 for (int j = 0; j < 256; ++j) {
205 mem[j] = rnd_.Rand8() - rnd_.Rand8();
208 const unsigned int expected = mb_ss_ref(mem);
210 ASM_REGISTER_STATE_CHECK(res = func_(mem));
211 EXPECT_EQ(expected, res);
215 template<typename VarianceFunctionType>
217 : public ::testing::TestWithParam<tuple<int, int,
218 VarianceFunctionType, int> > {
220 virtual void SetUp() {
221 const tuple<int, int, VarianceFunctionType, int>& params = this->GetParam();
222 log2width_ = get<0>(params);
223 width_ = 1 << log2width_;
224 log2height_ = get<1>(params);
225 height_ = 1 << log2height_;
226 variance_ = get<2>(params);
227 if (get<3>(params)) {
228 bit_depth_ = static_cast<vpx_bit_depth_t>(get<3>(params));
229 use_high_bit_depth_ = true;
231 bit_depth_ = VPX_BITS_8;
232 use_high_bit_depth_ = false;
234 mask_ = (1 << bit_depth_) - 1;
236 rnd_.Reset(ACMRandom::DeterministicSeed());
237 block_size_ = width_ * height_;
238 if (!use_high_bit_depth_) {
239 src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_ * 2));
240 ref_ = new uint8_t[block_size_ * 2];
241 #if CONFIG_VP9_HIGHBITDEPTH
243 src_ = CONVERT_TO_BYTEPTR(reinterpret_cast<uint16_t *>(
244 vpx_memalign(16, block_size_ * 2 * sizeof(uint16_t))));
245 ref_ = CONVERT_TO_BYTEPTR(new uint16_t[block_size_ * 2]);
246 #endif // CONFIG_VP9_HIGHBITDEPTH
248 ASSERT_TRUE(src_ != NULL);
249 ASSERT_TRUE(ref_ != NULL);
252 virtual void TearDown() {
253 if (!use_high_bit_depth_) {
256 #if CONFIG_VP9_HIGHBITDEPTH
258 vpx_free(CONVERT_TO_SHORTPTR(src_));
259 delete[] CONVERT_TO_SHORTPTR(ref_);
260 #endif // CONFIG_VP9_HIGHBITDEPTH
262 libvpx_test::ClearSystemState();
268 void RefStrideTest();
269 void OneQuarterTest();
274 int width_, log2width_;
275 int height_, log2height_;
276 vpx_bit_depth_t bit_depth_;
278 bool use_high_bit_depth_;
280 VarianceFunctionType variance_;
283 template<typename VarianceFunctionType>
284 void VarianceTest<VarianceFunctionType>::ZeroTest() {
285 for (int i = 0; i <= 255; ++i) {
286 if (!use_high_bit_depth_) {
287 memset(src_, i, block_size_);
288 #if CONFIG_VP9_HIGHBITDEPTH
290 vpx_memset16(CONVERT_TO_SHORTPTR(src_), i << (bit_depth_ - 8),
292 #endif // CONFIG_VP9_HIGHBITDEPTH
294 for (int j = 0; j <= 255; ++j) {
295 if (!use_high_bit_depth_) {
296 memset(ref_, j, block_size_);
297 #if CONFIG_VP9_HIGHBITDEPTH
299 vpx_memset16(CONVERT_TO_SHORTPTR(ref_), j << (bit_depth_ - 8),
301 #endif // CONFIG_VP9_HIGHBITDEPTH
305 ASM_REGISTER_STATE_CHECK(
306 var = variance_(src_, width_, ref_, width_, &sse));
307 EXPECT_EQ(0u, var) << "src values: " << i << " ref values: " << j;
312 template<typename VarianceFunctionType>
313 void VarianceTest<VarianceFunctionType>::RefTest() {
314 for (int i = 0; i < 10; ++i) {
315 for (int j = 0; j < block_size_; j++) {
316 if (!use_high_bit_depth_) {
317 src_[j] = rnd_.Rand8();
318 ref_[j] = rnd_.Rand8();
319 #if CONFIG_VP9_HIGHBITDEPTH
321 CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
322 CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
323 #endif // CONFIG_VP9_HIGHBITDEPTH
326 unsigned int sse1, sse2;
328 const int stride_coeff = 1;
329 ASM_REGISTER_STATE_CHECK(
330 var1 = variance_(src_, width_, ref_, width_, &sse1));
331 const unsigned int var2 = variance_ref(src_, ref_, log2width_,
332 log2height_, stride_coeff,
334 use_high_bit_depth_, bit_depth_);
335 EXPECT_EQ(sse1, sse2)
336 << "Error at test index: " << i;
337 EXPECT_EQ(var1, var2)
338 << "Error at test index: " << i;
342 template<typename VarianceFunctionType>
343 void VarianceTest<VarianceFunctionType>::RefStrideTest() {
344 for (int i = 0; i < 10; ++i) {
345 int ref_stride_coeff = i % 2;
346 int src_stride_coeff = (i >> 1) % 2;
347 for (int j = 0; j < block_size_; j++) {
348 int ref_ind = (j / width_) * ref_stride_coeff * width_ + j % width_;
349 int src_ind = (j / width_) * src_stride_coeff * width_ + j % width_;
350 if (!use_high_bit_depth_) {
351 src_[src_ind] = rnd_.Rand8();
352 ref_[ref_ind] = rnd_.Rand8();
353 #if CONFIG_VP9_HIGHBITDEPTH
355 CONVERT_TO_SHORTPTR(src_)[src_ind] = rnd_.Rand16() & mask_;
356 CONVERT_TO_SHORTPTR(ref_)[ref_ind] = rnd_.Rand16() & mask_;
357 #endif // CONFIG_VP9_HIGHBITDEPTH
360 unsigned int sse1, sse2;
363 ASM_REGISTER_STATE_CHECK(
364 var1 = variance_(src_, width_ * src_stride_coeff,
365 ref_, width_ * ref_stride_coeff, &sse1));
366 const unsigned int var2 = variance_ref(src_, ref_, log2width_,
367 log2height_, src_stride_coeff,
368 ref_stride_coeff, &sse2,
369 use_high_bit_depth_, bit_depth_);
370 EXPECT_EQ(sse1, sse2)
371 << "Error at test index: " << i;
372 EXPECT_EQ(var1, var2)
373 << "Error at test index: " << i;
377 template<typename VarianceFunctionType>
378 void VarianceTest<VarianceFunctionType>::OneQuarterTest() {
379 const int half = block_size_ / 2;
380 if (!use_high_bit_depth_) {
381 memset(src_, 255, block_size_);
382 memset(ref_, 255, half);
383 memset(ref_ + half, 0, half);
384 #if CONFIG_VP9_HIGHBITDEPTH
386 vpx_memset16(CONVERT_TO_SHORTPTR(src_), 255 << (bit_depth_ - 8),
388 vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 255 << (bit_depth_ - 8), half);
389 vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, 0, half);
390 #endif // CONFIG_VP9_HIGHBITDEPTH
394 ASM_REGISTER_STATE_CHECK(var = variance_(src_, width_, ref_, width_, &sse));
395 const unsigned int expected = block_size_ * 255 * 255 / 4;
396 EXPECT_EQ(expected, var);
399 template<typename MseFunctionType>
401 : public ::testing::TestWithParam<tuple<int, int, MseFunctionType> > {
403 virtual void SetUp() {
404 const tuple<int, int, MseFunctionType>& params = this->GetParam();
405 log2width_ = get<0>(params);
406 width_ = 1 << log2width_;
407 log2height_ = get<1>(params);
408 height_ = 1 << log2height_;
409 mse_ = get<2>(params);
411 rnd(ACMRandom::DeterministicSeed());
412 block_size_ = width_ * height_;
413 src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
414 ref_ = new uint8_t[block_size_];
415 ASSERT_TRUE(src_ != NULL);
416 ASSERT_TRUE(ref_ != NULL);
419 virtual void TearDown() {
422 libvpx_test::ClearSystemState();
434 int width_, log2width_;
435 int height_, log2height_;
437 MseFunctionType mse_;
440 template<typename MseFunctionType>
441 void MseTest<MseFunctionType>::RefTest_mse() {
442 for (int i = 0; i < 10; ++i) {
443 for (int j = 0; j < block_size_; j++) {
444 src_[j] = rnd.Rand8();
445 ref_[j] = rnd.Rand8();
447 unsigned int sse1, sse2;
448 const int stride_coeff = 1;
449 ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse1));
450 variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
451 stride_coeff, &sse2, false, VPX_BITS_8);
452 EXPECT_EQ(sse1, sse2);
456 template<typename MseFunctionType>
457 void MseTest<MseFunctionType>::RefTest_sse() {
458 for (int i = 0; i < 10; ++i) {
459 for (int j = 0; j < block_size_; j++) {
460 src_[j] = rnd.Rand8();
461 ref_[j] = rnd.Rand8();
465 const int stride_coeff = 1;
466 ASM_REGISTER_STATE_CHECK(var1 = mse_(src_, width_, ref_, width_));
467 variance_ref(src_, ref_, log2width_, log2height_, stride_coeff,
468 stride_coeff, &sse2, false, VPX_BITS_8);
469 EXPECT_EQ(var1, sse2);
473 template<typename MseFunctionType>
474 void MseTest<MseFunctionType>::MaxTest_mse() {
475 memset(src_, 255, block_size_);
476 memset(ref_, 0, block_size_);
478 ASM_REGISTER_STATE_CHECK(mse_(src_, width_, ref_, width_, &sse));
479 const unsigned int expected = block_size_ * 255 * 255;
480 EXPECT_EQ(expected, sse);
483 template<typename MseFunctionType>
484 void MseTest<MseFunctionType>::MaxTest_sse() {
485 memset(src_, 255, block_size_);
486 memset(ref_, 0, block_size_);
488 ASM_REGISTER_STATE_CHECK(var = mse_(src_, width_, ref_, width_));
489 const unsigned int expected = block_size_ * 255 * 255;
490 EXPECT_EQ(expected, var);
493 static uint32_t subpel_avg_variance_ref(const uint8_t *ref,
495 const uint8_t *second_pred,
499 bool use_high_bit_depth,
500 vpx_bit_depth_t bit_depth) {
503 const int w = 1 << l2w;
504 const int h = 1 << l2h;
509 for (int y = 0; y < h; y++) {
510 for (int x = 0; x < w; x++) {
511 // bilinear interpolation at a 16th pel step
512 if (!use_high_bit_depth) {
513 const int a1 = ref[(w + 1) * (y + 0) + x + 0];
514 const int a2 = ref[(w + 1) * (y + 0) + x + 1];
515 const int b1 = ref[(w + 1) * (y + 1) + x + 0];
516 const int b2 = ref[(w + 1) * (y + 1) + x + 1];
517 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
518 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
519 const int r = a + (((b - a) * yoff + 8) >> 4);
520 const int diff = ((r + second_pred[w * y + x] + 1) >> 1) - src[w * y + x];
523 #if CONFIG_VP9_HIGHBITDEPTH
525 uint16_t *ref16 = CONVERT_TO_SHORTPTR(ref);
526 uint16_t *src16 = CONVERT_TO_SHORTPTR(src);
527 uint16_t *sec16 = CONVERT_TO_SHORTPTR(second_pred);
528 const int a1 = ref16[(w + 1) * (y + 0) + x + 0];
529 const int a2 = ref16[(w + 1) * (y + 0) + x + 1];
530 const int b1 = ref16[(w + 1) * (y + 1) + x + 0];
531 const int b2 = ref16[(w + 1) * (y + 1) + x + 1];
532 const int a = a1 + (((a2 - a1) * xoff + 8) >> 4);
533 const int b = b1 + (((b2 - b1) * xoff + 8) >> 4);
534 const int r = a + (((b - a) * yoff + 8) >> 4);
535 const int diff = ((r + sec16[w * y + x] + 1) >> 1) - src16[w * y + x];
538 #endif // CONFIG_VP9_HIGHBITDEPTH
542 RoundHighBitDepth(bit_depth, &se, &sse);
543 *sse_ptr = static_cast<uint32_t>(sse);
544 return static_cast<uint32_t>(sse -
545 ((static_cast<int64_t>(se) * se) >>
549 template<typename SubpelVarianceFunctionType>
550 class SubpelVarianceTest
551 : public ::testing::TestWithParam<tuple<int, int,
552 SubpelVarianceFunctionType, int> > {
554 virtual void SetUp() {
555 const tuple<int, int, SubpelVarianceFunctionType, int>& params =
557 log2width_ = get<0>(params);
558 width_ = 1 << log2width_;
559 log2height_ = get<1>(params);
560 height_ = 1 << log2height_;
561 subpel_variance_ = get<2>(params);
562 if (get<3>(params)) {
563 bit_depth_ = (vpx_bit_depth_t) get<3>(params);
564 use_high_bit_depth_ = true;
566 bit_depth_ = VPX_BITS_8;
567 use_high_bit_depth_ = false;
569 mask_ = (1 << bit_depth_)-1;
571 rnd_.Reset(ACMRandom::DeterministicSeed());
572 block_size_ = width_ * height_;
573 if (!use_high_bit_depth_) {
574 src_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
575 sec_ = reinterpret_cast<uint8_t *>(vpx_memalign(16, block_size_));
576 ref_ = new uint8_t[block_size_ + width_ + height_ + 1];
577 #if CONFIG_VP9_HIGHBITDEPTH
579 src_ = CONVERT_TO_BYTEPTR(
580 reinterpret_cast<uint16_t *>(
581 vpx_memalign(16, block_size_*sizeof(uint16_t))));
582 sec_ = CONVERT_TO_BYTEPTR(
583 reinterpret_cast<uint16_t *>(
584 vpx_memalign(16, block_size_*sizeof(uint16_t))));
585 ref_ = CONVERT_TO_BYTEPTR(
586 new uint16_t[block_size_ + width_ + height_ + 1]);
587 #endif // CONFIG_VP9_HIGHBITDEPTH
589 ASSERT_TRUE(src_ != NULL);
590 ASSERT_TRUE(sec_ != NULL);
591 ASSERT_TRUE(ref_ != NULL);
594 virtual void TearDown() {
595 if (!use_high_bit_depth_) {
599 #if CONFIG_VP9_HIGHBITDEPTH
601 vpx_free(CONVERT_TO_SHORTPTR(src_));
602 delete[] CONVERT_TO_SHORTPTR(ref_);
603 vpx_free(CONVERT_TO_SHORTPTR(sec_));
604 #endif // CONFIG_VP9_HIGHBITDEPTH
606 libvpx_test::ClearSystemState();
611 void ExtremeRefTest();
617 bool use_high_bit_depth_;
618 vpx_bit_depth_t bit_depth_;
619 int width_, log2width_;
620 int height_, log2height_;
621 int block_size_, mask_;
622 SubpelVarianceFunctionType subpel_variance_;
625 template<typename SubpelVarianceFunctionType>
626 void SubpelVarianceTest<SubpelVarianceFunctionType>::RefTest() {
627 for (int x = 0; x < 8; ++x) {
628 for (int y = 0; y < 8; ++y) {
629 if (!use_high_bit_depth_) {
630 for (int j = 0; j < block_size_; j++) {
631 src_[j] = rnd_.Rand8();
633 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
634 ref_[j] = rnd_.Rand8();
636 #if CONFIG_VP9_HIGHBITDEPTH
638 for (int j = 0; j < block_size_; j++) {
639 CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
641 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
642 CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
644 #endif // CONFIG_VP9_HIGHBITDEPTH
646 unsigned int sse1, sse2;
648 ASM_REGISTER_STATE_CHECK(var1 = subpel_variance_(ref_, width_ + 1, x, y,
649 src_, width_, &sse1));
650 const unsigned int var2 = subpel_variance_ref(ref_, src_,
651 log2width_, log2height_,
655 EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
656 EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
661 template<typename SubpelVarianceFunctionType>
662 void SubpelVarianceTest<SubpelVarianceFunctionType>::ExtremeRefTest() {
663 // Compare against reference.
664 // Src: Set the first half of values to 0, the second half to the maximum.
665 // Ref: Set the first half of values to the maximum, the second half to 0.
666 for (int x = 0; x < 8; ++x) {
667 for (int y = 0; y < 8; ++y) {
668 const int half = block_size_ / 2;
669 if (!use_high_bit_depth_) {
670 memset(src_, 0, half);
671 memset(src_ + half, 255, half);
672 memset(ref_, 255, half);
673 memset(ref_ + half, 0, half + width_ + height_ + 1);
674 #if CONFIG_VP9_HIGHBITDEPTH
676 vpx_memset16(CONVERT_TO_SHORTPTR(src_), mask_, half);
677 vpx_memset16(CONVERT_TO_SHORTPTR(src_) + half, 0, half);
678 vpx_memset16(CONVERT_TO_SHORTPTR(ref_), 0, half);
679 vpx_memset16(CONVERT_TO_SHORTPTR(ref_) + half, mask_,
680 half + width_ + height_ + 1);
681 #endif // CONFIG_VP9_HIGHBITDEPTH
683 unsigned int sse1, sse2;
685 ASM_REGISTER_STATE_CHECK(
686 var1 = subpel_variance_(ref_, width_ + 1, x, y, src_, width_, &sse1));
687 const unsigned int var2 =
688 subpel_variance_ref(ref_, src_, log2width_, log2height_,
689 x, y, &sse2, use_high_bit_depth_, bit_depth_);
690 EXPECT_EQ(sse1, sse2) << "for xoffset " << x << " and yoffset " << y;
691 EXPECT_EQ(var1, var2) << "for xoffset " << x << " and yoffset " << y;
697 void SubpelVarianceTest<SubpixAvgVarMxNFunc>::RefTest() {
698 for (int x = 0; x < 8; ++x) {
699 for (int y = 0; y < 8; ++y) {
700 if (!use_high_bit_depth_) {
701 for (int j = 0; j < block_size_; j++) {
702 src_[j] = rnd_.Rand8();
703 sec_[j] = rnd_.Rand8();
705 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
706 ref_[j] = rnd_.Rand8();
708 #if CONFIG_VP9_HIGHBITDEPTH
710 for (int j = 0; j < block_size_; j++) {
711 CONVERT_TO_SHORTPTR(src_)[j] = rnd_.Rand16() & mask_;
712 CONVERT_TO_SHORTPTR(sec_)[j] = rnd_.Rand16() & mask_;
714 for (int j = 0; j < block_size_ + width_ + height_ + 1; j++) {
715 CONVERT_TO_SHORTPTR(ref_)[j] = rnd_.Rand16() & mask_;
717 #endif // CONFIG_VP9_HIGHBITDEPTH
719 unsigned int sse1, sse2;
721 ASM_REGISTER_STATE_CHECK(
722 var1 = subpel_variance_(ref_, width_ + 1, x, y,
723 src_, width_, &sse1, sec_));
724 const unsigned int var2 = subpel_avg_variance_ref(ref_, src_, sec_,
725 log2width_, log2height_,
729 EXPECT_EQ(sse1, sse2) << "at position " << x << ", " << y;
730 EXPECT_EQ(var1, var2) << "at position " << x << ", " << y;
735 typedef MseTest<Get4x4SseFunc> VpxSseTest;
736 typedef MseTest<VarianceMxNFunc> VpxMseTest;
737 typedef VarianceTest<VarianceMxNFunc> VpxVarianceTest;
738 typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxSubpelVarianceTest;
739 typedef SubpelVarianceTest<SubpixAvgVarMxNFunc> VpxSubpelAvgVarianceTest;
741 TEST_P(VpxSseTest, Ref_sse) { RefTest_sse(); }
742 TEST_P(VpxSseTest, Max_sse) { MaxTest_sse(); }
743 TEST_P(VpxMseTest, Ref_mse) { RefTest_mse(); }
744 TEST_P(VpxMseTest, Max_mse) { MaxTest_mse(); }
745 TEST_P(VpxVarianceTest, Zero) { ZeroTest(); }
746 TEST_P(VpxVarianceTest, Ref) { RefTest(); }
747 TEST_P(VpxVarianceTest, RefStride) { RefStrideTest(); }
748 TEST_P(VpxVarianceTest, OneQuarter) { OneQuarterTest(); }
749 TEST_P(SumOfSquaresTest, Const) { ConstTest(); }
750 TEST_P(SumOfSquaresTest, Ref) { RefTest(); }
751 TEST_P(VpxSubpelVarianceTest, Ref) { RefTest(); }
752 TEST_P(VpxSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
753 TEST_P(VpxSubpelAvgVarianceTest, Ref) { RefTest(); }
755 INSTANTIATE_TEST_CASE_P(C, SumOfSquaresTest,
756 ::testing::Values(vpx_get_mb_ss_c));
758 INSTANTIATE_TEST_CASE_P(C, VpxSseTest,
759 ::testing::Values(make_tuple(2, 2,
760 &vpx_get4x4sse_cs_c)));
762 INSTANTIATE_TEST_CASE_P(C, VpxMseTest,
763 ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_c),
764 make_tuple(4, 3, &vpx_mse16x8_c),
765 make_tuple(3, 4, &vpx_mse8x16_c),
766 make_tuple(3, 3, &vpx_mse8x8_c)));
768 INSTANTIATE_TEST_CASE_P(
770 ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_c, 0),
771 make_tuple(6, 5, &vpx_variance64x32_c, 0),
772 make_tuple(5, 6, &vpx_variance32x64_c, 0),
773 make_tuple(5, 5, &vpx_variance32x32_c, 0),
774 make_tuple(5, 4, &vpx_variance32x16_c, 0),
775 make_tuple(4, 5, &vpx_variance16x32_c, 0),
776 make_tuple(4, 4, &vpx_variance16x16_c, 0),
777 make_tuple(4, 3, &vpx_variance16x8_c, 0),
778 make_tuple(3, 4, &vpx_variance8x16_c, 0),
779 make_tuple(3, 3, &vpx_variance8x8_c, 0),
780 make_tuple(3, 2, &vpx_variance8x4_c, 0),
781 make_tuple(2, 3, &vpx_variance4x8_c, 0),
782 make_tuple(2, 2, &vpx_variance4x4_c, 0)));
784 INSTANTIATE_TEST_CASE_P(
785 C, VpxSubpelVarianceTest,
786 ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_c, 0),
787 make_tuple(6, 5, &vpx_sub_pixel_variance64x32_c, 0),
788 make_tuple(5, 6, &vpx_sub_pixel_variance32x64_c, 0),
789 make_tuple(5, 5, &vpx_sub_pixel_variance32x32_c, 0),
790 make_tuple(5, 4, &vpx_sub_pixel_variance32x16_c, 0),
791 make_tuple(4, 5, &vpx_sub_pixel_variance16x32_c, 0),
792 make_tuple(4, 4, &vpx_sub_pixel_variance16x16_c, 0),
793 make_tuple(4, 3, &vpx_sub_pixel_variance16x8_c, 0),
794 make_tuple(3, 4, &vpx_sub_pixel_variance8x16_c, 0),
795 make_tuple(3, 3, &vpx_sub_pixel_variance8x8_c, 0),
796 make_tuple(3, 2, &vpx_sub_pixel_variance8x4_c, 0),
797 make_tuple(2, 3, &vpx_sub_pixel_variance4x8_c, 0),
798 make_tuple(2, 2, &vpx_sub_pixel_variance4x4_c, 0)));
800 INSTANTIATE_TEST_CASE_P(
801 C, VpxSubpelAvgVarianceTest,
802 ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_c, 0),
803 make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_c, 0),
804 make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_c, 0),
805 make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_c, 0),
806 make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_c, 0),
807 make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_c, 0),
808 make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_c, 0),
809 make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_c, 0),
810 make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_c, 0),
811 make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_c, 0),
812 make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_c, 0),
813 make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_c, 0),
814 make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_c, 0)));
#if CONFIG_VP9_HIGHBITDEPTH
// High-bit-depth instantiations; the last tuple element is the bit depth.
typedef MseTest<VarianceMxNFunc> VpxHBDMseTest;
typedef VarianceTest<VarianceMxNFunc> VpxHBDVarianceTest;
typedef SubpelVarianceTest<SubpixVarMxNFunc> VpxHBDSubpelVarianceTest;
typedef SubpelVarianceTest<SubpixAvgVarMxNFunc>
    VpxHBDSubpelAvgVarianceTest;

TEST_P(VpxHBDMseTest, Ref_mse) { RefTest_mse(); }
TEST_P(VpxHBDMseTest, Max_mse) { MaxTest_mse(); }
TEST_P(VpxHBDVarianceTest, Zero) { ZeroTest(); }
TEST_P(VpxHBDVarianceTest, Ref) { RefTest(); }
TEST_P(VpxHBDVarianceTest, RefStride) { RefStrideTest(); }
TEST_P(VpxHBDVarianceTest, OneQuarter) { OneQuarterTest(); }
TEST_P(VpxHBDSubpelVarianceTest, Ref) { RefTest(); }
TEST_P(VpxHBDSubpelVarianceTest, ExtremeRef) { ExtremeRefTest(); }
TEST_P(VpxHBDSubpelAvgVarianceTest, Ref) { RefTest(); }

/* TODO(debargha): This test does not support the highbd version
INSTANTIATE_TEST_CASE_P(
    C, VpxHBDMseTest,
    ::testing::Values(make_tuple(4, 4, &vpx_highbd_12_mse16x16_c),
                      make_tuple(4, 4, &vpx_highbd_12_mse16x8_c),
                      make_tuple(4, 4, &vpx_highbd_12_mse8x16_c),
                      make_tuple(4, 4, &vpx_highbd_12_mse8x8_c),
                      make_tuple(4, 4, &vpx_highbd_10_mse16x16_c),
                      make_tuple(4, 4, &vpx_highbd_10_mse16x8_c),
                      make_tuple(4, 4, &vpx_highbd_10_mse8x16_c),
                      make_tuple(4, 4, &vpx_highbd_10_mse8x8_c),
                      make_tuple(4, 4, &vpx_highbd_8_mse16x16_c),
                      make_tuple(4, 4, &vpx_highbd_8_mse16x8_c),
                      make_tuple(4, 4, &vpx_highbd_8_mse8x16_c),
                      make_tuple(4, 4, &vpx_highbd_8_mse8x8_c)));
*/

INSTANTIATE_TEST_CASE_P(
    C, VpxHBDVarianceTest,
    ::testing::Values(make_tuple(6, 6, &vpx_highbd_12_variance64x64_c, 12),
                      make_tuple(6, 5, &vpx_highbd_12_variance64x32_c, 12),
                      make_tuple(5, 6, &vpx_highbd_12_variance32x64_c, 12),
                      make_tuple(5, 5, &vpx_highbd_12_variance32x32_c, 12),
                      make_tuple(5, 4, &vpx_highbd_12_variance32x16_c, 12),
                      make_tuple(4, 5, &vpx_highbd_12_variance16x32_c, 12),
                      make_tuple(4, 4, &vpx_highbd_12_variance16x16_c, 12),
                      make_tuple(4, 3, &vpx_highbd_12_variance16x8_c, 12),
                      make_tuple(3, 4, &vpx_highbd_12_variance8x16_c, 12),
                      make_tuple(3, 3, &vpx_highbd_12_variance8x8_c, 12),
                      make_tuple(3, 2, &vpx_highbd_12_variance8x4_c, 12),
                      make_tuple(2, 3, &vpx_highbd_12_variance4x8_c, 12),
                      make_tuple(2, 2, &vpx_highbd_12_variance4x4_c, 12),
                      make_tuple(6, 6, &vpx_highbd_10_variance64x64_c, 10),
                      make_tuple(6, 5, &vpx_highbd_10_variance64x32_c, 10),
                      make_tuple(5, 6, &vpx_highbd_10_variance32x64_c, 10),
                      make_tuple(5, 5, &vpx_highbd_10_variance32x32_c, 10),
                      make_tuple(5, 4, &vpx_highbd_10_variance32x16_c, 10),
                      make_tuple(4, 5, &vpx_highbd_10_variance16x32_c, 10),
                      make_tuple(4, 4, &vpx_highbd_10_variance16x16_c, 10),
                      make_tuple(4, 3, &vpx_highbd_10_variance16x8_c, 10),
                      make_tuple(3, 4, &vpx_highbd_10_variance8x16_c, 10),
                      make_tuple(3, 3, &vpx_highbd_10_variance8x8_c, 10),
                      make_tuple(3, 2, &vpx_highbd_10_variance8x4_c, 10),
                      make_tuple(2, 3, &vpx_highbd_10_variance4x8_c, 10),
                      make_tuple(2, 2, &vpx_highbd_10_variance4x4_c, 10),
                      make_tuple(6, 6, &vpx_highbd_8_variance64x64_c, 8),
                      make_tuple(6, 5, &vpx_highbd_8_variance64x32_c, 8),
                      make_tuple(5, 6, &vpx_highbd_8_variance32x64_c, 8),
                      make_tuple(5, 5, &vpx_highbd_8_variance32x32_c, 8),
                      make_tuple(5, 4, &vpx_highbd_8_variance32x16_c, 8),
                      make_tuple(4, 5, &vpx_highbd_8_variance16x32_c, 8),
                      make_tuple(4, 4, &vpx_highbd_8_variance16x16_c, 8),
                      make_tuple(4, 3, &vpx_highbd_8_variance16x8_c, 8),
                      make_tuple(3, 4, &vpx_highbd_8_variance8x16_c, 8),
                      make_tuple(3, 3, &vpx_highbd_8_variance8x8_c, 8),
                      make_tuple(3, 2, &vpx_highbd_8_variance8x4_c, 8),
                      make_tuple(2, 3, &vpx_highbd_8_variance4x8_c, 8),
                      make_tuple(2, 2, &vpx_highbd_8_variance4x4_c, 8)));

INSTANTIATE_TEST_CASE_P(
    C, VpxHBDSubpelVarianceTest,
    ::testing::Values(
        make_tuple(6, 6, &vpx_highbd_8_sub_pixel_variance64x64_c, 8),
        make_tuple(6, 5, &vpx_highbd_8_sub_pixel_variance64x32_c, 8),
        make_tuple(5, 6, &vpx_highbd_8_sub_pixel_variance32x64_c, 8),
        make_tuple(5, 5, &vpx_highbd_8_sub_pixel_variance32x32_c, 8),
        make_tuple(5, 4, &vpx_highbd_8_sub_pixel_variance32x16_c, 8),
        make_tuple(4, 5, &vpx_highbd_8_sub_pixel_variance16x32_c, 8),
        make_tuple(4, 4, &vpx_highbd_8_sub_pixel_variance16x16_c, 8),
        make_tuple(4, 3, &vpx_highbd_8_sub_pixel_variance16x8_c, 8),
        make_tuple(3, 4, &vpx_highbd_8_sub_pixel_variance8x16_c, 8),
        make_tuple(3, 3, &vpx_highbd_8_sub_pixel_variance8x8_c, 8),
        make_tuple(3, 2, &vpx_highbd_8_sub_pixel_variance8x4_c, 8),
        make_tuple(2, 3, &vpx_highbd_8_sub_pixel_variance4x8_c, 8),
        make_tuple(2, 2, &vpx_highbd_8_sub_pixel_variance4x4_c, 8),
        make_tuple(6, 6, &vpx_highbd_10_sub_pixel_variance64x64_c, 10),
        make_tuple(6, 5, &vpx_highbd_10_sub_pixel_variance64x32_c, 10),
        make_tuple(5, 6, &vpx_highbd_10_sub_pixel_variance32x64_c, 10),
        make_tuple(5, 5, &vpx_highbd_10_sub_pixel_variance32x32_c, 10),
        make_tuple(5, 4, &vpx_highbd_10_sub_pixel_variance32x16_c, 10),
        make_tuple(4, 5, &vpx_highbd_10_sub_pixel_variance16x32_c, 10),
        make_tuple(4, 4, &vpx_highbd_10_sub_pixel_variance16x16_c, 10),
        make_tuple(4, 3, &vpx_highbd_10_sub_pixel_variance16x8_c, 10),
        make_tuple(3, 4, &vpx_highbd_10_sub_pixel_variance8x16_c, 10),
        make_tuple(3, 3, &vpx_highbd_10_sub_pixel_variance8x8_c, 10),
        make_tuple(3, 2, &vpx_highbd_10_sub_pixel_variance8x4_c, 10),
        make_tuple(2, 3, &vpx_highbd_10_sub_pixel_variance4x8_c, 10),
        make_tuple(2, 2, &vpx_highbd_10_sub_pixel_variance4x4_c, 10),
        make_tuple(6, 6, &vpx_highbd_12_sub_pixel_variance64x64_c, 12),
        make_tuple(6, 5, &vpx_highbd_12_sub_pixel_variance64x32_c, 12),
        make_tuple(5, 6, &vpx_highbd_12_sub_pixel_variance32x64_c, 12),
        make_tuple(5, 5, &vpx_highbd_12_sub_pixel_variance32x32_c, 12),
        make_tuple(5, 4, &vpx_highbd_12_sub_pixel_variance32x16_c, 12),
        make_tuple(4, 5, &vpx_highbd_12_sub_pixel_variance16x32_c, 12),
        make_tuple(4, 4, &vpx_highbd_12_sub_pixel_variance16x16_c, 12),
        make_tuple(4, 3, &vpx_highbd_12_sub_pixel_variance16x8_c, 12),
        make_tuple(3, 4, &vpx_highbd_12_sub_pixel_variance8x16_c, 12),
        make_tuple(3, 3, &vpx_highbd_12_sub_pixel_variance8x8_c, 12),
        make_tuple(3, 2, &vpx_highbd_12_sub_pixel_variance8x4_c, 12),
        make_tuple(2, 3, &vpx_highbd_12_sub_pixel_variance4x8_c, 12),
        make_tuple(2, 2, &vpx_highbd_12_sub_pixel_variance4x4_c, 12)));

INSTANTIATE_TEST_CASE_P(
    C, VpxHBDSubpelAvgVarianceTest,
    ::testing::Values(
        make_tuple(6, 6, &vpx_highbd_8_sub_pixel_avg_variance64x64_c, 8),
        make_tuple(6, 5, &vpx_highbd_8_sub_pixel_avg_variance64x32_c, 8),
        make_tuple(5, 6, &vpx_highbd_8_sub_pixel_avg_variance32x64_c, 8),
        make_tuple(5, 5, &vpx_highbd_8_sub_pixel_avg_variance32x32_c, 8),
        make_tuple(5, 4, &vpx_highbd_8_sub_pixel_avg_variance32x16_c, 8),
        make_tuple(4, 5, &vpx_highbd_8_sub_pixel_avg_variance16x32_c, 8),
        make_tuple(4, 4, &vpx_highbd_8_sub_pixel_avg_variance16x16_c, 8),
        make_tuple(4, 3, &vpx_highbd_8_sub_pixel_avg_variance16x8_c, 8),
        make_tuple(3, 4, &vpx_highbd_8_sub_pixel_avg_variance8x16_c, 8),
        make_tuple(3, 3, &vpx_highbd_8_sub_pixel_avg_variance8x8_c, 8),
        make_tuple(3, 2, &vpx_highbd_8_sub_pixel_avg_variance8x4_c, 8),
        make_tuple(2, 3, &vpx_highbd_8_sub_pixel_avg_variance4x8_c, 8),
        make_tuple(2, 2, &vpx_highbd_8_sub_pixel_avg_variance4x4_c, 8),
        make_tuple(6, 6, &vpx_highbd_10_sub_pixel_avg_variance64x64_c, 10),
        make_tuple(6, 5, &vpx_highbd_10_sub_pixel_avg_variance64x32_c, 10),
        make_tuple(5, 6, &vpx_highbd_10_sub_pixel_avg_variance32x64_c, 10),
        make_tuple(5, 5, &vpx_highbd_10_sub_pixel_avg_variance32x32_c, 10),
        make_tuple(5, 4, &vpx_highbd_10_sub_pixel_avg_variance32x16_c, 10),
        make_tuple(4, 5, &vpx_highbd_10_sub_pixel_avg_variance16x32_c, 10),
        make_tuple(4, 4, &vpx_highbd_10_sub_pixel_avg_variance16x16_c, 10),
        make_tuple(4, 3, &vpx_highbd_10_sub_pixel_avg_variance16x8_c, 10),
        make_tuple(3, 4, &vpx_highbd_10_sub_pixel_avg_variance8x16_c, 10),
        make_tuple(3, 3, &vpx_highbd_10_sub_pixel_avg_variance8x8_c, 10),
        make_tuple(3, 2, &vpx_highbd_10_sub_pixel_avg_variance8x4_c, 10),
        make_tuple(2, 3, &vpx_highbd_10_sub_pixel_avg_variance4x8_c, 10),
        make_tuple(2, 2, &vpx_highbd_10_sub_pixel_avg_variance4x4_c, 10),
        make_tuple(6, 6, &vpx_highbd_12_sub_pixel_avg_variance64x64_c, 12),
        make_tuple(6, 5, &vpx_highbd_12_sub_pixel_avg_variance64x32_c, 12),
        make_tuple(5, 6, &vpx_highbd_12_sub_pixel_avg_variance32x64_c, 12),
        make_tuple(5, 5, &vpx_highbd_12_sub_pixel_avg_variance32x32_c, 12),
        make_tuple(5, 4, &vpx_highbd_12_sub_pixel_avg_variance32x16_c, 12),
        make_tuple(4, 5, &vpx_highbd_12_sub_pixel_avg_variance16x32_c, 12),
        make_tuple(4, 4, &vpx_highbd_12_sub_pixel_avg_variance16x16_c, 12),
        make_tuple(4, 3, &vpx_highbd_12_sub_pixel_avg_variance16x8_c, 12),
        make_tuple(3, 4, &vpx_highbd_12_sub_pixel_avg_variance8x16_c, 12),
        make_tuple(3, 3, &vpx_highbd_12_sub_pixel_avg_variance8x8_c, 12),
        make_tuple(3, 2, &vpx_highbd_12_sub_pixel_avg_variance8x4_c, 12),
        make_tuple(2, 3, &vpx_highbd_12_sub_pixel_avg_variance4x8_c, 12),
        make_tuple(2, 2, &vpx_highbd_12_sub_pixel_avg_variance4x4_c, 12)));
#endif  // CONFIG_VP9_HIGHBITDEPTH
// SSE2-optimized variance/MSE/sum-of-squares functions. The whole section
// must sit inside the HAVE_SSE2 guard (restored below together with several
// dropped "::testing::Values(" continuation lines and the missing "*/"
// terminator of the commented-out VpxHBDMseTest block) so that builds for
// targets without SSE2 do not reference these symbols.
#if HAVE_SSE2
INSTANTIATE_TEST_CASE_P(SSE2, SumOfSquaresTest,
                        ::testing::Values(vpx_get_mb_ss_sse2));

INSTANTIATE_TEST_CASE_P(SSE2, VpxMseTest,
                        ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_sse2),
                                          make_tuple(4, 3, &vpx_mse16x8_sse2),
                                          make_tuple(3, 4, &vpx_mse8x16_sse2),
                                          make_tuple(3, 3, &vpx_mse8x8_sse2)));

INSTANTIATE_TEST_CASE_P(
    SSE2, VpxVarianceTest,
    ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_sse2, 0),
                      make_tuple(6, 5, &vpx_variance64x32_sse2, 0),
                      make_tuple(5, 6, &vpx_variance32x64_sse2, 0),
                      make_tuple(5, 5, &vpx_variance32x32_sse2, 0),
                      make_tuple(5, 4, &vpx_variance32x16_sse2, 0),
                      make_tuple(4, 5, &vpx_variance16x32_sse2, 0),
                      make_tuple(4, 4, &vpx_variance16x16_sse2, 0),
                      make_tuple(4, 3, &vpx_variance16x8_sse2, 0),
                      make_tuple(3, 4, &vpx_variance8x16_sse2, 0),
                      make_tuple(3, 3, &vpx_variance8x8_sse2, 0),
                      make_tuple(3, 2, &vpx_variance8x4_sse2, 0),
                      make_tuple(2, 3, &vpx_variance4x8_sse2, 0),
                      make_tuple(2, 2, &vpx_variance4x4_sse2, 0)));

// Sub-pixel kernels below are implemented with x86inc.asm helpers.
#if CONFIG_USE_X86INC
INSTANTIATE_TEST_CASE_P(
    SSE2, VpxSubpelVarianceTest,
    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_sse2, 0),
                      make_tuple(6, 5, &vpx_sub_pixel_variance64x32_sse2, 0),
                      make_tuple(5, 6, &vpx_sub_pixel_variance32x64_sse2, 0),
                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_sse2, 0),
                      make_tuple(5, 4, &vpx_sub_pixel_variance32x16_sse2, 0),
                      make_tuple(4, 5, &vpx_sub_pixel_variance16x32_sse2, 0),
                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_sse2, 0),
                      make_tuple(4, 3, &vpx_sub_pixel_variance16x8_sse2, 0),
                      make_tuple(3, 4, &vpx_sub_pixel_variance8x16_sse2, 0),
                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_sse2, 0),
                      make_tuple(3, 2, &vpx_sub_pixel_variance8x4_sse2, 0),
                      make_tuple(2, 3, &vpx_sub_pixel_variance4x8_sse2, 0),
                      make_tuple(2, 2, &vpx_sub_pixel_variance4x4_sse2, 0)));

INSTANTIATE_TEST_CASE_P(
    SSE2, VpxSubpelAvgVarianceTest,
    ::testing::Values(
        make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_sse2, 0),
        make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_sse2, 0),
        make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_sse2, 0),
        make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_sse2, 0),
        make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_sse2, 0),
        make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_sse2, 0),
        make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_sse2, 0),
        make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_sse2, 0),
        make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_sse2, 0),
        make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_sse2, 0),
        make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_sse2, 0),
        make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_sse2, 0),
        make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_sse2, 0)));
#endif  // CONFIG_USE_X86INC

#if CONFIG_VP9_HIGHBITDEPTH
/* TODO(debargha): This test does not support the highbd version
INSTANTIATE_TEST_CASE_P(
    SSE2, VpxHBDMseTest,
    ::testing::Values(make_tuple(4, 4, &vpx_highbd_12_mse16x16_sse2),
                      make_tuple(4, 3, &vpx_highbd_12_mse16x8_sse2),
                      make_tuple(3, 4, &vpx_highbd_12_mse8x16_sse2),
                      make_tuple(3, 3, &vpx_highbd_12_mse8x8_sse2),
                      make_tuple(4, 4, &vpx_highbd_10_mse16x16_sse2),
                      make_tuple(4, 3, &vpx_highbd_10_mse16x8_sse2),
                      make_tuple(3, 4, &vpx_highbd_10_mse8x16_sse2),
                      make_tuple(3, 3, &vpx_highbd_10_mse8x8_sse2),
                      make_tuple(4, 4, &vpx_highbd_8_mse16x16_sse2),
                      make_tuple(4, 3, &vpx_highbd_8_mse16x8_sse2),
                      make_tuple(3, 4, &vpx_highbd_8_mse8x16_sse2),
                      make_tuple(3, 3, &vpx_highbd_8_mse8x8_sse2)));
*/

INSTANTIATE_TEST_CASE_P(
    SSE2, VpxHBDVarianceTest,
    ::testing::Values(make_tuple(6, 6, &vpx_highbd_12_variance64x64_sse2, 12),
                      make_tuple(6, 5, &vpx_highbd_12_variance64x32_sse2, 12),
                      make_tuple(5, 6, &vpx_highbd_12_variance32x64_sse2, 12),
                      make_tuple(5, 5, &vpx_highbd_12_variance32x32_sse2, 12),
                      make_tuple(5, 4, &vpx_highbd_12_variance32x16_sse2, 12),
                      make_tuple(4, 5, &vpx_highbd_12_variance16x32_sse2, 12),
                      make_tuple(4, 4, &vpx_highbd_12_variance16x16_sse2, 12),
                      make_tuple(4, 3, &vpx_highbd_12_variance16x8_sse2, 12),
                      make_tuple(3, 4, &vpx_highbd_12_variance8x16_sse2, 12),
                      make_tuple(3, 3, &vpx_highbd_12_variance8x8_sse2, 12),
                      make_tuple(6, 6, &vpx_highbd_10_variance64x64_sse2, 10),
                      make_tuple(6, 5, &vpx_highbd_10_variance64x32_sse2, 10),
                      make_tuple(5, 6, &vpx_highbd_10_variance32x64_sse2, 10),
                      make_tuple(5, 5, &vpx_highbd_10_variance32x32_sse2, 10),
                      make_tuple(5, 4, &vpx_highbd_10_variance32x16_sse2, 10),
                      make_tuple(4, 5, &vpx_highbd_10_variance16x32_sse2, 10),
                      make_tuple(4, 4, &vpx_highbd_10_variance16x16_sse2, 10),
                      make_tuple(4, 3, &vpx_highbd_10_variance16x8_sse2, 10),
                      make_tuple(3, 4, &vpx_highbd_10_variance8x16_sse2, 10),
                      make_tuple(3, 3, &vpx_highbd_10_variance8x8_sse2, 10),
                      make_tuple(6, 6, &vpx_highbd_8_variance64x64_sse2, 8),
                      make_tuple(6, 5, &vpx_highbd_8_variance64x32_sse2, 8),
                      make_tuple(5, 6, &vpx_highbd_8_variance32x64_sse2, 8),
                      make_tuple(5, 5, &vpx_highbd_8_variance32x32_sse2, 8),
                      make_tuple(5, 4, &vpx_highbd_8_variance32x16_sse2, 8),
                      make_tuple(4, 5, &vpx_highbd_8_variance16x32_sse2, 8),
                      make_tuple(4, 4, &vpx_highbd_8_variance16x16_sse2, 8),
                      make_tuple(4, 3, &vpx_highbd_8_variance16x8_sse2, 8),
                      make_tuple(3, 4, &vpx_highbd_8_variance8x16_sse2, 8),
                      make_tuple(3, 3, &vpx_highbd_8_variance8x8_sse2, 8)));

#if CONFIG_USE_X86INC
INSTANTIATE_TEST_CASE_P(
    SSE2, VpxHBDSubpelVarianceTest,
    ::testing::Values(
        make_tuple(6, 6, &vpx_highbd_12_sub_pixel_variance64x64_sse2, 12),
        make_tuple(6, 5, &vpx_highbd_12_sub_pixel_variance64x32_sse2, 12),
        make_tuple(5, 6, &vpx_highbd_12_sub_pixel_variance32x64_sse2, 12),
        make_tuple(5, 5, &vpx_highbd_12_sub_pixel_variance32x32_sse2, 12),
        make_tuple(5, 4, &vpx_highbd_12_sub_pixel_variance32x16_sse2, 12),
        make_tuple(4, 5, &vpx_highbd_12_sub_pixel_variance16x32_sse2, 12),
        make_tuple(4, 4, &vpx_highbd_12_sub_pixel_variance16x16_sse2, 12),
        make_tuple(4, 3, &vpx_highbd_12_sub_pixel_variance16x8_sse2, 12),
        make_tuple(3, 4, &vpx_highbd_12_sub_pixel_variance8x16_sse2, 12),
        make_tuple(3, 3, &vpx_highbd_12_sub_pixel_variance8x8_sse2, 12),
        make_tuple(3, 2, &vpx_highbd_12_sub_pixel_variance8x4_sse2, 12),
        make_tuple(6, 6, &vpx_highbd_10_sub_pixel_variance64x64_sse2, 10),
        make_tuple(6, 5, &vpx_highbd_10_sub_pixel_variance64x32_sse2, 10),
        make_tuple(5, 6, &vpx_highbd_10_sub_pixel_variance32x64_sse2, 10),
        make_tuple(5, 5, &vpx_highbd_10_sub_pixel_variance32x32_sse2, 10),
        make_tuple(5, 4, &vpx_highbd_10_sub_pixel_variance32x16_sse2, 10),
        make_tuple(4, 5, &vpx_highbd_10_sub_pixel_variance16x32_sse2, 10),
        make_tuple(4, 4, &vpx_highbd_10_sub_pixel_variance16x16_sse2, 10),
        make_tuple(4, 3, &vpx_highbd_10_sub_pixel_variance16x8_sse2, 10),
        make_tuple(3, 4, &vpx_highbd_10_sub_pixel_variance8x16_sse2, 10),
        make_tuple(3, 3, &vpx_highbd_10_sub_pixel_variance8x8_sse2, 10),
        make_tuple(3, 2, &vpx_highbd_10_sub_pixel_variance8x4_sse2, 10),
        make_tuple(6, 6, &vpx_highbd_8_sub_pixel_variance64x64_sse2, 8),
        make_tuple(6, 5, &vpx_highbd_8_sub_pixel_variance64x32_sse2, 8),
        make_tuple(5, 6, &vpx_highbd_8_sub_pixel_variance32x64_sse2, 8),
        make_tuple(5, 5, &vpx_highbd_8_sub_pixel_variance32x32_sse2, 8),
        make_tuple(5, 4, &vpx_highbd_8_sub_pixel_variance32x16_sse2, 8),
        make_tuple(4, 5, &vpx_highbd_8_sub_pixel_variance16x32_sse2, 8),
        make_tuple(4, 4, &vpx_highbd_8_sub_pixel_variance16x16_sse2, 8),
        make_tuple(4, 3, &vpx_highbd_8_sub_pixel_variance16x8_sse2, 8),
        make_tuple(3, 4, &vpx_highbd_8_sub_pixel_variance8x16_sse2, 8),
        make_tuple(3, 3, &vpx_highbd_8_sub_pixel_variance8x8_sse2, 8),
        make_tuple(3, 2, &vpx_highbd_8_sub_pixel_variance8x4_sse2, 8)));

INSTANTIATE_TEST_CASE_P(
    SSE2, VpxHBDSubpelAvgVarianceTest,
    ::testing::Values(
        make_tuple(6, 6, &vpx_highbd_12_sub_pixel_avg_variance64x64_sse2, 12),
        make_tuple(6, 5, &vpx_highbd_12_sub_pixel_avg_variance64x32_sse2, 12),
        make_tuple(5, 6, &vpx_highbd_12_sub_pixel_avg_variance32x64_sse2, 12),
        make_tuple(5, 5, &vpx_highbd_12_sub_pixel_avg_variance32x32_sse2, 12),
        make_tuple(5, 4, &vpx_highbd_12_sub_pixel_avg_variance32x16_sse2, 12),
        make_tuple(4, 5, &vpx_highbd_12_sub_pixel_avg_variance16x32_sse2, 12),
        make_tuple(4, 4, &vpx_highbd_12_sub_pixel_avg_variance16x16_sse2, 12),
        make_tuple(4, 3, &vpx_highbd_12_sub_pixel_avg_variance16x8_sse2, 12),
        make_tuple(3, 4, &vpx_highbd_12_sub_pixel_avg_variance8x16_sse2, 12),
        make_tuple(3, 3, &vpx_highbd_12_sub_pixel_avg_variance8x8_sse2, 12),
        make_tuple(3, 2, &vpx_highbd_12_sub_pixel_avg_variance8x4_sse2, 12),
        make_tuple(6, 6, &vpx_highbd_10_sub_pixel_avg_variance64x64_sse2, 10),
        make_tuple(6, 5, &vpx_highbd_10_sub_pixel_avg_variance64x32_sse2, 10),
        make_tuple(5, 6, &vpx_highbd_10_sub_pixel_avg_variance32x64_sse2, 10),
        make_tuple(5, 5, &vpx_highbd_10_sub_pixel_avg_variance32x32_sse2, 10),
        make_tuple(5, 4, &vpx_highbd_10_sub_pixel_avg_variance32x16_sse2, 10),
        make_tuple(4, 5, &vpx_highbd_10_sub_pixel_avg_variance16x32_sse2, 10),
        make_tuple(4, 4, &vpx_highbd_10_sub_pixel_avg_variance16x16_sse2, 10),
        make_tuple(4, 3, &vpx_highbd_10_sub_pixel_avg_variance16x8_sse2, 10),
        make_tuple(3, 4, &vpx_highbd_10_sub_pixel_avg_variance8x16_sse2, 10),
        make_tuple(3, 3, &vpx_highbd_10_sub_pixel_avg_variance8x8_sse2, 10),
        make_tuple(3, 2, &vpx_highbd_10_sub_pixel_avg_variance8x4_sse2, 10),
        make_tuple(6, 6, &vpx_highbd_8_sub_pixel_avg_variance64x64_sse2, 8),
        make_tuple(6, 5, &vpx_highbd_8_sub_pixel_avg_variance64x32_sse2, 8),
        make_tuple(5, 6, &vpx_highbd_8_sub_pixel_avg_variance32x64_sse2, 8),
        make_tuple(5, 5, &vpx_highbd_8_sub_pixel_avg_variance32x32_sse2, 8),
        make_tuple(5, 4, &vpx_highbd_8_sub_pixel_avg_variance32x16_sse2, 8),
        make_tuple(4, 5, &vpx_highbd_8_sub_pixel_avg_variance16x32_sse2, 8),
        make_tuple(4, 4, &vpx_highbd_8_sub_pixel_avg_variance16x16_sse2, 8),
        make_tuple(4, 3, &vpx_highbd_8_sub_pixel_avg_variance16x8_sse2, 8),
        make_tuple(3, 4, &vpx_highbd_8_sub_pixel_avg_variance8x16_sse2, 8),
        make_tuple(3, 3, &vpx_highbd_8_sub_pixel_avg_variance8x8_sse2, 8),
        make_tuple(3, 2, &vpx_highbd_8_sub_pixel_avg_variance8x4_sse2, 8)));
#endif  // CONFIG_USE_X86INC
#endif  // CONFIG_VP9_HIGHBITDEPTH
#endif  // HAVE_SSE2
// SSSE3-optimized sub-pixel variance functions (x86inc.asm based).
// Restored the missing "#if HAVE_SSSE3" guard (its "#endif  // HAVE_SSSE3"
// is already present below) and the dropped "::testing::Values(" line in
// the avg-variance instantiation.
#if HAVE_SSSE3
#if CONFIG_USE_X86INC
INSTANTIATE_TEST_CASE_P(
    SSSE3, VpxSubpelVarianceTest,
    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_ssse3, 0),
                      make_tuple(6, 5, &vpx_sub_pixel_variance64x32_ssse3, 0),
                      make_tuple(5, 6, &vpx_sub_pixel_variance32x64_ssse3, 0),
                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_ssse3, 0),
                      make_tuple(5, 4, &vpx_sub_pixel_variance32x16_ssse3, 0),
                      make_tuple(4, 5, &vpx_sub_pixel_variance16x32_ssse3, 0),
                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_ssse3, 0),
                      make_tuple(4, 3, &vpx_sub_pixel_variance16x8_ssse3, 0),
                      make_tuple(3, 4, &vpx_sub_pixel_variance8x16_ssse3, 0),
                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_ssse3, 0),
                      make_tuple(3, 2, &vpx_sub_pixel_variance8x4_ssse3, 0),
                      make_tuple(2, 3, &vpx_sub_pixel_variance4x8_ssse3, 0),
                      make_tuple(2, 2, &vpx_sub_pixel_variance4x4_ssse3, 0)));

INSTANTIATE_TEST_CASE_P(
    SSSE3, VpxSubpelAvgVarianceTest,
    ::testing::Values(
        make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_ssse3, 0),
        make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_ssse3, 0),
        make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_ssse3, 0),
        make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_ssse3, 0),
        make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_ssse3, 0),
        make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_ssse3, 0),
        make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_ssse3, 0),
        make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_ssse3, 0),
        make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_ssse3, 0),
        make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_ssse3, 0),
        make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_ssse3, 0),
        make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_ssse3, 0),
        make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_ssse3, 0)));
#endif  // CONFIG_USE_X86INC
#endif  // HAVE_SSSE3
// AVX2-optimized functions; only a subset of block sizes has AVX2 kernels.
// Restored the missing "#if HAVE_AVX2"/"#endif" guard pair and the dropped
// "::testing::Values(" line in the avg-variance instantiation.
#if HAVE_AVX2
INSTANTIATE_TEST_CASE_P(AVX2, VpxMseTest,
                        ::testing::Values(make_tuple(4, 4,
                                                     &vpx_mse16x16_avx2)));

INSTANTIATE_TEST_CASE_P(
    AVX2, VpxVarianceTest,
    ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_avx2, 0),
                      make_tuple(6, 5, &vpx_variance64x32_avx2, 0),
                      make_tuple(5, 5, &vpx_variance32x32_avx2, 0),
                      make_tuple(5, 4, &vpx_variance32x16_avx2, 0),
                      make_tuple(4, 4, &vpx_variance16x16_avx2, 0)));

INSTANTIATE_TEST_CASE_P(
    AVX2, VpxSubpelVarianceTest,
    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_avx2, 0),
                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_avx2, 0)));

INSTANTIATE_TEST_CASE_P(
    AVX2, VpxSubpelAvgVarianceTest,
    ::testing::Values(
        make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_avx2, 0),
        make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_avx2, 0)));
#endif  // HAVE_AVX2
// ARMv6 "media" instruction-set optimizations.
// Restored the missing "#if HAVE_MEDIA" guard; its matching
// "#endif  // HAVE_MEDIA" is already present at the end of this section.
#if HAVE_MEDIA
INSTANTIATE_TEST_CASE_P(MEDIA, VpxMseTest,
                        ::testing::Values(make_tuple(4, 4,
                                                     &vpx_mse16x16_media)));

INSTANTIATE_TEST_CASE_P(
    MEDIA, VpxVarianceTest,
    ::testing::Values(make_tuple(4, 4, &vpx_variance16x16_media, 0),
                      make_tuple(3, 3, &vpx_variance8x8_media, 0)));

INSTANTIATE_TEST_CASE_P(
    MEDIA, VpxSubpelVarianceTest,
    ::testing::Values(make_tuple(4, 4, &vpx_sub_pixel_variance16x16_media, 0),
                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_media, 0)));
#endif  // HAVE_MEDIA
// ARM NEON optimizations.
// Restored the missing "#if HAVE_NEON"/"#endif" guard pair around the
// section so non-NEON builds do not reference these symbols.
#if HAVE_NEON
INSTANTIATE_TEST_CASE_P(NEON, VpxSseTest,
                        ::testing::Values(make_tuple(2, 2,
                                                     &vpx_get4x4sse_cs_neon)));

INSTANTIATE_TEST_CASE_P(NEON, VpxMseTest,
                        ::testing::Values(make_tuple(4, 4,
                                                     &vpx_mse16x16_neon)));

INSTANTIATE_TEST_CASE_P(
    NEON, VpxVarianceTest,
    ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_neon, 0),
                      make_tuple(6, 5, &vpx_variance64x32_neon, 0),
                      make_tuple(5, 6, &vpx_variance32x64_neon, 0),
                      make_tuple(5, 5, &vpx_variance32x32_neon, 0),
                      make_tuple(4, 4, &vpx_variance16x16_neon, 0),
                      make_tuple(4, 3, &vpx_variance16x8_neon, 0),
                      make_tuple(3, 4, &vpx_variance8x16_neon, 0),
                      make_tuple(3, 3, &vpx_variance8x8_neon, 0)));

INSTANTIATE_TEST_CASE_P(
    NEON, VpxSubpelVarianceTest,
    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_variance64x64_neon, 0),
                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_neon, 0),
                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_neon, 0),
                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_neon, 0)));
#endif  // HAVE_NEON
// MIPS MSA optimizations.
// NOTE(review): every other SIMD section in this file is wrapped in an
// "#if HAVE_<isa>" / "#endif" pair; the "#if HAVE_MSA" guard is not visible
// immediately above this section -- confirm it (and its matching
// "#endif  // HAVE_MSA" after the last instantiation) are present in the
// full file.
INSTANTIATE_TEST_CASE_P(MSA, SumOfSquaresTest,
                        ::testing::Values(vpx_get_mb_ss_msa));

INSTANTIATE_TEST_CASE_P(MSA, VpxSseTest,
                        ::testing::Values(make_tuple(2, 2,
                                                     &vpx_get4x4sse_cs_msa)));

INSTANTIATE_TEST_CASE_P(MSA, VpxMseTest,
                        ::testing::Values(make_tuple(4, 4, &vpx_mse16x16_msa),
                                          make_tuple(4, 3, &vpx_mse16x8_msa),
                                          make_tuple(3, 4, &vpx_mse8x16_msa),
                                          make_tuple(3, 3, &vpx_mse8x8_msa)));

INSTANTIATE_TEST_CASE_P(
    MSA, VpxVarianceTest,
    ::testing::Values(make_tuple(6, 6, &vpx_variance64x64_msa, 0),
                      make_tuple(6, 5, &vpx_variance64x32_msa, 0),
                      make_tuple(5, 6, &vpx_variance32x64_msa, 0),
                      make_tuple(5, 5, &vpx_variance32x32_msa, 0),
                      make_tuple(5, 4, &vpx_variance32x16_msa, 0),
                      make_tuple(4, 5, &vpx_variance16x32_msa, 0),
                      make_tuple(4, 4, &vpx_variance16x16_msa, 0),
                      make_tuple(4, 3, &vpx_variance16x8_msa, 0),
                      make_tuple(3, 4, &vpx_variance8x16_msa, 0),
                      make_tuple(3, 3, &vpx_variance8x8_msa, 0),
                      make_tuple(3, 2, &vpx_variance8x4_msa, 0),
                      make_tuple(2, 3, &vpx_variance4x8_msa, 0),
                      make_tuple(2, 2, &vpx_variance4x4_msa, 0)));

// Note: unlike the other SIMD sections this list is ordered smallest block
// first; keep the order as-is, since tuple order determines generated gtest
// parameter indices/names.
INSTANTIATE_TEST_CASE_P(
    MSA, VpxSubpelVarianceTest,
    ::testing::Values(make_tuple(2, 2, &vpx_sub_pixel_variance4x4_msa, 0),
                      make_tuple(2, 3, &vpx_sub_pixel_variance4x8_msa, 0),
                      make_tuple(3, 2, &vpx_sub_pixel_variance8x4_msa, 0),
                      make_tuple(3, 3, &vpx_sub_pixel_variance8x8_msa, 0),
                      make_tuple(3, 4, &vpx_sub_pixel_variance8x16_msa, 0),
                      make_tuple(4, 3, &vpx_sub_pixel_variance16x8_msa, 0),
                      make_tuple(4, 4, &vpx_sub_pixel_variance16x16_msa, 0),
                      make_tuple(4, 5, &vpx_sub_pixel_variance16x32_msa, 0),
                      make_tuple(5, 4, &vpx_sub_pixel_variance32x16_msa, 0),
                      make_tuple(5, 5, &vpx_sub_pixel_variance32x32_msa, 0),
                      make_tuple(5, 6, &vpx_sub_pixel_variance32x64_msa, 0),
                      make_tuple(6, 5, &vpx_sub_pixel_variance64x32_msa, 0),
                      make_tuple(6, 6, &vpx_sub_pixel_variance64x64_msa, 0)));

INSTANTIATE_TEST_CASE_P(
    MSA, VpxSubpelAvgVarianceTest,
    ::testing::Values(make_tuple(6, 6, &vpx_sub_pixel_avg_variance64x64_msa, 0),
                      make_tuple(6, 5, &vpx_sub_pixel_avg_variance64x32_msa, 0),
                      make_tuple(5, 6, &vpx_sub_pixel_avg_variance32x64_msa, 0),
                      make_tuple(5, 5, &vpx_sub_pixel_avg_variance32x32_msa, 0),
                      make_tuple(5, 4, &vpx_sub_pixel_avg_variance32x16_msa, 0),
                      make_tuple(4, 5, &vpx_sub_pixel_avg_variance16x32_msa, 0),
                      make_tuple(4, 4, &vpx_sub_pixel_avg_variance16x16_msa, 0),
                      make_tuple(4, 3, &vpx_sub_pixel_avg_variance16x8_msa, 0),
                      make_tuple(3, 4, &vpx_sub_pixel_avg_variance8x16_msa, 0),
                      make_tuple(3, 3, &vpx_sub_pixel_avg_variance8x8_msa, 0),
                      make_tuple(3, 2, &vpx_sub_pixel_avg_variance8x4_msa, 0),
                      make_tuple(2, 3, &vpx_sub_pixel_avg_variance4x8_msa, 0),
                      make_tuple(2, 2, &vpx_sub_pixel_avg_variance4x4_msa, 0)));