2 * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
17 #include "third_party/googletest/src/include/gtest/gtest.h"
19 #include "./vp9_rtcd.h"
20 #include "./vpx_dsp_rtcd.h"
21 #include "test/acm_random.h"
22 #include "test/clear_system_state.h"
23 #include "test/register_state_check.h"
24 #include "test/util.h"
25 #include "vp9/common/vp9_blockd.h"
26 #include "vp9/common/vp9_scan.h"
27 #include "vpx/vpx_integer.h"
28 #include "vpx_ports/vpx_timer.h"
30 using libvpx_test::ACMRandom;
// Function-pointer types for the transforms under test.
// Forward transform: spatial-domain input to transform coefficients.
34 typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
// Inverse transform without a bit-depth argument (8-bit-only paths).
35 typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
// Inverse transform that also takes a bit depth (declaration continues on a
// line not visible in this view).
36 typedef void (*InvTxfmWithBdFunc)(const tran_low_t *in, uint8_t *out,
// Adapts an InvTxfmFunc (which has no bit-depth parameter) to the
// InvTxfmWithBdFunc signature so both kinds of inverse transform can share
// one test path. The trailing 'bd' argument is presumably ignored by the
// forwarding call — body not visible in this view; confirm in full source.
39 template <InvTxfmFunc fn>
40 void wrapper(const tran_low_t *in, uint8_t *out, int stride, int bd) {
45 #if CONFIG_VP9_HIGHBITDEPTH
// Adapts a high-bit-depth inverse transform to the common test signature.
// CONVERT_TO_BYTEPTR reinterprets the uint8_t* output buffer as the 16-bit
// pixel pointer the highbd functions expect.
46 template <InvTxfmWithBdFunc fn>
47 void highbd_wrapper(const tran_low_t *in, uint8_t *out, int stride, int bd) {
48 fn(in, CONVERT_TO_BYTEPTR(out), stride, bd);
// Test-case parameter tuple:
//   forward transform, full inverse transform (reference), partial inverse
//   transform (under test), transform size, number of nonzero coefficients,
//   bit depth, and pixel size in bytes (1 = 8-bit, 2 = high bit depth).
52 typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmWithBdFunc, InvTxfmWithBdFunc,
53 TX_SIZE, int, int, int>
// Largest transform handled here is 32x32 = 1024 coefficients.
55 const int kMaxNumCoeffs = 1024;
// Number of random blocks exercised per test case.
56 const int kCountTestBlock = 1000;
58 // https://bugs.chromium.org/p/webm/issues/detail?id=1332
59 // The functions specified do not pass with INT16_MIN/MAX. They fail at the
60 // value specified, but pass when 1 is added/subtracted.
// Returns the largest single coefficient value the given inverse transform
// is expected to handle. Most functions support the full int16_t range; the
// SSSE3 8x8 variants matched below get a reduced limit (the reduced return
// value inside the 'if' is not visible in this view — see full source).
61 int16_t MaxSupportedCoeff(InvTxfmWithBdFunc a) {
62 #if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE && \
63 !CONFIG_VP9_HIGHBITDEPTH
64 if (a == &wrapper<vpx_idct8x8_64_add_ssse3> ||
65 a == &wrapper<vpx_idct8x8_12_add_ssse3>) {
// Default: full positive int16_t range.
71 return std::numeric_limits<int16_t>::max();
// Returns the smallest (most negative) single coefficient value the given
// inverse transform is expected to handle. Mirrors MaxSupportedCoeff: the
// SSSE3 8x8 variants matched below get a tightened limit (the tightened
// return value inside the 'if' is not visible in this view).
74 int16_t MinSupportedCoeff(InvTxfmWithBdFunc a) {
75 #if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE && \
76 !CONFIG_VP9_HIGHBITDEPTH
77 if (a == &wrapper<vpx_idct8x8_64_add_ssse3> ||
78 a == &wrapper<vpx_idct8x8_12_add_ssse3>) {
// Default: full negative int16_t range.
84 return std::numeric_limits<int16_t>::min();
// Fixture for comparing a "partial" inverse transform (optimized for blocks
// whose trailing coefficients are zero) against the full inverse transform.
// Buffers are allocated 16-byte aligned; the output stride is randomized.
87 class PartialIDctTest : public ::testing::TestWithParam<PartialInvTxfmParam> {
89 virtual ~PartialIDctTest() {}
// Unpacks the parameter tuple and allocates aligned input/output buffers.
90 virtual void SetUp() {
91 rnd_.Reset(ACMRandom::DeterministicSeed());
92 ftxfm_ = GET_PARAM(0);
93 full_itxfm_ = GET_PARAM(1);
94 partial_itxfm_ = GET_PARAM(2);
95 tx_size_ = GET_PARAM(3);
96 last_nonzero_ = GET_PARAM(4);
97 bit_depth_ = GET_PARAM(5);
98 pixel_size_ = GET_PARAM(6);
// Pixel mask for the configured bit depth, e.g. 0xff for 8 bits.
99 mask_ = (1 << bit_depth_) - 1;
// Map transform size enum to the block dimension in pixels.
102 case TX_4X4: size_ = 4; break;
103 case TX_8X8: size_ = 8; break;
104 case TX_16X16: size_ = 16; break;
105 case TX_32X32: size_ = 32; break;
106 default: FAIL() << "Wrong Size!"; break;
109 // Randomize stride_ to a value less than or equal to 1024
110 stride_ = rnd_(1024) + 1;
111 if (stride_ < size_) {
114 // Align stride_ to 16 if it's bigger than 16.
119 input_block_size_ = size_ * size_;
// Output buffers hold size_ rows of stride_ pixels each.
120 output_block_size_ = size_ * stride_;
122 input_block_ = reinterpret_cast<tran_low_t *>(
123 vpx_memalign(16, sizeof(*input_block_) * input_block_size_));
124 output_block_ = reinterpret_cast<uint8_t *>(
125 vpx_memalign(16, pixel_size_ * output_block_size_));
126 output_block_ref_ = reinterpret_cast<uint8_t *>(
127 vpx_memalign(16, pixel_size_ * output_block_size_));
// Releases the aligned buffers and resets the system state between tests.
130 virtual void TearDown() {
131 vpx_free(input_block_);
133 vpx_free(output_block_);
134 output_block_ = NULL;
135 vpx_free(output_block_ref_);
136 output_block_ref_ = NULL;
137 libvpx_test::ClearSystemState();
// (Method header not visible in this view.) Zeroes the coefficient buffer
// and fills both output buffers with identical random in-range pixels so
// the add-to-output behavior of the transforms can be compared.
141 memset(input_block_, 0, sizeof(*input_block_) * input_block_size_);
142 if (pixel_size_ == 1) {
143 for (int j = 0; j < output_block_size_; ++j) {
144 output_block_[j] = output_block_ref_[j] = rnd_.Rand16() & mask_;
// High-bit-depth buffers are addressed as 16-bit pixels.
147 ASSERT_EQ(2, pixel_size_);
148 uint16_t *const output = reinterpret_cast<uint16_t *>(output_block_);
149 uint16_t *const output_ref =
150 reinterpret_cast<uint16_t *>(output_block_ref_);
151 for (int j = 0; j < output_block_size_; ++j) {
152 output[j] = output_ref[j] = rnd_.Rand16() & mask_;
// (Method header not visible in this view.) Fills the first last_nonzero_
// scan positions with random coefficients whose total energy is bounded so
// the inverse transform cannot overflow.
158 const int max_coeff = 32766 / 4;
159 int max_energy_leftover = max_coeff * max_coeff;
160 for (int j = 0; j < last_nonzero_; ++j) {
// Scale each coefficient by the square root of the remaining energy budget.
161 int16_t coeff = static_cast<int16_t>(sqrt(1.0 * max_energy_leftover) *
162 (rnd_.Rand16() - 32768) / 65536);
163 max_energy_leftover -= coeff * coeff;
164 if (max_energy_leftover < 0) {
165 max_energy_leftover = 0;
// Place coefficients in zig-zag scan order for this transform size.
168 input_block_[vp9_default_scan_orders[tx_size_].scan[j]] = coeff;
// Aligned buffers: coefficients in, reference output, output under test.
175 tran_low_t *input_block_;
176 uint8_t *output_block_;
177 uint8_t *output_block_ref_;
181 int input_block_size_;
182 int output_block_size_;
// Reference (full) and optimized (partial) inverse transforms.
186 InvTxfmWithBdFunc full_itxfm_;
187 InvTxfmWithBdFunc partial_itxfm_;
// Forward-transforms extreme-valued input, quantizes the coefficients to
// multiples of 4 (the minimum allowed step size), then checks that the
// partial inverse transform matches the full one on that realistic input.
191 TEST_P(PartialIDctTest, RunQuantCheck) {
192 DECLARE_ALIGNED(16, int16_t, input_extreme_block[kMaxNumCoeffs]);
193 DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kMaxNumCoeffs]);
196 for (int i = 0; i < kCountTestBlock; ++i) {
197 // Initialize a test block with input range [-mask_, mask_].
// (Some branch lines are not visible in this view: the first iterations use
// all-max / all-min blocks, later ones use random +/-mask_ values.)
199 for (int k = 0; k < input_block_size_; ++k) {
200 input_extreme_block[k] = mask_;
203 for (int k = 0; k < input_block_size_; ++k) {
204 input_extreme_block[k] = -mask_;
207 for (int k = 0; k < input_block_size_; ++k) {
208 input_extreme_block[k] = rnd_.Rand8() % 2 ? mask_ : -mask_;
212 ftxfm_(input_extreme_block, output_ref_block, size_);
214 // quantization with minimum allowed step sizes
215 input_block_[0] = (output_ref_block[0] / 4) * 4;
216 for (int k = 1; k < last_nonzero_; ++k) {
217 const int pos = vp9_default_scan_orders[tx_size_].scan[k];
218 input_block_[pos] = (output_ref_block[pos] / 4) * 4;
// Run both transforms and require bit-exact agreement on the output buffer.
221 ASM_REGISTER_STATE_CHECK(
222 full_itxfm_(input_block_, output_block_ref_, stride_, bit_depth_));
223 ASM_REGISTER_STATE_CHECK(
224 partial_itxfm_(input_block_, output_block_, stride_, bit_depth_));
225 ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
226 pixel_size_ * output_block_size_))
227 << "Error: partial inverse transform produces different results";
// Checks that on random energy-bounded coefficient blocks (setup lines not
// visible in this view) the partial inverse transform produces bit-exact
// results versus the full inverse transform.
231 TEST_P(PartialIDctTest, ResultsMatch) {
232 for (int i = 0; i < kCountTestBlock; ++i) {
236 ASM_REGISTER_STATE_CHECK(
237 full_itxfm_(input_block_, output_block_ref_, stride_, bit_depth_));
238 ASM_REGISTER_STATE_CHECK(
239 partial_itxfm_(input_block_, output_block_, stride_, bit_depth_));
240 ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
241 pixel_size_ * output_block_size_))
242 << "Error: partial inverse transform produces different results";
// Verifies that the partial inverse transform adds its result into the
// (randomly pre-filled) output buffer exactly like the full transform does,
// using a constant coefficient value of 10 at every nonzero scan position.
246 TEST_P(PartialIDctTest, AddOutputBlock) {
247 for (int i = 0; i < kCountTestBlock; ++i) {
249 for (int j = 0; j < last_nonzero_; ++j) {
250 input_block_[vp9_default_scan_orders[tx_size_].scan[j]] = 10;
253 ASM_REGISTER_STATE_CHECK(
254 full_itxfm_(input_block_, output_block_ref_, stride_, bit_depth_));
255 ASM_REGISTER_STATE_CHECK(
256 partial_itxfm_(input_block_, output_block_, stride_, bit_depth_));
257 ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
258 pixel_size_ * output_block_size_))
259 << "Error: Transform results are not correctly added to output.";
// Stresses each nonzero scan position with a single extreme coefficient
// (both the max and min value the function under test supports, per
// Max/MinSupportedCoeff) and requires bit-exact agreement with the full
// inverse transform.
263 TEST_P(PartialIDctTest, SingleExtremeCoeff) {
264 const int16_t max_coeff = MaxSupportedCoeff(partial_itxfm_);
265 const int16_t min_coeff = MinSupportedCoeff(partial_itxfm_);
266 for (int i = 0; i < last_nonzero_; ++i) {
267 memset(input_block_, 0, sizeof(*input_block_) * input_block_size_);
268 // Run once for min and once for max.
269 for (int j = 0; j < 2; ++j) {
270 const int coeff = j ? min_coeff : max_coeff;
// Clear both output buffers so the comparison sees only this transform.
272 memset(output_block_, 0, pixel_size_ * output_block_size_);
273 memset(output_block_ref_, 0, pixel_size_ * output_block_size_);
274 input_block_[vp9_default_scan_orders[tx_size_].scan[i]] = coeff;
276 ASM_REGISTER_STATE_CHECK(
277 full_itxfm_(input_block_, output_block_ref_, stride_, bit_depth_));
278 ASM_REGISTER_STATE_CHECK(
279 partial_itxfm_(input_block_, output_block_, stride_, bit_depth_));
280 ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
281 pixel_size_ * output_block_size_))
282 << "Error: Fails with single coeff of " << coeff << " at " << i
// Benchmark (disabled by default): times the partial inverse transform over
// many iterations, prints the elapsed milliseconds, then still verifies the
// result matches the full transform. Only the partial transform loop is
// timed; the full-transform loop runs untimed to produce the reference.
288 TEST_P(PartialIDctTest, DISABLED_Speed) {
289 // Keep runtime stable with transform size.
290 const int kCountSpeedTestBlock = 500000000 / input_block_size_;
294 for (int i = 0; i < kCountSpeedTestBlock; ++i) {
295 ASM_REGISTER_STATE_CHECK(
296 full_itxfm_(input_block_, output_block_ref_, stride_, bit_depth_));
298 vpx_usec_timer timer;
299 vpx_usec_timer_start(&timer);
300 for (int i = 0; i < kCountSpeedTestBlock; ++i) {
301 partial_itxfm_(input_block_, output_block_, stride_, bit_depth_);
303 libvpx_test::ClearSystemState();
304 vpx_usec_timer_mark(&timer);
305 const int elapsed_time =
306 static_cast<int>(vpx_usec_timer_elapsed(&timer) / 1000);
307 printf("idct%dx%d_%d (bitdepth %d) time: %5d ms\n", size_, size_,
308 last_nonzero_, bit_depth_, elapsed_time);
// Correctness is still required even in the speed run.
310 ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
311 pixel_size_ * output_block_size_))
312 << "Error: partial inverse transform produces different results";
315 using std::tr1::make_tuple;
// C-reference test matrix: every transform size / coefficient count / bit
// depth combination, with the C full transform as reference. (Several
// make_tuple( opening lines are not visible in this view.)
317 const PartialInvTxfmParam c_partial_idct_tests[] = {
318 #if CONFIG_VP9_HIGHBITDEPTH
320 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
321 &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>, TX_32X32, 1024, 8, 2),
323 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
324 &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>, TX_32X32, 1024, 10, 2),
326 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
327 &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>, TX_32X32, 1024, 12, 2),
329 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
330 &highbd_wrapper<vpx_highbd_idct32x32_34_add_c>, TX_32X32, 34, 8, 2),
332 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
333 &highbd_wrapper<vpx_highbd_idct32x32_34_add_c>, TX_32X32, 34, 10, 2),
335 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
336 &highbd_wrapper<vpx_highbd_idct32x32_34_add_c>, TX_32X32, 34, 12, 2),
337 make_tuple(&vpx_highbd_fdct32x32_c,
338 &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
339 &highbd_wrapper<vpx_highbd_idct32x32_1_add_c>, TX_32X32, 1, 8, 2),
340 make_tuple(&vpx_highbd_fdct32x32_c,
341 &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
342 &highbd_wrapper<vpx_highbd_idct32x32_1_add_c>, TX_32X32, 1, 10, 2),
343 make_tuple(&vpx_highbd_fdct32x32_c,
344 &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
345 &highbd_wrapper<vpx_highbd_idct32x32_1_add_c>, TX_32X32, 1, 12, 2),
347 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
348 &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>, TX_16X16, 256, 8, 2),
350 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
351 &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>, TX_16X16, 256, 10, 2),
353 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
354 &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>, TX_16X16, 256, 12, 2),
356 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
357 &highbd_wrapper<vpx_highbd_idct16x16_38_add_c>, TX_16X16, 38, 8, 2),
359 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
360 &highbd_wrapper<vpx_highbd_idct16x16_38_add_c>, TX_16X16, 38, 10, 2),
362 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
363 &highbd_wrapper<vpx_highbd_idct16x16_38_add_c>, TX_16X16, 38, 12, 2),
365 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
366 &highbd_wrapper<vpx_highbd_idct16x16_10_add_c>, TX_16X16, 10, 8, 2),
368 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
369 &highbd_wrapper<vpx_highbd_idct16x16_10_add_c>, TX_16X16, 10, 10, 2),
371 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
372 &highbd_wrapper<vpx_highbd_idct16x16_10_add_c>, TX_16X16, 10, 12, 2),
373 make_tuple(&vpx_highbd_fdct16x16_c,
374 &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
375 &highbd_wrapper<vpx_highbd_idct16x16_1_add_c>, TX_16X16, 1, 8, 2),
376 make_tuple(&vpx_highbd_fdct16x16_c,
377 &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
378 &highbd_wrapper<vpx_highbd_idct16x16_1_add_c>, TX_16X16, 1, 10, 2),
379 make_tuple(&vpx_highbd_fdct16x16_c,
380 &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
381 &highbd_wrapper<vpx_highbd_idct16x16_1_add_c>, TX_16X16, 1, 12, 2),
382 make_tuple(&vpx_highbd_fdct8x8_c,
383 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
384 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>, TX_8X8, 64, 8, 2),
385 make_tuple(&vpx_highbd_fdct8x8_c,
386 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
387 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>, TX_8X8, 64, 10, 2),
388 make_tuple(&vpx_highbd_fdct8x8_c,
389 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
390 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>, TX_8X8, 64, 12, 2),
391 make_tuple(&vpx_highbd_fdct8x8_c,
392 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
393 &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>, TX_8X8, 12, 8, 2),
394 make_tuple(&vpx_highbd_fdct8x8_c,
395 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
396 &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>, TX_8X8, 12, 10, 2),
397 make_tuple(&vpx_highbd_fdct8x8_c,
398 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
399 &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>, TX_8X8, 12, 12, 2),
400 make_tuple(&vpx_highbd_fdct8x8_c,
401 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
402 &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>, TX_8X8, 1, 8, 2),
403 make_tuple(&vpx_highbd_fdct8x8_c,
404 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
405 &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>, TX_8X8, 1, 10, 2),
406 make_tuple(&vpx_highbd_fdct8x8_c,
407 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
408 &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>, TX_8X8, 1, 12, 2),
409 make_tuple(&vpx_highbd_fdct4x4_c,
410 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
411 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>, TX_4X4, 16, 8, 2),
412 make_tuple(&vpx_highbd_fdct4x4_c,
413 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
414 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>, TX_4X4, 16, 10, 2),
415 make_tuple(&vpx_highbd_fdct4x4_c,
416 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
417 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>, TX_4X4, 16, 12, 2),
418 make_tuple(&vpx_highbd_fdct4x4_c,
419 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
420 &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>, TX_4X4, 1, 8, 2),
421 make_tuple(&vpx_highbd_fdct4x4_c,
422 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
423 &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>, TX_4X4, 1, 10, 2),
424 make_tuple(&vpx_highbd_fdct4x4_c,
425 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
426 &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>, TX_4X4, 1, 12, 2),
427 #endif  // CONFIG_VP9_HIGHBITDEPTH
// 8-bit C reference cases (pixel size 1).
428 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
429 &wrapper<vpx_idct32x32_1024_add_c>, TX_32X32, 1024, 8, 1),
430 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
431 &wrapper<vpx_idct32x32_135_add_c>, TX_32X32, 135, 8, 1),
432 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
433 &wrapper<vpx_idct32x32_34_add_c>, TX_32X32, 34, 8, 1),
434 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
435 &wrapper<vpx_idct32x32_1_add_c>, TX_32X32, 1, 8, 1),
436 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
437 &wrapper<vpx_idct16x16_256_add_c>, TX_16X16, 256, 8, 1),
438 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
439 &wrapper<vpx_idct16x16_38_add_c>, TX_16X16, 38, 8, 1),
440 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
441 &wrapper<vpx_idct16x16_10_add_c>, TX_16X16, 10, 8, 1),
442 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
443 &wrapper<vpx_idct16x16_1_add_c>, TX_16X16, 1, 8, 1),
444 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
445 &wrapper<vpx_idct8x8_64_add_c>, TX_8X8, 64, 8, 1),
446 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
447 &wrapper<vpx_idct8x8_12_add_c>, TX_8X8, 12, 8, 1),
448 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
449 &wrapper<vpx_idct8x8_1_add_c>, TX_8X8, 1, 8, 1),
450 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
451 &wrapper<vpx_idct4x4_16_add_c>, TX_4X4, 16, 8, 1),
452 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
453 &wrapper<vpx_idct4x4_1_add_c>, TX_4X4, 1, 8, 1)
456 INSTANTIATE_TEST_CASE_P(C, PartialIDctTest,
457 ::testing::ValuesIn(c_partial_idct_tests));
459 #if HAVE_NEON && !CONFIG_EMULATE_HARDWARE
// NEON test matrix: NEON partial transforms validated against the C full
// transform (or the matching C partial transform for the 8x8_12 cases).
// (Several make_tuple( opening lines are not visible in this view.)
460 const PartialInvTxfmParam neon_partial_idct_tests[] = {
461 #if CONFIG_VP9_HIGHBITDEPTH
463 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
464 &highbd_wrapper<vpx_highbd_idct32x32_1_add_neon>, TX_32X32, 1, 8, 2),
466 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
467 &highbd_wrapper<vpx_highbd_idct32x32_1_add_neon>, TX_32X32, 1, 10, 2),
469 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
470 &highbd_wrapper<vpx_highbd_idct32x32_1_add_neon>, TX_32X32, 1, 12, 2),
472 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
473 &highbd_wrapper<vpx_highbd_idct16x16_256_add_neon>, TX_16X16, 256, 8, 2),
475 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
476 &highbd_wrapper<vpx_highbd_idct16x16_256_add_neon>, TX_16X16, 256, 10, 2),
478 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
479 &highbd_wrapper<vpx_highbd_idct16x16_256_add_neon>, TX_16X16, 256, 12, 2),
481 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
482 &highbd_wrapper<vpx_highbd_idct16x16_38_add_neon>, TX_16X16, 38, 8, 2),
484 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
485 &highbd_wrapper<vpx_highbd_idct16x16_38_add_neon>, TX_16X16, 38, 10, 2),
487 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
488 &highbd_wrapper<vpx_highbd_idct16x16_38_add_neon>, TX_16X16, 38, 12, 2),
490 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
491 &highbd_wrapper<vpx_highbd_idct16x16_1_add_neon>, TX_16X16, 1, 8, 2),
493 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
494 &highbd_wrapper<vpx_highbd_idct16x16_1_add_neon>, TX_16X16, 1, 10, 2),
496 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
497 &highbd_wrapper<vpx_highbd_idct16x16_1_add_neon>, TX_16X16, 1, 12, 2),
498 make_tuple(&vpx_highbd_fdct8x8_c,
499 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
500 &highbd_wrapper<vpx_highbd_idct8x8_64_add_neon>, TX_8X8, 64, 8, 2),
502 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
503 &highbd_wrapper<vpx_highbd_idct8x8_64_add_neon>, TX_8X8, 64, 10, 2),
505 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
506 &highbd_wrapper<vpx_highbd_idct8x8_64_add_neon>, TX_8X8, 64, 12, 2),
// Note: these 8x8_12 cases use the C *partial* transform as the reference.
507 make_tuple(&vpx_highbd_fdct8x8_c,
508 &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>,
509 &highbd_wrapper<vpx_highbd_idct8x8_12_add_neon>, TX_8X8, 12, 8, 2),
511 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>,
512 &highbd_wrapper<vpx_highbd_idct8x8_12_add_neon>, TX_8X8, 12, 10, 2),
514 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>,
515 &highbd_wrapper<vpx_highbd_idct8x8_12_add_neon>, TX_8X8, 12, 12, 2),
516 make_tuple(&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>,
517 &highbd_wrapper<vpx_highbd_idct8x8_1_add_neon>, TX_8X8, 1, 8, 2),
518 make_tuple(&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>,
519 &highbd_wrapper<vpx_highbd_idct8x8_1_add_neon>, TX_8X8, 1, 10, 2),
520 make_tuple(&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>,
521 &highbd_wrapper<vpx_highbd_idct8x8_1_add_neon>, TX_8X8, 1, 12, 2),
522 make_tuple(&vpx_highbd_fdct4x4_c,
523 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
524 &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 16, 8, 2),
526 &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
527 &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 16, 10, 2),
529 &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
530 &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 16, 12, 2),
531 make_tuple(&vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>,
532 &highbd_wrapper<vpx_highbd_idct4x4_1_add_neon>, TX_4X4, 1, 8, 2),
533 make_tuple(&vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>,
534 &highbd_wrapper<vpx_highbd_idct4x4_1_add_neon>, TX_4X4, 1, 10, 2),
535 make_tuple(&vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>,
536 &highbd_wrapper<vpx_highbd_idct4x4_1_add_neon>, TX_4X4, 1, 12, 2),
537 #endif  // CONFIG_VP9_HIGHBITDEPTH
// 8-bit NEON cases (pixel size 1).
538 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
539 &wrapper<vpx_idct32x32_1024_add_neon>, TX_32X32, 1024, 8, 1),
540 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
541 &wrapper<vpx_idct32x32_135_add_neon>, TX_32X32, 135, 8, 1),
542 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
543 &wrapper<vpx_idct32x32_34_add_neon>, TX_32X32, 34, 8, 1),
544 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
545 &wrapper<vpx_idct32x32_1_add_neon>, TX_32X32, 1, 8, 1),
546 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
547 &wrapper<vpx_idct16x16_256_add_neon>, TX_16X16, 256, 8, 1),
548 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
549 &wrapper<vpx_idct16x16_38_add_neon>, TX_16X16, 38, 8, 1),
550 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
551 &wrapper<vpx_idct16x16_10_add_neon>, TX_16X16, 10, 8, 1),
552 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
553 &wrapper<vpx_idct16x16_1_add_neon>, TX_16X16, 1, 8, 1),
554 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
555 &wrapper<vpx_idct8x8_64_add_neon>, TX_8X8, 64, 8, 1),
556 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
557 &wrapper<vpx_idct8x8_12_add_neon>, TX_8X8, 12, 8, 1),
558 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
559 &wrapper<vpx_idct8x8_1_add_neon>, TX_8X8, 1, 8, 1),
560 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
561 &wrapper<vpx_idct4x4_16_add_neon>, TX_4X4, 16, 8, 1),
562 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
563 &wrapper<vpx_idct4x4_1_add_neon>, TX_4X4, 1, 8, 1)
566 INSTANTIATE_TEST_CASE_P(NEON, PartialIDctTest,
567 ::testing::ValuesIn(neon_partial_idct_tests));
568 #endif  // HAVE_NEON && !CONFIG_EMULATE_HARDWARE
570 #if HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
571 // 32x32_135_ is implemented using the 1024 version.
// SSE2 test matrix validated against the C full transform. (Several
// make_tuple( opening lines are not visible in this view.)
572 const PartialInvTxfmParam sse2_partial_idct_tests[] = {
573 #if CONFIG_VP9_HIGHBITDEPTH
575 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
576 &highbd_wrapper<vpx_highbd_idct32x32_1_add_sse2>, TX_32X32, 1, 8, 2),
578 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
579 &highbd_wrapper<vpx_highbd_idct32x32_1_add_sse2>, TX_32X32, 1, 10, 2),
581 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
582 &highbd_wrapper<vpx_highbd_idct32x32_1_add_sse2>, TX_32X32, 1, 12, 2),
584 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
585 &highbd_wrapper<vpx_highbd_idct16x16_256_add_sse2>, TX_16X16, 256, 8, 2),
587 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
588 &highbd_wrapper<vpx_highbd_idct16x16_256_add_sse2>, TX_16X16, 256, 10, 2),
590 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
591 &highbd_wrapper<vpx_highbd_idct16x16_256_add_sse2>, TX_16X16, 256, 12, 2),
593 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
594 &highbd_wrapper<vpx_highbd_idct16x16_10_add_sse2>, TX_16X16, 10, 8, 2),
596 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
597 &highbd_wrapper<vpx_highbd_idct16x16_10_add_sse2>, TX_16X16, 10, 10, 2),
599 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
600 &highbd_wrapper<vpx_highbd_idct16x16_10_add_sse2>, TX_16X16, 10, 12, 2),
601 make_tuple(&vpx_highbd_fdct8x8_c,
602 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
603 &highbd_wrapper<vpx_highbd_idct8x8_64_add_sse2>, TX_8X8, 64, 8, 2),
605 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
606 &highbd_wrapper<vpx_highbd_idct8x8_64_add_sse2>, TX_8X8, 64, 10, 2),
608 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
609 &highbd_wrapper<vpx_highbd_idct8x8_64_add_sse2>, TX_8X8, 64, 12, 2),
610 make_tuple(&vpx_highbd_fdct8x8_c,
611 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
612 &highbd_wrapper<vpx_highbd_idct8x8_12_add_sse2>, TX_8X8, 12, 8, 2),
614 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
615 &highbd_wrapper<vpx_highbd_idct8x8_12_add_sse2>, TX_8X8, 12, 10, 2),
617 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
618 &highbd_wrapper<vpx_highbd_idct8x8_12_add_sse2>, TX_8X8, 12, 12, 2),
619 make_tuple(&vpx_highbd_fdct4x4_c,
620 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
621 &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse2>, TX_4X4, 16, 8, 2),
623 &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
624 &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse2>, TX_4X4, 16, 10, 2),
626 &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
627 &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse2>, TX_4X4, 16, 12, 2),
628 #endif  // CONFIG_VP9_HIGHBITDEPTH
// 8-bit SSE2 cases. The 135-coefficient case reuses the 1024 kernel (see
// comment above the array).
629 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
630 &wrapper<vpx_idct32x32_1024_add_sse2>, TX_32X32, 1024, 8, 1),
631 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
632 &wrapper<vpx_idct32x32_1024_add_sse2>, TX_32X32, 135, 8, 1),
633 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
634 &wrapper<vpx_idct32x32_34_add_sse2>, TX_32X32, 34, 8, 1),
635 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
636 &wrapper<vpx_idct32x32_1_add_sse2>, TX_32X32, 1, 8, 1),
637 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
638 &wrapper<vpx_idct16x16_256_add_sse2>, TX_16X16, 256, 8, 1),
639 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
640 &wrapper<vpx_idct16x16_10_add_sse2>, TX_16X16, 10, 8, 1),
641 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
642 &wrapper<vpx_idct16x16_1_add_sse2>, TX_16X16, 1, 8, 1),
643 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
644 &wrapper<vpx_idct8x8_64_add_sse2>, TX_8X8, 64, 8, 1),
645 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
646 &wrapper<vpx_idct8x8_12_add_sse2>, TX_8X8, 12, 8, 1),
647 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
648 &wrapper<vpx_idct8x8_1_add_sse2>, TX_8X8, 1, 8, 1),
649 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
650 &wrapper<vpx_idct4x4_16_add_sse2>, TX_4X4, 16, 8, 1),
651 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
652 &wrapper<vpx_idct4x4_1_add_sse2>, TX_4X4, 1, 8, 1)
655 INSTANTIATE_TEST_CASE_P(SSE2, PartialIDctTest,
656 ::testing::ValuesIn(sse2_partial_idct_tests));
658 #endif  // HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
660 #if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE && \
661     !CONFIG_VP9_HIGHBITDEPTH
// SSSE3 test matrix (x86-64, 8-bit only): 32x32 and 8x8 kernels validated
// against the C full transforms.
662 const PartialInvTxfmParam ssse3_partial_idct_tests[] = {
663 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
664 &wrapper<vpx_idct32x32_1024_add_ssse3>, TX_32X32, 1024, 8, 1),
665 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
666 &wrapper<vpx_idct32x32_135_add_ssse3>, TX_32X32, 135, 8, 1),
667 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
668 &wrapper<vpx_idct32x32_34_add_ssse3>, TX_32X32, 34, 8, 1),
669 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
670 &wrapper<vpx_idct8x8_64_add_ssse3>, TX_8X8, 64, 8, 1),
671 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
672 &wrapper<vpx_idct8x8_12_add_ssse3>, TX_8X8, 12, 8, 1)
675 INSTANTIATE_TEST_CASE_P(SSSE3, PartialIDctTest,
676 ::testing::ValuesIn(ssse3_partial_idct_tests));
677 #endif  // HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE
679 #if HAVE_DSPR2 && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH
// MIPS DSPr2 test matrix (8-bit only). The 135-coefficient 32x32 case reuses
// the 1024 kernel.
680 const PartialInvTxfmParam dspr2_partial_idct_tests[] = {
681 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
682 &wrapper<vpx_idct32x32_1024_add_dspr2>, TX_32X32, 1024, 8, 1),
683 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
684 &wrapper<vpx_idct32x32_1024_add_dspr2>, TX_32X32, 135, 8, 1),
685 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
686 &wrapper<vpx_idct32x32_34_add_dspr2>, TX_32X32, 34, 8, 1),
687 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
688 &wrapper<vpx_idct32x32_1_add_dspr2>, TX_32X32, 1, 8, 1),
689 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
690 &wrapper<vpx_idct16x16_256_add_dspr2>, TX_16X16, 256, 8, 1),
691 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
692 &wrapper<vpx_idct16x16_10_add_dspr2>, TX_16X16, 10, 8, 1),
693 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
694 &wrapper<vpx_idct16x16_1_add_dspr2>, TX_16X16, 1, 8, 1),
695 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
696 &wrapper<vpx_idct8x8_64_add_dspr2>, TX_8X8, 64, 8, 1),
697 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
698 &wrapper<vpx_idct8x8_12_add_dspr2>, TX_8X8, 12, 8, 1),
699 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
700 &wrapper<vpx_idct8x8_1_add_dspr2>, TX_8X8, 1, 8, 1),
701 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
702 &wrapper<vpx_idct4x4_16_add_dspr2>, TX_4X4, 16, 8, 1),
703 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
704 &wrapper<vpx_idct4x4_1_add_dspr2>, TX_4X4, 1, 8, 1)
707 INSTANTIATE_TEST_CASE_P(DSPR2, PartialIDctTest,
708 ::testing::ValuesIn(dspr2_partial_idct_tests));
709 #endif  // HAVE_DSPR2 && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH
711 #if HAVE_MSA && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH
712 // 32x32_135_ is implemented using the 1024 version.
// MIPS MSA test matrix (8-bit only), validated against the C full
// transforms.
713 const PartialInvTxfmParam msa_partial_idct_tests[] = {
714 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
715 &wrapper<vpx_idct32x32_1024_add_msa>, TX_32X32, 1024, 8, 1),
716 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
717 &wrapper<vpx_idct32x32_1024_add_msa>, TX_32X32, 135, 8, 1),
718 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
719 &wrapper<vpx_idct32x32_34_add_msa>, TX_32X32, 34, 8, 1),
720 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
721 &wrapper<vpx_idct32x32_1_add_msa>, TX_32X32, 1, 8, 1),
722 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
723 &wrapper<vpx_idct16x16_256_add_msa>, TX_16X16, 256, 8, 1),
724 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
725 &wrapper<vpx_idct16x16_10_add_msa>, TX_16X16, 10, 8, 1),
726 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
727 &wrapper<vpx_idct16x16_1_add_msa>, TX_16X16, 1, 8, 1),
728 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
729 &wrapper<vpx_idct8x8_64_add_msa>, TX_8X8, 64, 8, 1),
730 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
731 &wrapper<vpx_idct8x8_12_add_msa>, TX_8X8, 12, 8, 1),
732 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
733 &wrapper<vpx_idct8x8_1_add_msa>, TX_8X8, 1, 8, 1),
734 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
735 &wrapper<vpx_idct4x4_16_add_msa>, TX_4X4, 16, 8, 1),
736 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
737 &wrapper<vpx_idct4x4_1_add_msa>, TX_4X4, 1, 8, 1)
740 INSTANTIATE_TEST_CASE_P(MSA, PartialIDctTest,
741 ::testing::ValuesIn(msa_partial_idct_tests));
742 #endif  // HAVE_MSA && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH