2 * Copyright (c) 2013 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
17 #include "third_party/googletest/src/include/gtest/gtest.h"
19 #include "./vp9_rtcd.h"
20 #include "./vpx_dsp_rtcd.h"
21 #include "test/acm_random.h"
22 #include "test/clear_system_state.h"
23 #include "test/register_state_check.h"
24 #include "test/util.h"
25 #include "vp9/common/vp9_blockd.h"
26 #include "vp9/common/vp9_scan.h"
27 #include "vpx/vpx_integer.h"
28 #include "vpx_ports/vpx_timer.h"
30 using libvpx_test::ACMRandom;
// Forward transform: 16-bit residual in, transform coefficients out.
34 typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
// Inverse transform + add without an explicit bit depth (8-bit path).
35 typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
// Inverse transform + add that also takes a bit-depth argument; the
// signature's continuation line is not visible in this chunk.
36 typedef void (*InvTxfmWithBdFunc)(const tran_low_t *in, uint8_t *out,
// Adapts a plain InvTxfmFunc to the InvTxfmWithBdFunc signature so 8-bit
// functions can share the test harness. NOTE(review): the body is on lines
// not visible in this chunk — presumably it ignores |bd| and forwards to
// |fn|; confirm against the full file.
39 template <InvTxfmFunc fn>
40 void wrapper(const tran_low_t *in, uint8_t *out, int stride, int bd) {
45 #if CONFIG_VP9_HIGHBITDEPTH
// Adapts a high-bitdepth inverse transform: converts the byte output
// pointer to the 16-bit pixel representation |fn| expects before calling it.
46 template <InvTxfmWithBdFunc fn>
47 void highbd_wrapper(const tran_low_t *in, uint8_t *out, int stride, int bd) {
48 fn(in, CONVERT_TO_BYTEPTR(out), stride, bd);
// Test parameters, in GET_PARAM order (see SetUp below):
// <forward txfm, full inverse txfm, partial inverse txfm, tx size,
//  number of nonzero coefficients, bit depth, bytes per pixel>.
52 typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmWithBdFunc, InvTxfmWithBdFunc,
53 TX_SIZE, int, int, int>
// Largest coefficient block handled (32*32) and trials per test.
55 const int kMaxNumCoeffs = 1024;
56 const int kCountTestBlock = 1000;
58 // https://bugs.chromium.org/p/webm/issues/detail?id=1332
59 // The functions specified do not pass with INT16_MIN/MAX. They fail at the
60 // value specified, but pass when 1 is added/subtracted.
// Largest single coefficient value the given inverse transform tolerates.
// For the two SSSE3 8x8 variants a reduced cap is returned from the if
// branch (that return value is on lines not visible in this chunk).
61 int16_t MaxSupportedCoeff(InvTxfmWithBdFunc a) {
62 #if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE && \
63 !CONFIG_VP9_HIGHBITDEPTH
64 if (a == &wrapper<vpx_idct8x8_64_add_ssse3> ||
65 a == &wrapper<vpx_idct8x8_12_add_ssse3>) {
// Default: the full positive int16_t range is supported.
71 return std::numeric_limits<int16_t>::max();
// Smallest single coefficient value the given inverse transform tolerates;
// mirror of MaxSupportedCoeff above. The SSSE3 8x8 variants get a raised
// floor from the if branch (return value not visible in this chunk).
74 int16_t MinSupportedCoeff(InvTxfmWithBdFunc a) {
75 #if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE && \
76 !CONFIG_VP9_HIGHBITDEPTH
77 if (a == &wrapper<vpx_idct8x8_64_add_ssse3> ||
78 a == &wrapper<vpx_idct8x8_12_add_ssse3>) {
// Default: the full negative int16_t range is supported.
84 return std::numeric_limits<int16_t>::min();
// Value-parameterized fixture that verifies a partial (reduced-coefficient)
// inverse transform against the full inverse transform on identical inputs.
87 class PartialIDctTest : public ::testing::TestWithParam<PartialInvTxfmParam> {
89 virtual ~PartialIDctTest() {}
90 virtual void SetUp() {
// Fixed seed keeps every run reproducible.
91 rnd_.Reset(ACMRandom::DeterministicSeed());
92 ftxfm_ = GET_PARAM(0);
93 full_itxfm_ = GET_PARAM(1);
94 partial_itxfm_ = GET_PARAM(2);
95 tx_size_ = GET_PARAM(3);
96 last_nonzero_ = GET_PARAM(4);
97 bit_depth_ = GET_PARAM(5);
98 pixel_size_ = GET_PARAM(6);
// Maximum pixel value for the configured bit depth.
99 mask_ = (1 << bit_depth_) - 1;
// Map tx_size_ to its pixel dimension (the switch header is on a line
// not visible in this chunk).
102 case TX_4X4: size_ = 4; break;
103 case TX_8X8: size_ = 8; break;
104 case TX_16X16: size_ = 16; break;
105 case TX_32X32: size_ = 32; break;
106 default: FAIL() << "Wrong Size!"; break;
109 // Randomize stride_ to a value less than or equal to 1024
110 stride_ = rnd_(1024) + 1;
// Clamp/adjust small strides (the adjustment body is not visible here).
111 if (stride_ < size_) {
114 // Align stride_ to 16 if it's bigger than 16.
119 input_block_size_ = size_ * size_;
120 output_block_size_ = size_ * stride_;
// 16-byte-aligned buffers: one coefficient block and two pixel blocks
// (reference output from the full idct, test output from the partial one).
122 input_block_ = reinterpret_cast<tran_low_t *>(
123 vpx_memalign(16, sizeof(*input_block_) * input_block_size_));
124 output_block_ = reinterpret_cast<uint8_t *>(
125 vpx_memalign(16, pixel_size_ * output_block_size_));
126 output_block_ref_ = reinterpret_cast<uint8_t *>(
127 vpx_memalign(16, pixel_size_ * output_block_size_));
// Release the aligned buffers allocated in SetUp.
130 virtual void TearDown() {
131 vpx_free(input_block_);
133 vpx_free(output_block_);
134 output_block_ = NULL;
135 vpx_free(output_block_ref_);
136 output_block_ref_ = NULL;
137 libvpx_test::ClearSystemState();
// Zero the coefficients and seed both output buffers with identical
// random pixels so the add step starts from the same base.
141 memset(input_block_, 0, sizeof(*input_block_) * input_block_size_);
142 if (pixel_size_ == 1) {
143 for (int j = 0; j < output_block_size_; ++j) {
144 output_block_[j] = output_block_ref_[j] = rnd_.Rand16() & mask_;
// High-bitdepth pixels are stored two bytes each.
147 ASSERT_EQ(2, pixel_size_);
148 uint16_t *const output = reinterpret_cast<uint16_t *>(output_block_);
149 uint16_t *const output_ref =
150 reinterpret_cast<uint16_t *>(output_block_ref_);
151 for (int j = 0; j < output_block_size_; ++j) {
152 output[j] = output_ref[j] = rnd_.Rand16() & mask_;
// Fill the first last_nonzero_ scan positions with random coefficients
// whose cumulative energy stays under (32766/4)^2 so the idct cannot
// overflow its intermediate values.
158 const int max_coeff = 32766 / 4;
159 int max_energy_leftover = max_coeff * max_coeff;
160 for (int j = 0; j < last_nonzero_; ++j) {
161 int16_t coeff = static_cast<int16_t>(sqrt(1.0 * max_energy_leftover) *
162 (rnd_.Rand16() - 32768) / 65536);
163 max_energy_leftover -= coeff * coeff;
164 if (max_energy_leftover < 0) {
165 max_energy_leftover = 0;
// Place coefficients in zig-zag (default scan) order.
168 input_block_[vp9_default_scan_orders[tx_size_].scan[j]] = coeff;
// Buffers owned by the fixture (allocated in SetUp, freed in TearDown).
175 tran_low_t *input_block_;
176 uint8_t *output_block_;
177 uint8_t *output_block_ref_;
181 int input_block_size_;
182 int output_block_size_;
// Reference (full) and under-test (partial) inverse transforms.
186 InvTxfmWithBdFunc full_itxfm_;
187 InvTxfmWithBdFunc partial_itxfm_;
// Forward-transforms extreme (+/-mask_) inputs, quantizes the coefficients
// with the minimum step size, then checks that the partial idct matches the
// full idct on the result.
191 TEST_P(PartialIDctTest, RunQuantCheck) {
192 DECLARE_ALIGNED(16, int16_t, input_extreme_block[kMaxNumCoeffs]);
193 DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kMaxNumCoeffs]);
196 for (int i = 0; i < kCountTestBlock; ++i) {
197 // Initialize a test block with input range [-mask_, mask_].
// The three fills below are selected per iteration (the branch headers
// are on lines not visible in this chunk): all +mask_, all -mask_, or
// random signs.
199 for (int k = 0; k < input_block_size_; ++k) {
200 input_extreme_block[k] = mask_;
203 for (int k = 0; k < input_block_size_; ++k) {
204 input_extreme_block[k] = -mask_;
207 for (int k = 0; k < input_block_size_; ++k) {
208 input_extreme_block[k] = rnd_.Rand8() % 2 ? mask_ : -mask_;
212 ftxfm_(input_extreme_block, output_ref_block, size_);
214 // quantization with minimum allowed step sizes
215 input_block_[0] = (output_ref_block[0] / 4) * 4;
216 for (int k = 1; k < last_nonzero_; ++k) {
217 const int pos = vp9_default_scan_orders[tx_size_].scan[k];
218 input_block_[pos] = (output_ref_block[pos] / 4) * 4;
// Both transforms run under the register-state check; outputs must be
// byte-identical.
221 ASM_REGISTER_STATE_CHECK(
222 full_itxfm_(input_block_, output_block_ref_, stride_, bit_depth_));
223 ASM_REGISTER_STATE_CHECK(
224 partial_itxfm_(input_block_, output_block_, stride_, bit_depth_));
225 ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
226 pixel_size_ * output_block_size_))
227 << "Error: partial inverse transform produces different results";
// Random-coefficient comparison: over kCountTestBlock trials the partial
// idct must produce output byte-identical to the full idct. (The per-trial
// input/memory initialization calls are on lines not visible in this chunk.)
231 TEST_P(PartialIDctTest, ResultsMatch) {
232 for (int i = 0; i < kCountTestBlock; ++i) {
236 ASM_REGISTER_STATE_CHECK(
237 full_itxfm_(input_block_, output_block_ref_, stride_, bit_depth_));
238 ASM_REGISTER_STATE_CHECK(
239 partial_itxfm_(input_block_, output_block_, stride_, bit_depth_));
240 ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
241 pixel_size_ * output_block_size_))
242 << "Error: partial inverse transform produces different results";
// Verifies the "add" semantics: with constant coefficients (10) written to
// the first last_nonzero_ scan positions, both transforms must add the same
// values onto the (randomized, identical) output buffers.
246 TEST_P(PartialIDctTest, AddOutputBlock) {
247 for (int i = 0; i < kCountTestBlock; ++i) {
249 for (int j = 0; j < last_nonzero_; ++j) {
250 input_block_[vp9_default_scan_orders[tx_size_].scan[j]] = 10;
253 ASM_REGISTER_STATE_CHECK(
254 full_itxfm_(input_block_, output_block_ref_, stride_, bit_depth_));
255 ASM_REGISTER_STATE_CHECK(
256 partial_itxfm_(input_block_, output_block_, stride_, bit_depth_));
257 ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
258 pixel_size_ * output_block_size_))
259 << "Error: Transform results are not correctly added to output.";
// Places one extreme coefficient (the min or max the function under test
// supports, per Min/MaxSupportedCoeff above) at each scan position in turn
// and checks the partial idct against the full idct on zeroed outputs.
263 TEST_P(PartialIDctTest, SingleExtremeCoeff) {
264 const int16_t max_coeff = MaxSupportedCoeff(partial_itxfm_);
265 const int16_t min_coeff = MinSupportedCoeff(partial_itxfm_);
266 for (int i = 0; i < last_nonzero_; ++i) {
267 memset(input_block_, 0, sizeof(*input_block_) * input_block_size_);
268 // Run once for min and once for max.
269 for (int j = 0; j < 2; ++j) {
270 const int coeff = j ? min_coeff : max_coeff;
272 memset(output_block_, 0, pixel_size_ * output_block_size_);
273 memset(output_block_ref_, 0, pixel_size_ * output_block_size_);
274 input_block_[vp9_default_scan_orders[tx_size_].scan[i]] = coeff;
276 ASM_REGISTER_STATE_CHECK(
277 full_itxfm_(input_block_, output_block_ref_, stride_, bit_depth_));
278 ASM_REGISTER_STATE_CHECK(
279 partial_itxfm_(input_block_, output_block_, stride_, bit_depth_));
280 ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
281 pixel_size_ * output_block_size_))
282 << "Error: Fails with single coeff of " << coeff << " at " << i
// Benchmark (disabled by default): warms the reference output with the full
// idct, then times kCountSpeedTestBlock runs of the partial idct and prints
// the elapsed milliseconds. Correctness is still asserted at the end.
288 TEST_P(PartialIDctTest, DISABLED_Speed) {
289 // Keep runtime stable with transform size.
290 const int kCountSpeedTestBlock = 500000000 / input_block_size_;
294 for (int i = 0; i < kCountSpeedTestBlock; ++i) {
295 ASM_REGISTER_STATE_CHECK(
296 full_itxfm_(input_block_, output_block_ref_, stride_, bit_depth_));
298 vpx_usec_timer timer;
299 vpx_usec_timer_start(&timer);
// Timed loop: no register-state check here to avoid skewing the numbers.
300 for (int i = 0; i < kCountSpeedTestBlock; ++i) {
301 partial_itxfm_(input_block_, output_block_, stride_, bit_depth_);
303 libvpx_test::ClearSystemState();
304 vpx_usec_timer_mark(&timer);
305 const int elapsed_time =
306 static_cast<int>(vpx_usec_timer_elapsed(&timer) / 1000);
307 printf("idct%dx%d_%d (bitdepth %d) time: %5d ms\n", size_, size_,
308 last_nonzero_, bit_depth_, elapsed_time);
310 ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
311 pixel_size_ * output_block_size_))
312 << "Error: partial inverse transform produces different results";
315 using std::tr1::make_tuple;
// C-reference pairings: every entry checks a partial C idct against the
// full C idct for one <tx size, coeff count, bit depth, pixel size> combo.
// (Several make_tuple( opener lines are missing from this chunk's view.)
317 const PartialInvTxfmParam c_partial_idct_tests[] = {
318 #if CONFIG_VP9_HIGHBITDEPTH
320 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
321 &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>, TX_32X32, 1024, 8, 2),
323 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
324 &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>, TX_32X32, 1024, 10, 2),
326 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
327 &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>, TX_32X32, 1024, 12, 2),
329 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
330 &highbd_wrapper<vpx_highbd_idct32x32_34_add_c>, TX_32X32, 34, 8, 2),
332 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
333 &highbd_wrapper<vpx_highbd_idct32x32_34_add_c>, TX_32X32, 34, 10, 2),
335 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
336 &highbd_wrapper<vpx_highbd_idct32x32_34_add_c>, TX_32X32, 34, 12, 2),
337 make_tuple(&vpx_highbd_fdct32x32_c,
338 &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
339 &highbd_wrapper<vpx_highbd_idct32x32_1_add_c>, TX_32X32, 1, 8, 2),
340 make_tuple(&vpx_highbd_fdct32x32_c,
341 &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
342 &highbd_wrapper<vpx_highbd_idct32x32_1_add_c>, TX_32X32, 1, 10, 2),
343 make_tuple(&vpx_highbd_fdct32x32_c,
344 &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
345 &highbd_wrapper<vpx_highbd_idct32x32_1_add_c>, TX_32X32, 1, 12, 2),
347 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
348 &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>, TX_16X16, 256, 8, 2),
350 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
351 &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>, TX_16X16, 256, 10, 2),
353 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
354 &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>, TX_16X16, 256, 12, 2),
356 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
357 &highbd_wrapper<vpx_highbd_idct16x16_10_add_c>, TX_16X16, 10, 8, 2),
359 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
360 &highbd_wrapper<vpx_highbd_idct16x16_10_add_c>, TX_16X16, 10, 10, 2),
362 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
363 &highbd_wrapper<vpx_highbd_idct16x16_10_add_c>, TX_16X16, 10, 12, 2),
364 make_tuple(&vpx_highbd_fdct16x16_c,
365 &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
366 &highbd_wrapper<vpx_highbd_idct16x16_1_add_c>, TX_16X16, 1, 8, 2),
367 make_tuple(&vpx_highbd_fdct16x16_c,
368 &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
369 &highbd_wrapper<vpx_highbd_idct16x16_1_add_c>, TX_16X16, 1, 10, 2),
370 make_tuple(&vpx_highbd_fdct16x16_c,
371 &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
372 &highbd_wrapper<vpx_highbd_idct16x16_1_add_c>, TX_16X16, 1, 12, 2),
373 make_tuple(&vpx_highbd_fdct8x8_c,
374 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
375 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>, TX_8X8, 64, 8, 2),
376 make_tuple(&vpx_highbd_fdct8x8_c,
377 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
378 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>, TX_8X8, 64, 10, 2),
379 make_tuple(&vpx_highbd_fdct8x8_c,
380 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
381 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>, TX_8X8, 64, 12, 2),
382 make_tuple(&vpx_highbd_fdct8x8_c,
383 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
384 &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>, TX_8X8, 12, 8, 2),
385 make_tuple(&vpx_highbd_fdct8x8_c,
386 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
387 &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>, TX_8X8, 12, 10, 2),
388 make_tuple(&vpx_highbd_fdct8x8_c,
389 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
390 &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>, TX_8X8, 12, 12, 2),
391 make_tuple(&vpx_highbd_fdct8x8_c,
392 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
393 &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>, TX_8X8, 1, 8, 2),
394 make_tuple(&vpx_highbd_fdct8x8_c,
395 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
396 &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>, TX_8X8, 1, 10, 2),
397 make_tuple(&vpx_highbd_fdct8x8_c,
398 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
399 &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>, TX_8X8, 1, 12, 2),
400 make_tuple(&vpx_highbd_fdct4x4_c,
401 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
402 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>, TX_4X4, 16, 8, 2),
403 make_tuple(&vpx_highbd_fdct4x4_c,
404 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
405 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>, TX_4X4, 16, 10, 2),
406 make_tuple(&vpx_highbd_fdct4x4_c,
407 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
408 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>, TX_4X4, 16, 12, 2),
409 make_tuple(&vpx_highbd_fdct4x4_c,
410 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
411 &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>, TX_4X4, 1, 8, 2),
412 make_tuple(&vpx_highbd_fdct4x4_c,
413 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
414 &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>, TX_4X4, 1, 10, 2),
415 make_tuple(&vpx_highbd_fdct4x4_c,
416 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
417 &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>, TX_4X4, 1, 12, 2),
418 #endif  // CONFIG_VP9_HIGHBITDEPTH
419 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
420 &wrapper<vpx_idct32x32_1024_add_c>, TX_32X32, 1024, 8, 1),
421 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
422 &wrapper<vpx_idct32x32_135_add_c>, TX_32X32, 135, 8, 1),
423 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
424 &wrapper<vpx_idct32x32_34_add_c>, TX_32X32, 34, 8, 1),
425 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
426 &wrapper<vpx_idct32x32_1_add_c>, TX_32X32, 1, 8, 1),
427 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
428 &wrapper<vpx_idct16x16_256_add_c>, TX_16X16, 256, 8, 1),
429 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
430 &wrapper<vpx_idct16x16_38_add_c>, TX_16X16, 38, 8, 1),
431 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
432 &wrapper<vpx_idct16x16_10_add_c>, TX_16X16, 10, 8, 1),
433 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
434 &wrapper<vpx_idct16x16_1_add_c>, TX_16X16, 1, 8, 1),
435 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
436 &wrapper<vpx_idct8x8_64_add_c>, TX_8X8, 64, 8, 1),
437 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
438 &wrapper<vpx_idct8x8_12_add_c>, TX_8X8, 12, 8, 1),
439 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
440 &wrapper<vpx_idct8x8_1_add_c>, TX_8X8, 1, 8, 1),
441 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
442 &wrapper<vpx_idct4x4_16_add_c>, TX_4X4, 16, 8, 1),
443 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
444 &wrapper<vpx_idct4x4_1_add_c>, TX_4X4, 1, 8, 1)
447 INSTANTIATE_TEST_CASE_P(C, PartialIDctTest,
448 ::testing::ValuesIn(c_partial_idct_tests));
450 #if HAVE_NEON && !CONFIG_EMULATE_HARDWARE
// NEON pairings: each NEON partial idct is checked against the matching
// C full (or same-coefficient-count C) implementation.
451 const PartialInvTxfmParam neon_partial_idct_tests[] = {
452 #if CONFIG_VP9_HIGHBITDEPTH
453 make_tuple(&vpx_highbd_fdct8x8_c,
454 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
455 &highbd_wrapper<vpx_highbd_idct8x8_64_add_neon>, TX_8X8, 64, 8, 2),
457 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
458 &highbd_wrapper<vpx_highbd_idct8x8_64_add_neon>, TX_8X8, 64, 10, 2),
460 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
461 &highbd_wrapper<vpx_highbd_idct8x8_64_add_neon>, TX_8X8, 64, 12, 2),
462 make_tuple(&vpx_highbd_fdct8x8_c,
463 &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>,
464 &highbd_wrapper<vpx_highbd_idct8x8_12_add_neon>, TX_8X8, 12, 8, 2),
466 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>,
467 &highbd_wrapper<vpx_highbd_idct8x8_12_add_neon>, TX_8X8, 12, 10, 2),
469 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_12_add_c>,
470 &highbd_wrapper<vpx_highbd_idct8x8_12_add_neon>, TX_8X8, 12, 12, 2),
471 make_tuple(&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>,
472 &highbd_wrapper<vpx_highbd_idct8x8_1_add_neon>, TX_8X8, 1, 8, 2),
473 make_tuple(&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>,
474 &highbd_wrapper<vpx_highbd_idct8x8_1_add_neon>, TX_8X8, 1, 10, 2),
475 make_tuple(&vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_1_add_c>,
476 &highbd_wrapper<vpx_highbd_idct8x8_1_add_neon>, TX_8X8, 1, 12, 2),
477 make_tuple(&vpx_highbd_fdct4x4_c,
478 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
479 &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 16, 8, 2),
481 &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
482 &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 16, 10, 2),
484 &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
485 &highbd_wrapper<vpx_highbd_idct4x4_16_add_neon>, TX_4X4, 16, 12, 2),
486 make_tuple(&vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>,
487 &highbd_wrapper<vpx_highbd_idct4x4_1_add_neon>, TX_4X4, 1, 8, 2),
488 make_tuple(&vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>,
489 &highbd_wrapper<vpx_highbd_idct4x4_1_add_neon>, TX_4X4, 1, 10, 2),
490 make_tuple(&vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_1_add_c>,
491 &highbd_wrapper<vpx_highbd_idct4x4_1_add_neon>, TX_4X4, 1, 12, 2),
492 #endif  // CONFIG_VP9_HIGHBITDEPTH
493 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
494 &wrapper<vpx_idct32x32_1024_add_neon>, TX_32X32, 1024, 8, 1),
495 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
496 &wrapper<vpx_idct32x32_135_add_neon>, TX_32X32, 135, 8, 1),
497 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
498 &wrapper<vpx_idct32x32_34_add_neon>, TX_32X32, 34, 8, 1),
499 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
500 &wrapper<vpx_idct32x32_1_add_neon>, TX_32X32, 1, 8, 1),
501 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
502 &wrapper<vpx_idct16x16_256_add_neon>, TX_16X16, 256, 8, 1),
503 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
504 &wrapper<vpx_idct16x16_38_add_neon>, TX_16X16, 38, 8, 1),
505 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
506 &wrapper<vpx_idct16x16_10_add_neon>, TX_16X16, 10, 8, 1),
507 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
508 &wrapper<vpx_idct16x16_1_add_neon>, TX_16X16, 1, 8, 1),
509 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
510 &wrapper<vpx_idct8x8_64_add_neon>, TX_8X8, 64, 8, 1),
511 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
512 &wrapper<vpx_idct8x8_12_add_neon>, TX_8X8, 12, 8, 1),
513 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
514 &wrapper<vpx_idct8x8_1_add_neon>, TX_8X8, 1, 8, 1),
515 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
516 &wrapper<vpx_idct4x4_16_add_neon>, TX_4X4, 16, 8, 1),
517 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
518 &wrapper<vpx_idct4x4_1_add_neon>, TX_4X4, 1, 8, 1)
521 INSTANTIATE_TEST_CASE_P(NEON, PartialIDctTest,
522 ::testing::ValuesIn(neon_partial_idct_tests));
523 #endif  // HAVE_NEON && !CONFIG_EMULATE_HARDWARE
525 #if HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
526 // 32x32_135_ is implemented using the 1024 version.
// SSE2 pairings: each SSE2 partial idct is checked against the C reference.
527 const PartialInvTxfmParam sse2_partial_idct_tests[] = {
528 #if CONFIG_VP9_HIGHBITDEPTH
530 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
531 &highbd_wrapper<vpx_highbd_idct32x32_1_add_sse2>, TX_32X32, 1, 8, 2),
533 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
534 &highbd_wrapper<vpx_highbd_idct32x32_1_add_sse2>, TX_32X32, 1, 10, 2),
536 &vpx_highbd_fdct32x32_c, &highbd_wrapper<vpx_highbd_idct32x32_1024_add_c>,
537 &highbd_wrapper<vpx_highbd_idct32x32_1_add_sse2>, TX_32X32, 1, 12, 2),
539 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
540 &highbd_wrapper<vpx_highbd_idct16x16_256_add_sse2>, TX_16X16, 256, 8, 2),
542 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
543 &highbd_wrapper<vpx_highbd_idct16x16_256_add_sse2>, TX_16X16, 256, 10, 2),
545 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
546 &highbd_wrapper<vpx_highbd_idct16x16_256_add_sse2>, TX_16X16, 256, 12, 2),
548 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
549 &highbd_wrapper<vpx_highbd_idct16x16_10_add_sse2>, TX_16X16, 10, 8, 2),
551 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
552 &highbd_wrapper<vpx_highbd_idct16x16_10_add_sse2>, TX_16X16, 10, 10, 2),
554 &vpx_highbd_fdct16x16_c, &highbd_wrapper<vpx_highbd_idct16x16_256_add_c>,
555 &highbd_wrapper<vpx_highbd_idct16x16_10_add_sse2>, TX_16X16, 10, 12, 2),
556 make_tuple(&vpx_highbd_fdct8x8_c,
557 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
558 &highbd_wrapper<vpx_highbd_idct8x8_64_add_sse2>, TX_8X8, 64, 8, 2),
560 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
561 &highbd_wrapper<vpx_highbd_idct8x8_64_add_sse2>, TX_8X8, 64, 10, 2),
563 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
564 &highbd_wrapper<vpx_highbd_idct8x8_64_add_sse2>, TX_8X8, 64, 12, 2),
565 make_tuple(&vpx_highbd_fdct8x8_c,
566 &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
567 &highbd_wrapper<vpx_highbd_idct8x8_12_add_sse2>, TX_8X8, 12, 8, 2),
569 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
570 &highbd_wrapper<vpx_highbd_idct8x8_12_add_sse2>, TX_8X8, 12, 10, 2),
572 &vpx_highbd_fdct8x8_c, &highbd_wrapper<vpx_highbd_idct8x8_64_add_c>,
573 &highbd_wrapper<vpx_highbd_idct8x8_12_add_sse2>, TX_8X8, 12, 12, 2),
574 make_tuple(&vpx_highbd_fdct4x4_c,
575 &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
576 &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse2>, TX_4X4, 16, 8, 2),
578 &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
579 &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse2>, TX_4X4, 16, 10, 2),
581 &vpx_highbd_fdct4x4_c, &highbd_wrapper<vpx_highbd_idct4x4_16_add_c>,
582 &highbd_wrapper<vpx_highbd_idct4x4_16_add_sse2>, TX_4X4, 16, 12, 2),
583 #endif  // CONFIG_VP9_HIGHBITDEPTH
584 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
585 &wrapper<vpx_idct32x32_1024_add_sse2>, TX_32X32, 1024, 8, 1),
// 135-coefficient case routed through the 1024 SSE2 version (see comment
// at the top of this array).
586 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
587 &wrapper<vpx_idct32x32_1024_add_sse2>, TX_32X32, 135, 8, 1),
588 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
589 &wrapper<vpx_idct32x32_34_add_sse2>, TX_32X32, 34, 8, 1),
590 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
591 &wrapper<vpx_idct32x32_1_add_sse2>, TX_32X32, 1, 8, 1),
592 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
593 &wrapper<vpx_idct16x16_256_add_sse2>, TX_16X16, 256, 8, 1),
594 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
595 &wrapper<vpx_idct16x16_10_add_sse2>, TX_16X16, 10, 8, 1),
596 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
597 &wrapper<vpx_idct16x16_1_add_sse2>, TX_16X16, 1, 8, 1),
598 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
599 &wrapper<vpx_idct8x8_64_add_sse2>, TX_8X8, 64, 8, 1),
600 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
601 &wrapper<vpx_idct8x8_12_add_sse2>, TX_8X8, 12, 8, 1),
602 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
603 &wrapper<vpx_idct8x8_1_add_sse2>, TX_8X8, 1, 8, 1),
604 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
605 &wrapper<vpx_idct4x4_16_add_sse2>, TX_4X4, 16, 8, 1),
606 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
607 &wrapper<vpx_idct4x4_1_add_sse2>, TX_4X4, 1, 8, 1)
610 INSTANTIATE_TEST_CASE_P(SSE2, PartialIDctTest,
611 ::testing::ValuesIn(sse2_partial_idct_tests));
613 #endif  // HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
615 #if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE && \
616 !CONFIG_VP9_HIGHBITDEPTH
// SSSE3 pairings (x86-64, 8-bit only): the 8x8 variants here are the ones
// given reduced coefficient bounds by Min/MaxSupportedCoeff above.
617 const PartialInvTxfmParam ssse3_partial_idct_tests[] = {
618 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
619 &wrapper<vpx_idct32x32_1024_add_ssse3>, TX_32X32, 1024, 8, 1),
620 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
621 &wrapper<vpx_idct32x32_135_add_ssse3>, TX_32X32, 135, 8, 1),
622 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
623 &wrapper<vpx_idct32x32_34_add_ssse3>, TX_32X32, 34, 8, 1),
624 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
625 &wrapper<vpx_idct8x8_64_add_ssse3>, TX_8X8, 64, 8, 1),
626 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
627 &wrapper<vpx_idct8x8_12_add_ssse3>, TX_8X8, 12, 8, 1)
630 INSTANTIATE_TEST_CASE_P(SSSE3, PartialIDctTest,
631 ::testing::ValuesIn(ssse3_partial_idct_tests));
632 #endif  // HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE
634 #if HAVE_DSPR2 && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH
// MIPS DSPr2 pairings (8-bit only); the 135-coefficient 32x32 case is
// routed through the 1024 version.
635 const PartialInvTxfmParam dspr2_partial_idct_tests[] = {
636 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
637 &wrapper<vpx_idct32x32_1024_add_dspr2>, TX_32X32, 1024, 8, 1),
638 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
639 &wrapper<vpx_idct32x32_1024_add_dspr2>, TX_32X32, 135, 8, 1),
640 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
641 &wrapper<vpx_idct32x32_34_add_dspr2>, TX_32X32, 34, 8, 1),
642 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
643 &wrapper<vpx_idct32x32_1_add_dspr2>, TX_32X32, 1, 8, 1),
644 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
645 &wrapper<vpx_idct16x16_256_add_dspr2>, TX_16X16, 256, 8, 1),
646 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
647 &wrapper<vpx_idct16x16_10_add_dspr2>, TX_16X16, 10, 8, 1),
648 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
649 &wrapper<vpx_idct16x16_1_add_dspr2>, TX_16X16, 1, 8, 1),
650 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
651 &wrapper<vpx_idct8x8_64_add_dspr2>, TX_8X8, 64, 8, 1),
652 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
653 &wrapper<vpx_idct8x8_12_add_dspr2>, TX_8X8, 12, 8, 1),
654 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
655 &wrapper<vpx_idct8x8_1_add_dspr2>, TX_8X8, 1, 8, 1),
656 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
657 &wrapper<vpx_idct4x4_16_add_dspr2>, TX_4X4, 16, 8, 1),
658 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
659 &wrapper<vpx_idct4x4_1_add_dspr2>, TX_4X4, 1, 8, 1)
662 INSTANTIATE_TEST_CASE_P(DSPR2, PartialIDctTest,
663 ::testing::ValuesIn(dspr2_partial_idct_tests));
664 #endif  // HAVE_DSPR2 && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH
666 #if HAVE_MSA && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH
667 // 32x32_135_ is implemented using the 1024 version.
// MIPS MSA pairings (8-bit only).
668 const PartialInvTxfmParam msa_partial_idct_tests[] = {
669 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
670 &wrapper<vpx_idct32x32_1024_add_msa>, TX_32X32, 1024, 8, 1),
671 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
672 &wrapper<vpx_idct32x32_1024_add_msa>, TX_32X32, 135, 8, 1),
673 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
674 &wrapper<vpx_idct32x32_34_add_msa>, TX_32X32, 34, 8, 1),
675 make_tuple(&vpx_fdct32x32_c, &wrapper<vpx_idct32x32_1024_add_c>,
676 &wrapper<vpx_idct32x32_1_add_msa>, TX_32X32, 1, 8, 1),
677 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
678 &wrapper<vpx_idct16x16_256_add_msa>, TX_16X16, 256, 8, 1),
679 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
680 &wrapper<vpx_idct16x16_10_add_msa>, TX_16X16, 10, 8, 1),
681 make_tuple(&vpx_fdct16x16_c, &wrapper<vpx_idct16x16_256_add_c>,
682 &wrapper<vpx_idct16x16_1_add_msa>, TX_16X16, 1, 8, 1),
683 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
684 &wrapper<vpx_idct8x8_64_add_msa>, TX_8X8, 64, 8, 1),
685 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
686 &wrapper<vpx_idct8x8_12_add_msa>, TX_8X8, 12, 8, 1),
687 make_tuple(&vpx_fdct8x8_c, &wrapper<vpx_idct8x8_64_add_c>,
688 &wrapper<vpx_idct8x8_1_add_msa>, TX_8X8, 1, 8, 1),
689 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
690 &wrapper<vpx_idct4x4_16_add_msa>, TX_4X4, 16, 8, 1),
691 make_tuple(&vpx_fdct4x4_c, &wrapper<vpx_idct4x4_16_add_c>,
692 &wrapper<vpx_idct4x4_1_add_msa>, TX_4X4, 1, 8, 1)
695 INSTANTIATE_TEST_CASE_P(MSA, PartialIDctTest,
696 ::testing::ValuesIn(msa_partial_idct_tests));
697 #endif  // HAVE_MSA && !CONFIG_EMULATE_HARDWARE && !CONFIG_VP9_HIGHBITDEPTH