From: Linfeng Zhang
Date: Fri, 4 Nov 2016 18:24:47 +0000 (-0700)
Subject: Update partial_idct_test.cc to support high bitdepth
X-Git-Tag: v1.6.1~68^2~3
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=d479c9653efcfb885df3aad3dbd831f23b86c1e2;p=libvpx

Update partial_idct_test.cc to support high bitdepth

BUG=webm:1301

Change-Id: Ieedadee221ce539e39bf806c41331f749f891a3c
---

diff --git a/test/partial_idct_test.cc b/test/partial_idct_test.cc
index 2512366ed..5971273b5 100644
--- a/test/partial_idct_test.cc
+++ b/test/partial_idct_test.cc
@@ -29,11 +29,23 @@ using libvpx_test::ACMRandom;
 
 namespace {
+
 typedef void (*FwdTxfmFunc)(const int16_t *in, tran_low_t *out, int stride);
+
+#if CONFIG_VP9_HIGHBITDEPTH
+typedef uint16_t Pixel;
+typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride,
+                            int bd);
+#else  // !CONFIG_VP9_HIGHBITDEPTH
+typedef uint8_t Pixel;
 typedef void (*InvTxfmFunc)(const tran_low_t *in, uint8_t *out, int stride);
-typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, InvTxfmFunc, TX_SIZE, int>
+#endif  // CONFIG_VP9_HIGHBITDEPTH
+
+typedef std::tr1::tuple<FwdTxfmFunc, InvTxfmFunc, InvTxfmFunc, TX_SIZE, int,
+                        int>
     PartialInvTxfmParam;
 const int kMaxNumCoeffs = 1024;
+const int kCountTestBlock = 1000;
 
 // https://bugs.chromium.org/p/webm/issues/detail?id=1332
 // The functions specified do not pass with INT16_MIN/MAX. They fail at the
@@ -57,7 +69,7 @@ int16_t MinSupportedCoeff(InvTxfmFunc a) {
   if (a == vpx_idct8x8_64_add_ssse3 || a == vpx_idct8x8_12_add_ssse3) {
     return -23625 + 1;
   }
-#elif HAVE_NEON
+#elif HAVE_NEON && !CONFIG_VP9_HIGHBITDEPTH
   if (a == vpx_idct4x4_16_add_neon) {
     return std::numeric_limits<int16_t>::min() + 1;
   }
@@ -70,11 +82,14 @@ class PartialIDctTest : public ::testing::TestWithParam<PartialInvTxfmParam> {
  public:
   virtual ~PartialIDctTest() {}
   virtual void SetUp() {
+    ACMRandom rnd(ACMRandom::DeterministicSeed());
     ftxfm_ = GET_PARAM(0);
     full_itxfm_ = GET_PARAM(1);
     partial_itxfm_ = GET_PARAM(2);
     tx_size_ = GET_PARAM(3);
     last_nonzero_ = GET_PARAM(4);
+    bit_depth_ = GET_PARAM(5);
+    mask_ = (1 << bit_depth_) - 1;
     switch (tx_size_) {
       case TX_4X4: size_ = 4; break;
@@ -83,14 +98,26 @@ class PartialIDctTest : public ::testing::TestWithParam<PartialInvTxfmParam> {
       case TX_32X32: size_ = 32; break;
       default: FAIL() << "Wrong Size!"; break;
     }
-    block_size_ = size_ * size_;
+
+    // Randomize stride_ to a value less than or equal to 1024
+    stride_ = rnd(1024) + 1;
+    if (stride_ < size_) {
+      stride_ = size_;
+    }
+    // Align stride_ to 16 if it's bigger than 16.
+    if (stride_ > 16) {
+      stride_ &= ~15;
+    }
+
+    input_block_size_ = size_ * size_;
+    output_block_size_ = size_ * stride_;
     input_block_ = reinterpret_cast<tran_low_t *>(
-        vpx_memalign(16, sizeof(*input_block_) * block_size_));
-    output_block_ = reinterpret_cast<uint8_t *>(
-        vpx_memalign(16, sizeof(*output_block_) * block_size_));
-    output_block_ref_ = reinterpret_cast<uint8_t *>(
-        vpx_memalign(16, sizeof(*output_block_ref_) * block_size_));
+        vpx_memalign(16, sizeof(*input_block_) * input_block_size_));
+    output_block_ = reinterpret_cast<Pixel *>(
+        vpx_memalign(16, sizeof(*output_block_) * output_block_size_));
+    output_block_ref_ = reinterpret_cast<Pixel *>(
+        vpx_memalign(16, sizeof(*output_block_ref_) * output_block_size_));
   }
 
   virtual void TearDown() {
@@ -103,14 +130,26 @@ class PartialIDctTest : public ::testing::TestWithParam<PartialInvTxfmParam> {
     libvpx_test::ClearSystemState();
   }
 
+  void Exec(InvTxfmFunc func, void *out) {
+#if CONFIG_VP9_HIGHBITDEPTH
+    func(input_block_, CONVERT_TO_BYTEPTR(out), stride_, bit_depth_);
+#else
+    func(input_block_, reinterpret_cast<uint8_t *>(out), stride_);
+#endif
+  }
+
 protected:
   int last_nonzero_;
   TX_SIZE tx_size_;
   tran_low_t *input_block_;
-  uint8_t *output_block_;
-  uint8_t *output_block_ref_;
+  Pixel *output_block_;
+  Pixel *output_block_ref_;
   int size_;
-  int block_size_;
+  int stride_;
+  int input_block_size_;
+  int output_block_size_;
+  int bit_depth_;
+  int mask_;
   FwdTxfmFunc ftxfm_;
   InvTxfmFunc full_itxfm_;
   InvTxfmFunc partial_itxfm_;
@@ -118,29 +157,31 @@ TEST_P(PartialIDctTest, RunQuantCheck) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
-
-  const int count_test_block = 1000;
-
   DECLARE_ALIGNED(16, int16_t, input_extreme_block[kMaxNumCoeffs]);
   DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kMaxNumCoeffs]);
 
-  for (int i = 0; i < count_test_block; ++i) {
+  for (int i = 0; i < kCountTestBlock; ++i) {
     // clear out destination buffer
-    memset(input_block_, 0, sizeof(*input_block_) * block_size_);
-    memset(output_block_, 0, sizeof(*output_block_) * block_size_);
-    memset(output_block_ref_, 0, sizeof(*output_block_ref_) * block_size_);
+    memset(input_block_, 0, sizeof(*input_block_) * input_block_size_);
+    for (int j = 0; j < output_block_size_; ++j) {
+      output_block_[j] = output_block_ref_[j] = rnd.Rand16() & mask_;
+    }
 
     ACMRandom rnd(ACMRandom::DeterministicSeed());
 
-    for (int i = 0; i < count_test_block; ++i) {
-      // Initialize a test block with input range [-255, 255].
-      if (i == 0) {
-        for (int j = 0; j < block_size_; ++j) input_extreme_block[j] = 255;
-      } else if (i == 1) {
-        for (int j = 0; j < block_size_; ++j) input_extreme_block[j] = -255;
+    for (int j = 0; j < kCountTestBlock; ++j) {
+      // Initialize a test block with input range [-mask_, mask_].
+      if (j == 0) {
+        for (int k = 0; k < input_block_size_; ++k) {
+          input_extreme_block[k] = mask_;
+        }
+      } else if (j == 1) {
+        for (int k = 0; k < input_block_size_; ++k) {
+          input_extreme_block[k] = -mask_;
+        }
       } else {
-        for (int j = 0; j < block_size_; ++j) {
-          input_extreme_block[j] = rnd.Rand8() % 2 ? 255 : -255;
+        for (int k = 0; k < input_block_size_; ++k) {
+          input_extreme_block[k] = rnd.Rand8() % 2 ? mask_ : -mask_;
         }
       }
 
@@ -148,32 +189,29 @@ TEST_P(PartialIDctTest, RunQuantCheck) {
 
       // quantization with maximum allowed step sizes
       input_block_[0] = (output_ref_block[0] / 1336) * 1336;
-      for (int j = 1; j < last_nonzero_; ++j) {
-        input_block_[vp9_default_scan_orders[tx_size_].scan[j]] =
-            (output_ref_block[j] / 1828) * 1828;
+      for (int k = 1; k < last_nonzero_; ++k) {
+        input_block_[vp9_default_scan_orders[tx_size_].scan[k]] =
+            (output_ref_block[k] / 1828) * 1828;
       }
     }
 
-    ASM_REGISTER_STATE_CHECK(
-        full_itxfm_(input_block_, output_block_ref_, size_));
-    ASM_REGISTER_STATE_CHECK(
-        partial_itxfm_(input_block_, output_block_, size_));
-
+    ASM_REGISTER_STATE_CHECK(Exec(full_itxfm_, output_block_ref_));
+    ASM_REGISTER_STATE_CHECK(Exec(partial_itxfm_, output_block_));
     ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
-                        sizeof(*output_block_) * block_size_))
+                        sizeof(*output_block_) * output_block_size_))
         << "Error: partial inverse transform produces different results";
   }
 }
 
 TEST_P(PartialIDctTest, ResultsMatch) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
-  const int count_test_block = 1000;
   const int max_coeff = 32766 / 4;
-  for (int i = 0; i < count_test_block; ++i) {
+  for (int i = 0; i < kCountTestBlock; ++i) {
     // clear out destination buffer
-    memset(input_block_, 0, sizeof(*input_block_) * block_size_);
-    memset(output_block_, 0, sizeof(*output_block_) * block_size_);
-    memset(output_block_ref_, 0, sizeof(*output_block_ref_) * block_size_);
+    memset(input_block_, 0, sizeof(*input_block_) * input_block_size_);
+    for (int j = 0; j < output_block_size_; ++j) {
+      output_block_[j] = output_block_ref_[j] = rnd.Rand16() & mask_;
+    }
     int max_energy_leftover = max_coeff * max_coeff;
     for (int j = 0; j < last_nonzero_; ++j) {
       int16_t coeff = static_cast<int16_t>(sqrt(1.0 * max_energy_leftover) *
@@ -186,37 +224,32 @@ TEST_P(PartialIDctTest, ResultsMatch) {
       input_block_[vp9_default_scan_orders[tx_size_].scan[j]] = coeff;
     }
 
-    ASM_REGISTER_STATE_CHECK(
-        full_itxfm_(input_block_, output_block_ref_, size_));
-    ASM_REGISTER_STATE_CHECK(
-        partial_itxfm_(input_block_, output_block_, size_));
+    ASM_REGISTER_STATE_CHECK(Exec(full_itxfm_, output_block_ref_));
+    ASM_REGISTER_STATE_CHECK(Exec(partial_itxfm_, output_block_));
     ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
-                        sizeof(*output_block_) * block_size_))
+                        sizeof(*output_block_) * output_block_size_))
         << "Error: partial inverse transform produces different results";
   }
 }
 
 TEST_P(PartialIDctTest, AddOutputBlock) {
   ACMRandom rnd(ACMRandom::DeterministicSeed());
-  const int count_test_block = 10;
-  for (int i = 0; i < count_test_block; ++i) {
-    memset(input_block_, 0, sizeof(*input_block_) * block_size_);
+  for (int i = 0; i < kCountTestBlock; ++i) {
+    memset(input_block_, 0, sizeof(*input_block_) * input_block_size_);
     for (int j = 0; j < last_nonzero_; ++j) {
       input_block_[vp9_default_scan_orders[tx_size_].scan[j]] = 10;
     }
 
-    for (int j = 0; j < block_size_; ++j) {
-      output_block_[j] = output_block_ref_[j] = rnd.Rand8();
+    for (int j = 0; j < output_block_size_; ++j) {
+      output_block_[j] = output_block_ref_[j] = rnd.Rand16() & mask_;
     }
 
-    ASM_REGISTER_STATE_CHECK(
-        full_itxfm_(input_block_, output_block_ref_, size_));
-    ASM_REGISTER_STATE_CHECK(
-        partial_itxfm_(input_block_, output_block_, size_));
+    ASM_REGISTER_STATE_CHECK(Exec(full_itxfm_, output_block_ref_));
+    ASM_REGISTER_STATE_CHECK(Exec(partial_itxfm_, output_block_));
     ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
-                        sizeof(*output_block_) * block_size_))
+                        sizeof(*output_block_) * output_block_size_))
        << "Error: Transform results are not correctly added to output.";
   }
 }
 
@@ -226,181 +259,282 @@ TEST_P(PartialIDctTest, SingleExtremeCoeff) {
   const int16_t max_coeff = MaxSupportedCoeff(partial_itxfm_);
   const int16_t min_coeff = MinSupportedCoeff(partial_itxfm_);
   for (int i = 0; i < last_nonzero_; ++i) {
-    memset(input_block_, 0, sizeof(*input_block_) * block_size_);
+    memset(input_block_, 0, sizeof(*input_block_) * input_block_size_);
     // Run once for min and once for max.
     for (int j = 0; j < 2; ++j) {
       const int coeff = j ? min_coeff : max_coeff;
 
-      memset(output_block_, 0, sizeof(*output_block_) * block_size_);
-      memset(output_block_ref_, 0, sizeof(*output_block_ref_) * block_size_);
+      memset(output_block_, 0, sizeof(*output_block_) * output_block_size_);
+      memset(output_block_ref_, 0,
+             sizeof(*output_block_ref_) * output_block_size_);
       input_block_[vp9_default_scan_orders[tx_size_].scan[i]] = coeff;
 
-      ASM_REGISTER_STATE_CHECK(
-          full_itxfm_(input_block_, output_block_ref_, size_));
-      ASM_REGISTER_STATE_CHECK(
-          partial_itxfm_(input_block_, output_block_, size_));
+      ASM_REGISTER_STATE_CHECK(Exec(full_itxfm_, output_block_ref_));
+      ASM_REGISTER_STATE_CHECK(Exec(partial_itxfm_, output_block_));
       ASSERT_EQ(0, memcmp(output_block_ref_, output_block_,
-                          sizeof(*output_block_) * block_size_))
+                          sizeof(*output_block_) * output_block_size_))
           << "Error: Fails with single coeff of " << coeff << " at " << i
          << ".";
     }
   }
 }
 
+
 using std::tr1::make_tuple;
+#if CONFIG_VP9_HIGHBITDEPTH
+
+INSTANTIATE_TEST_CASE_P(
+    C, PartialIDctTest,
+    ::testing::Values(
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1024_add_c, TX_32X32, 1024, 8),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1024_add_c, TX_32X32, 1024, 10),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1024_add_c, TX_32X32, 1024, 12),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_34_add_c, TX_32X32, 34, 8),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_34_add_c, TX_32X32, 34, 10),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_34_add_c, TX_32X32, 34, 12),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1_add_c, TX_32X32, 1, 8),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1_add_c, TX_32X32, 1, 10),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1_add_c, TX_32X32, 1, 12),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_256_add_c, TX_16X16, 256, 8),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_256_add_c, TX_16X16, 256, 10),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_256_add_c, TX_16X16, 256, 12),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_10_add_c, TX_16X16, 10, 8),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_10_add_c, TX_16X16, 10, 10),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_10_add_c, TX_16X16, 10, 12),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_1_add_c, TX_16X16, 1, 8),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_1_add_c, TX_16X16, 1, 10),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_1_add_c, TX_16X16, 1, 12),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_64_add_c, TX_8X8, 64, 8),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_64_add_c, TX_8X8, 64, 10),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_64_add_c, TX_8X8, 64, 12),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_12_add_c, TX_8X8, 12, 8),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_12_add_c, TX_8X8, 12, 10),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_12_add_c, TX_8X8, 12, 12),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_1_add_c, TX_8X8, 1, 8),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_1_add_c, TX_8X8, 1, 10),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_1_add_c, TX_8X8, 1, 12),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_16_add_c, TX_4X4, 16, 8),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_16_add_c, TX_4X4, 16, 10),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_16_add_c, TX_4X4, 16, 12),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_1_add_c, TX_4X4, 1, 8),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_1_add_c, TX_4X4, 1, 10),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_1_add_c, TX_4X4, 1, 12)));
+
+#if HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+    SSE2, PartialIDctTest,
+    ::testing::Values(
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1_add_sse2, TX_32X32, 1, 8),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1_add_sse2, TX_32X32, 1, 10),
+        make_tuple(&vpx_highbd_fdct32x32_c, &vpx_highbd_idct32x32_1024_add_c,
+                   &vpx_highbd_idct32x32_1_add_sse2, TX_32X32, 1, 12),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_256_add_sse2, TX_16X16, 256, 8),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_256_add_sse2, TX_16X16, 256, 10),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_256_add_sse2, TX_16X16, 256, 12),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_10_add_sse2, TX_16X16, 10, 8),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_10_add_sse2, TX_16X16, 10, 10),
+        make_tuple(&vpx_highbd_fdct16x16_c, &vpx_highbd_idct16x16_256_add_c,
+                   &vpx_highbd_idct16x16_10_add_sse2, TX_16X16, 10, 12),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_64_add_sse2, TX_8X8, 64, 8),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_64_add_sse2, TX_8X8, 64, 10),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_64_add_sse2, TX_8X8, 64, 12),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_12_add_sse2, TX_8X8, 12, 8),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_12_add_sse2, TX_8X8, 12, 10),
+        make_tuple(&vpx_highbd_fdct8x8_c, &vpx_highbd_idct8x8_64_add_c,
+                   &vpx_highbd_idct8x8_12_add_sse2, TX_8X8, 12, 12),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_16_add_sse2, TX_4X4, 1, 8),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_16_add_sse2, TX_4X4, 1, 10),
+        make_tuple(&vpx_highbd_fdct4x4_c, &vpx_highbd_idct4x4_16_add_c,
+                   &vpx_highbd_idct4x4_16_add_sse2, TX_4X4, 1, 12)));
+#endif  // HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
+
+#else  // !CONFIG_VP9_HIGHBITDEPTH
+
 INSTANTIATE_TEST_CASE_P(
     C, PartialIDctTest,
     ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_c, TX_32X32, 1024),
+                                 &vpx_idct32x32_1024_add_c, TX_32X32, 1024, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_135_add_c, TX_32X32, 135),
+                                 &vpx_idct32x32_135_add_c, TX_32X32, 135, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_c, TX_32X32, 34),
+                                 &vpx_idct32x32_34_add_c, TX_32X32, 34, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_c, TX_32X32, 1),
+                                 &vpx_idct32x32_1_add_c, TX_32X32, 1, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_256_add_c, TX_16X16, 256),
+                                 &vpx_idct16x16_256_add_c, TX_16X16, 256, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_10_add_c, TX_16X16, 10),
+                                 &vpx_idct16x16_10_add_c, TX_16X16, 10, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_c, TX_16X16, 1),
+                                 &vpx_idct16x16_1_add_c, TX_16X16, 1, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_64_add_c, TX_8X8, 64),
+                                 &vpx_idct8x8_64_add_c, TX_8X8, 64, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_c, TX_8X8, 12),
+                                 &vpx_idct8x8_12_add_c, TX_8X8, 12, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_c, TX_8X8, 1),
+                                 &vpx_idct8x8_1_add_c, TX_8X8, 1, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_16_add_c, TX_4X4, 16),
+                                 &vpx_idct4x4_16_add_c, TX_4X4, 16, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_c, TX_4X4, 1)));
+                                 &vpx_idct4x4_1_add_c, TX_4X4, 1, 8)));
 
 #if HAVE_NEON && !CONFIG_EMULATE_HARDWARE
-#if CONFIG_VP9_HIGHBITDEPTH
-INSTANTIATE_TEST_CASE_P(
-    NEON, PartialIDctTest,
-    ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_neon, TX_32X32, 1),
-                      make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_neon, TX_32X32, 34),
-                      make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_neon, TX_16X16, 1),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_64_add_neon, TX_8X8, 64),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_neon, TX_8X8, 12),
-                      make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_neon, TX_8X8, 1),
-                      make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_16_add_neon, TX_4X4, 16),
-                      make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_neon, TX_4X4, 1)));
-#else  // !CONFIG_VP9_HIGHBITDEPTH
 INSTANTIATE_TEST_CASE_P(
     NEON, PartialIDctTest,
     ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_neon, TX_32X32, 1024),
+                                 &vpx_idct32x32_1024_add_neon, TX_32X32, 1024,
+                                 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_135_add_neon, TX_32X32, 135),
+                                 &vpx_idct32x32_135_add_neon, TX_32X32, 135, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_neon, TX_32X32, 34),
+                                 &vpx_idct32x32_34_add_neon, TX_32X32, 34, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_neon, TX_32X32, 1),
+                                 &vpx_idct32x32_1_add_neon, TX_32X32, 1, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_256_add_neon, TX_16X16, 256),
+                                 &vpx_idct16x16_256_add_neon, TX_16X16, 256, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_10_add_neon, TX_16X16, 10),
+                                 &vpx_idct16x16_10_add_neon, TX_16X16, 10, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_neon, TX_16X16, 1),
+                                 &vpx_idct16x16_1_add_neon, TX_16X16, 1, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_64_add_neon, TX_8X8, 64),
+                                 &vpx_idct8x8_64_add_neon, TX_8X8, 64, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_neon, TX_8X8, 12),
+                                 &vpx_idct8x8_12_add_neon, TX_8X8, 12, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_neon, TX_8X8, 1),
+                                 &vpx_idct8x8_1_add_neon, TX_8X8, 1, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_16_add_neon, TX_4X4, 16),
+                                 &vpx_idct4x4_16_add_neon, TX_4X4, 16, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_neon, TX_4X4, 1)));
-#endif  // CONFIG_VP9_HIGHBITDEPTH
+                                 &vpx_idct4x4_1_add_neon, TX_4X4, 1, 8)));
 #endif  // HAVE_NEON && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
 // 32x32_135_ is implemented using the 1024 version.
 INSTANTIATE_TEST_CASE_P(
     SSE2, PartialIDctTest,
     ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_sse2, TX_32X32, 1024),
+                                 &vpx_idct32x32_1024_add_sse2, TX_32X32, 1024,
+                                 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_sse2, TX_32X32, 135),
+                                 &vpx_idct32x32_1024_add_sse2, TX_32X32, 135,
+                                 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_sse2, TX_32X32, 34),
+                                 &vpx_idct32x32_34_add_sse2, TX_32X32, 34, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_sse2, TX_32X32, 1),
+                                 &vpx_idct32x32_1_add_sse2, TX_32X32, 1, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_256_add_sse2, TX_16X16, 256),
+                                 &vpx_idct16x16_256_add_sse2, TX_16X16, 256, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_10_add_sse2, TX_16X16, 10),
+                                 &vpx_idct16x16_10_add_sse2, TX_16X16, 10, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_sse2, TX_16X16, 1),
+                                 &vpx_idct16x16_1_add_sse2, TX_16X16, 1, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_64_add_sse2, TX_8X8, 64),
+                                 &vpx_idct8x8_64_add_sse2, TX_8X8, 64, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_sse2, TX_8X8, 12),
+                                 &vpx_idct8x8_12_add_sse2, TX_8X8, 12, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_sse2, TX_8X8, 1),
+                                 &vpx_idct8x8_1_add_sse2, TX_8X8, 1, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_16_add_sse2, TX_4X4, 16),
+                                 &vpx_idct4x4_16_add_sse2, TX_4X4, 16, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_sse2, TX_4X4, 1)));
-#endif  // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                                 &vpx_idct4x4_1_add_sse2, TX_4X4, 1, 8)));
+#endif  // HAVE_SSE2 && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH && \
-    !CONFIG_EMULATE_HARDWARE
+#if HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE
 INSTANTIATE_TEST_CASE_P(
     SSSE3_64, PartialIDctTest,
     ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_ssse3, TX_32X32, 1024),
+                                 &vpx_idct32x32_1024_add_ssse3, TX_32X32, 1024,
+                                 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_135_add_ssse3, TX_32X32, 135),
+                                 &vpx_idct32x32_135_add_ssse3, TX_32X32, 135,
+                                 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_ssse3, TX_32X32, 34),
+                                 &vpx_idct32x32_34_add_ssse3, TX_32X32, 34, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_64_add_ssse3, TX_8X8, 64),
+                                 &vpx_idct8x8_64_add_ssse3, TX_8X8, 64, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_ssse3, TX_8X8, 12)));
-#endif  // HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_VP9_HIGHBITDEPTH &&
-        // !CONFIG_EMULATE_HARDWARE
+                                 &vpx_idct8x8_12_add_ssse3, TX_8X8, 12, 8)));
+#endif  // HAVE_SSSE3 && ARCH_X86_64 && !CONFIG_EMULATE_HARDWARE
 
-#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+#if HAVE_MSA && !CONFIG_EMULATE_HARDWARE
 // 32x32_135_ is implemented using the 1024 version.
 INSTANTIATE_TEST_CASE_P(
     MSA, PartialIDctTest,
     ::testing::Values(make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_msa, TX_32X32, 1024),
+                                 &vpx_idct32x32_1024_add_msa, TX_32X32, 1024,
+                                 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1024_add_msa, TX_32X32, 135),
+                                 &vpx_idct32x32_1024_add_msa, TX_32X32, 135, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_34_add_msa, TX_32X32, 34),
+                                 &vpx_idct32x32_34_add_msa, TX_32X32, 34, 8),
                       make_tuple(&vpx_fdct32x32_c, &vpx_idct32x32_1024_add_c,
-                                 &vpx_idct32x32_1_add_msa, TX_32X32, 1),
+                                 &vpx_idct32x32_1_add_msa, TX_32X32, 1, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_256_add_msa, TX_16X16, 256),
+                                 &vpx_idct16x16_256_add_msa, TX_16X16, 256, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_10_add_msa, TX_16X16, 10),
+                                 &vpx_idct16x16_10_add_msa, TX_16X16, 10, 8),
                       make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c,
-                                 &vpx_idct16x16_1_add_msa, TX_16X16, 1),
+                                 &vpx_idct16x16_1_add_msa, TX_16X16, 1, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_64_add_msa, TX_8X8, 64),
+                                 &vpx_idct8x8_64_add_msa, TX_8X8, 64, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_12_add_msa, TX_8X8, 10),
+                                 &vpx_idct8x8_12_add_msa, TX_8X8, 10, 8),
                       make_tuple(&vpx_fdct8x8_c, &vpx_idct8x8_64_add_c,
-                                 &vpx_idct8x8_1_add_msa, TX_8X8, 1),
+                                 &vpx_idct8x8_1_add_msa, TX_8X8, 1, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_16_add_msa, TX_4X4, 16),
+                                 &vpx_idct4x4_16_add_msa, TX_4X4, 16, 8),
                       make_tuple(&vpx_fdct4x4_c, &vpx_idct4x4_16_add_c,
-                                 &vpx_idct4x4_1_add_msa, TX_4X4, 1)));
-#endif  // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+                                 &vpx_idct4x4_1_add_msa, TX_4X4, 1, 8)));
+#endif  // HAVE_MSA && !CONFIG_EMULATE_HARDWARE
+
+#endif  // CONFIG_VP9_HIGHBITDEPTH
 
 }  // namespace
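
Note on the SetUp() arithmetic in the patch: stride_ is drawn at random from [1, 1024], clamped so it is never narrower than the transform size, and rounded down to a multiple of 16 whenever it exceeds 16, while mask_ = (1 << bit_depth_) - 1 is the largest legal sample value for the chosen bit depth. The following standalone sketch is not part of the patch; it restates just that arithmetic in plain C++ (fixed inputs stand in for ACMRandom) so it can be compiled and checked in isolation.

#include <cassert>

// Derives the destination stride the same way SetUp() does: never narrower
// than the transform size, and aligned down to a multiple of 16 once it is
// wider than 16 samples.
static int AlignedStride(int size, int raw_stride) {  // raw_stride in [1, 1024]
  int stride = raw_stride < size ? size : raw_stride;
  if (stride > 16) stride &= ~15;
  return stride;
}

// mask_ in the test: the largest legal sample value for a given bit depth.
static int PixelMask(int bit_depth) { return (1 << bit_depth) - 1; }

int main() {
  assert(AlignedStride(32, 5) == 32);     // clamped up to the block width
  assert(AlignedStride(4, 1000) == 992);  // rounded down to a multiple of 16
  assert(AlignedStride(8, 12) == 12);     // 12 <= 16, so left untouched
  assert(PixelMask(8) == 255);
  assert(PixelMask(10) == 1023);
  assert(PixelMask(12) == 4095);
  return 0;
}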
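The Exec() helper is what lets one test body drive both builds: with CONFIG_VP9_HIGHBITDEPTH the inverse transforms write 16-bit pixels (handed over via CONVERT_TO_BYTEPTR) and take a bit-depth argument, while the 8-bit build keeps the narrow uint8_t signature. Below is a minimal, self-contained sketch of that single-call-site dispatch; the SKETCH_HIGHBITDEPTH macro and the toy transform are illustrative stand-ins for the libvpx types, not part of the patch.

#include <cstdint>
#include <cstdio>

#define SKETCH_HIGHBITDEPTH 1  // flip to 0 for the 8-bit variant

#if SKETCH_HIGHBITDEPTH
typedef uint16_t Pixel;
// High-bitdepth signature: 16-bit destination plus an explicit bit depth.
typedef void (*InvTxfmFunc)(const int32_t *in, Pixel *out, int stride, int bd);
static void ToyIdct(const int32_t *in, Pixel *out, int stride, int bd) {
  (void)stride;
  (void)bd;  // a real IDCT would also clip the result to bd bits
  out[0] = static_cast<Pixel>(out[0] + in[0]);
}
#else
typedef uint8_t Pixel;
// 8-bit signature: no bit-depth argument.
typedef void (*InvTxfmFunc)(const int32_t *in, Pixel *out, int stride);
static void ToyIdct(const int32_t *in, Pixel *out, int stride) {
  (void)stride;
  out[0] = static_cast<Pixel>(out[0] + in[0]);
}
#endif

// Exec-style wrapper: the one place that knows which signature is in effect,
// so each test body can stay identical across the two builds.
static void Exec(InvTxfmFunc func, const int32_t *in, Pixel *out, int stride,
                 int bd) {
#if SKETCH_HIGHBITDEPTH
  func(in, out, stride, bd);
#else
  (void)bd;
  func(in, out, stride);
#endif
}

int main() {
  int32_t coeffs[16] = { 50 };
  Pixel dst[16] = { 200 };
  Exec(&ToyIdct, coeffs, dst, 4, 10);
  std::printf("dst[0] = %d\n", static_cast<int>(dst[0]));  // 250 either way
  return 0;
}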