#include <string.h>
#include "third_party/googletest/src/include/gtest/gtest.h"
+
+#include "./vp9_rtcd.h"
+#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
-
-#include "./vp9_rtcd.h"
#include "vp9/common/vp9_entropy.h"
+#include "vp9/common/vp9_scan.h"
#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
+#include "vpx_ports/mem.h"
using libvpx_test::ACMRandom;
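+// C reference wrappers: the parameterized tests below compare optimized
+// implementations against these. tx_type is unused for the plain DCT.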
void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
int /*tx_type*/) {
- vp9_fdct16x16_c(in, out, stride);
+ vpx_fdct16x16_c(in, out, stride);
}
void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
int /*tx_type*/) {
- vp9_idct16x16_256_add_c(in, dest, stride);
+ vpx_idct16x16_256_add_c(in, dest, stride);
}
void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
#if CONFIG_VP9_HIGHBITDEPTH
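+// High bit-depth adapters: bind the bd argument to 10 or 12 so these
+// functions fit the same function-pointer type as the 8-bit versions.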
void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
- vp9_highbd_idct16x16_256_add_c(in, out, stride, 10);
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 10);
}
void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
- vp9_highbd_idct16x16_256_add_c(in, out, stride, 12);
+ vpx_highbd_idct16x16_256_add_c(in, out, stride, 12);
}
void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
}
void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
- vp9_highbd_idct16x16_10_add_c(in, out, stride, 10);
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 10);
}
void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
- vp9_highbd_idct16x16_10_add_c(in, out, stride, 12);
+ vpx_highbd_idct16x16_10_add_c(in, out, stride, 12);
}
#if HAVE_SSE2
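+// SSE2 counterparts of the bit-depth adapters above.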
void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
- vp9_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
}
void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
- vp9_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
+ vpx_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
}
void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
- vp9_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
}
void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
- vp9_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
+ vpx_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
}
#endif // HAVE_SSE2
#endif // CONFIG_VP9_HIGHBITDEPTH
int64_t total_error = 0;
const int count_test_block = 10000;
for (int i = 0; i < count_test_block; ++i) {
- DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, tran_low_t, test_temp_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
#if CONFIG_VP9_HIGHBITDEPTH
- DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint16_t, src16, kNumCoeffs);
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
#endif
// Initialize a test block with input range [-mask_, mask_].
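+// Checks that the forward transform under test matches the output of the
+// C reference (fwd_txfm_ref).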
void RunCoeffCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-mask_, mask_].
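+// Exercises the transform with extreme inputs: every sample is +mask_ or
+// -mask_.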
void RunMemCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_ref_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
if (i == 0) {
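+// Forward-transforms extreme inputs, quantizes with the given dc/ac
+// thresholds, and compares the reconstructed dst/ref pixel buffers.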
void RunQuantCheck(int dc_thred, int ac_thred) {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 100000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_ref_block, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, ref, kNumCoeffs);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
#if CONFIG_VP9_HIGHBITDEPTH
- DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint16_t, ref16, kNumCoeffs);
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
#endif
for (int i = 0; i < count_test_block; ++i) {
// Initialize a test block with input range [-mask_, mask_].
for (int j = 0; j < kNumCoeffs; ++j) {
- if (bit_depth_ == VPX_BITS_8)
- input_block[j] = rnd.Rand8() - rnd.Rand8();
- else
- input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
}
if (i == 0)
fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
// clear reconstructed pixel buffers
- vpx_memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
- vpx_memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
+ memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
+ memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
#if CONFIG_VP9_HIGHBITDEPTH
- vpx_memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
- vpx_memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
+ memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
+ memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
#endif
// quantization with maximum allowed step sizes
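+// Round-trips random pixels (src) through the reference forward transform
+// and the inverse transform under test, then checks dst against src.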
void RunInvAccuracyCheck() {
ACMRandom rnd(ACMRandom::DeterministicSeed());
const int count_test_block = 1000;
- DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
+ DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
#if CONFIG_VP9_HIGHBITDEPTH
- DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint16_t, src16, kNumCoeffs);
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
#endif // CONFIG_VP9_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
const int count_test_block = 10000;
const int eob = 10;
const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan;
- DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint8_t, ref, kNumCoeffs);
+ DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
#if CONFIG_VP9_HIGHBITDEPTH
- DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs);
- DECLARE_ALIGNED_ARRAY(16, uint16_t, ref16, kNumCoeffs);
+ DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
+ DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
#endif // CONFIG_VP9_HIGHBITDEPTH
for (int i = 0; i < count_test_block; ++i) {
#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
C, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
- make_tuple(&vp9_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
- make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0, VPX_BITS_8)));
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
+ make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
#else
INSTANTIATE_TEST_CASE_P(
C, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0, VPX_BITS_8)));
+ make_tuple(&vpx_fdct16x16_c, &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
#endif // CONFIG_VP9_HIGHBITDEPTH
#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
NEON, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_c,
- &vp9_idct16x16_256_add_neon, 0, VPX_BITS_8)));
+ make_tuple(&vpx_fdct16x16_c,
+ &vpx_idct16x16_256_add_neon, 0, VPX_BITS_8)));
#endif
#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_fdct16x16_sse2,
- &vp9_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
::testing::Values(
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16DCT,
::testing::Values(
- make_tuple(&vp9_highbd_fdct16x16_sse2,
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
&idct16x16_10, 0, VPX_BITS_10),
- make_tuple(&vp9_highbd_fdct16x16_c,
+ make_tuple(&vpx_highbd_fdct16x16_c,
&idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
- make_tuple(&vp9_highbd_fdct16x16_sse2,
+ make_tuple(&vpx_highbd_fdct16x16_sse2,
&idct16x16_12, 0, VPX_BITS_12),
- make_tuple(&vp9_highbd_fdct16x16_c,
+ make_tuple(&vpx_highbd_fdct16x16_c,
&idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
- make_tuple(&vp9_fdct16x16_sse2,
- &vp9_idct16x16_256_add_c, 0, VPX_BITS_8)));
+ make_tuple(&vpx_fdct16x16_sse2,
+ &vpx_idct16x16_256_add_c, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
SSE2, Trans16x16HT,
::testing::Values(
- make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_10, 0, VPX_BITS_10),
- make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_10, 1, VPX_BITS_10),
- make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_10, 2, VPX_BITS_10),
- make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_10, 3, VPX_BITS_10),
- make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_12, 0, VPX_BITS_12),
- make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_12, 1, VPX_BITS_12),
- make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_12, 2, VPX_BITS_12),
- make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_12, 3, VPX_BITS_12),
make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
make_tuple(&idct16x16_12,
&idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+
+#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16DCT,
+ ::testing::Values(
+ make_tuple(&vpx_fdct16x16_msa,
+ &vpx_idct16x16_256_add_msa, 0, VPX_BITS_8)));
+INSTANTIATE_TEST_CASE_P(
+ MSA, Trans16x16HT,
+ ::testing::Values(
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8),
+ make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 3,
+ VPX_BITS_8)));
+#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
} // namespace