* be found in the AUTHORS file in the root of the source tree.
*/
+#include <cmath>
+#include <cstdlib>
+#include <string>
+
#include "third_party/googletest/src/include/gtest/gtest.h"
#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_ports/mem.h"
-
-#include "test/array_utils.h"
-#include "test/assertion_helpers.h"
-#include "test/function_equivalence_test.h"
-#include "test/randomise.h"
+#include "test/acm_random.h"
+#include "test/clear_system_state.h"
#include "test/register_state_check.h"
-#include "test/snapshot.h"
-
-using libvpx_test::FunctionEquivalenceTest;
-using libvpx_test::Snapshot;
-using libvpx_test::Randomise;
-using libvpx_test::array_utils::arraySet;
-using libvpx_test::assertion_helpers::ArraysEq;
-
// NOTE(review): diff hunk — removes the old FunctionEquivalenceTest-based 2D
// harness (SumSquares2DTest with Snapshot/Randomise helpers). The interleaved
// '+' lines (test/util.h include, ACMRandom using-decl) belong to the new
// fixture added further down; their placement mid-hunk suggests this patch
// was machine-merged — verify hunk ordering against the original commit.
-namespace {
-
-static const int16_t int13_max = (1<<12) - 1;
-
-//////////////////////////////////////////////////////////////////////////////
-// 2D version
-//////////////////////////////////////////////////////////////////////////////
-
-typedef uint64_t (*F2D)(const int16_t *src, int stride, uint32_t size);
-
-class SumSquares2DTest : public FunctionEquivalenceTest<F2D> {
- protected:
-  void Common() {
-    const int sizelog2 = randomise.uniform<int>(2, 8);
-
-    const uint32_t size = 1 << sizelog2;
-    const int stride = 1 << randomise.uniform<int>(sizelog2, 9);
-
-    snapshot(src);
+#include "test/util.h"
-    uint64_t ref_res, tst_res;
-    ref_res = ref_func_(src, stride, size);
-    ASM_REGISTER_STATE_CHECK(tst_res = tst_func_(src, stride, size));
+using libvpx_test::ACMRandom;
-    ASSERT_EQ(ref_res, tst_res);
-
-    ASSERT_TRUE(ArraysEq(snapshot.get(src), src));
-  }
-
-  Snapshot snapshot;
-  Randomise randomise;
-
-  DECLARE_ALIGNED(16, int16_t, src[256*256]);
-};
// NOTE(review): diff hunk — opens the anonymous namespace for the new test and
// declares the function-pointer type for vpx_sum_squares_2d_i16_* (C ref vs.
// optimized under test). Nits in the added lines, flagged but not fixed here
// since this is a patch: stray space in "int stride ," and std::tr1::tuple
// (pre-C++11 TR1; fine only for the gtest version vendored here — confirm).
+namespace {
+const int kNumIterations = 10000;
-TEST_P(SumSquares2DTest, RandomValues) {
-  for (int i = 0 ; i < 10000 && !HasFatalFailure(); i++) {
-    randomise(src, -int13_max, int13_max + 1);
+typedef uint64_t (*SSI16Func)(const int16_t *src,
+                              int stride ,
+                              int size);
-    Common();
-  }
-}
+typedef std::tr1::tuple<SSI16Func, SSI16Func> SumSquaresParam;
// NOTE(review): diff hunk — adds the new SumSquaresTest fixture: SetUp() pulls
// the reference (param 0) and optimized (param 1) functions from the test
// parameter; TearDown() clears SIMD/FPU state via ClearSystemState(). The '-'
// lines remove the old 2D ExtremeValues test, the old SSE2 instantiation and
// the entire 1D harness (its SSE2 counterpart is deleted in the second file's
// hunk below, so no 1D coverage is lost for a still-present function).
-TEST_P(SumSquares2DTest, ExtremeValues) {
-  for (int i = 0 ; i < 10000 && !HasFatalFailure(); i++) {
-    if (randomise.uniform<bool>())
-      arraySet(src, int13_max);
-    else
-      arraySet(src, -int13_max);
-
-    Common();
+class SumSquaresTest : public ::testing::TestWithParam<SumSquaresParam> {
+ public:
+  virtual ~SumSquaresTest() {}
+  virtual void SetUp() {
+    ref_func_ = GET_PARAM(0);
+    tst_func_ = GET_PARAM(1);
   }
-}
-using std::tr1::make_tuple;
-
-#if HAVE_SSE2
-INSTANTIATE_TEST_CASE_P(
-    SSE2, SumSquares2DTest,
-    ::testing::Values(
-        make_tuple(&vpx_sum_squares_2d_i16_c, &vpx_sum_squares_2d_i16_sse2)
-    )
-);
-#endif  // HAVE_SSE2
-//////////////////////////////////////////////////////////////////////////////
-// 1D version
-//////////////////////////////////////////////////////////////////////////////
+  virtual void TearDown() { libvpx_test::ClearSystemState(); }
-typedef uint64_t (*F1D)(const int16_t *src, uint32_t N);
-
-class SumSquares1DTest : public FunctionEquivalenceTest<F1D> {
  protected:
-  void Common() {
-    const int N = randomise.uniform<int>(1, 256*256-1);
-
-    snapshot(src);
-
-    uint64_t ref_res, tst_res;
-
-    ref_res = ref_func_(src, N);
-    ASM_REGISTER_STATE_CHECK(tst_res = tst_func_(src, N));
-
-    ASSERT_EQ(ref_res, tst_res);
-
-    ASSERT_TRUE(ArraysEq(snapshot.get(src), src));
-  }
-
-  Snapshot snapshot;
-  Randomise randomise;
-
-  DECLARE_ALIGNED(16, int16_t, src[256*256]);
+  SSI16Func ref_func_;
+  SSI16Func tst_func_;
 };
// NOTE(review): diff hunk — adds TEST_P(SumSquaresTest, OperationCheck):
// 10000 iterations of random 12-bit signed data (limit = 1<<12, so values in
// (-4096, 4096)), random size (4<<rnd(6), i.e. 4..128) and stride
// (4<<rnd(7), i.e. 4..256, re-rolled until stride >= size), comparing the C
// reference against the optimized function under ASM_REGISTER_STATE_CHECK.
// The 'failed' latch means EXPECT_EQ fires at most once per test, so the log
// is not flooded after the first mismatch. Removed '-' lines are the old 1D
// RandomValues test.
-TEST_P(SumSquares1DTest, RandomValues) {
-  for (int i = 0 ; i < 10000 && !HasFatalFailure(); i++) {
-    randomise(src, -int13_max, int13_max+1);
+TEST_P(SumSquaresTest, OperationCheck) {
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, int16_t, src[256*256]);
-    Common();
+  int failed = 0;
+
+  const int msb = 11;  // Up to 12 bit input
+  const int limit = 1 << (msb+1);
+
+  for (int k = 0; k < kNumIterations; k++) {
+    int size = 4 << rnd(6);  // Up to 128x128
+    int stride = 4 << rnd(7);  // Up to 256 stride
+    while (stride < size) {  // Make sure it's valid
+      stride = 4 << rnd(7);
+    }
+
+    for (int ii = 0 ; ii < size; ii++) {
+      for (int jj = 0; jj < size; jj++) {
+        src[ii*stride+jj] = rnd(2) ? rnd(limit) : -rnd(limit);
+      }
+    }
+
+    uint64_t res_ref = ref_func_(src, stride, size);
+    uint64_t res_tst;
+    ASM_REGISTER_STATE_CHECK(res_tst = tst_func_(src, stride, size));
+
+    if (!failed) {
+      failed = res_ref != res_tst;
+      EXPECT_EQ(res_ref, res_tst)
+          << "Error: Sum Squares Test"
+          << " C output does not match optimized output.";
+    }
  }
}
// NOTE(review): diff hunk — adds TEST_P(SumSquaresTest, ExtremeValues): same
// size/stride sampling as OperationCheck, but the whole block is filled with
// a single extreme constant (+/-(limit-1), i.e. +/-4095) to stress the
// accumulator paths (128x128 * 4095^2 exercises >32-bit sums). This hunk
// duplicates the setup/compare scaffolding of OperationCheck; a shared helper
// would be a follow-up, not done here since this is a patch. Removed '-'
// lines are the old 1D ExtremeValues test.
-TEST_P(SumSquares1DTest, ExtremeValues) {
-  for (int i = 0 ; i < 10000 && !HasFatalFailure(); i++) {
-    if (randomise.uniform<bool>())
-      arraySet(src, int13_max);
-    else
-      arraySet(src, -int13_max);
+TEST_P(SumSquaresTest, ExtremeValues) {
+  ACMRandom rnd(ACMRandom::DeterministicSeed());
+  DECLARE_ALIGNED(16, int16_t, src[256*256]);
-    Common();
+  int failed = 0;
+
+  const int msb = 11;  // Up to 12 bit input
+  const int limit = 1 << (msb+1);
+
+  for (int k = 0; k < kNumIterations; k++) {
+    int size = 4 << rnd(6);  // Up to 128x128
+    int stride = 4 << rnd(7);  // Up to 256 stride
+    while (stride < size) {  // Make sure it's valid
+      stride = 4 << rnd(7);
+    }
+
+    int val = rnd(2) ? limit-1 : -(limit-1);
+    for (int ii = 0 ; ii < size; ii++) {
+      for (int jj = 0; jj < size; jj++) {
+        src[ii*stride+jj] = val;
+      }
+    }
+
+    uint64_t res_ref = ref_func_(src, stride, size);
+    uint64_t res_tst;
+    ASM_REGISTER_STATE_CHECK(res_tst = tst_func_(src, stride, size));
+
+    if (!failed) {
+      failed = res_ref != res_tst;
+      EXPECT_EQ(res_ref, res_tst)
+          << "Error: Sum Squares Test"
+          << " C output does not match optimized output.";
+    }
  }
}
// NOTE(review): diff hunk — retargets the SSE2 instantiation from the removed
// SumSquares1DTest to the new SumSquaresTest, pairing the C reference
// vpx_sum_squares_2d_i16_c with vpx_sum_squares_2d_i16_sse2.
// INSTANTIATE_TEST_CASE_P was later renamed INSTANTIATE_TEST_SUITE_P in
// upstream googletest — fine for the gtest vendored here, TODO confirm on
// upgrade.
using std::tr1::make_tuple;
#if HAVE_SSE2
+
INSTANTIATE_TEST_CASE_P(
-    SSE2, SumSquares1DTest,
+    SSE2, SumSquaresTest,
    ::testing::Values(
-        make_tuple(&vpx_sum_squares_i16_c, &vpx_sum_squares_i16_sse2)
+        make_tuple(&vpx_sum_squares_2d_i16_c, &vpx_sum_squares_2d_i16_sse2)
    )
);
#endif  // HAVE_SSE2
#include <emmintrin.h>
#include <stdio.h>
-#include "vpx_dsp/x86/synonyms.h"
-
#include "./vpx_dsp_rtcd.h"
-//////////////////////////////////////////////////////////////////////////////
-// 2D version
-//////////////////////////////////////////////////////////////////////////////
-
// NOTE(review): diff hunk over the SSE2 implementation file — the function
// bodies here are mostly elided diff context (the 4x4 body and the nxn loop
// are cut off mid-function), so only the signature changes are visible: the
// 'size' parameter of the nxn kernel and of the public entry point changes
// uint32_t -> int, matching the SSI16Func typedef introduced in the test
// hunk above. Verify against the full file that all internal comparisons
// against 'size' remain sign-correct after this change — not visible here.
static uint64_t vpx_sum_squares_2d_i16_4x4_sse2(const int16_t *src,
                                                int stride) {
  const __m128i v_val_0_w = _mm_loadl_epi64((const __m128i*)(src+0*stride));
#endif
static uint64_t vpx_sum_squares_2d_i16_nxn_sse2(const int16_t *src,
                                                int stride,
-                                                uint32_t size) {
+                                                int size) {
  int r, c;
  const __m128i v_zext_mask_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
}
uint64_t vpx_sum_squares_2d_i16_sse2(const int16_t *src, int stride,
-                                     uint32_t size) {
+                                     int size) {
  // 4 elements per row only requires half an XMM register, so this
  // must be a special case, but also note that over 75% of all calls
  // are with size == 4, so it is also the common case.
  return vpx_sum_squares_2d_i16_nxn_sse2(src, stride, size);
}
}
// NOTE(review): diff hunk — deletes the entire 1D SSE2 implementation:
// vpx_sum_squares_i16_64n_sse2 (a 64-elements-per-iteration _mm_madd_epi16
// accumulator with zero-extended 32->64-bit partial sums) and the public
// vpx_sum_squares_i16_sse2 dispatcher (64-multiple fast path, tail handled
// by the C fallback). This matches the removal of the 1D tests in the first
// file's hunks; confirm vpx_sum_squares_i16_c/_sse2 are also dropped from
// vpx_dsp_rtcd and headers in the full patch — not visible here.
-
-//////////////////////////////////////////////////////////////////////////////
-// 1D version
-//////////////////////////////////////////////////////////////////////////////
-
-static uint64_t vpx_sum_squares_i16_64n_sse2(const int16_t *src, uint32_t n) {
-  const __m128i v_zext_mask_q = _mm_set_epi32(0, 0xffffffff, 0, 0xffffffff);
-  __m128i v_acc0_q = _mm_setzero_si128();
-  __m128i v_acc1_q = _mm_setzero_si128();
-
-  const int16_t *const end = src + n;
-
-  assert(n % 64 == 0);
-
-  while (src < end) {
-    const __m128i v_val_0_w = xx_load_128(src);
-    const __m128i v_val_1_w = xx_load_128(src + 8);
-    const __m128i v_val_2_w = xx_load_128(src + 16);
-    const __m128i v_val_3_w = xx_load_128(src + 24);
-    const __m128i v_val_4_w = xx_load_128(src + 32);
-    const __m128i v_val_5_w = xx_load_128(src + 40);
-    const __m128i v_val_6_w = xx_load_128(src + 48);
-    const __m128i v_val_7_w = xx_load_128(src + 56);
-
-    const __m128i v_sq_0_d = _mm_madd_epi16(v_val_0_w, v_val_0_w);
-    const __m128i v_sq_1_d = _mm_madd_epi16(v_val_1_w, v_val_1_w);
-    const __m128i v_sq_2_d = _mm_madd_epi16(v_val_2_w, v_val_2_w);
-    const __m128i v_sq_3_d = _mm_madd_epi16(v_val_3_w, v_val_3_w);
-    const __m128i v_sq_4_d = _mm_madd_epi16(v_val_4_w, v_val_4_w);
-    const __m128i v_sq_5_d = _mm_madd_epi16(v_val_5_w, v_val_5_w);
-    const __m128i v_sq_6_d = _mm_madd_epi16(v_val_6_w, v_val_6_w);
-    const __m128i v_sq_7_d = _mm_madd_epi16(v_val_7_w, v_val_7_w);
-
-    const __m128i v_sum_01_d = _mm_add_epi32(v_sq_0_d, v_sq_1_d);
-    const __m128i v_sum_23_d = _mm_add_epi32(v_sq_2_d, v_sq_3_d);
-    const __m128i v_sum_45_d = _mm_add_epi32(v_sq_4_d, v_sq_5_d);
-    const __m128i v_sum_67_d = _mm_add_epi32(v_sq_6_d, v_sq_7_d);
-
-    const __m128i v_sum_0123_d = _mm_add_epi32(v_sum_01_d, v_sum_23_d);
-    const __m128i v_sum_4567_d = _mm_add_epi32(v_sum_45_d, v_sum_67_d);
-
-    const __m128i v_sum_d = _mm_add_epi32(v_sum_0123_d, v_sum_4567_d);
-
-    v_acc0_q = _mm_add_epi64(v_acc0_q, _mm_and_si128(v_sum_d, v_zext_mask_q));
-    v_acc1_q = _mm_add_epi64(v_acc1_q, _mm_srli_epi64(v_sum_d, 32));
-
-    src += 64;
-  }
-
-  v_acc0_q = _mm_add_epi64(v_acc0_q, v_acc1_q);
-  v_acc0_q = _mm_add_epi64(v_acc0_q, _mm_srli_si128(v_acc0_q, 8));
-
-#if ARCH_X86_64
-  return (uint64_t)_mm_cvtsi128_si64(v_acc0_q);
-#else
-  {
-    uint64_t tmp;
-    _mm_storel_epi64((__m128i*)&tmp, v_acc0_q);
-    return tmp;
-  }
-#endif
-}
-
-uint64_t vpx_sum_squares_i16_sse2(const int16_t *src, uint32_t n) {
-  if (n % 64 == 0) {
-    return vpx_sum_squares_i16_64n_sse2(src, n);
-  } else if (n > 64) {
-    int k = n & ~(64-1);
-    return vpx_sum_squares_i16_64n_sse2(src, k) +
-           vpx_sum_squares_i16_c(src + k, n - k);
-  } else {
-    return vpx_sum_squares_i16_c(src, n);
-  }
-}
-