/*
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
15 #include "third_party/googletest/src/include/gtest/gtest.h"
16 #include "test/acm_random.h"
17 #include "test/clear_system_state.h"
18 #include "test/register_state_check.h"
19 #include "test/util.h"
21 #include "./vp9_rtcd.h"
22 #include "vp9/common/vp9_entropy.h"
23 #include "vpx/vpx_codec.h"
24 #include "vpx/vpx_integer.h"
25 #include "vpx_ports/mem.h"
27 using libvpx_test::ACMRandom;
// MSVC (pre-2013) does not provide C99 round(); supply a local
// round-half-away-from-zero replacement.  Other toolchains use the libc
// round(), which has the same semantics, so the definition is guarded to
// avoid a conflicting redeclaration of ::round.
#ifdef _MSC_VER
static int round(double x) {
  if (x < 0)
    return static_cast<int>(ceil(x - 0.5));
  else
    return static_cast<int>(floor(x + 0.5));
}
#endif
const int kNumCoeffs = 256;  // 16 * 16 coefficients per block.
const double PI = 3.1415926535898;

// Slow, direct 2-D 16x16 inverse DCT used as a numerical reference.
// |input| holds the 256 transform coefficients; |output| receives the
// reconstructed samples, output[k * 16 + l] being row k, column l.
// AC basis functions carry sqrt(2) weights relative to the DC term.
void reference2_16x16_idct_2d(double *input, double *output) {
  double x;
  for (int l = 0; l < 16; ++l) {
    for (int k = 0; k < 16; ++k) {
      double s = 0;
      for (int i = 0; i < 16; ++i) {
        for (int j = 0; j < 16; ++j) {
          x = cos(PI * j * (l + 0.5) / 16.0) *
              cos(PI * i * (k + 0.5) / 16.0) *
              input[i * 16 + j] / 256;
          if (i != 0)
            x *= sqrt(2.0);
          if (j != 0)
            x *= sqrt(2.0);
          s += x;
        }
      }
      output[k * 16 + l] = s / 4;
    }
  }
}
// Cosine constants for the reference butterfly below: Ck == cos(k * PI / 32).
const double C1 = 0.995184726672197;
const double C2 = 0.98078528040323;
const double C3 = 0.956940335732209;
const double C4 = 0.923879532511287;
const double C5 = 0.881921264348355;
const double C6 = 0.831469612302545;
const double C7 = 0.773010453362737;
const double C8 = 0.707106781186548;
const double C9 = 0.634393284163646;
const double C10 = 0.555570233019602;
const double C11 = 0.471396736825998;
const double C12 = 0.38268343236509;
const double C13 = 0.290284677254462;
const double C14 = 0.195090322016128;
const double C15 = 0.098017140329561;

// Reference 16-point 1-D DCT implemented as a four-stage butterfly.
// |output| is also used as scratch between stages, so it must not alias
// |input|.  For a constant input of 1.0 the result is output[0] == 16,
// all other coefficients 0.
void butterfly_16x16_dct_1d(double input[16], double output[16]) {
  double step[16];
  double intermediate[16];
  double temp1, temp2;

  // step 1: butterfly the mirrored input pairs.
  step[ 0] = input[0] + input[15];
  step[ 1] = input[1] + input[14];
  step[ 2] = input[2] + input[13];
  step[ 3] = input[3] + input[12];
  step[ 4] = input[4] + input[11];
  step[ 5] = input[5] + input[10];
  step[ 6] = input[6] + input[ 9];
  step[ 7] = input[7] + input[ 8];
  step[ 8] = input[7] - input[ 8];
  step[ 9] = input[6] - input[ 9];
  step[10] = input[5] - input[10];
  step[11] = input[4] - input[11];
  step[12] = input[3] - input[12];
  step[13] = input[2] - input[13];
  step[14] = input[1] - input[14];
  step[15] = input[0] - input[15];

  // step 2
  output[0] = step[0] + step[7];
  output[1] = step[1] + step[6];
  output[2] = step[2] + step[5];
  output[3] = step[3] + step[4];
  output[4] = step[3] - step[4];
  output[5] = step[2] - step[5];
  output[6] = step[1] - step[6];
  output[7] = step[0] - step[7];

  temp1 = step[ 8] * C7;
  temp2 = step[15] * C9;
  output[ 8] = temp1 + temp2;

  temp1 = step[ 9] * C11;
  temp2 = step[14] * C5;
  output[ 9] = temp1 - temp2;

  temp1 = step[10] * C3;
  temp2 = step[13] * C13;
  output[10] = temp1 + temp2;

  temp1 = step[11] * C15;
  temp2 = step[12] * C1;
  output[11] = temp1 - temp2;

  temp1 = step[11] * C1;
  temp2 = step[12] * C15;
  output[12] = temp2 + temp1;

  temp1 = step[10] * C13;
  temp2 = step[13] * C3;
  output[13] = temp2 - temp1;

  temp1 = step[ 9] * C5;
  temp2 = step[14] * C11;
  output[14] = temp2 + temp1;

  temp1 = step[ 8] * C9;
  temp2 = step[15] * C7;
  output[15] = temp2 - temp1;

  // step 3
  step[ 0] = output[0] + output[3];
  step[ 1] = output[1] + output[2];
  step[ 2] = output[1] - output[2];
  step[ 3] = output[0] - output[3];

  temp1 = output[4] * C14;
  temp2 = output[7] * C2;
  step[ 4] = temp1 + temp2;

  temp1 = output[5] * C10;
  temp2 = output[6] * C6;
  step[ 5] = temp1 + temp2;

  temp1 = output[5] * C6;
  temp2 = output[6] * C10;
  step[ 6] = temp2 - temp1;

  temp1 = output[4] * C2;
  temp2 = output[7] * C14;
  step[ 7] = temp2 - temp1;

  step[ 8] = output[ 8] + output[11];
  step[ 9] = output[ 9] + output[10];
  step[10] = output[ 9] - output[10];
  step[11] = output[ 8] - output[11];

  step[12] = output[12] + output[15];
  step[13] = output[13] + output[14];
  step[14] = output[13] - output[14];
  step[15] = output[12] - output[15];

  // step 4
  output[ 0] = (step[ 0] + step[ 1]);
  output[ 8] = (step[ 0] - step[ 1]);

  temp1 = step[2] * C12;
  temp2 = step[3] * C4;
  temp1 = temp1 + temp2;
  output[ 4] = 2*(temp1 * C8);

  temp1 = step[2] * C4;
  temp2 = step[3] * C12;
  temp1 = temp2 - temp1;
  output[12] = 2 * (temp1 * C8);

  output[ 2] = 2 * ((step[4] + step[ 5]) * C8);
  output[14] = 2 * ((step[7] - step[ 6]) * C8);

  temp1 = step[4] - step[5];
  temp2 = step[6] + step[7];
  output[ 6] = (temp1 + temp2);
  output[10] = (temp1 - temp2);

  intermediate[8] = step[8] + step[14];
  intermediate[9] = step[9] + step[15];

  temp1 = intermediate[8] * C12;
  temp2 = intermediate[9] * C4;
  temp1 = temp1 - temp2;
  output[3] = 2 * (temp1 * C8);

  temp1 = intermediate[8] * C4;
  temp2 = intermediate[9] * C12;
  temp1 = temp2 + temp1;
  output[13] = 2 * (temp1 * C8);

  output[ 9] = 2 * ((step[10] + step[11]) * C8);

  intermediate[11] = step[10] - step[11];
  intermediate[12] = step[12] + step[13];
  intermediate[13] = step[12] - step[13];
  intermediate[14] = step[ 8] - step[14];
  intermediate[15] = step[ 9] - step[15];

  output[15] = (intermediate[11] + intermediate[12]);
  output[ 1] = -(intermediate[11] - intermediate[12]);

  output[ 7] = 2 * (intermediate[13] * C8);

  temp1 = intermediate[14] * C12;
  temp2 = intermediate[15] * C4;
  temp1 = temp1 - temp2;
  output[11] = -2 * (temp1 * C8);

  temp1 = intermediate[14] * C4;
  temp2 = intermediate[15] * C12;
  temp1 = temp2 + temp1;
  output[ 5] = 2 * (temp1 * C8);
}

// Reference 2-D 16x16 forward DCT: 1-D transform on the columns, then on
// the rows, with a final scale of 1/2 applied to every coefficient.
void reference_16x16_dct_2d(int16_t input[256], double output[256]) {
  // First transform columns
  for (int i = 0; i < 16; ++i) {
    double temp_in[16], temp_out[16];
    for (int j = 0; j < 16; ++j)
      temp_in[j] = input[j * 16 + i];
    butterfly_16x16_dct_1d(temp_in, temp_out);
    for (int j = 0; j < 16; ++j)
      output[j * 16 + i] = temp_out[j];
  }
  // Then transform rows
  for (int i = 0; i < 16; ++i) {
    double temp_in[16], temp_out[16];
    for (int j = 0; j < 16; ++j)
      temp_in[j] = output[j + i * 16];
    butterfly_16x16_dct_1d(temp_in, temp_out);
    // Scale by some magic number
    for (int j = 0; j < 16; ++j)
      output[j + i * 16] = temp_out[j] / 2;
  }
}
259 typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
260 typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
261 typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
263 typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
266 typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct16x16Param;
267 typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht16x16Param;
268 typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t>
271 void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
273 vp9_fdct16x16_c(in, out, stride);
276 void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
278 vp9_idct16x16_256_add_c(in, dest, stride);
281 void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
283 vp9_fht16x16_c(in, out, stride, tx_type);
286 void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
288 vp9_iht16x16_256_add_c(in, dest, stride, tx_type);
#if CONFIG_VP9_HIGHBITDEPTH
// Bit-depth-specialized wrappers so the 10- and 12-bit high-bitdepth
// transforms can be used where a plain IdctFunc / IhtFunc is expected.
void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_256_add_c(in, out, stride, 10);
}

void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_256_add_c(in, out, stride, 12);
}

// *_ref variants match the 4-argument reference signature; tx_type is
// unused for the DCT.
void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
                      int /*tx_type*/) {
  idct16x16_10(in, out, stride);
}

void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
                      int /*tx_type*/) {
  idct16x16_12(in, out, stride);
}

void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
  vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
}

void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
  vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
}

void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_10_add_c(in, out, stride, 10);
}

void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_10_add_c(in, out, stride, 12);
}

#if HAVE_SSE2
void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
}

void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
}

void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
}

void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
}
#endif  // HAVE_SSE2
#endif  // CONFIG_VP9_HIGHBITDEPTH
// NOTE(review): this chunk is an incomplete extraction — each line carries a
// stray original-line-number prefix, and many interior lines (braces,
// #else/#endif, declarations, one method header) are missing.  The comments
// below describe only what is visible.
//
// Shared logic for the parameterized 16x16 transform tests.  Concrete
// fixtures implement RunFwdTxfm/RunInvTxfm; the Run*Check helpers below
// are the actual test procedures.
345 class Trans16x16TestBase {
347 virtual ~Trans16x16TestBase() {}
// Hooks supplied by the parameterized subclasses.
350 virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
352 virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
// Round-trips random residual blocks (src - dst) through forward + inverse
// transform and bounds both the worst-case and total squared error.
354 void RunAccuracyCheck() {
355 ACMRandom rnd(ACMRandom::DeterministicSeed());
356 uint32_t max_error = 0;
357 int64_t total_error = 0;
358 const int count_test_block = 10000;
359 for (int i = 0; i < count_test_block; ++i) {
360 DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
361 DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
362 DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
363 DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
364 #if CONFIG_VP9_HIGHBITDEPTH
365 DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
366 DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
369 // Initialize a test block with input range [-mask_, mask_].
370 for (int j = 0; j < kNumCoeffs; ++j) {
371 if (bit_depth_ == VPX_BITS_8) {
372 src[j] = rnd.Rand8();
373 dst[j] = rnd.Rand8();
374 test_input_block[j] = src[j] - dst[j];
375 #if CONFIG_VP9_HIGHBITDEPTH
377 src16[j] = rnd.Rand16() & mask_;
378 dst16[j] = rnd.Rand16() & mask_;
379 test_input_block[j] = src16[j] - dst16[j];
384 ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
385 test_temp_block, pitch_));
386 if (bit_depth_ == VPX_BITS_8) {
387 ASM_REGISTER_STATE_CHECK(
388 RunInvTxfm(test_temp_block, dst, pitch_));
389 #if CONFIG_VP9_HIGHBITDEPTH
391 ASM_REGISTER_STATE_CHECK(
392 RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
396 for (int j = 0; j < kNumCoeffs; ++j) {
397 #if CONFIG_VP9_HIGHBITDEPTH
398 const uint32_t diff =
399 bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
401 const uint32_t diff = dst[j] - src[j];
403 const uint32_t error = diff * diff;
404 if (max_error < error)
406 total_error += error;
// Error bounds scale with bit depth: 1 per pixel / 1 per block at 8 bits.
410 EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
411 << "Error: 16x16 FHT/IHT has an individual round trip error > 1";
413 EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
414 << "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
// Checks the optimized forward transform against fwd_txfm_ref bit-exactly
// on random inputs.
417 void RunCoeffCheck() {
418 ACMRandom rnd(ACMRandom::DeterministicSeed());
419 const int count_test_block = 1000;
420 DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
421 DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
422 DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
424 for (int i = 0; i < count_test_block; ++i) {
425 // Initialize a test block with input range [-mask_, mask_].
426 for (int j = 0; j < kNumCoeffs; ++j)
427 input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
429 fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
430 ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
432 // The minimum quant value is 4.
433 for (int j = 0; j < kNumCoeffs; ++j)
434 EXPECT_EQ(output_block[j], output_ref_block[j]);
// NOTE(review): the method header for the next check (original line ~438,
// presumably RunMemCheck) is missing from this chunk.  The body drives the
// forward transform with extremal (+/- mask_) inputs and also bounds the
// coefficient magnitude.
439 ACMRandom rnd(ACMRandom::DeterministicSeed());
440 const int count_test_block = 1000;
441 DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
442 DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
443 DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
445 for (int i = 0; i < count_test_block; ++i) {
446 // Initialize a test block with input range [-mask_, mask_].
447 for (int j = 0; j < kNumCoeffs; ++j) {
448 input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
451 for (int j = 0; j < kNumCoeffs; ++j)
452 input_extreme_block[j] = mask_;
454 for (int j = 0; j < kNumCoeffs; ++j)
455 input_extreme_block[j] = -mask_;
458 fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
459 ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
460 output_block, pitch_));
462 // The minimum quant value is 4.
463 for (int j = 0; j < kNumCoeffs; ++j) {
464 EXPECT_EQ(output_block[j], output_ref_block[j]);
465 EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
466 << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
// Quantizes reference coefficients with the given DC/AC step sizes, then
// verifies the optimized inverse transform matches inv_txfm_ref exactly.
471 void RunQuantCheck(int dc_thred, int ac_thred) {
472 ACMRandom rnd(ACMRandom::DeterministicSeed());
473 const int count_test_block = 100000;
474 DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
475 DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
477 DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
478 DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
479 #if CONFIG_VP9_HIGHBITDEPTH
480 DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
481 DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
484 for (int i = 0; i < count_test_block; ++i) {
485 // Initialize a test block with input range [-mask_, mask_].
486 for (int j = 0; j < kNumCoeffs; ++j) {
487 input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
490 for (int j = 0; j < kNumCoeffs; ++j)
491 input_extreme_block[j] = mask_;
493 for (int j = 0; j < kNumCoeffs; ++j)
494 input_extreme_block[j] = -mask_;
496 fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
498 // clear reconstructed pixel buffers
499 memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
500 memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
501 #if CONFIG_VP9_HIGHBITDEPTH
502 memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
503 memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
506 // quantization with maximum allowed step sizes
507 output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred;
508 for (int j = 1; j < kNumCoeffs; ++j)
509 output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred;
510 if (bit_depth_ == VPX_BITS_8) {
511 inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
512 ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
513 #if CONFIG_VP9_HIGHBITDEPTH
515 inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
517 ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
518 CONVERT_TO_BYTEPTR(dst16), pitch_));
521 if (bit_depth_ == VPX_BITS_8) {
522 for (int j = 0; j < kNumCoeffs; ++j)
523 EXPECT_EQ(ref[j], dst[j]);
524 #if CONFIG_VP9_HIGHBITDEPTH
526 for (int j = 0; j < kNumCoeffs; ++j)
527 EXPECT_EQ(ref16[j], dst16[j]);
// Checks the inverse transform against the double-precision reference DCT:
// forward via reference_16x16_dct_2d, rounded, then inverted.
533 void RunInvAccuracyCheck() {
534 ACMRandom rnd(ACMRandom::DeterministicSeed());
535 const int count_test_block = 1000;
536 DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
537 DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
538 DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
539 DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
540 #if CONFIG_VP9_HIGHBITDEPTH
541 DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
542 DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
543 #endif // CONFIG_VP9_HIGHBITDEPTH
545 for (int i = 0; i < count_test_block; ++i) {
546 double out_r[kNumCoeffs];
548 // Initialize a test block with input range [-255, 255].
549 for (int j = 0; j < kNumCoeffs; ++j) {
550 if (bit_depth_ == VPX_BITS_8) {
551 src[j] = rnd.Rand8();
552 dst[j] = rnd.Rand8();
553 in[j] = src[j] - dst[j];
554 #if CONFIG_VP9_HIGHBITDEPTH
556 src16[j] = rnd.Rand16() & mask_;
557 dst16[j] = rnd.Rand16() & mask_;
558 in[j] = src16[j] - dst16[j];
559 #endif // CONFIG_VP9_HIGHBITDEPTH
563 reference_16x16_dct_2d(in, out_r);
564 for (int j = 0; j < kNumCoeffs; ++j)
565 coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
567 if (bit_depth_ == VPX_BITS_8) {
568 ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
569 #if CONFIG_VP9_HIGHBITDEPTH
571 ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
573 #endif // CONFIG_VP9_HIGHBITDEPTH
576 for (int j = 0; j < kNumCoeffs; ++j) {
577 #if CONFIG_VP9_HIGHBITDEPTH
578 const uint32_t diff =
579 bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
581 const uint32_t diff = dst[j] - src[j];
582 #endif // CONFIG_VP9_HIGHBITDEPTH
583 const uint32_t error = diff * diff;
585 << "Error: 16x16 IDCT has error " << error
586 << " at index " << j;
// Compares an optimized inverse transform against a reference inverse on
// sparse random coefficients bounded by |thresh| (scan-order placement).
591 void CompareInvReference(IdctFunc ref_txfm, int thresh) {
592 ACMRandom rnd(ACMRandom::DeterministicSeed());
593 const int count_test_block = 10000;
595 const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan;
596 DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
597 DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
598 DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
599 #if CONFIG_VP9_HIGHBITDEPTH
600 DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
601 DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
602 #endif // CONFIG_VP9_HIGHBITDEPTH
604 for (int i = 0; i < count_test_block; ++i) {
605 for (int j = 0; j < kNumCoeffs; ++j) {
607 // Random values less than the threshold, either positive or negative
608 coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
612 if (bit_depth_ == VPX_BITS_8) {
615 #if CONFIG_VP9_HIGHBITDEPTH
619 #endif // CONFIG_VP9_HIGHBITDEPTH
622 if (bit_depth_ == VPX_BITS_8) {
623 ref_txfm(coeff, ref, pitch_);
624 ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
626 #if CONFIG_VP9_HIGHBITDEPTH
627 ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
628 ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
630 #endif // CONFIG_VP9_HIGHBITDEPTH
633 for (int j = 0; j < kNumCoeffs; ++j) {
634 #if CONFIG_VP9_HIGHBITDEPTH
635 const uint32_t diff =
636 bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
638 const uint32_t diff = dst[j] - ref[j];
639 #endif // CONFIG_VP9_HIGHBITDEPTH
640 const uint32_t error = diff * diff;
642 << "Error: 16x16 IDCT Comparison has error " << error
643 << " at index " << j;
// Per-fixture state, set in the subclasses' SetUp().
650 vpx_bit_depth_t bit_depth_;
652 FhtFunc fwd_txfm_ref;
653 IhtFunc inv_txfm_ref;
// Fixture for the plain 16x16 DCT tests.  NOTE(review): the class header
// line (presumably "class Trans16x16DCT") is missing from this chunk.
// SetUp() unpacks the Dct16x16Param tuple and selects the matching
// bit-depth reference inverse transform.
657 : public Trans16x16TestBase,
658 public ::testing::TestWithParam<Dct16x16Param> {
660 virtual ~Trans16x16DCT() {}
662 virtual void SetUp() {
663 fwd_txfm_ = GET_PARAM(0);
664 inv_txfm_ = GET_PARAM(1);
665 tx_type_ = GET_PARAM(2);
666 bit_depth_ = GET_PARAM(3);
668 fwd_txfm_ref = fdct16x16_ref;
669 inv_txfm_ref = idct16x16_ref;
670 mask_ = (1 << bit_depth_) - 1;
671 #if CONFIG_VP9_HIGHBITDEPTH
// Pick the reference inverse transform that matches bit_depth_.
672 switch (bit_depth_) {
674 inv_txfm_ref = idct16x16_10_ref;
677 inv_txfm_ref = idct16x16_12_ref;
680 inv_txfm_ref = idct16x16_ref;
684 inv_txfm_ref = idct16x16_ref;
687 virtual void TearDown() { libvpx_test::ClearSystemState(); }
// Forward/inverse hooks dispatch to the function pointers under test.
690 void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
691 fwd_txfm_(in, out, stride);
693 void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
694 inv_txfm_(out, dst, stride);
// Parameterized test entry points for the DCT fixture; each forwards to the
// corresponding Run*Check() helper in Trans16x16TestBase.
701 TEST_P(Trans16x16DCT, AccuracyCheck) {
705 TEST_P(Trans16x16DCT, CoeffCheck) {
709 TEST_P(Trans16x16DCT, MemCheck) {
713 TEST_P(Trans16x16DCT, QuantCheck) {
714 // Use maximally allowed quantization step sizes for DC and AC
715 // coefficients respectively.
716 RunQuantCheck(1336, 1828);
719 TEST_P(Trans16x16DCT, InvAccuracyCheck) {
720 RunInvAccuracyCheck();
// Fixture for the 16x16 hybrid-transform (FHT/IHT) tests.  NOTE(review):
// the class header line (presumably "class Trans16x16HT") is missing from
// this chunk.  Unlike the DCT fixture, the hooks pass tx_type_ through to
// the transforms.
724 : public Trans16x16TestBase,
725 public ::testing::TestWithParam<Ht16x16Param> {
727 virtual ~Trans16x16HT() {}
729 virtual void SetUp() {
730 fwd_txfm_ = GET_PARAM(0);
731 inv_txfm_ = GET_PARAM(1);
732 tx_type_ = GET_PARAM(2);
733 bit_depth_ = GET_PARAM(3);
735 fwd_txfm_ref = fht16x16_ref;
736 inv_txfm_ref = iht16x16_ref;
737 mask_ = (1 << bit_depth_) - 1;
738 #if CONFIG_VP9_HIGHBITDEPTH
// Pick the reference inverse transform that matches bit_depth_.
739 switch (bit_depth_) {
741 inv_txfm_ref = iht16x16_10;
744 inv_txfm_ref = iht16x16_12;
747 inv_txfm_ref = iht16x16_ref;
751 inv_txfm_ref = iht16x16_ref;
754 virtual void TearDown() { libvpx_test::ClearSystemState(); }
757 void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
758 fwd_txfm_(in, out, stride, tx_type_);
760 void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
761 inv_txfm_(out, dst, stride, tx_type_);
// Parameterized test entry points for the hybrid-transform fixture.
768 TEST_P(Trans16x16HT, AccuracyCheck) {
772 TEST_P(Trans16x16HT, CoeffCheck) {
776 TEST_P(Trans16x16HT, MemCheck) {
780 TEST_P(Trans16x16HT, QuantCheck) {
781 // The encoder skips any non-DC intra prediction modes,
782 // when the quantization step size goes beyond 988.
783 RunQuantCheck(429, 729);
// Fixture comparing two inverse-transform implementations against each
// other (no forward transform: RunFwdTxfm is a deliberate no-op).  The
// third tuple element is the coefficient threshold, not a tx_type.
786 class InvTrans16x16DCT
787 : public Trans16x16TestBase,
788 public ::testing::TestWithParam<Idct16x16Param> {
790 virtual ~InvTrans16x16DCT() {}
792 virtual void SetUp() {
793 ref_txfm_ = GET_PARAM(0);
794 inv_txfm_ = GET_PARAM(1);
795 thresh_ = GET_PARAM(2);
796 bit_depth_ = GET_PARAM(3);
798 mask_ = (1 << bit_depth_) - 1;
800 virtual void TearDown() { libvpx_test::ClearSystemState(); }
803 void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {}
804 void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
805 inv_txfm_(out, dst, stride);
// Entry point: delegates to Trans16x16TestBase::CompareInvReference().
813 TEST_P(InvTrans16x16DCT, CompareReference) {
814 CompareInvReference(ref_txfm_, thresh_);
// Instantiation tables.  Each make_tuple is (forward txfm, inverse txfm,
// tx_type or threshold, bit depth); the #if blocks select C, SSE2, NEON and
// MSA variants depending on build configuration.
817 using std::tr1::make_tuple;
819 #if CONFIG_VP9_HIGHBITDEPTH
820 INSTANTIATE_TEST_CASE_P(
823 make_tuple(&vp9_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
824 make_tuple(&vp9_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
825 make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0, VPX_BITS_8)));
827 INSTANTIATE_TEST_CASE_P(
830 make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0, VPX_BITS_8)));
831 #endif // CONFIG_VP9_HIGHBITDEPTH
833 #if CONFIG_VP9_HIGHBITDEPTH
834 INSTANTIATE_TEST_CASE_P(
// HT instantiations cover all four tx_types per bit depth.
837 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
838 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
839 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
840 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
841 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
842 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
843 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
844 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
845 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
846 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
847 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
848 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
850 INSTANTIATE_TEST_CASE_P(
853 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
854 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
855 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
856 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
857 #endif // CONFIG_VP9_HIGHBITDEPTH
859 #if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
860 INSTANTIATE_TEST_CASE_P(
863 make_tuple(&vp9_fdct16x16_c,
864 &vp9_idct16x16_256_add_neon, 0, VPX_BITS_8)));
867 #if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
868 INSTANTIATE_TEST_CASE_P(
871 make_tuple(&vp9_fdct16x16_sse2,
872 &vp9_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
873 INSTANTIATE_TEST_CASE_P(
876 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0,
878 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1,
880 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2,
882 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3,
884 #endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
886 #if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
887 INSTANTIATE_TEST_CASE_P(
890 make_tuple(&vp9_highbd_fdct16x16_sse2,
891 &idct16x16_10, 0, VPX_BITS_10),
892 make_tuple(&vp9_highbd_fdct16x16_c,
893 &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
894 make_tuple(&vp9_highbd_fdct16x16_sse2,
895 &idct16x16_12, 0, VPX_BITS_12),
896 make_tuple(&vp9_highbd_fdct16x16_c,
897 &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
898 make_tuple(&vp9_fdct16x16_sse2,
899 &vp9_idct16x16_256_add_c, 0, VPX_BITS_8)));
900 INSTANTIATE_TEST_CASE_P(
903 make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_10, 0, VPX_BITS_10),
904 make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_10, 1, VPX_BITS_10),
905 make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_10, 2, VPX_BITS_10),
906 make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_10, 3, VPX_BITS_10),
907 make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_12, 0, VPX_BITS_12),
908 make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_12, 1, VPX_BITS_12),
909 make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_12, 2, VPX_BITS_12),
910 make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_12, 3, VPX_BITS_12),
911 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
912 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
913 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
914 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 3,
916 // Optimizations take effect at a threshold of 3155, so we use a value close to
917 // that to test both branches.
918 INSTANTIATE_TEST_CASE_P(
919 SSE2, InvTrans16x16DCT,
921 make_tuple(&idct16x16_10_add_10_c,
922 &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
923 make_tuple(&idct16x16_10,
924 &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
925 make_tuple(&idct16x16_10_add_12_c,
926 &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
927 make_tuple(&idct16x16_12,
928 &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
929 #endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
931 #if 0 // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
932 // TODO(parag): enable when function hooks are added
933 INSTANTIATE_TEST_CASE_P(
936 make_tuple(&vp9_fdct16x16_c,
937 &vp9_idct16x16_256_add_msa, 0, VPX_BITS_8)));
938 INSTANTIATE_TEST_CASE_P(
941 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8),
942 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8),
943 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8),
944 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_msa, 3, VPX_BITS_8)));
945 #endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE