2 * Copyright (c) 2012 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
15 #include "third_party/googletest/src/include/gtest/gtest.h"
16 #include "test/acm_random.h"
17 #include "test/clear_system_state.h"
18 #include "test/register_state_check.h"
19 #include "test/util.h"
21 #include "./vp9_rtcd.h"
22 #include "vp9/common/vp9_entropy.h"
23 #include "vpx/vpx_codec.h"
24 #include "vpx/vpx_integer.h"
26 using libvpx_test::ACMRandom;
// Rounds to the nearest integer with halves rounded away from zero,
// matching C99 round() semantics (provided here because MSVC historically
// lacked round()).  The chunk as given had lost the `if (x < 0)` / `else`
// around the two returns, which made the second return unreachable and
// rounded positive values via ceil(x - 0.5) (e.g. round(2.5) -> 2 instead
// of 3).  The single-expression form below is equivalent to
//   x < 0 ? ceil(x - 0.5) : floor(x + 0.5)
// because the cast to int truncates toward zero after the +/-0.5 shift.
static int round(double x) {
  return static_cast<int>(x < 0 ? x - 0.5 : x + 0.5);
}
// Number of coefficients in one 16x16 transform block (16 * 16).
const int kNumCoeffs = 256;
// Pi at the precision used by the floating-point reference transforms below.
const double PI = 3.1415926535898;
// Direct (O(16^4)) floating-point evaluation of the 16x16 2-D inverse DCT
// basis, used as a slow reference.  NOTE(review): this chunk is truncated --
// the declarations of `x`/the accumulator, any per-term scaling, the store
// into `output`, and the closing braces are not visible here; confirm against
// the full file.
void reference2_16x16_idct_2d(double *input, double *output) {
  for (int l = 0; l < 16; ++l) {
    for (int k = 0; k < 16; ++k) {
      for (int i = 0; i < 16; ++i) {
        for (int j = 0; j < 16; ++j) {
          // Separable cosine basis term for output pixel (k, l), scaled by
          // coefficient input[i * 16 + j] and normalized by 256 (= 16 * 16).
          x = cos(PI * j * (l + 0.5) / 16.0) *
              cos(PI * i * (k + 0.5) / 16.0) *
              input[i * 16 + j] / 256;
// DCT twiddle factors: Ck == cos(k * PI / 32) for k = 1..15
// (e.g. C8 == cos(PI / 4) == 1/sqrt(2), C4 == cos(PI / 8)).
const double C1 = 0.995184726672197;
const double C2 = 0.98078528040323;
const double C3 = 0.956940335732209;
const double C4 = 0.923879532511287;
const double C5 = 0.881921264348355;
const double C6 = 0.831469612302545;
const double C7 = 0.773010453362737;
const double C8 = 0.707106781186548;
const double C9 = 0.634393284163646;
const double C10 = 0.555570233019602;
const double C11 = 0.471396736825998;
const double C12 = 0.38268343236509;
const double C13 = 0.290284677254462;
const double C14 = 0.195090322016128;
const double C15 = 0.098017140329561;
// Floating-point 16-point 1-D DCT implemented as a butterfly network, used
// as the reference for the fixed-point implementations under test.
// NOTE(review): this chunk is truncated -- the declarations of `step` and
// `temp1`/`temp2`, several stage-separator comments, and the closing brace
// are not visible here; confirm against the full file.
void butterfly_16x16_dct_1d(double input[16], double output[16]) {
  double intermediate[16];
  // Stage 1: symmetric sums (step[0..7]) and antisymmetric differences
  // (step[8..15]) of mirrored input pairs.
  step[ 0] = input[0] + input[15];
  step[ 1] = input[1] + input[14];
  step[ 2] = input[2] + input[13];
  step[ 3] = input[3] + input[12];
  step[ 4] = input[4] + input[11];
  step[ 5] = input[5] + input[10];
  step[ 6] = input[6] + input[ 9];
  step[ 7] = input[7] + input[ 8];
  step[ 8] = input[7] - input[ 8];
  step[ 9] = input[6] - input[ 9];
  step[10] = input[5] - input[10];
  step[11] = input[4] - input[11];
  step[12] = input[3] - input[12];
  step[13] = input[2] - input[13];
  step[14] = input[1] - input[14];
  step[15] = input[0] - input[15];
  // Stage 2: 8-point sum/difference split of the even half, and rotations
  // of the odd half by the Ck = cos(k*PI/32) twiddles declared above.
  output[0] = step[0] + step[7];
  output[1] = step[1] + step[6];
  output[2] = step[2] + step[5];
  output[3] = step[3] + step[4];
  output[4] = step[3] - step[4];
  output[5] = step[2] - step[5];
  output[6] = step[1] - step[6];
  output[7] = step[0] - step[7];
  temp1 = step[ 8] * C7;
  temp2 = step[15] * C9;
  output[ 8] = temp1 + temp2;
  temp1 = step[ 9] * C11;
  temp2 = step[14] * C5;
  output[ 9] = temp1 - temp2;
  temp1 = step[10] * C3;
  temp2 = step[13] * C13;
  output[10] = temp1 + temp2;
  temp1 = step[11] * C15;
  temp2 = step[12] * C1;
  output[11] = temp1 - temp2;
  temp1 = step[11] * C1;
  temp2 = step[12] * C15;
  output[12] = temp2 + temp1;
  temp1 = step[10] * C13;
  temp2 = step[13] * C3;
  output[13] = temp2 - temp1;
  temp1 = step[ 9] * C5;
  temp2 = step[14] * C11;
  output[14] = temp2 + temp1;
  temp1 = step[ 8] * C9;
  temp2 = step[15] * C7;
  output[15] = temp2 - temp1;
  // Stage 3: 4-point sum/difference of the even quarter, further rotations
  // of output[4..7], and sum/difference recombination of the odd half.
  step[ 0] = output[0] + output[3];
  step[ 1] = output[1] + output[2];
  step[ 2] = output[1] - output[2];
  step[ 3] = output[0] - output[3];
  temp1 = output[4] * C14;
  temp2 = output[7] * C2;
  step[ 4] = temp1 + temp2;
  temp1 = output[5] * C10;
  temp2 = output[6] * C6;
  step[ 5] = temp1 + temp2;
  temp1 = output[5] * C6;
  temp2 = output[6] * C10;
  step[ 6] = temp2 - temp1;
  temp1 = output[4] * C2;
  temp2 = output[7] * C14;
  step[ 7] = temp2 - temp1;
  step[ 8] = output[ 8] + output[11];
  step[ 9] = output[ 9] + output[10];
  step[10] = output[ 9] - output[10];
  step[11] = output[ 8] - output[11];
  step[12] = output[12] + output[15];
  step[13] = output[13] + output[14];
  step[14] = output[13] - output[14];
  step[15] = output[12] - output[15];
  // Final stage: produce the 16 DCT outputs in bit-reversed-style order
  // (DC in output[0], highest frequency in output[15]); C8 = 1/sqrt(2)
  // supplies the normalization of the rotated terms.
  output[ 0] = (step[ 0] + step[ 1]);
  output[ 8] = (step[ 0] - step[ 1]);
  temp1 = step[2] * C12;
  temp2 = step[3] * C4;
  temp1 = temp1 + temp2;
  output[ 4] = 2*(temp1 * C8);
  temp1 = step[2] * C4;
  temp2 = step[3] * C12;
  temp1 = temp2 - temp1;
  output[12] = 2 * (temp1 * C8);
  output[ 2] = 2 * ((step[4] + step[ 5]) * C8);
  output[14] = 2 * ((step[7] - step[ 6]) * C8);
  temp1 = step[4] - step[5];
  temp2 = step[6] + step[7];
  output[ 6] = (temp1 + temp2);
  output[10] = (temp1 - temp2);
  intermediate[8] = step[8] + step[14];
  intermediate[9] = step[9] + step[15];
  temp1 = intermediate[8] * C12;
  temp2 = intermediate[9] * C4;
  temp1 = temp1 - temp2;
  output[3] = 2 * (temp1 * C8);
  temp1 = intermediate[8] * C4;
  temp2 = intermediate[9] * C12;
  temp1 = temp2 + temp1;
  output[13] = 2 * (temp1 * C8);
  output[ 9] = 2 * ((step[10] + step[11]) * C8);
  intermediate[11] = step[10] - step[11];
  intermediate[12] = step[12] + step[13];
  intermediate[13] = step[12] - step[13];
  intermediate[14] = step[ 8] - step[14];
  intermediate[15] = step[ 9] - step[15];
  output[15] = (intermediate[11] + intermediate[12]);
  output[ 1] = -(intermediate[11] - intermediate[12]);
  output[ 7] = 2 * (intermediate[13] * C8);
  temp1 = intermediate[14] * C12;
  temp2 = intermediate[15] * C4;
  temp1 = temp1 - temp2;
  output[11] = -2 * (temp1 * C8);
  temp1 = intermediate[14] * C4;
  temp2 = intermediate[15] * C12;
  temp1 = temp2 + temp1;
  output[ 5] = 2 * (temp1 * C8);
// Reference 2-D 16x16 forward DCT: applies the 1-D butterfly DCT to each
// column, then to each row of the intermediate result.  NOTE(review): the
// closing braces of both loops and of the function are not visible in this
// chunk.
void reference_16x16_dct_2d(int16_t input[256], double output[256]) {
  // First transform columns
  for (int i = 0; i < 16; ++i) {
    double temp_in[16], temp_out[16];
    for (int j = 0; j < 16; ++j)
      temp_in[j] = input[j * 16 + i];  // gather column i
    butterfly_16x16_dct_1d(temp_in, temp_out);
    for (int j = 0; j < 16; ++j)
      output[j * 16 + i] = temp_out[j];  // scatter back to column i
  // Then transform rows
  for (int i = 0; i < 16; ++i) {
    double temp_in[16], temp_out[16];
    for (int j = 0; j < 16; ++j)
      temp_in[j] = output[j + i * 16];  // gather row i of the column pass
    butterfly_16x16_dct_1d(temp_in, temp_out);
    // Scale by some magic number
    for (int j = 0; j < 16; ++j)
      output[j + i * 16] = temp_out[j]/2;
// Function-pointer types for the transforms under test.  Fdct/Idct take
// (input, output, stride); Fht/Iht additionally take a tx_type selecting the
// hybrid-transform variant.  NOTE(review): the continuation lines of the
// FhtFunc/IhtFunc signatures and of the Idct16x16Param typedef are not
// visible in this chunk.
typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
// Test parameter tuples: (forward fn, inverse fn, tx_type, bit depth) for
// the DCT/HT cases, and (reference idct, optimized idct, threshold, bit
// depth) for the inverse-only comparison case.
typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct16x16Param;
typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht16x16Param;
typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t>
// Thin adapters giving the C reference transforms the FhtFunc/IhtFunc shape
// (the DCT variants ignore the tx_type parameter).  NOTE(review): the
// `int tx_type) {` continuation lines and closing braces are not visible in
// this chunk.
void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
  vp9_fdct16x16_c(in, out, stride);
void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
  vp9_idct16x16_256_add_c(in, dest, stride);
void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
  vp9_fht16x16_c(in, out, stride, tx_type);
void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
  vp9_iht16x16_256_add_c(in, dest, stride, tx_type);
#if CONFIG_VP9_HIGHBITDEPTH
// High-bit-depth adapters: each wrapper pins the bit-depth argument (10 or
// 12) so the function matches the IdctFunc/IhtFunc pointer types used by the
// parameterized tests.  NOTE(review): closing braces, some signature
// continuation lines, and an inner `#if HAVE_SSE2` guard around the *_sse2
// wrappers are not visible in this chunk.
void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_256_add_c(in, out, stride, 10);
void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_256_add_c(in, out, stride, 12);
// IhtFunc-shaped variants (the trailing tx_type parameter is ignored).
void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
  idct16x16_10(in, out, stride);
void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
  idct16x16_12(in, out, stride);
void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
  vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
  vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
// "10_add" = the 10-coefficient (eob <= 10) fast path of the inverse DCT.
void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_10_add_c(in, out, stride, 10);
void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_10_add_c(in, out, stride, 12);
// SSE2 counterparts of the wrappers above.
void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
#endif  // CONFIG_VP9_HIGHBITDEPTH
// Common harness for the 16x16 transform tests.  Derived fixtures supply the
// concrete forward/inverse transforms via the two pure-virtual hooks below;
// the Run* check methods that follow drive them.  NOTE(review): the
// `public:`/`protected:` access labels are not visible in this chunk.
class Trans16x16TestBase {
  virtual ~Trans16x16TestBase() {}
  // Hook: forward transform under test (16x16 block, given row stride).
  virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
  // Hook: inverse transform under test; adds the residual into dst.
  virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
  // Round-trip accuracy: forward then inverse transform of a random residual
  // must land within 1 of the source, both per-pixel and on average per
  // block (thresholds scale with bit depth).  NOTE(review): several
  // `#else`/`#endif` lines, closing braces, and `max_error = error;` are not
  // visible in this chunk.
  void RunAccuracyCheck() {
    ACMRandom rnd(ACMRandom::DeterministicSeed());
    uint32_t max_error = 0;
    int64_t total_error = 0;
    const int count_test_block = 10000;
    for (int i = 0; i < count_test_block; ++i) {
      DECLARE_ALIGNED_ARRAY(16, int16_t, test_input_block, kNumCoeffs);
      DECLARE_ALIGNED_ARRAY(16, tran_low_t, test_temp_block, kNumCoeffs);
      DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
      DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
#if CONFIG_VP9_HIGHBITDEPTH
      DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs);
      DECLARE_ALIGNED_ARRAY(16, uint16_t, src16, kNumCoeffs);
      // Initialize a test block with input range [-mask_, mask_].
      for (int j = 0; j < kNumCoeffs; ++j) {
        if (bit_depth_ == VPX_BITS_8) {
          src[j] = rnd.Rand8();
          dst[j] = rnd.Rand8();
          test_input_block[j] = src[j] - dst[j];
#if CONFIG_VP9_HIGHBITDEPTH
          src16[j] = rnd.Rand16() & mask_;
          dst16[j] = rnd.Rand16() & mask_;
          test_input_block[j] = src16[j] - dst16[j];
      ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
                                          test_temp_block, pitch_));
      if (bit_depth_ == VPX_BITS_8) {
        ASM_REGISTER_STATE_CHECK(
            RunInvTxfm(test_temp_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
        ASM_REGISTER_STATE_CHECK(
            RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
      for (int j = 0; j < kNumCoeffs; ++j) {
#if CONFIG_VP9_HIGHBITDEPTH
        const uint32_t diff =
            bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
        const uint32_t diff = dst[j] - src[j];
        const uint32_t error = diff * diff;
        if (max_error < error)
        total_error += error;
    // Thresholds widen by 2 bits of error per extra bit of depth.
    EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
        << "Error: 16x16 FHT/IHT has an individual round trip error > 1";
    EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
        << "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
  // Bit-exactness: the optimized forward transform must produce exactly the
  // same coefficients as the C reference (fwd_txfm_ref) on random input.
  // NOTE(review): closing braces are not visible in this chunk.
  void RunCoeffCheck() {
    ACMRandom rnd(ACMRandom::DeterministicSeed());
    const int count_test_block = 1000;
    DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_ref_block, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_block, kNumCoeffs);
    for (int i = 0; i < count_test_block; ++i) {
      // Initialize a test block with input range [-mask_, mask_].
      for (int j = 0; j < kNumCoeffs; ++j)
        input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
      fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
      ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
      // The minimum quant value is 4.
      for (int j = 0; j < kNumCoeffs; ++j)
        EXPECT_EQ(output_block[j], output_ref_block[j]);
    // NOTE(review): the method header for this check (presumably
    // `void RunMemCheck() {` from the surrounding file) is not visible in
    // this chunk, nor are the `if (i == 0)` / `else if (i == 1)` guards that
    // appear to select the all-max / all-min extreme blocks, nor the closing
    // braces.  It feeds extreme-valued input through both the reference and
    // optimized forward transforms and requires exact agreement plus a bound
    // on coefficient magnitude.
    ACMRandom rnd(ACMRandom::DeterministicSeed());
    const int count_test_block = 1000;
    DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_ref_block, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_block, kNumCoeffs);
    for (int i = 0; i < count_test_block; ++i) {
      // Initialize a test block with input range [-mask_, mask_].
      for (int j = 0; j < kNumCoeffs; ++j) {
        input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
        input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
      for (int j = 0; j < kNumCoeffs; ++j)
        input_extreme_block[j] = mask_;
      for (int j = 0; j < kNumCoeffs; ++j)
        input_extreme_block[j] = -mask_;
      fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
      ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
                                          output_block, pitch_));
      // The minimum quant value is 4.
      for (int j = 0; j < kNumCoeffs; ++j) {
        EXPECT_EQ(output_block[j], output_ref_block[j]);
        EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
            << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
  // Quantize/dequantize round trip: coefficients are snapped to multiples of
  // the given DC/AC step sizes, then the reference and optimized inverse
  // transforms must reconstruct identical pixels.  NOTE(review): several
  // `else`/`#else`/`#endif` lines, `if (i == 0)`/`else if (i == 1)` guards,
  // a signature continuation, and closing braces are not visible in this
  // chunk.
  void RunQuantCheck(int dc_thred, int ac_thred) {
    ACMRandom rnd(ACMRandom::DeterministicSeed());
    const int count_test_block = 100000;
    DECLARE_ALIGNED_ARRAY(16, int16_t, input_block, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, int16_t, input_extreme_block, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, tran_low_t, output_ref_block, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, uint8_t, ref, kNumCoeffs);
#if CONFIG_VP9_HIGHBITDEPTH
    DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, uint16_t, ref16, kNumCoeffs);
    for (int i = 0; i < count_test_block; ++i) {
      // Initialize a test block with input range [-mask_, mask_].
      for (int j = 0; j < kNumCoeffs; ++j) {
        if (bit_depth_ == VPX_BITS_8)
          input_block[j] = rnd.Rand8() - rnd.Rand8();
          input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
        input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
      for (int j = 0; j < kNumCoeffs; ++j)
        input_extreme_block[j] = mask_;
      for (int j = 0; j < kNumCoeffs; ++j)
        input_extreme_block[j] = -mask_;
      fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
      // clear reconstructed pixel buffers
      memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
      memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
#if CONFIG_VP9_HIGHBITDEPTH
      memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
      memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
      // quantization with maximum allowed step sizes
      output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred;
      for (int j = 1; j < kNumCoeffs; ++j)
        output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred;
      if (bit_depth_ == VPX_BITS_8) {
        inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
        ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
        inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
        ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
                                            CONVERT_TO_BYTEPTR(dst16), pitch_));
      if (bit_depth_ == VPX_BITS_8) {
        for (int j = 0; j < kNumCoeffs; ++j)
          EXPECT_EQ(ref[j], dst[j]);
#if CONFIG_VP9_HIGHBITDEPTH
        for (int j = 0; j < kNumCoeffs; ++j)
          EXPECT_EQ(ref16[j], dst16[j]);
  // Inverse accuracy: coefficients from the floating-point reference forward
  // DCT are fed to the inverse transform under test; reconstruction error is
  // checked per pixel.  NOTE(review): `#else` lines, the `EXPECT_GE(...)`
  // head of the final check, an argument continuation, and closing braces
  // are not visible in this chunk.
  void RunInvAccuracyCheck() {
    ACMRandom rnd(ACMRandom::DeterministicSeed());
    const int count_test_block = 1000;
    DECLARE_ALIGNED_ARRAY(16, int16_t, in, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, uint8_t, src, kNumCoeffs);
#if CONFIG_VP9_HIGHBITDEPTH
    DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, uint16_t, src16, kNumCoeffs);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    for (int i = 0; i < count_test_block; ++i) {
      double out_r[kNumCoeffs];
      // Initialize a test block with input range [-255, 255].
      for (int j = 0; j < kNumCoeffs; ++j) {
        if (bit_depth_ == VPX_BITS_8) {
          src[j] = rnd.Rand8();
          dst[j] = rnd.Rand8();
          in[j] = src[j] - dst[j];
#if CONFIG_VP9_HIGHBITDEPTH
          src16[j] = rnd.Rand16() & mask_;
          dst16[j] = rnd.Rand16() & mask_;
          in[j] = src16[j] - dst16[j];
#endif  // CONFIG_VP9_HIGHBITDEPTH
      reference_16x16_dct_2d(in, out_r);
      for (int j = 0; j < kNumCoeffs; ++j)
        coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
      if (bit_depth_ == VPX_BITS_8) {
        ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
#if CONFIG_VP9_HIGHBITDEPTH
        ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
#endif  // CONFIG_VP9_HIGHBITDEPTH
      for (int j = 0; j < kNumCoeffs; ++j) {
#if CONFIG_VP9_HIGHBITDEPTH
        const uint32_t diff =
            bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
        const uint32_t diff = dst[j] - src[j];
#endif  // CONFIG_VP9_HIGHBITDEPTH
        const uint32_t error = diff * diff;
            << "Error: 16x16 IDCT has error " << error
            << " at index " << j;
  // Compares an optimized inverse transform against a reference inverse on
  // coefficient vectors bounded by `thresh` (placed in scan order, sign
  // alternating by iteration), requiring identical reconstructions.
  // NOTE(review): `else`/`#else` lines, the body of the first bit-depth
  // branch (which appears to zero the destination buffers), an argument
  // continuation, the `EXPECT_EQ(0u, error)` head of the final check, and
  // closing braces are not visible in this chunk.
  void CompareInvReference(IdctFunc ref_txfm, int thresh) {
    ACMRandom rnd(ACMRandom::DeterministicSeed());
    const int count_test_block = 10000;
    const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan;
    DECLARE_ALIGNED_ARRAY(16, tran_low_t, coeff, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, uint8_t, dst, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, uint8_t, ref, kNumCoeffs);
#if CONFIG_VP9_HIGHBITDEPTH
    DECLARE_ALIGNED_ARRAY(16, uint16_t, dst16, kNumCoeffs);
    DECLARE_ALIGNED_ARRAY(16, uint16_t, ref16, kNumCoeffs);
#endif  // CONFIG_VP9_HIGHBITDEPTH
    for (int i = 0; i < count_test_block; ++i) {
      for (int j = 0; j < kNumCoeffs; ++j) {
        // Random values less than the threshold, either positive or negative
        coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
      if (bit_depth_ == VPX_BITS_8) {
#if CONFIG_VP9_HIGHBITDEPTH
#endif  // CONFIG_VP9_HIGHBITDEPTH
      if (bit_depth_ == VPX_BITS_8) {
        ref_txfm(coeff, ref, pitch_);
        ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
#if CONFIG_VP9_HIGHBITDEPTH
        ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
        ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
#endif  // CONFIG_VP9_HIGHBITDEPTH
      for (int j = 0; j < kNumCoeffs; ++j) {
#if CONFIG_VP9_HIGHBITDEPTH
        const uint32_t diff =
            bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
        const uint32_t diff = dst[j] - ref[j];
#endif  // CONFIG_VP9_HIGHBITDEPTH
        const uint32_t error = diff * diff;
            << "Error: 16x16 IDCT Comparison has error " << error
            << " at index " << j;
  // Fixture state set by each derived class's SetUp().  NOTE(review): the
  // declarations of `pitch_`, `tx_type_`, and `mask_` used throughout the
  // methods above are not visible in this chunk.
  vpx_bit_depth_t bit_depth_;  // bit depth of the current parameterization
  FhtFunc fwd_txfm_ref;        // C reference forward transform
  IhtFunc inv_txfm_ref;        // C reference inverse transform
// Fixture for the plain 16x16 DCT, parameterized by
// (FdctFunc, IdctFunc, tx_type, bit depth).  NOTE(review): the
// `class Trans16x16DCT` line, access labels, `case` labels inside the
// switch, `#else`, and closing braces are not visible in this chunk.
    : public Trans16x16TestBase,
      public ::testing::TestWithParam<Dct16x16Param> {
  virtual ~Trans16x16DCT() {}
  virtual void SetUp() {
    fwd_txfm_ = GET_PARAM(0);
    inv_txfm_ = GET_PARAM(1);
    tx_type_ = GET_PARAM(2);
    bit_depth_ = GET_PARAM(3);
    fwd_txfm_ref = fdct16x16_ref;
    inv_txfm_ref = idct16x16_ref;
    mask_ = (1 << bit_depth_) - 1;
#if CONFIG_VP9_HIGHBITDEPTH
    // Select the reference inverse matching the parameterized bit depth.
    switch (bit_depth_) {
        inv_txfm_ref = idct16x16_10_ref;
        inv_txfm_ref = idct16x16_12_ref;
        inv_txfm_ref = idct16x16_ref;
    inv_txfm_ref = idct16x16_ref;
  virtual void TearDown() { libvpx_test::ClearSystemState(); }
  // Adapt the parameterized function pointers to the base-class hooks.
  void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
    fwd_txfm_(in, out, stride);
  void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
    inv_txfm_(out, dst, stride);
// Parameterized test entry points: each simply invokes the corresponding
// Run* helper from Trans16x16TestBase.  NOTE(review): most one-line bodies
// and closing braces are not visible in this chunk.
TEST_P(Trans16x16DCT, AccuracyCheck) {
TEST_P(Trans16x16DCT, CoeffCheck) {
TEST_P(Trans16x16DCT, MemCheck) {
TEST_P(Trans16x16DCT, QuantCheck) {
  // Use maximally allowed quantization step sizes for DC and AC
  // coefficients respectively.
  RunQuantCheck(1336, 1828);
TEST_P(Trans16x16DCT, InvAccuracyCheck) {
  RunInvAccuracyCheck();
// Fixture for the 16x16 hybrid transform (ADST/DCT combinations selected by
// tx_type), parameterized by (FhtFunc, IhtFunc, tx_type, bit depth).
// NOTE(review): the `class Trans16x16HT` line, access labels, `case` labels,
// `#else`, and closing braces are not visible in this chunk.
    : public Trans16x16TestBase,
      public ::testing::TestWithParam<Ht16x16Param> {
  virtual ~Trans16x16HT() {}
  virtual void SetUp() {
    fwd_txfm_ = GET_PARAM(0);
    inv_txfm_ = GET_PARAM(1);
    tx_type_ = GET_PARAM(2);
    bit_depth_ = GET_PARAM(3);
    fwd_txfm_ref = fht16x16_ref;
    inv_txfm_ref = iht16x16_ref;
    mask_ = (1 << bit_depth_) - 1;
#if CONFIG_VP9_HIGHBITDEPTH
    // Select the reference inverse matching the parameterized bit depth.
    switch (bit_depth_) {
        inv_txfm_ref = iht16x16_10;
        inv_txfm_ref = iht16x16_12;
        inv_txfm_ref = iht16x16_ref;
    inv_txfm_ref = iht16x16_ref;
  virtual void TearDown() { libvpx_test::ClearSystemState(); }
  // Adapt the parameterized function pointers (which take tx_type) to the
  // base-class hooks.
  void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
    fwd_txfm_(in, out, stride, tx_type_);
  void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
    inv_txfm_(out, dst, stride, tx_type_);
// Hybrid-transform test entry points.  NOTE(review): most one-line bodies
// and closing braces are not visible in this chunk.
TEST_P(Trans16x16HT, AccuracyCheck) {
TEST_P(Trans16x16HT, CoeffCheck) {
TEST_P(Trans16x16HT, MemCheck) {
TEST_P(Trans16x16HT, QuantCheck) {
  // The encoder skips any non-DC intra prediction modes,
  // when the quantization step size goes beyond 988.
  RunQuantCheck(429, 729);
// Fixture for inverse-only comparisons: pits an optimized inverse transform
// against a reference inverse, parameterized by
// (reference IdctFunc, IdctFunc under test, coefficient threshold, bit
// depth).  The forward hook is intentionally a no-op.  NOTE(review): access
// labels, member declarations (`ref_txfm_`, `inv_txfm_`, `thresh_`), and
// closing braces are not visible in this chunk.
class InvTrans16x16DCT
    : public Trans16x16TestBase,
      public ::testing::TestWithParam<Idct16x16Param> {
  virtual ~InvTrans16x16DCT() {}
  virtual void SetUp() {
    ref_txfm_ = GET_PARAM(0);
    inv_txfm_ = GET_PARAM(1);
    thresh_ = GET_PARAM(2);
    bit_depth_ = GET_PARAM(3);
    mask_ = (1 << bit_depth_) - 1;
  virtual void TearDown() { libvpx_test::ClearSystemState(); }
  // No forward transform in this fixture.
  void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {}
  void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
    inv_txfm_(out, dst, stride);
TEST_P(InvTrans16x16DCT, CompareReference) {
  CompareInvReference(ref_txfm_, thresh_);
using std::tr1::make_tuple;

// Test instantiations.  Each INSTANTIATE_TEST_CASE_P binds a fixture to a
// list of (fwd, inv, tx_type/thresh, bit depth) tuples, gated by build flags
// for bit depth, hardware emulation, and SIMD availability.  NOTE(review):
// the instantiation-name/fixture lines and `::testing::Values(` lines of
// most invocations (and some tuple continuations) are not visible in this
// chunk.
#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
        make_tuple(&vp9_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
        make_tuple(&vp9_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
        make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
        make_tuple(&vp9_fdct16x16_c, &vp9_idct16x16_256_add_c, 0, VPX_BITS_8)));
#endif // CONFIG_VP9_HIGHBITDEPTH
// Hybrid transform (C) instantiations: tx_type 0..3 per bit depth.
#if CONFIG_VP9_HIGHBITDEPTH
INSTANTIATE_TEST_CASE_P(
        make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
        make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
        make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
        make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
        make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
        make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
        make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
        make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
        make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
        make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
        make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
        make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
        make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
        make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
        make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
        make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
#endif // CONFIG_VP9_HIGHBITDEPTH
// SIMD-optimized instantiations (NEON / SSE2 / MSA).
#if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
        make_tuple(&vp9_fdct16x16_c,
                   &vp9_idct16x16_256_add_neon, 0, VPX_BITS_8)));
#if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
        make_tuple(&vp9_fdct16x16_sse2,
                   &vp9_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
        make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0,
        make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1,
        make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2,
        make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3,
#endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
        make_tuple(&vp9_highbd_fdct16x16_sse2,
                   &idct16x16_10, 0, VPX_BITS_10),
        make_tuple(&vp9_highbd_fdct16x16_c,
                   &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
        make_tuple(&vp9_highbd_fdct16x16_sse2,
                   &idct16x16_12, 0, VPX_BITS_12),
        make_tuple(&vp9_highbd_fdct16x16_c,
                   &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
        make_tuple(&vp9_fdct16x16_sse2,
                   &vp9_idct16x16_256_add_c, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
        make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_10, 0, VPX_BITS_10),
        make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_10, 1, VPX_BITS_10),
        make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_10, 2, VPX_BITS_10),
        make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_10, 3, VPX_BITS_10),
        make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_12, 0, VPX_BITS_12),
        make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_12, 1, VPX_BITS_12),
        make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_12, 2, VPX_BITS_12),
        make_tuple(&vp9_highbd_fht16x16_sse2, &iht16x16_12, 3, VPX_BITS_12),
        make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
        make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
        make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
        make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 3,
// Optimizations take effect at a threshold of 3155, so we use a value close to
// that to test both branches.
INSTANTIATE_TEST_CASE_P(
    SSE2, InvTrans16x16DCT,
        make_tuple(&idct16x16_10_add_10_c,
                   &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
        make_tuple(&idct16x16_10,
                   &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
        make_tuple(&idct16x16_10_add_12_c,
                   &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
        make_tuple(&idct16x16_12,
                   &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
#endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
#if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
INSTANTIATE_TEST_CASE_P(
        make_tuple(&vp9_fdct16x16_c,
                   &vp9_idct16x16_256_add_msa, 0, VPX_BITS_8)));
INSTANTIATE_TEST_CASE_P(
        make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8),
        make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8),
        make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8),
        make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_msa, 3, VPX_BITS_8)));
#endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE