/*
 *  Copyright (c) 2012 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <math.h>
#include <stdlib.h>
#include <string.h>

#include "third_party/googletest/src/include/gtest/gtest.h"

#include "./vp9_rtcd.h"
#include "./vpx_dsp_rtcd.h"
#include "test/acm_random.h"
#include "test/clear_system_state.h"
#include "test/register_state_check.h"
#include "test/util.h"
#include "vp9/common/vp9_entropy.h"
#include "vp9/common/vp9_scan.h"
#include "vpx/vpx_codec.h"
#include "vpx/vpx_integer.h"
#include "vpx_ports/mem.h"
29 using libvpx_test::ACMRandom;
// Round to nearest integer, halfway cases away from zero.
// NOTE(review): the conditional lines were missing from this truncated
// listing; restored.  Implemented with static_cast (truncation toward
// zero on an offset value) instead of ceil()/floor() so this file-local
// round() does not clash with the C library ::round declaration.
static int round(double x) {
  if (x < 0)
    return static_cast<int>(x - 0.5);  // trunc toward zero == ceil(x - 0.5)
  return static_cast<int>(x + 0.5);    // trunc toward zero == floor(x + 0.5)
}
// A 16x16 transform block holds 16 * 16 = 256 coefficients.
const int kNumCoeffs = 256;
// Pi, to the precision used by the double-precision reference transforms.
const double PI = 3.1415926535898;
44 void reference2_16x16_idct_2d(double *input, double *output) {
46 for (int l = 0; l < 16; ++l) {
47 for (int k = 0; k < 16; ++k) {
49 for (int i = 0; i < 16; ++i) {
50 for (int j = 0; j < 16; ++j) {
51 x = cos(PI * j * (l + 0.5) / 16.0) *
52 cos(PI * i * (k + 0.5) / 16.0) *
53 input[i * 16 + j] / 256;
// 1-D DCT twiddle factors for the butterfly below: Ck = cos(k * PI / 32).
const double C1 = 0.995184726672197;
const double C2 = 0.98078528040323;
const double C3 = 0.956940335732209;
const double C4 = 0.923879532511287;
const double C5 = 0.881921264348355;
const double C6 = 0.831469612302545;
const double C7 = 0.773010453362737;
const double C8 = 0.707106781186548;
const double C9 = 0.634393284163646;
const double C10 = 0.555570233019602;
const double C11 = 0.471396736825998;
const double C12 = 0.38268343236509;
const double C13 = 0.290284677254462;
const double C14 = 0.195090322016128;
const double C15 = 0.098017140329561;
83 void butterfly_16x16_dct_1d(double input[16], double output[16]) {
85 double intermediate[16];
89 step[ 0] = input[0] + input[15];
90 step[ 1] = input[1] + input[14];
91 step[ 2] = input[2] + input[13];
92 step[ 3] = input[3] + input[12];
93 step[ 4] = input[4] + input[11];
94 step[ 5] = input[5] + input[10];
95 step[ 6] = input[6] + input[ 9];
96 step[ 7] = input[7] + input[ 8];
97 step[ 8] = input[7] - input[ 8];
98 step[ 9] = input[6] - input[ 9];
99 step[10] = input[5] - input[10];
100 step[11] = input[4] - input[11];
101 step[12] = input[3] - input[12];
102 step[13] = input[2] - input[13];
103 step[14] = input[1] - input[14];
104 step[15] = input[0] - input[15];
107 output[0] = step[0] + step[7];
108 output[1] = step[1] + step[6];
109 output[2] = step[2] + step[5];
110 output[3] = step[3] + step[4];
111 output[4] = step[3] - step[4];
112 output[5] = step[2] - step[5];
113 output[6] = step[1] - step[6];
114 output[7] = step[0] - step[7];
116 temp1 = step[ 8] * C7;
117 temp2 = step[15] * C9;
118 output[ 8] = temp1 + temp2;
120 temp1 = step[ 9] * C11;
121 temp2 = step[14] * C5;
122 output[ 9] = temp1 - temp2;
124 temp1 = step[10] * C3;
125 temp2 = step[13] * C13;
126 output[10] = temp1 + temp2;
128 temp1 = step[11] * C15;
129 temp2 = step[12] * C1;
130 output[11] = temp1 - temp2;
132 temp1 = step[11] * C1;
133 temp2 = step[12] * C15;
134 output[12] = temp2 + temp1;
136 temp1 = step[10] * C13;
137 temp2 = step[13] * C3;
138 output[13] = temp2 - temp1;
140 temp1 = step[ 9] * C5;
141 temp2 = step[14] * C11;
142 output[14] = temp2 + temp1;
144 temp1 = step[ 8] * C9;
145 temp2 = step[15] * C7;
146 output[15] = temp2 - temp1;
149 step[ 0] = output[0] + output[3];
150 step[ 1] = output[1] + output[2];
151 step[ 2] = output[1] - output[2];
152 step[ 3] = output[0] - output[3];
154 temp1 = output[4] * C14;
155 temp2 = output[7] * C2;
156 step[ 4] = temp1 + temp2;
158 temp1 = output[5] * C10;
159 temp2 = output[6] * C6;
160 step[ 5] = temp1 + temp2;
162 temp1 = output[5] * C6;
163 temp2 = output[6] * C10;
164 step[ 6] = temp2 - temp1;
166 temp1 = output[4] * C2;
167 temp2 = output[7] * C14;
168 step[ 7] = temp2 - temp1;
170 step[ 8] = output[ 8] + output[11];
171 step[ 9] = output[ 9] + output[10];
172 step[10] = output[ 9] - output[10];
173 step[11] = output[ 8] - output[11];
175 step[12] = output[12] + output[15];
176 step[13] = output[13] + output[14];
177 step[14] = output[13] - output[14];
178 step[15] = output[12] - output[15];
181 output[ 0] = (step[ 0] + step[ 1]);
182 output[ 8] = (step[ 0] - step[ 1]);
184 temp1 = step[2] * C12;
185 temp2 = step[3] * C4;
186 temp1 = temp1 + temp2;
187 output[ 4] = 2*(temp1 * C8);
189 temp1 = step[2] * C4;
190 temp2 = step[3] * C12;
191 temp1 = temp2 - temp1;
192 output[12] = 2 * (temp1 * C8);
194 output[ 2] = 2 * ((step[4] + step[ 5]) * C8);
195 output[14] = 2 * ((step[7] - step[ 6]) * C8);
197 temp1 = step[4] - step[5];
198 temp2 = step[6] + step[7];
199 output[ 6] = (temp1 + temp2);
200 output[10] = (temp1 - temp2);
202 intermediate[8] = step[8] + step[14];
203 intermediate[9] = step[9] + step[15];
205 temp1 = intermediate[8] * C12;
206 temp2 = intermediate[9] * C4;
207 temp1 = temp1 - temp2;
208 output[3] = 2 * (temp1 * C8);
210 temp1 = intermediate[8] * C4;
211 temp2 = intermediate[9] * C12;
212 temp1 = temp2 + temp1;
213 output[13] = 2 * (temp1 * C8);
215 output[ 9] = 2 * ((step[10] + step[11]) * C8);
217 intermediate[11] = step[10] - step[11];
218 intermediate[12] = step[12] + step[13];
219 intermediate[13] = step[12] - step[13];
220 intermediate[14] = step[ 8] - step[14];
221 intermediate[15] = step[ 9] - step[15];
223 output[15] = (intermediate[11] + intermediate[12]);
224 output[ 1] = -(intermediate[11] - intermediate[12]);
226 output[ 7] = 2 * (intermediate[13] * C8);
228 temp1 = intermediate[14] * C12;
229 temp2 = intermediate[15] * C4;
230 temp1 = temp1 - temp2;
231 output[11] = -2 * (temp1 * C8);
233 temp1 = intermediate[14] * C4;
234 temp2 = intermediate[15] * C12;
235 temp1 = temp2 + temp1;
236 output[ 5] = 2 * (temp1 * C8);
239 void reference_16x16_dct_2d(int16_t input[256], double output[256]) {
240 // First transform columns
241 for (int i = 0; i < 16; ++i) {
242 double temp_in[16], temp_out[16];
243 for (int j = 0; j < 16; ++j)
244 temp_in[j] = input[j * 16 + i];
245 butterfly_16x16_dct_1d(temp_in, temp_out);
246 for (int j = 0; j < 16; ++j)
247 output[j * 16 + i] = temp_out[j];
249 // Then transform rows
250 for (int i = 0; i < 16; ++i) {
251 double temp_in[16], temp_out[16];
252 for (int j = 0; j < 16; ++j)
253 temp_in[j] = output[j + i * 16];
254 butterfly_16x16_dct_1d(temp_in, temp_out);
255 // Scale by some magic number
256 for (int j = 0; j < 16; ++j)
257 output[j + i * 16] = temp_out[j]/2;
261 typedef void (*FdctFunc)(const int16_t *in, tran_low_t *out, int stride);
262 typedef void (*IdctFunc)(const tran_low_t *in, uint8_t *out, int stride);
263 typedef void (*FhtFunc)(const int16_t *in, tran_low_t *out, int stride,
265 typedef void (*IhtFunc)(const tran_low_t *in, uint8_t *out, int stride,
268 typedef std::tr1::tuple<FdctFunc, IdctFunc, int, vpx_bit_depth_t> Dct16x16Param;
269 typedef std::tr1::tuple<FhtFunc, IhtFunc, int, vpx_bit_depth_t> Ht16x16Param;
270 typedef std::tr1::tuple<IdctFunc, IdctFunc, int, vpx_bit_depth_t>
273 void fdct16x16_ref(const int16_t *in, tran_low_t *out, int stride,
275 vpx_fdct16x16_c(in, out, stride);
278 void idct16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
280 vp9_idct16x16_256_add_c(in, dest, stride);
283 void fht16x16_ref(const int16_t *in, tran_low_t *out, int stride,
285 vp9_fht16x16_c(in, out, stride, tx_type);
288 void iht16x16_ref(const tran_low_t *in, uint8_t *dest, int stride,
290 vp9_iht16x16_256_add_c(in, dest, stride, tx_type);
#if CONFIG_VP9_HIGHBITDEPTH
// Bit-depth-binding wrappers: each fixes the |bd| argument of a high
// bit depth transform so it matches the IdctFunc/IhtFunc signatures used
// by the parameterized tests.  NOTE(review): closing braces and the inner
// HAVE_SSE2 guard were missing from this truncated listing; restored from
// upstream -- confirm the guard against that revision.
void idct16x16_10(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_256_add_c(in, out, stride, 10);
}

void idct16x16_12(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_256_add_c(in, out, stride, 12);
}

void idct16x16_10_ref(const tran_low_t *in, uint8_t *out, int stride,
                      int /*tx_type*/) {
  idct16x16_10(in, out, stride);
}

void idct16x16_12_ref(const tran_low_t *in, uint8_t *out, int stride,
                      int /*tx_type*/) {
  idct16x16_12(in, out, stride);
}

void iht16x16_10(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
  vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 10);
}

void iht16x16_12(const tran_low_t *in, uint8_t *out, int stride, int tx_type) {
  vp9_highbd_iht16x16_256_add_c(in, out, stride, tx_type, 12);
}

void idct16x16_10_add_10_c(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_10_add_c(in, out, stride, 10);
}

void idct16x16_10_add_12_c(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_10_add_c(in, out, stride, 12);
}

#if HAVE_SSE2
void idct16x16_256_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_256_add_sse2(in, out, stride, 10);
}

void idct16x16_256_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_256_add_sse2(in, out, stride, 12);
}

void idct16x16_10_add_10_sse2(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_10_add_sse2(in, out, stride, 10);
}

void idct16x16_10_add_12_sse2(const tran_low_t *in, uint8_t *out, int stride) {
  vp9_highbd_idct16x16_10_add_sse2(in, out, stride, 12);
}
#endif  // HAVE_SSE2
#endif  // CONFIG_VP9_HIGHBITDEPTH
// Test harness shared by the 16x16 transform fixtures below.  Subclasses
// implement RunFwdTxfm()/RunInvTxfm(); the Run*Check() helpers implement
// the actual test bodies.
// NOTE(review): this listing is truncated -- closing braces, #else branches
// and some statements are missing between the numbered lines.  Code is left
// byte-identical; only comments were added.
347 class Trans16x16TestBase {
349 virtual ~Trans16x16TestBase() {}
352 virtual void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) = 0;
354 virtual void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) = 0;
// Forward+inverse round trip on random residuals; bounds the worst-case
// per-pixel error and the mean error per block (scaled by bit depth).
356 void RunAccuracyCheck() {
357 ACMRandom rnd(ACMRandom::DeterministicSeed());
358 uint32_t max_error = 0;
359 int64_t total_error = 0;
360 const int count_test_block = 10000;
361 for (int i = 0; i < count_test_block; ++i) {
362 DECLARE_ALIGNED(16, int16_t, test_input_block[kNumCoeffs]);
363 DECLARE_ALIGNED(16, tran_low_t, test_temp_block[kNumCoeffs]);
364 DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
365 DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
366 #if CONFIG_VP9_HIGHBITDEPTH
367 DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
368 DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
371 // Initialize a test block with input range [-mask_, mask_].
372 for (int j = 0; j < kNumCoeffs; ++j) {
373 if (bit_depth_ == VPX_BITS_8) {
374 src[j] = rnd.Rand8();
375 dst[j] = rnd.Rand8();
376 test_input_block[j] = src[j] - dst[j];
377 #if CONFIG_VP9_HIGHBITDEPTH
379 src16[j] = rnd.Rand16() & mask_;
380 dst16[j] = rnd.Rand16() & mask_;
381 test_input_block[j] = src16[j] - dst16[j];
386 ASM_REGISTER_STATE_CHECK(RunFwdTxfm(test_input_block,
387 test_temp_block, pitch_));
388 if (bit_depth_ == VPX_BITS_8) {
389 ASM_REGISTER_STATE_CHECK(
390 RunInvTxfm(test_temp_block, dst, pitch_));
391 #if CONFIG_VP9_HIGHBITDEPTH
393 ASM_REGISTER_STATE_CHECK(
394 RunInvTxfm(test_temp_block, CONVERT_TO_BYTEPTR(dst16), pitch_));
398 for (int j = 0; j < kNumCoeffs; ++j) {
399 #if CONFIG_VP9_HIGHBITDEPTH
400 const uint32_t diff =
401 bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
403 const uint32_t diff = dst[j] - src[j];
405 const uint32_t error = diff * diff;
406 if (max_error < error)
408 total_error += error;
412 EXPECT_GE(1u << 2 * (bit_depth_ - 8), max_error)
413 << "Error: 16x16 FHT/IHT has an individual round trip error > 1";
415 EXPECT_GE(count_test_block << 2 * (bit_depth_ - 8), total_error)
416 << "Error: 16x16 FHT/IHT has average round trip error > 1 per block";
// Forward transform under test must match fwd_txfm_ref bit-exactly on
// random input.
419 void RunCoeffCheck() {
420 ACMRandom rnd(ACMRandom::DeterministicSeed());
421 const int count_test_block = 1000;
422 DECLARE_ALIGNED(16, int16_t, input_block[kNumCoeffs]);
423 DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
424 DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
426 for (int i = 0; i < count_test_block; ++i) {
427 // Initialize a test block with input range [-mask_, mask_].
428 for (int j = 0; j < kNumCoeffs; ++j)
429 input_block[j] = (rnd.Rand16() & mask_) - (rnd.Rand16() & mask_);
431 fwd_txfm_ref(input_block, output_ref_block, pitch_, tx_type_);
432 ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_block, output_block, pitch_));
434 // The minimum quant value is 4.
435 for (int j = 0; j < kNumCoeffs; ++j)
436 EXPECT_EQ(output_block[j], output_ref_block[j]);
// Extreme-input check on +/-mask_ blocks; also bounds coefficient
// magnitude by 4*DCT_MAX_VALUE.  NOTE(review): this method's header line
// (presumably "void RunMemCheck() {") is missing from the listing --
// confirm against upstream.
441 ACMRandom rnd(ACMRandom::DeterministicSeed());
442 const int count_test_block = 1000;
443 DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
444 DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
445 DECLARE_ALIGNED(16, tran_low_t, output_block[kNumCoeffs]);
447 for (int i = 0; i < count_test_block; ++i) {
448 // Initialize a test block with input range [-mask_, mask_].
449 for (int j = 0; j < kNumCoeffs; ++j) {
450 input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
453 for (int j = 0; j < kNumCoeffs; ++j)
454 input_extreme_block[j] = mask_;
456 for (int j = 0; j < kNumCoeffs; ++j)
457 input_extreme_block[j] = -mask_;
460 fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
461 ASM_REGISTER_STATE_CHECK(RunFwdTxfm(input_extreme_block,
462 output_block, pitch_));
464 // The minimum quant value is 4.
465 for (int j = 0; j < kNumCoeffs; ++j) {
466 EXPECT_EQ(output_block[j], output_ref_block[j]);
467 EXPECT_GE(4 * DCT_MAX_VALUE << (bit_depth_ - 8), abs(output_block[j]))
468 << "Error: 16x16 FDCT has coefficient larger than 4*DCT_MAX_VALUE";
// Quantize coefficients with the given DC/AC step sizes, then require the
// optimized inverse transform to reproduce the reference inverse exactly.
473 void RunQuantCheck(int dc_thred, int ac_thred) {
474 ACMRandom rnd(ACMRandom::DeterministicSeed());
475 const int count_test_block = 100000;
476 DECLARE_ALIGNED(16, int16_t, input_extreme_block[kNumCoeffs]);
477 DECLARE_ALIGNED(16, tran_low_t, output_ref_block[kNumCoeffs]);
479 DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
480 DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
481 #if CONFIG_VP9_HIGHBITDEPTH
482 DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
483 DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
486 for (int i = 0; i < count_test_block; ++i) {
487 // Initialize a test block with input range [-mask_, mask_].
488 for (int j = 0; j < kNumCoeffs; ++j) {
489 input_extreme_block[j] = rnd.Rand8() % 2 ? mask_ : -mask_;
492 for (int j = 0; j < kNumCoeffs; ++j)
493 input_extreme_block[j] = mask_;
495 for (int j = 0; j < kNumCoeffs; ++j)
496 input_extreme_block[j] = -mask_;
498 fwd_txfm_ref(input_extreme_block, output_ref_block, pitch_, tx_type_);
500 // clear reconstructed pixel buffers
501 memset(dst, 0, kNumCoeffs * sizeof(uint8_t));
502 memset(ref, 0, kNumCoeffs * sizeof(uint8_t));
503 #if CONFIG_VP9_HIGHBITDEPTH
504 memset(dst16, 0, kNumCoeffs * sizeof(uint16_t));
505 memset(ref16, 0, kNumCoeffs * sizeof(uint16_t));
508 // quantization with maximum allowed step sizes
509 output_ref_block[0] = (output_ref_block[0] / dc_thred) * dc_thred;
510 for (int j = 1; j < kNumCoeffs; ++j)
511 output_ref_block[j] = (output_ref_block[j] / ac_thred) * ac_thred;
512 if (bit_depth_ == VPX_BITS_8) {
513 inv_txfm_ref(output_ref_block, ref, pitch_, tx_type_);
514 ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block, dst, pitch_));
515 #if CONFIG_VP9_HIGHBITDEPTH
517 inv_txfm_ref(output_ref_block, CONVERT_TO_BYTEPTR(ref16), pitch_,
519 ASM_REGISTER_STATE_CHECK(RunInvTxfm(output_ref_block,
520 CONVERT_TO_BYTEPTR(dst16), pitch_));
523 if (bit_depth_ == VPX_BITS_8) {
524 for (int j = 0; j < kNumCoeffs; ++j)
525 EXPECT_EQ(ref[j], dst[j]);
526 #if CONFIG_VP9_HIGHBITDEPTH
528 for (int j = 0; j < kNumCoeffs; ++j)
529 EXPECT_EQ(ref16[j], dst16[j]);
// Inverse-transform accuracy against coefficients produced by the
// double-precision reference forward DCT (rounded via round()).
535 void RunInvAccuracyCheck() {
536 ACMRandom rnd(ACMRandom::DeterministicSeed());
537 const int count_test_block = 1000;
538 DECLARE_ALIGNED(16, int16_t, in[kNumCoeffs]);
539 DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
540 DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
541 DECLARE_ALIGNED(16, uint8_t, src[kNumCoeffs]);
542 #if CONFIG_VP9_HIGHBITDEPTH
543 DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
544 DECLARE_ALIGNED(16, uint16_t, src16[kNumCoeffs]);
545 #endif // CONFIG_VP9_HIGHBITDEPTH
547 for (int i = 0; i < count_test_block; ++i) {
548 double out_r[kNumCoeffs];
550 // Initialize a test block with input range [-255, 255].
551 for (int j = 0; j < kNumCoeffs; ++j) {
552 if (bit_depth_ == VPX_BITS_8) {
553 src[j] = rnd.Rand8();
554 dst[j] = rnd.Rand8();
555 in[j] = src[j] - dst[j];
556 #if CONFIG_VP9_HIGHBITDEPTH
558 src16[j] = rnd.Rand16() & mask_;
559 dst16[j] = rnd.Rand16() & mask_;
560 in[j] = src16[j] - dst16[j];
561 #endif // CONFIG_VP9_HIGHBITDEPTH
565 reference_16x16_dct_2d(in, out_r);
566 for (int j = 0; j < kNumCoeffs; ++j)
567 coeff[j] = static_cast<tran_low_t>(round(out_r[j]));
569 if (bit_depth_ == VPX_BITS_8) {
570 ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, 16));
571 #if CONFIG_VP9_HIGHBITDEPTH
573 ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
575 #endif // CONFIG_VP9_HIGHBITDEPTH
578 for (int j = 0; j < kNumCoeffs; ++j) {
579 #if CONFIG_VP9_HIGHBITDEPTH
580 const uint32_t diff =
581 bit_depth_ == VPX_BITS_8 ? dst[j] - src[j] : dst16[j] - src16[j];
583 const uint32_t diff = dst[j] - src[j];
584 #endif // CONFIG_VP9_HIGHBITDEPTH
585 const uint32_t error = diff * diff;
587 << "Error: 16x16 IDCT has error " << error
588 << " at index " << j;
// Compare an optimized inverse transform against a reference inverse on
// sparse random coefficients of magnitude below |thresh| (placed in scan
// order), alternating sign per iteration.
593 void CompareInvReference(IdctFunc ref_txfm, int thresh) {
594 ACMRandom rnd(ACMRandom::DeterministicSeed());
595 const int count_test_block = 10000;
597 const int16_t *scan = vp9_default_scan_orders[TX_16X16].scan;
598 DECLARE_ALIGNED(16, tran_low_t, coeff[kNumCoeffs]);
599 DECLARE_ALIGNED(16, uint8_t, dst[kNumCoeffs]);
600 DECLARE_ALIGNED(16, uint8_t, ref[kNumCoeffs]);
601 #if CONFIG_VP9_HIGHBITDEPTH
602 DECLARE_ALIGNED(16, uint16_t, dst16[kNumCoeffs]);
603 DECLARE_ALIGNED(16, uint16_t, ref16[kNumCoeffs]);
604 #endif // CONFIG_VP9_HIGHBITDEPTH
606 for (int i = 0; i < count_test_block; ++i) {
607 for (int j = 0; j < kNumCoeffs; ++j) {
609 // Random values less than the threshold, either positive or negative
610 coeff[scan[j]] = rnd(thresh) * (1 - 2 * (i % 2));
614 if (bit_depth_ == VPX_BITS_8) {
617 #if CONFIG_VP9_HIGHBITDEPTH
621 #endif // CONFIG_VP9_HIGHBITDEPTH
624 if (bit_depth_ == VPX_BITS_8) {
625 ref_txfm(coeff, ref, pitch_);
626 ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, dst, pitch_));
628 #if CONFIG_VP9_HIGHBITDEPTH
629 ref_txfm(coeff, CONVERT_TO_BYTEPTR(ref16), pitch_);
630 ASM_REGISTER_STATE_CHECK(RunInvTxfm(coeff, CONVERT_TO_BYTEPTR(dst16),
632 #endif // CONFIG_VP9_HIGHBITDEPTH
635 for (int j = 0; j < kNumCoeffs; ++j) {
636 #if CONFIG_VP9_HIGHBITDEPTH
637 const uint32_t diff =
638 bit_depth_ == VPX_BITS_8 ? dst[j] - ref[j] : dst16[j] - ref16[j];
640 const uint32_t diff = dst[j] - ref[j];
641 #endif // CONFIG_VP9_HIGHBITDEPTH
642 const uint32_t error = diff * diff;
644 << "Error: 16x16 IDCT Comparison has error " << error
645 << " at index " << j;
// Shared fixture state, populated by each subclass's SetUp().
652 vpx_bit_depth_t bit_depth_;
654 FhtFunc fwd_txfm_ref;
655 IhtFunc inv_txfm_ref;
// Parameterized DCT fixture: binds the Dct16x16Param tuple (fwd txfm,
// inv txfm, tx_type, bit depth) into the Trans16x16TestBase harness and
// selects the bit-depth-matched inverse reference.
// NOTE(review): the "class Trans16x16DCT" header line, case labels and
// closing braces are missing from this truncated listing; code left
// byte-identical.
659     : public Trans16x16TestBase,
660       public ::testing::TestWithParam<Dct16x16Param> {
662 virtual ~Trans16x16DCT() {}
664 virtual void SetUp() {
665 fwd_txfm_ = GET_PARAM(0);
666 inv_txfm_ = GET_PARAM(1);
667 tx_type_ = GET_PARAM(2);
668 bit_depth_ = GET_PARAM(3);
670 fwd_txfm_ref = fdct16x16_ref;
671 inv_txfm_ref = idct16x16_ref;
672 mask_ = (1 << bit_depth_) - 1;
673 #if CONFIG_VP9_HIGHBITDEPTH
// Pick the inverse reference matching the configured bit depth.
674 switch (bit_depth_) {
676 inv_txfm_ref = idct16x16_10_ref;
679 inv_txfm_ref = idct16x16_12_ref;
682 inv_txfm_ref = idct16x16_ref;
686 inv_txfm_ref = idct16x16_ref;
689 virtual void TearDown() { libvpx_test::ClearSystemState(); }
// Adapters: the DCT ignores tx_type, so the stored pointers are called
// with the plain (in, out, stride) signature.
692 void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
693 fwd_txfm_(in, out, stride);
695 void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
696 inv_txfm_(out, dst, stride);
// DCT test cases; each delegates to the corresponding Run*Check() helper
// on Trans16x16TestBase.  NOTE(review): most test bodies/closing braces are
// missing from this truncated listing; code left byte-identical.
703 TEST_P(Trans16x16DCT, AccuracyCheck) {
707 TEST_P(Trans16x16DCT, CoeffCheck) {
711 TEST_P(Trans16x16DCT, MemCheck) {
715 TEST_P(Trans16x16DCT, QuantCheck) {
716 // Use maximally allowed quantization step sizes for DC and AC
717 // coefficients respectively.
718 RunQuantCheck(1336, 1828);
721 TEST_P(Trans16x16DCT, InvAccuracyCheck) {
722 RunInvAccuracyCheck();
// Parameterized hybrid-transform fixture: like Trans16x16DCT but the
// stored function pointers additionally take tx_type_.
// NOTE(review): the "class Trans16x16HT" header line, case labels and
// closing braces are missing from this truncated listing; code left
// byte-identical.
726     : public Trans16x16TestBase,
727       public ::testing::TestWithParam<Ht16x16Param> {
729 virtual ~Trans16x16HT() {}
731 virtual void SetUp() {
732 fwd_txfm_ = GET_PARAM(0);
733 inv_txfm_ = GET_PARAM(1);
734 tx_type_ = GET_PARAM(2);
735 bit_depth_ = GET_PARAM(3);
737 fwd_txfm_ref = fht16x16_ref;
738 inv_txfm_ref = iht16x16_ref;
739 mask_ = (1 << bit_depth_) - 1;
740 #if CONFIG_VP9_HIGHBITDEPTH
// Pick the inverse reference matching the configured bit depth.
741 switch (bit_depth_) {
743 inv_txfm_ref = iht16x16_10;
746 inv_txfm_ref = iht16x16_12;
749 inv_txfm_ref = iht16x16_ref;
753 inv_txfm_ref = iht16x16_ref;
756 virtual void TearDown() { libvpx_test::ClearSystemState(); }
// Adapters forwarding tx_type_ to the hybrid transforms under test.
759 void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {
760 fwd_txfm_(in, out, stride, tx_type_);
762 void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
763 inv_txfm_(out, dst, stride, tx_type_);
// Hybrid-transform test cases, mirroring the DCT ones above.
// NOTE(review): test bodies/closing braces are missing from this truncated
// listing; code left byte-identical.
770 TEST_P(Trans16x16HT, AccuracyCheck) {
774 TEST_P(Trans16x16HT, CoeffCheck) {
778 TEST_P(Trans16x16HT, MemCheck) {
782 TEST_P(Trans16x16HT, QuantCheck) {
783 // The encoder skips any non-DC intra prediction modes,
784 // when the quantization step size goes beyond 988.
785 RunQuantCheck(429, 729);
// Inverse-only fixture: compares two inverse transforms (reference vs.
// optimized) via CompareInvReference(); the forward hook is a no-op.
// NOTE(review): closing braces and member declarations are missing from
// this truncated listing; code left byte-identical.
788 class InvTrans16x16DCT
789     : public Trans16x16TestBase,
790       public ::testing::TestWithParam<Idct16x16Param> {
792 virtual ~InvTrans16x16DCT() {}
794 virtual void SetUp() {
795 ref_txfm_ = GET_PARAM(0);
796 inv_txfm_ = GET_PARAM(1);
797 thresh_ = GET_PARAM(2);
798 bit_depth_ = GET_PARAM(3);
800 mask_ = (1 << bit_depth_) - 1;
802 virtual void TearDown() { libvpx_test::ClearSystemState(); }
// No forward transform for the inverse-only comparison.
805 void RunFwdTxfm(int16_t *in, tran_low_t *out, int stride) {}
806 void RunInvTxfm(tran_low_t *out, uint8_t *dst, int stride) {
807 inv_txfm_(out, dst, stride);
// Drives CompareInvReference() with the parameterized coefficient threshold.
815 TEST_P(InvTrans16x16DCT, CompareReference) {
816 CompareInvReference(ref_txfm_, thresh_);
819 using std::tr1::make_tuple;
// Test instantiations.  Each make_tuple is <forward txfm, inverse txfm,
// tx_type (or coefficient threshold for InvTrans16x16DCT), bit depth>,
// grouped by build configuration / SIMD availability.
// NOTE(review): instantiation-name lines and closing parentheses are
// missing from this truncated listing; code left byte-identical.
821 #if CONFIG_VP9_HIGHBITDEPTH
822 INSTANTIATE_TEST_CASE_P(
825 make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_10, 0, VPX_BITS_10),
826 make_tuple(&vpx_highbd_fdct16x16_c, &idct16x16_12, 0, VPX_BITS_12),
827 make_tuple(&vpx_fdct16x16_c, &vp9_idct16x16_256_add_c, 0, VPX_BITS_8)));
829 INSTANTIATE_TEST_CASE_P(
832 make_tuple(&vpx_fdct16x16_c, &vp9_idct16x16_256_add_c, 0, VPX_BITS_8)));
833 #endif // CONFIG_VP9_HIGHBITDEPTH
835 #if CONFIG_VP9_HIGHBITDEPTH
836 INSTANTIATE_TEST_CASE_P(
839 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 0, VPX_BITS_10),
840 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 1, VPX_BITS_10),
841 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 2, VPX_BITS_10),
842 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_10, 3, VPX_BITS_10),
843 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 0, VPX_BITS_12),
844 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 1, VPX_BITS_12),
845 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 2, VPX_BITS_12),
846 make_tuple(&vp9_highbd_fht16x16_c, &iht16x16_12, 3, VPX_BITS_12),
847 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
848 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
849 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
850 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
852 INSTANTIATE_TEST_CASE_P(
855 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
856 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
857 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
858 make_tuple(&vp9_fht16x16_c, &vp9_iht16x16_256_add_c, 3, VPX_BITS_8)));
859 #endif // CONFIG_VP9_HIGHBITDEPTH
861 #if HAVE_NEON_ASM && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
862 INSTANTIATE_TEST_CASE_P(
865 make_tuple(&vpx_fdct16x16_c,
866 &vp9_idct16x16_256_add_neon, 0, VPX_BITS_8)));
869 #if HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
870 INSTANTIATE_TEST_CASE_P(
873 make_tuple(&vpx_fdct16x16_sse2,
874 &vp9_idct16x16_256_add_sse2, 0, VPX_BITS_8)));
875 INSTANTIATE_TEST_CASE_P(
878 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 0,
880 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 1,
882 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 2,
884 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_sse2, 3,
886 #endif // HAVE_SSE2 && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
888 #if HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
889 INSTANTIATE_TEST_CASE_P(
892 make_tuple(&vpx_highbd_fdct16x16_sse2,
893 &idct16x16_10, 0, VPX_BITS_10),
894 make_tuple(&vpx_highbd_fdct16x16_c,
895 &idct16x16_256_add_10_sse2, 0, VPX_BITS_10),
896 make_tuple(&vpx_highbd_fdct16x16_sse2,
897 &idct16x16_12, 0, VPX_BITS_12),
898 make_tuple(&vpx_highbd_fdct16x16_c,
899 &idct16x16_256_add_12_sse2, 0, VPX_BITS_12),
900 make_tuple(&vpx_fdct16x16_sse2,
901 &vp9_idct16x16_256_add_c, 0, VPX_BITS_8)));
902 INSTANTIATE_TEST_CASE_P(
905 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 0, VPX_BITS_8),
906 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 1, VPX_BITS_8),
907 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 2, VPX_BITS_8),
908 make_tuple(&vp9_fht16x16_sse2, &vp9_iht16x16_256_add_c, 3,
910 // Optimizations take effect at a threshold of 3155, so we use a value close to
911 // that to test both branches.
912 INSTANTIATE_TEST_CASE_P(
913     SSE2, InvTrans16x16DCT,
915 make_tuple(&idct16x16_10_add_10_c,
916 &idct16x16_10_add_10_sse2, 3167, VPX_BITS_10),
917 make_tuple(&idct16x16_10,
918 &idct16x16_256_add_10_sse2, 3167, VPX_BITS_10),
919 make_tuple(&idct16x16_10_add_12_c,
920 &idct16x16_10_add_12_sse2, 3167, VPX_BITS_12),
921 make_tuple(&idct16x16_12,
922 &idct16x16_256_add_12_sse2, 3167, VPX_BITS_12)));
923 #endif // HAVE_SSE2 && CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
925 #if HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE
926 INSTANTIATE_TEST_CASE_P(
929 make_tuple(&vpx_fdct16x16_msa,
930 &vp9_idct16x16_256_add_msa, 0, VPX_BITS_8)));
931 INSTANTIATE_TEST_CASE_P(
934 make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 0, VPX_BITS_8),
935 make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 1, VPX_BITS_8),
936 make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 2, VPX_BITS_8),
937 make_tuple(&vp9_fht16x16_msa, &vp9_iht16x16_256_add_msa, 3,
939 #endif // HAVE_MSA && !CONFIG_VP9_HIGHBITDEPTH && !CONFIG_EMULATE_HARDWARE