2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
14 #include "third_party/googletest/src/include/gtest/gtest.h"
16 #include "./vp9_rtcd.h"
17 #include "./vpx_config.h"
18 #include "./vpx_dsp_rtcd.h"
19 #include "test/acm_random.h"
20 #include "test/clear_system_state.h"
21 #include "test/register_state_check.h"
22 #include "test/util.h"
23 #include "vp9/common/vp9_common.h"
24 #include "vp9/common/vp9_filter.h"
25 #include "vpx_dsp/vpx_dsp_common.h"
26 #include "vpx_dsp/vpx_filter.h"
27 #include "vpx_mem/vpx_mem.h"
28 #include "vpx_ports/mem.h"
29 #include "vpx_ports/vpx_timer.h"
// Largest block dimension (width or height) exercised by these tests.
static const unsigned int kMaxDimension = 64;

// Signature shared by every vpx convolve/copy/avg function under test.
typedef void (*ConvolveFunc)(const uint8_t *src, ptrdiff_t src_stride,
                             uint8_t *dst, ptrdiff_t dst_stride,
                             const InterpKernel *filter, int x0_q4,
                             int x_step_q4, int y0_q4, int y_step_q4, int w,

// Signature of the C reference wrappers (dispatch 8-bit vs high bitdepth).
typedef void (*WrapperFilterBlock2d8Func)(
    const uint8_t *src_ptr, const unsigned int src_stride,
    const int16_t *hfilter, const int16_t *vfilter, uint8_t *dst_ptr,
    unsigned int dst_stride, unsigned int output_width,
    unsigned int output_height, int use_highbd);

// Bundles every function variant of one implementation (C/SSE2/NEON/...).
// For each pair, index [0] is the plain variant and [1] the averaging one.
struct ConvolveFunctions {
  ConvolveFunctions(ConvolveFunc copy, ConvolveFunc avg, ConvolveFunc h8,
                    ConvolveFunc h8_avg, ConvolveFunc v8, ConvolveFunc v8_avg,
                    ConvolveFunc hv8, ConvolveFunc hv8_avg, ConvolveFunc sh8,
                    ConvolveFunc sh8_avg, ConvolveFunc sv8,
                    ConvolveFunc sv8_avg, ConvolveFunc shv8,
                    ConvolveFunc shv8_avg, int bd)
  ConvolveFunc copy_[2];
  ConvolveFunc sh8_[2];   // scaled horiz
  ConvolveFunc sv8_[2];   // scaled vert
  ConvolveFunc shv8_[2];  // scaled horiz/vert
  int use_highbd_;  // 0 if high bitdepth not used, else the actual bit depth.

// Test parameter: width, height, and the function set to exercise.
typedef std::tuple<int, int, const ConvolveFunctions *> ConvolveParam;

// Expands to one ConvolveParam per supported block size for a function set.
#define ALL_SIZES(convolve_fn)                                          \
  make_tuple(4, 4, &convolve_fn), make_tuple(8, 4, &convolve_fn),       \
      make_tuple(4, 8, &convolve_fn), make_tuple(8, 8, &convolve_fn),   \
      make_tuple(16, 8, &convolve_fn), make_tuple(8, 16, &convolve_fn), \
      make_tuple(16, 16, &convolve_fn), make_tuple(32, 16, &convolve_fn), \
      make_tuple(16, 32, &convolve_fn), make_tuple(32, 32, &convolve_fn), \
      make_tuple(64, 32, &convolve_fn), make_tuple(32, 64, &convolve_fn), \
      make_tuple(64, 64, &convolve_fn)
92 // Reference 8-tap subpixel filter, slightly modified to fit into this test.
93 #define VP9_FILTER_WEIGHT 128
94 #define VP9_FILTER_SHIFT 7
// Clamp a filter accumulator to the valid 8-bit pixel range [0, 255].
uint8_t clip_pixel(int x) {
  if (x < 0) return 0;
  if (x > 255) return 255;
  return static_cast<uint8_t>(x);
}
97 void filter_block2d_8_c(const uint8_t *src_ptr, const unsigned int src_stride,
98 const int16_t *hfilter, const int16_t *vfilter,
99 uint8_t *dst_ptr, unsigned int dst_stride,
100 unsigned int output_width, unsigned int output_height) {
101 // Between passes, we use an intermediate buffer whose height is extended to
102 // have enough horizontally filtered values as input for the vertical pass.
103 // This buffer is allocated to be big enough for the largest block type we
105 const int kInterp_Extend = 4;
106 const unsigned int intermediate_height =
107 (kInterp_Extend - 1) + output_height + kInterp_Extend;
110 // Size of intermediate_buffer is max_intermediate_height * filter_max_width,
111 // where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
115 // and filter_max_width = 16
117 uint8_t intermediate_buffer[71 * kMaxDimension];
118 vp9_zero(intermediate_buffer);
119 const int intermediate_next_stride =
120 1 - static_cast<int>(intermediate_height * output_width);
122 // Horizontal pass (src -> transposed intermediate).
123 uint8_t *output_ptr = intermediate_buffer;
124 const int src_next_row_stride = src_stride - output_width;
125 src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
126 for (i = 0; i < intermediate_height; ++i) {
127 for (j = 0; j < output_width; ++j) {
129 const int temp = (src_ptr[0] * hfilter[0]) + (src_ptr[1] * hfilter[1]) +
130 (src_ptr[2] * hfilter[2]) + (src_ptr[3] * hfilter[3]) +
131 (src_ptr[4] * hfilter[4]) + (src_ptr[5] * hfilter[5]) +
132 (src_ptr[6] * hfilter[6]) + (src_ptr[7] * hfilter[7]) +
133 (VP9_FILTER_WEIGHT >> 1); // Rounding
135 // Normalize back to 0-255...
136 *output_ptr = clip_pixel(temp >> VP9_FILTER_SHIFT);
138 output_ptr += intermediate_height;
140 src_ptr += src_next_row_stride;
141 output_ptr += intermediate_next_stride;
144 // Vertical pass (transposed intermediate -> dst).
145 src_ptr = intermediate_buffer;
146 const int dst_next_row_stride = dst_stride - output_width;
147 for (i = 0; i < output_height; ++i) {
148 for (j = 0; j < output_width; ++j) {
150 const int temp = (src_ptr[0] * vfilter[0]) + (src_ptr[1] * vfilter[1]) +
151 (src_ptr[2] * vfilter[2]) + (src_ptr[3] * vfilter[3]) +
152 (src_ptr[4] * vfilter[4]) + (src_ptr[5] * vfilter[5]) +
153 (src_ptr[6] * vfilter[6]) + (src_ptr[7] * vfilter[7]) +
154 (VP9_FILTER_WEIGHT >> 1); // Rounding
156 // Normalize back to 0-255...
157 *dst_ptr++ = clip_pixel(temp >> VP9_FILTER_SHIFT);
158 src_ptr += intermediate_height;
160 src_ptr += intermediate_next_stride;
161 dst_ptr += dst_next_row_stride;
// In-place rounded average: output_ptr[r][c] = (output_ptr[r][c] +
// src[r][c] + 1) >> 1, for an output_width x output_height block.  Used to
// build the reference for the "avg" convolve variants.
void block2d_average_c(uint8_t *src, unsigned int src_stride,
                       uint8_t *output_ptr, unsigned int output_stride,
                       unsigned int output_width, unsigned int output_height) {
  unsigned int i, j;
  for (i = 0; i < output_height; ++i) {
    for (j = 0; j < output_width; ++j) {
      output_ptr[j] = (output_ptr[j] + src[i * src_stride + j] + 1) >> 1;
    }
    output_ptr += output_stride;
  }
}
177 void filter_average_block2d_8_c(const uint8_t *src_ptr,
178 const unsigned int src_stride,
179 const int16_t *hfilter, const int16_t *vfilter,
180 uint8_t *dst_ptr, unsigned int dst_stride,
181 unsigned int output_width,
182 unsigned int output_height) {
183 uint8_t tmp[kMaxDimension * kMaxDimension];
185 assert(output_width <= kMaxDimension);
186 assert(output_height <= kMaxDimension);
187 filter_block2d_8_c(src_ptr, src_stride, hfilter, vfilter, tmp, 64,
188 output_width, output_height);
189 block2d_average_c(tmp, 64, dst_ptr, dst_stride, output_width, output_height);
192 #if CONFIG_VP9_HIGHBITDEPTH
193 void highbd_filter_block2d_8_c(const uint16_t *src_ptr,
194 const unsigned int src_stride,
195 const int16_t *hfilter, const int16_t *vfilter,
196 uint16_t *dst_ptr, unsigned int dst_stride,
197 unsigned int output_width,
198 unsigned int output_height, int bd) {
199 // Between passes, we use an intermediate buffer whose height is extended to
200 // have enough horizontally filtered values as input for the vertical pass.
201 // This buffer is allocated to be big enough for the largest block type we
203 const int kInterp_Extend = 4;
204 const unsigned int intermediate_height =
205 (kInterp_Extend - 1) + output_height + kInterp_Extend;
207 /* Size of intermediate_buffer is max_intermediate_height * filter_max_width,
208 * where max_intermediate_height = (kInterp_Extend - 1) + filter_max_height
212 * and filter_max_width = 16
214 uint16_t intermediate_buffer[71 * kMaxDimension];
215 const int intermediate_next_stride =
216 1 - static_cast<int>(intermediate_height * output_width);
218 vp9_zero(intermediate_buffer);
220 // Horizontal pass (src -> transposed intermediate).
222 uint16_t *output_ptr = intermediate_buffer;
223 const int src_next_row_stride = src_stride - output_width;
225 src_ptr -= (kInterp_Extend - 1) * src_stride + (kInterp_Extend - 1);
226 for (i = 0; i < intermediate_height; ++i) {
227 for (j = 0; j < output_width; ++j) {
229 const int temp = (src_ptr[0] * hfilter[0]) + (src_ptr[1] * hfilter[1]) +
230 (src_ptr[2] * hfilter[2]) + (src_ptr[3] * hfilter[3]) +
231 (src_ptr[4] * hfilter[4]) + (src_ptr[5] * hfilter[5]) +
232 (src_ptr[6] * hfilter[6]) + (src_ptr[7] * hfilter[7]) +
233 (VP9_FILTER_WEIGHT >> 1); // Rounding
235 // Normalize back to 0-255...
236 *output_ptr = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
238 output_ptr += intermediate_height;
240 src_ptr += src_next_row_stride;
241 output_ptr += intermediate_next_stride;
245 // Vertical pass (transposed intermediate -> dst).
247 uint16_t *src_ptr = intermediate_buffer;
248 const int dst_next_row_stride = dst_stride - output_width;
250 for (i = 0; i < output_height; ++i) {
251 for (j = 0; j < output_width; ++j) {
253 const int temp = (src_ptr[0] * vfilter[0]) + (src_ptr[1] * vfilter[1]) +
254 (src_ptr[2] * vfilter[2]) + (src_ptr[3] * vfilter[3]) +
255 (src_ptr[4] * vfilter[4]) + (src_ptr[5] * vfilter[5]) +
256 (src_ptr[6] * vfilter[6]) + (src_ptr[7] * vfilter[7]) +
257 (VP9_FILTER_WEIGHT >> 1); // Rounding
259 // Normalize back to 0-255...
260 *dst_ptr++ = clip_pixel_highbd(temp >> VP9_FILTER_SHIFT, bd);
261 src_ptr += intermediate_height;
263 src_ptr += intermediate_next_stride;
264 dst_ptr += dst_next_row_stride;
// High-bitdepth twin of block2d_average_c: in-place rounded average of the
// output block with src, on uint16_t samples.
void highbd_block2d_average_c(uint16_t *src, unsigned int src_stride,
                              uint16_t *output_ptr, unsigned int output_stride,
                              unsigned int output_width,
                              unsigned int output_height) {
  unsigned int i, j;
  for (i = 0; i < output_height; ++i) {
    for (j = 0; j < output_width; ++j) {
      output_ptr[j] = (output_ptr[j] + src[i * src_stride + j] + 1) >> 1;
    }
    output_ptr += output_stride;
  }
}
282 void highbd_filter_average_block2d_8_c(
283 const uint16_t *src_ptr, const unsigned int src_stride,
284 const int16_t *hfilter, const int16_t *vfilter, uint16_t *dst_ptr,
285 unsigned int dst_stride, unsigned int output_width,
286 unsigned int output_height, int bd) {
287 uint16_t tmp[kMaxDimension * kMaxDimension];
289 assert(output_width <= kMaxDimension);
290 assert(output_height <= kMaxDimension);
291 highbd_filter_block2d_8_c(src_ptr, src_stride, hfilter, vfilter, tmp, 64,
292 output_width, output_height, bd);
293 highbd_block2d_average_c(tmp, 64, dst_ptr, dst_stride, output_width,
296 #endif // CONFIG_VP9_HIGHBITDEPTH
298 void wrapper_filter_average_block2d_8_c(
299 const uint8_t *src_ptr, const unsigned int src_stride,
300 const int16_t *hfilter, const int16_t *vfilter, uint8_t *dst_ptr,
301 unsigned int dst_stride, unsigned int output_width,
302 unsigned int output_height, int use_highbd) {
303 #if CONFIG_VP9_HIGHBITDEPTH
304 if (use_highbd == 0) {
305 filter_average_block2d_8_c(src_ptr, src_stride, hfilter, vfilter, dst_ptr,
306 dst_stride, output_width, output_height);
308 highbd_filter_average_block2d_8_c(CAST_TO_SHORTPTR(src_ptr), src_stride,
310 CAST_TO_SHORTPTR(dst_ptr), dst_stride,
311 output_width, output_height, use_highbd);
314 ASSERT_EQ(0, use_highbd);
315 filter_average_block2d_8_c(src_ptr, src_stride, hfilter, vfilter, dst_ptr,
316 dst_stride, output_width, output_height);
320 void wrapper_filter_block2d_8_c(const uint8_t *src_ptr,
321 const unsigned int src_stride,
322 const int16_t *hfilter, const int16_t *vfilter,
323 uint8_t *dst_ptr, unsigned int dst_stride,
324 unsigned int output_width,
325 unsigned int output_height, int use_highbd) {
326 #if CONFIG_VP9_HIGHBITDEPTH
327 if (use_highbd == 0) {
328 filter_block2d_8_c(src_ptr, src_stride, hfilter, vfilter, dst_ptr,
329 dst_stride, output_width, output_height);
331 highbd_filter_block2d_8_c(CAST_TO_SHORTPTR(src_ptr), src_stride, hfilter,
332 vfilter, CAST_TO_SHORTPTR(dst_ptr), dst_stride,
333 output_width, output_height, use_highbd);
336 ASSERT_EQ(0, use_highbd);
337 filter_block2d_8_c(src_ptr, src_stride, hfilter, vfilter, dst_ptr, dst_stride,
338 output_width, output_height);
// Test fixture for all convolve variants.  Each test operates on a
// Width() x Height() inner block centered inside a 256x256 outer block; the
// surrounding guard area lets CheckGuardBlocks() detect out-of-bounds writes.
// NOTE(review): several interior lines of this class are not visible in this
// chunk (access specifiers, some frees and assignments); the visible text is
// not the complete definition.
class ConvolveTest : public ::testing::TestWithParam<ConvolveParam> {
  // One-time allocation of the buffers shared by every test in the suite.
  static void SetUpTestCase() {
    // Force input_ to be unaligned, output to be 16 byte aligned.
    input_ = reinterpret_cast<uint8_t *>(
                 vpx_memalign(kDataAlignment, kInputBufferSize + 1)) +
    output_ = reinterpret_cast<uint8_t *>(
        vpx_memalign(kDataAlignment, kOutputBufferSize));
    output_ref_ = reinterpret_cast<uint8_t *>(
        vpx_memalign(kDataAlignment, kOutputBufferSize));
#if CONFIG_VP9_HIGHBITDEPTH
    // High-bitdepth twins of the same buffers, holding 16-bit samples.
    input16_ = reinterpret_cast<uint16_t *>(vpx_memalign(
                   kDataAlignment, (kInputBufferSize + 1) * sizeof(uint16_t))) +
    output16_ = reinterpret_cast<uint16_t *>(
        vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));
    output16_ref_ = reinterpret_cast<uint16_t *>(
        vpx_memalign(kDataAlignment, (kOutputBufferSize) * sizeof(uint16_t)));

  virtual void TearDown() { libvpx_test::ClearSystemState(); }

  // Frees the buffers from SetUpTestCase().  The "- 1" undoes the deliberate
  // one-byte misalignment applied at allocation time.
  static void TearDownTestCase() {
    vpx_free(input_ - 1);
    vpx_free(output_ref_);
#if CONFIG_VP9_HIGHBITDEPTH
    vpx_free(input16_ - 1);
    vpx_free(output16_ref_);
    output16_ref_ = NULL;

  // Geometry of the shared buffers.
  static const int kDataAlignment = 16;
  static const int kOuterBlockSize = 256;
  static const int kInputStride = kOuterBlockSize;
  static const int kOutputStride = kOuterBlockSize;
  static const int kInputBufferSize = kOuterBlockSize * kOuterBlockSize;
  static const int kOutputBufferSize = kOuterBlockSize * kOuterBlockSize;

  int Width() const { return GET_PARAM(0); }
  int Height() const { return GET_PARAM(1); }
  // Left edge of the inner block, rounded up to a kDataAlignment boundary.
  int BorderLeft() const {
    const int center = (kOuterBlockSize - Width()) / 2;
    return (center + (kDataAlignment - 1)) & ~(kDataAlignment - 1);
  int BorderTop() const { return (kOuterBlockSize - Height()) / 2; }

  // True when flat index i (into the outer block) lies in the guard area.
  bool IsIndexInBorder(int i) {
    return (i < BorderTop() * kOuterBlockSize ||
            i >= (BorderTop() + Height()) * kOuterBlockSize ||
            i % kOuterBlockSize < BorderLeft() ||
            i % kOuterBlockSize >= (BorderLeft() + Width()));

  virtual void SetUp() {
#if CONFIG_VP9_HIGHBITDEPTH
    // Pixel mask for the configured bit depth (e.g. 0x3ff for 10-bit).
    if (UUT_->use_highbd_ != 0) {
      mask_ = (1 << UUT_->use_highbd_) - 1;

    /* Set up guard blocks for an inner block centered in the outer block */
    for (int i = 0; i < kOutputBufferSize; ++i) {
      if (IsIndexInBorder(i)) {

    // Fill the input with extreme-biased random samples.
    ::libvpx_test::ACMRandom prng;
    for (int i = 0; i < kInputBufferSize; ++i) {
#if CONFIG_VP9_HIGHBITDEPTH
      input_[i] = prng.Rand8Extremes();
#if CONFIG_VP9_HIGHBITDEPTH
      input16_[i] = prng.Rand16() & mask_;

  void SetConstantInput(int value) {
    memset(input_, value, kInputBufferSize);
#if CONFIG_VP9_HIGHBITDEPTH
    vpx_memset16(input16_, value, kInputBufferSize);

  void CopyOutputToRef() {
    memcpy(output_ref_, output_, kOutputBufferSize);
#if CONFIG_VP9_HIGHBITDEPTH
    memcpy(output16_ref_, output16_,
           kOutputBufferSize * sizeof(output16_ref_[0]));

  // Verifies that no convolve call wrote outside the inner block.
  void CheckGuardBlocks() {
    for (int i = 0; i < kOutputBufferSize; ++i) {
      if (IsIndexInBorder(i)) {
        EXPECT_EQ(255, output_[i]);

  // Pointer to the top-left pixel of the inner input block.  For high
  // bitdepth, the uint16_t buffer is smuggled through CAST_TO_BYTEPTR.
  uint8_t *input() const {
    const int offset = BorderTop() * kOuterBlockSize + BorderLeft();
#if CONFIG_VP9_HIGHBITDEPTH
    if (UUT_->use_highbd_ == 0) {
      return input_ + offset;
    return CAST_TO_BYTEPTR(input16_ + offset);
    return input_ + offset;

  // Same as input(), for the output block.
  uint8_t *output() const {
    const int offset = BorderTop() * kOuterBlockSize + BorderLeft();
#if CONFIG_VP9_HIGHBITDEPTH
    if (UUT_->use_highbd_ == 0) {
      return output_ + offset;
    return CAST_TO_BYTEPTR(output16_ + offset);
    return output_ + offset;

  // Same as input(), for the reference-output block.
  uint8_t *output_ref() const {
    const int offset = BorderTop() * kOuterBlockSize + BorderLeft();
#if CONFIG_VP9_HIGHBITDEPTH
    if (UUT_->use_highbd_ == 0) {
      return output_ref_ + offset;
    return CAST_TO_BYTEPTR(output16_ref_ + offset);
    return output_ref_ + offset;

  // Reads element |index| of |list| through the proper pointer type for the
  // configured bit depth, widened to uint16_t.
  uint16_t lookup(uint8_t *list, int index) const {
#if CONFIG_VP9_HIGHBITDEPTH
    if (UUT_->use_highbd_ == 0) {
    return CAST_TO_SHORTPTR(list)[index];

  // Writes |val| to element |index| of |list| for the configured bit depth.
  void assign_val(uint8_t *list, int index, uint16_t val) const {
#if CONFIG_VP9_HIGHBITDEPTH
    if (UUT_->use_highbd_ == 0) {
      list[index] = (uint8_t)val;
    CAST_TO_SHORTPTR(list)[index] = val;
    list[index] = (uint8_t)val;

  // Function set (Unit Under Test) chosen by the test parameter.
  const ConvolveFunctions *UUT_;
  static uint8_t *input_;
  static uint8_t *output_;
  static uint8_t *output_ref_;
#if CONFIG_VP9_HIGHBITDEPTH
  static uint16_t *input16_;
  static uint16_t *output16_;
  static uint16_t *output16_ref_;

// Out-of-class definitions of the static fixture members.
uint8_t *ConvolveTest::input_ = NULL;
uint8_t *ConvolveTest::output_ = NULL;
uint8_t *ConvolveTest::output_ref_ = NULL;
#if CONFIG_VP9_HIGHBITDEPTH
uint16_t *ConvolveTest::input16_ = NULL;
uint16_t *ConvolveTest::output16_ = NULL;
uint16_t *ConvolveTest::output16_ref_ = NULL;
// Verifies the guard area is intact before any convolution has run.
TEST_P(ConvolveTest, GuardBlocks) { CheckGuardBlocks(); }

// ---------------------------------------------------------------------------
// Speed tests.  gtest skips tests whose name starts with DISABLED_ unless
// run with --gtest_also_run_disabled_tests.  Each test times kNumTests
// invocations of one variant and prints the elapsed microseconds.
// ---------------------------------------------------------------------------
TEST_P(ConvolveTest, DISABLED_Copy_Speed) {
  const uint8_t *const in = input();
  uint8_t *const out = output();
  const int kNumTests = 5000000;
  const int width = Width();
  const int height = Height();
  vpx_usec_timer timer;

  vpx_usec_timer_start(&timer);
  for (int n = 0; n < kNumTests; ++n) {
    UUT_->copy_[0](in, kInputStride, out, kOutputStride, NULL, 0, 0, 0, 0,
  vpx_usec_timer_mark(&timer);

  const int elapsed_time = static_cast<int>(vpx_usec_timer_elapsed(&timer));
  printf("convolve_copy_%dx%d_%d: %d us\n", width, height,
         UUT_->use_highbd_ ? UUT_->use_highbd_ : 8, elapsed_time);

TEST_P(ConvolveTest, DISABLED_Avg_Speed) {
  const uint8_t *const in = input();
  uint8_t *const out = output();
  const int kNumTests = 5000000;
  const int width = Width();
  const int height = Height();
  vpx_usec_timer timer;

  vpx_usec_timer_start(&timer);
  for (int n = 0; n < kNumTests; ++n) {
    UUT_->copy_[1](in, kInputStride, out, kOutputStride, NULL, 0, 0, 0, 0,
  vpx_usec_timer_mark(&timer);

  const int elapsed_time = static_cast<int>(vpx_usec_timer_elapsed(&timer));
  printf("convolve_avg_%dx%d_%d: %d us\n", width, height,
         UUT_->use_highbd_ ? UUT_->use_highbd_ : 8, elapsed_time);

TEST_P(ConvolveTest, DISABLED_Scale_Speed) {
  const uint8_t *const in = input();
  uint8_t *const out = output();
  const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP];
  const int kNumTests = 5000000;
  const int width = Width();
  const int height = Height();
  vpx_usec_timer timer;

  SetConstantInput(127);

  vpx_usec_timer_start(&timer);
  for (int n = 0; n < kNumTests; ++n) {
    UUT_->shv8_[0](in, kInputStride, out, kOutputStride, eighttap, 8, 16, 8, 16,
  vpx_usec_timer_mark(&timer);

  const int elapsed_time = static_cast<int>(vpx_usec_timer_elapsed(&timer));
  printf("convolve_scale_%dx%d_%d: %d us\n", width, height,
         UUT_->use_highbd_ ? UUT_->use_highbd_ : 8, elapsed_time);

TEST_P(ConvolveTest, DISABLED_8Tap_Speed) {
  const uint8_t *const in = input();
  uint8_t *const out = output();
  const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP_SHARP];
  const int kNumTests = 5000000;
  const int width = Width();
  const int height = Height();
  vpx_usec_timer timer;

  SetConstantInput(127);

  vpx_usec_timer_start(&timer);
  for (int n = 0; n < kNumTests; ++n) {
    UUT_->hv8_[0](in, kInputStride, out, kOutputStride, eighttap, 8, 16, 8, 16,
  vpx_usec_timer_mark(&timer);

  const int elapsed_time = static_cast<int>(vpx_usec_timer_elapsed(&timer));
  printf("convolve8_%dx%d_%d: %d us\n", width, height,
         UUT_->use_highbd_ ? UUT_->use_highbd_ : 8, elapsed_time);

TEST_P(ConvolveTest, DISABLED_8Tap_Horiz_Speed) {
  const uint8_t *const in = input();
  uint8_t *const out = output();
  const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP_SHARP];
  const int kNumTests = 5000000;
  const int width = Width();
  const int height = Height();
  vpx_usec_timer timer;

  SetConstantInput(127);

  vpx_usec_timer_start(&timer);
  for (int n = 0; n < kNumTests; ++n) {
    UUT_->h8_[0](in, kInputStride, out, kOutputStride, eighttap, 8, 16, 8, 16,
  vpx_usec_timer_mark(&timer);

  const int elapsed_time = static_cast<int>(vpx_usec_timer_elapsed(&timer));
  printf("convolve8_horiz_%dx%d_%d: %d us\n", width, height,
         UUT_->use_highbd_ ? UUT_->use_highbd_ : 8, elapsed_time);

TEST_P(ConvolveTest, DISABLED_8Tap_Vert_Speed) {
  const uint8_t *const in = input();
  uint8_t *const out = output();
  const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP_SHARP];
  const int kNumTests = 5000000;
  const int width = Width();
  const int height = Height();
  vpx_usec_timer timer;

  SetConstantInput(127);

  vpx_usec_timer_start(&timer);
  for (int n = 0; n < kNumTests; ++n) {
    UUT_->v8_[0](in, kInputStride, out, kOutputStride, eighttap, 8, 16, 8, 16,
  vpx_usec_timer_mark(&timer);

  const int elapsed_time = static_cast<int>(vpx_usec_timer_elapsed(&timer));
  printf("convolve8_vert_%dx%d_%d: %d us\n", width, height,
         UUT_->use_highbd_ ? UUT_->use_highbd_ : 8, elapsed_time);

TEST_P(ConvolveTest, DISABLED_4Tap_Speed) {
  const uint8_t *const in = input();
  uint8_t *const out = output();
  const InterpKernel *const fourtap = vp9_filter_kernels[FOURTAP];
  const int kNumTests = 5000000;
  const int width = Width();
  const int height = Height();
  vpx_usec_timer timer;

  SetConstantInput(127);

  vpx_usec_timer_start(&timer);
  for (int n = 0; n < kNumTests; ++n) {
    UUT_->hv8_[0](in, kInputStride, out, kOutputStride, fourtap, 8, 16, 8, 16,
  vpx_usec_timer_mark(&timer);

  const int elapsed_time = static_cast<int>(vpx_usec_timer_elapsed(&timer));
  printf("convolve4_%dx%d_%d: %d us\n", width, height,
         UUT_->use_highbd_ ? UUT_->use_highbd_ : 8, elapsed_time);

TEST_P(ConvolveTest, DISABLED_4Tap_Horiz_Speed) {
  const uint8_t *const in = input();
  uint8_t *const out = output();
  const InterpKernel *const fourtap = vp9_filter_kernels[FOURTAP];
  const int kNumTests = 5000000;
  const int width = Width();
  const int height = Height();
  vpx_usec_timer timer;

  SetConstantInput(127);

  vpx_usec_timer_start(&timer);
  for (int n = 0; n < kNumTests; ++n) {
    UUT_->h8_[0](in, kInputStride, out, kOutputStride, fourtap, 8, 16, 8, 16,
  vpx_usec_timer_mark(&timer);

  const int elapsed_time = static_cast<int>(vpx_usec_timer_elapsed(&timer));
  printf("convolve4_horiz_%dx%d_%d: %d us\n", width, height,
         UUT_->use_highbd_ ? UUT_->use_highbd_ : 8, elapsed_time);

TEST_P(ConvolveTest, DISABLED_4Tap_Vert_Speed) {
  const uint8_t *const in = input();
  uint8_t *const out = output();
  const InterpKernel *const fourtap = vp9_filter_kernels[FOURTAP];
  const int kNumTests = 5000000;
  const int width = Width();
  const int height = Height();
  vpx_usec_timer timer;

  SetConstantInput(127);

  vpx_usec_timer_start(&timer);
  for (int n = 0; n < kNumTests; ++n) {
    UUT_->v8_[0](in, kInputStride, out, kOutputStride, fourtap, 8, 16, 8, 16,
  vpx_usec_timer_mark(&timer);

  const int elapsed_time = static_cast<int>(vpx_usec_timer_elapsed(&timer));
  printf("convolve4_vert_%dx%d_%d: %d us\n", width, height,
         UUT_->use_highbd_ ? UUT_->use_highbd_ : 8, elapsed_time);
TEST_P(ConvolveTest, DISABLED_8Tap_Avg_Speed) {
  const uint8_t *const in = input();
  uint8_t *const out = output();
  const InterpKernel *const eighttap = vp9_filter_kernels[EIGHTTAP_SHARP];
  const int kNumTests = 5000000;
  const int width = Width();
  const int height = Height();
  vpx_usec_timer timer;

  SetConstantInput(127);

  vpx_usec_timer_start(&timer);
  for (int n = 0; n < kNumTests; ++n) {
    UUT_->hv8_[1](in, kInputStride, out, kOutputStride, eighttap, 8, 16, 8, 16,
  vpx_usec_timer_mark(&timer);

  const int elapsed_time = static_cast<int>(vpx_usec_timer_elapsed(&timer));
  printf("convolve8_avg_%dx%d_%d: %d us\n", width, height,
         UUT_->use_highbd_ ? UUT_->use_highbd_ : 8, elapsed_time);
// Functional test: copy_[0] must reproduce the input block exactly.
TEST_P(ConvolveTest, Copy) {
  uint8_t *const in = input();
  uint8_t *const out = output();

  ASM_REGISTER_STATE_CHECK(UUT_->copy_[0](in, kInputStride, out, kOutputStride,
                                          NULL, 0, 0, 0, 0, Width(), Height()));

  for (int y = 0; y < Height(); ++y) {
    for (int x = 0; x < Width(); ++x)
      ASSERT_EQ(lookup(out, y * kOutputStride + x),
                lookup(in, y * kInputStride + x))
          << "(" << x << "," << y << ")";

// Functional test: copy_[1] (avg) must produce the rounded average of the
// input and the previous output contents (saved in output_ref).
TEST_P(ConvolveTest, Avg) {
  uint8_t *const in = input();
  uint8_t *const out = output();
  uint8_t *const out_ref = output_ref();

  ASM_REGISTER_STATE_CHECK(UUT_->copy_[1](in, kInputStride, out, kOutputStride,
                                          NULL, 0, 0, 0, 0, Width(), Height()));

  for (int y = 0; y < Height(); ++y) {
    for (int x = 0; x < Width(); ++x)
      ASSERT_EQ(lookup(out, y * kOutputStride + x),
                ROUND_POWER_OF_TWO(lookup(in, y * kInputStride + x) +
                                       lookup(out_ref, y * kOutputStride + x),
          << "(" << x << "," << y << ")";

// A scaled horizontal filter at fractional position 0 with a unit (16/16)
// step must degenerate to a plain copy.
TEST_P(ConvolveTest, CopyHoriz) {
  uint8_t *const in = input();
  uint8_t *const out = output();

  ASM_REGISTER_STATE_CHECK(UUT_->sh8_[0](in, kInputStride, out, kOutputStride,
                                         vp9_filter_kernels[0], 0, 16, 0, 16,

  for (int y = 0; y < Height(); ++y) {
    for (int x = 0; x < Width(); ++x)
      ASSERT_EQ(lookup(out, y * kOutputStride + x),
                lookup(in, y * kInputStride + x))
          << "(" << x << "," << y << ")";

// Same degenerate-copy property for the scaled vertical filter.
TEST_P(ConvolveTest, CopyVert) {
  uint8_t *const in = input();
  uint8_t *const out = output();

  ASM_REGISTER_STATE_CHECK(UUT_->sv8_[0](in, kInputStride, out, kOutputStride,
                                         vp9_filter_kernels[0], 0, 16, 0, 16,

  for (int y = 0; y < Height(); ++y) {
    for (int x = 0; x < Width(); ++x)
      ASSERT_EQ(lookup(out, y * kOutputStride + x),
                lookup(in, y * kInputStride + x))
          << "(" << x << "," << y << ")";

// Same degenerate-copy property for the scaled 2-D filter.
TEST_P(ConvolveTest, Copy2D) {
  uint8_t *const in = input();
  uint8_t *const out = output();

  ASM_REGISTER_STATE_CHECK(UUT_->shv8_[0](in, kInputStride, out, kOutputStride,
                                          vp9_filter_kernels[0], 0, 16, 0, 16,

  for (int y = 0; y < Height(); ++y) {
    for (int x = 0; x < Width(); ++x)
      ASSERT_EQ(lookup(out, y * kOutputStride + x),
                lookup(in, y * kInputStride + x))
          << "(" << x << "," << y << ")";
// Number of filter banks in vp9_filter_kernels, and kernels per bank.
const int kNumFilterBanks = 5;
const int kNumFilters = 16;

// Static property of the coefficients themselves: pairwise sums must stay
// within the 128 (VP9_FILTER_WEIGHT) budget, and each kernel must sum to
// exactly 128, so staged SIMD additions cannot overflow.
TEST(ConvolveTest, FiltersWontSaturateWhenAddedPairwise) {
  for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
    const InterpKernel *filters =
        vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
    for (int i = 0; i < kNumFilters; i++) {
      const int p0 = filters[i][0] + filters[i][1];
      const int p1 = filters[i][2] + filters[i][3];
      const int p2 = filters[i][4] + filters[i][5];
      const int p3 = filters[i][6] + filters[i][7];
      EXPECT_LE(p0 + p3, 128);
      EXPECT_LE(p0 + p3 + p1, 128);
      EXPECT_LE(p0 + p3 + p1 + p2, 128);
      EXPECT_EQ(p0 + p1 + p2 + p3, 128);

// [0]: plain reference wrapper, [1]: averaging reference wrapper.
const WrapperFilterBlock2d8Func wrapper_filter_block2d_8[2] = {
  wrapper_filter_block2d_8_c, wrapper_filter_average_block2d_8_c

// Compares the unit under test against the C reference for every filter
// bank and subpel position; i == 0 tests the plain variants, i == 1 the
// averaging variants.
TEST_P(ConvolveTest, MatchesReferenceSubpixelFilter) {
  for (int i = 0; i < 2; ++i) {
    uint8_t *const in = input();
    uint8_t *const out = output();
#if CONFIG_VP9_HIGHBITDEPTH
    // ref aliases ref8 or ref16 depending on the configured bit depth.
    uint8_t ref8[kOutputStride * kMaxDimension];
    uint16_t ref16[kOutputStride * kMaxDimension];

    if (UUT_->use_highbd_ == 0) {
      ref = CAST_TO_BYTEPTR(ref16);
    uint8_t ref[kOutputStride * kMaxDimension];

    // Populate ref and out with some random data
    ::libvpx_test::ACMRandom prng;
    for (int y = 0; y < Height(); ++y) {
      for (int x = 0; x < Width(); ++x) {
#if CONFIG_VP9_HIGHBITDEPTH
        if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
          r = prng.Rand8Extremes();
          r = prng.Rand16() & mask_;
        r = prng.Rand8Extremes();
        assign_val(out, y * kOutputStride + x, r);
        assign_val(ref, y * kOutputStride + x, r);

    for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
      const InterpKernel *filters =
          vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];

      for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
        for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
          wrapper_filter_block2d_8[i](in, kInputStride, filters[filter_x],
                                      filters[filter_y], ref, kOutputStride,
                                      Width(), Height(), UUT_->use_highbd_);

          // Variant selection mirrors the codec: 2-D when both subpel
          // offsets are nonzero, 1-D when only one is, copy when both are 0.
          if (filter_x && filter_y)
            ASM_REGISTER_STATE_CHECK(
                UUT_->hv8_[i](in, kInputStride, out, kOutputStride, filters,
                              filter_x, 16, filter_y, 16, Width(), Height()));
            ASM_REGISTER_STATE_CHECK(
                UUT_->v8_[i](in, kInputStride, out, kOutputStride, filters, 0,
                             16, filter_y, 16, Width(), Height()));
            ASM_REGISTER_STATE_CHECK(
                UUT_->h8_[i](in, kInputStride, out, kOutputStride, filters,
                             filter_x, 16, 0, 16, Width(), Height()));
            ASM_REGISTER_STATE_CHECK(UUT_->copy_[i](in, kInputStride, out,
                                                    kOutputStride, NULL, 0, 0,
                                                    0, 0, Width(), Height()));

          for (int y = 0; y < Height(); ++y) {
            for (int x = 0; x < Width(); ++x)
              ASSERT_EQ(lookup(ref, y * kOutputStride + x),
                        lookup(out, y * kOutputStride + x))
                  << "mismatch at (" << x << "," << y << "), "
                  << "filters (" << filter_bank << "," << filter_x << ","
// Stresses the filters with worst-case inputs: 8x8 patterns of alternating
// minimum/maximum samples driven by seed_val, checked against the C
// reference for every filter bank and subpel position.
TEST_P(ConvolveTest, FilterExtremes) {
  uint8_t *const in = input();
  uint8_t *const out = output();
#if CONFIG_VP9_HIGHBITDEPTH
  // ref aliases ref8 or ref16 depending on the configured bit depth.
  uint8_t ref8[kOutputStride * kMaxDimension];
  uint16_t ref16[kOutputStride * kMaxDimension];

  if (UUT_->use_highbd_ == 0) {
    ref = CAST_TO_BYTEPTR(ref16);
  uint8_t ref[kOutputStride * kMaxDimension];

  // Populate ref and out with some random data
  ::libvpx_test::ACMRandom prng;
  for (int y = 0; y < Height(); ++y) {
    for (int x = 0; x < Width(); ++x) {
#if CONFIG_VP9_HIGHBITDEPTH
      if (UUT_->use_highbd_ == 0 || UUT_->use_highbd_ == 8) {
        r = prng.Rand8Extremes();
        r = prng.Rand16() & mask_;
      r = prng.Rand8Extremes();
      assign_val(out, y * kOutputStride + x, r);
      assign_val(ref, y * kOutputStride + x, r);

  // axis == 0 varies the pattern along x, axis == 1 along y; seed_val's bits
  // select which rows/columns are at the extreme value.
  for (int axis = 0; axis < 2; axis++) {
    while (seed_val < 256) {
      for (int y = 0; y < 8; ++y) {
        for (int x = 0; x < 8; ++x) {
#if CONFIG_VP9_HIGHBITDEPTH
          assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
                     ((seed_val >> (axis ? y : x)) & 1) * mask_);
          assign_val(in, y * kOutputStride + x - SUBPEL_TAPS / 2 + 1,
                     ((seed_val >> (axis ? y : x)) & 1) * 255);
          if (axis) seed_val++;
      if (axis) seed_val += 8;

      for (int filter_bank = 0; filter_bank < kNumFilterBanks; ++filter_bank) {
        const InterpKernel *filters =
            vp9_filter_kernels[static_cast<INTERP_FILTER>(filter_bank)];
        for (int filter_x = 0; filter_x < kNumFilters; ++filter_x) {
          for (int filter_y = 0; filter_y < kNumFilters; ++filter_y) {
            wrapper_filter_block2d_8_c(in, kInputStride, filters[filter_x],
                                       filters[filter_y], ref, kOutputStride,
                                       Width(), Height(), UUT_->use_highbd_);
            // Variant selection mirrors the codec; see
            // MatchesReferenceSubpixelFilter.
            if (filter_x && filter_y)
              ASM_REGISTER_STATE_CHECK(
                  UUT_->hv8_[0](in, kInputStride, out, kOutputStride, filters,
                                filter_x, 16, filter_y, 16, Width(), Height()));
              ASM_REGISTER_STATE_CHECK(
                  UUT_->v8_[0](in, kInputStride, out, kOutputStride, filters, 0,
                               16, filter_y, 16, Width(), Height()));
              ASM_REGISTER_STATE_CHECK(
                  UUT_->h8_[0](in, kInputStride, out, kOutputStride, filters,
                               filter_x, 16, 0, 16, Width(), Height()));
              ASM_REGISTER_STATE_CHECK(UUT_->copy_[0](in, kInputStride, out,
                                                      kOutputStride, NULL, 0, 0,
                                                      0, 0, Width(), Height()));

            for (int y = 0; y < Height(); ++y) {
              for (int x = 0; x < Width(); ++x)
                ASSERT_EQ(lookup(ref, y * kOutputStride + x),
                          lookup(out, y * kOutputStride + x))
                    << "mismatch at (" << x << "," << y << "), "
                    << "filters (" << filter_bank << "," << filter_x << ","
/* This test exercises that enough rows and columns are filtered with every
   possible initial fractional positions and scaling steps. */
#if !CONFIG_VP9_HIGHBITDEPTH
// C reference implementations for the scaled 2-D paths ([0] plain, [1] avg).
static const ConvolveFunc scaled_2d_c_funcs[2] = { vpx_scaled_2d_c,
                                                   vpx_scaled_avg_2d_c };

TEST_P(ConvolveTest, CheckScalingFiltering) {
  uint8_t *const in = input();
  uint8_t *const out = output();
  uint8_t ref[kOutputStride * kMaxDimension];

  // Fill the input block with extreme-biased random samples.
  ::libvpx_test::ACMRandom prng;
  for (int y = 0; y < Height(); ++y) {
    for (int x = 0; x < Width(); ++x) {
      const uint16_t r = prng.Rand8Extremes();
      assign_val(in, y * kInputStride + x, r);

  // Sweep every filter bank, initial fractional position (frac, in 1/16ths)
  // and scaling step (step, in 1/16ths of a pixel per output pixel).
  for (int i = 0; i < 2; ++i) {
    for (INTERP_FILTER filter_type = 0; filter_type < 4; ++filter_type) {
      const InterpKernel *const eighttap = vp9_filter_kernels[filter_type];
      for (int frac = 0; frac < 16; ++frac) {
        for (int step = 1; step <= 32; ++step) {
          /* Test the horizontal and vertical filters in combination. */
          scaled_2d_c_funcs[i](in, kInputStride, ref, kOutputStride, eighttap,
                               frac, step, frac, step, Width(), Height());
          ASM_REGISTER_STATE_CHECK(
              UUT_->shv8_[i](in, kInputStride, out, kOutputStride, eighttap,
                             frac, step, frac, step, Width(), Height()));

          for (int y = 0; y < Height(); ++y) {
            for (int x = 0; x < Width(); ++x) {
              ASSERT_EQ(lookup(ref, y * kOutputStride + x),
                        lookup(out, y * kOutputStride + x))
                  << "x == " << x << ", y == " << y << ", frac == " << frac
                  << ", step == " << step;
1117 using std::make_tuple;
1119 #if CONFIG_VP9_HIGHBITDEPTH
1120 #define WRAP(func, bd) \
1121 void wrap_##func##_##bd( \
1122 const uint8_t *src, ptrdiff_t src_stride, uint8_t *dst, \
1123 ptrdiff_t dst_stride, const InterpKernel *filter, int x0_q4, \
1124 int x_step_q4, int y0_q4, int y_step_q4, int w, int h) { \
1125 vpx_highbd_##func(reinterpret_cast<const uint16_t *>(src), src_stride, \
1126 reinterpret_cast<uint16_t *>(dst), dst_stride, filter, \
1127 x0_q4, x_step_q4, y0_q4, y_step_q4, w, h, bd); \
1130 #if HAVE_SSE2 && ARCH_X86_64
// Instantiate wrap_<func>_<bd> shims for the high bitdepth SSE2 functions.
// Copy/average at each supported bit depth:
WRAP(convolve_copy_sse2, 8)
WRAP(convolve_avg_sse2, 8)
WRAP(convolve_copy_sse2, 10)
WRAP(convolve_avg_sse2, 10)
WRAP(convolve_copy_sse2, 12)
WRAP(convolve_avg_sse2, 12)
// 8-tap filters, 8-bit:
WRAP(convolve8_horiz_sse2, 8)
WRAP(convolve8_avg_horiz_sse2, 8)
WRAP(convolve8_vert_sse2, 8)
WRAP(convolve8_avg_vert_sse2, 8)
WRAP(convolve8_sse2, 8)
WRAP(convolve8_avg_sse2, 8)
// 8-tap filters, 10-bit:
WRAP(convolve8_horiz_sse2, 10)
WRAP(convolve8_avg_horiz_sse2, 10)
WRAP(convolve8_vert_sse2, 10)
WRAP(convolve8_avg_vert_sse2, 10)
WRAP(convolve8_sse2, 10)
WRAP(convolve8_avg_sse2, 10)
// 8-tap filters, 12-bit:
WRAP(convolve8_horiz_sse2, 12)
WRAP(convolve8_avg_horiz_sse2, 12)
WRAP(convolve8_vert_sse2, 12)
WRAP(convolve8_avg_vert_sse2, 12)
WRAP(convolve8_sse2, 12)
WRAP(convolve8_avg_sse2, 12)
1155 #endif // HAVE_SSE2 && ARCH_X86_64
// Instantiate wrap_<func>_<bd> shims for the high bitdepth AVX2 functions.
// 8-bit:
WRAP(convolve_copy_avx2, 8)
WRAP(convolve_avg_avx2, 8)
WRAP(convolve8_horiz_avx2, 8)
WRAP(convolve8_avg_horiz_avx2, 8)
WRAP(convolve8_vert_avx2, 8)
WRAP(convolve8_avg_vert_avx2, 8)
WRAP(convolve8_avx2, 8)
WRAP(convolve8_avg_avx2, 8)
// 10-bit:
WRAP(convolve_copy_avx2, 10)
WRAP(convolve_avg_avx2, 10)
WRAP(convolve8_avx2, 10)
WRAP(convolve8_horiz_avx2, 10)
WRAP(convolve8_vert_avx2, 10)
WRAP(convolve8_avg_avx2, 10)
WRAP(convolve8_avg_horiz_avx2, 10)
WRAP(convolve8_avg_vert_avx2, 10)
// 12-bit:
WRAP(convolve_copy_avx2, 12)
WRAP(convolve_avg_avx2, 12)
WRAP(convolve8_avx2, 12)
WRAP(convolve8_horiz_avx2, 12)
WRAP(convolve8_vert_avx2, 12)
WRAP(convolve8_avg_avx2, 12)
WRAP(convolve8_avg_horiz_avx2, 12)
WRAP(convolve8_avg_vert_avx2, 12)
// Instantiate wrap_<func>_<bd> shims for the high bitdepth NEON functions.
// Copy/average at each supported bit depth:
WRAP(convolve_copy_neon, 8)
WRAP(convolve_avg_neon, 8)
WRAP(convolve_copy_neon, 10)
WRAP(convolve_avg_neon, 10)
WRAP(convolve_copy_neon, 12)
WRAP(convolve_avg_neon, 12)
// 8-tap filters, 8-bit:
WRAP(convolve8_horiz_neon, 8)
WRAP(convolve8_avg_horiz_neon, 8)
WRAP(convolve8_vert_neon, 8)
WRAP(convolve8_avg_vert_neon, 8)
WRAP(convolve8_neon, 8)
WRAP(convolve8_avg_neon, 8)
// 8-tap filters, 10-bit:
WRAP(convolve8_horiz_neon, 10)
WRAP(convolve8_avg_horiz_neon, 10)
WRAP(convolve8_vert_neon, 10)
WRAP(convolve8_avg_vert_neon, 10)
WRAP(convolve8_neon, 10)
WRAP(convolve8_avg_neon, 10)
// 8-tap filters, 12-bit:
WRAP(convolve8_horiz_neon, 12)
WRAP(convolve8_avg_horiz_neon, 12)
WRAP(convolve8_vert_neon, 12)
WRAP(convolve8_avg_vert_neon, 12)
WRAP(convolve8_neon, 12)
WRAP(convolve8_avg_neon, 12)
// Instantiate wrap_<func>_<bd> shims for the high bitdepth C reference
// functions; these serve both as the C test target and as golden fallbacks
// for SIMD configurations that lack a highbd variant.
// 8-bit:
WRAP(convolve_copy_c, 8)
WRAP(convolve_avg_c, 8)
WRAP(convolve8_horiz_c, 8)
WRAP(convolve8_avg_horiz_c, 8)
WRAP(convolve8_vert_c, 8)
WRAP(convolve8_avg_vert_c, 8)
WRAP(convolve8_c, 8)
WRAP(convolve8_avg_c, 8)
// 10-bit:
WRAP(convolve_copy_c, 10)
WRAP(convolve_avg_c, 10)
WRAP(convolve8_horiz_c, 10)
WRAP(convolve8_avg_horiz_c, 10)
WRAP(convolve8_vert_c, 10)
WRAP(convolve8_avg_vert_c, 10)
WRAP(convolve8_c, 10)
WRAP(convolve8_avg_c, 10)
// 12-bit:
WRAP(convolve_copy_c, 12)
WRAP(convolve_avg_c, 12)
WRAP(convolve8_horiz_c, 12)
WRAP(convolve8_avg_horiz_c, 12)
WRAP(convolve8_vert_c, 12)
WRAP(convolve8_avg_vert_c, 12)
WRAP(convolve8_c, 12)
WRAP(convolve8_avg_c, 12)
// High bitdepth C function tables at 8/10/12 bits. The scaled slots
// (sh8/sv8/shv8) reuse the unscaled wrappers, since no highbd scaled
// variants are wrapped above; the final argument is the bit depth stored in
// use_highbd_.
const ConvolveFunctions convolve8_c(
    wrap_convolve_copy_c_8, wrap_convolve_avg_c_8, wrap_convolve8_horiz_c_8,
    wrap_convolve8_avg_horiz_c_8, wrap_convolve8_vert_c_8,
    wrap_convolve8_avg_vert_c_8, wrap_convolve8_c_8, wrap_convolve8_avg_c_8,
    wrap_convolve8_horiz_c_8, wrap_convolve8_avg_horiz_c_8,
    wrap_convolve8_vert_c_8, wrap_convolve8_avg_vert_c_8, wrap_convolve8_c_8,
    wrap_convolve8_avg_c_8, 8);
const ConvolveFunctions convolve10_c(
    wrap_convolve_copy_c_10, wrap_convolve_avg_c_10, wrap_convolve8_horiz_c_10,
    wrap_convolve8_avg_horiz_c_10, wrap_convolve8_vert_c_10,
    wrap_convolve8_avg_vert_c_10, wrap_convolve8_c_10, wrap_convolve8_avg_c_10,
    wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
    wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10, wrap_convolve8_c_10,
    wrap_convolve8_avg_c_10, 10);
const ConvolveFunctions convolve12_c(
    wrap_convolve_copy_c_12, wrap_convolve_avg_c_12, wrap_convolve8_horiz_c_12,
    wrap_convolve8_avg_horiz_c_12, wrap_convolve8_vert_c_12,
    wrap_convolve8_avg_vert_c_12, wrap_convolve8_c_12, wrap_convolve8_avg_c_12,
    wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
    wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12, wrap_convolve8_c_12,
    wrap_convolve8_avg_c_12, 12);
1260 const ConvolveParam kArrayConvolve_c[] = {
1261 ALL_SIZES(convolve8_c), ALL_SIZES(convolve10_c), ALL_SIZES(convolve12_c)
// Low bitdepth C function table; the scaled slots use the vpx_scaled_* C
// predictors and the trailing 0 means use_highbd_ is disabled.
const ConvolveFunctions convolve8_c(
    vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_c,
    vpx_convolve8_avg_horiz_c, vpx_convolve8_vert_c, vpx_convolve8_avg_vert_c,
    vpx_convolve8_c, vpx_convolve8_avg_c, vpx_scaled_horiz_c,
    vpx_scaled_avg_horiz_c, vpx_scaled_vert_c, vpx_scaled_avg_vert_c,
    vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
// Expand the table across every tested block size (ALL_SIZES is defined
// earlier in this file, outside this view).
const ConvolveParam kArrayConvolve_c[] = { ALL_SIZES(convolve8_c) };
1273 INSTANTIATE_TEST_CASE_P(C, ConvolveTest, ::testing::ValuesIn(kArrayConvolve_c));
1275 #if HAVE_SSE2 && ARCH_X86_64
1276 #if CONFIG_VP9_HIGHBITDEPTH
// High bitdepth SSE2 tables at 8/10/12 bits; scaled slots reuse the
// unscaled SSE2 wrappers (no highbd scaled SSE2 variants are wrapped).
const ConvolveFunctions convolve8_sse2(
    wrap_convolve_copy_sse2_8, wrap_convolve_avg_sse2_8,
    wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
    wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
    wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8,
    wrap_convolve8_horiz_sse2_8, wrap_convolve8_avg_horiz_sse2_8,
    wrap_convolve8_vert_sse2_8, wrap_convolve8_avg_vert_sse2_8,
    wrap_convolve8_sse2_8, wrap_convolve8_avg_sse2_8, 8);
const ConvolveFunctions convolve10_sse2(
    wrap_convolve_copy_sse2_10, wrap_convolve_avg_sse2_10,
    wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
    wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
    wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10,
    wrap_convolve8_horiz_sse2_10, wrap_convolve8_avg_horiz_sse2_10,
    wrap_convolve8_vert_sse2_10, wrap_convolve8_avg_vert_sse2_10,
    wrap_convolve8_sse2_10, wrap_convolve8_avg_sse2_10, 10);
const ConvolveFunctions convolve12_sse2(
    wrap_convolve_copy_sse2_12, wrap_convolve_avg_sse2_12,
    wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
    wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
    wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12,
    wrap_convolve8_horiz_sse2_12, wrap_convolve8_avg_horiz_sse2_12,
    wrap_convolve8_vert_sse2_12, wrap_convolve8_avg_vert_sse2_12,
    wrap_convolve8_sse2_12, wrap_convolve8_avg_sse2_12, 12);
const ConvolveParam kArrayConvolve_sse2[] = { ALL_SIZES(convolve8_sse2),
                                              ALL_SIZES(convolve10_sse2),
                                              ALL_SIZES(convolve12_sse2) };
// Low bitdepth SSE2 table; the scaled slots fall back to the C scaled
// predictors.
const ConvolveFunctions convolve8_sse2(
    vpx_convolve_copy_sse2, vpx_convolve_avg_sse2, vpx_convolve8_horiz_sse2,
    vpx_convolve8_avg_horiz_sse2, vpx_convolve8_vert_sse2,
    vpx_convolve8_avg_vert_sse2, vpx_convolve8_sse2, vpx_convolve8_avg_sse2,
    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);

const ConvolveParam kArrayConvolve_sse2[] = { ALL_SIZES(convolve8_sse2) };
1313 #endif // CONFIG_VP9_HIGHBITDEPTH
// Register the SSE2 configuration (highbd or lowbd, per the build).
INSTANTIATE_TEST_CASE_P(SSE2, ConvolveTest,
                        ::testing::ValuesIn(kArrayConvolve_sse2));
// SSSE3: copy/average use the C versions (no SSSE3 variants are listed);
// scaled 2-D uses vpx_scaled_2d_ssse3, the remaining scaled slots use C.
const ConvolveFunctions convolve8_ssse3(
    vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_ssse3,
    vpx_convolve8_avg_horiz_ssse3, vpx_convolve8_vert_ssse3,
    vpx_convolve8_avg_vert_ssse3, vpx_convolve8_ssse3, vpx_convolve8_avg_ssse3,
    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
    vpx_scaled_avg_vert_c, vpx_scaled_2d_ssse3, vpx_scaled_avg_2d_c, 0);

const ConvolveParam kArrayConvolve8_ssse3[] = { ALL_SIZES(convolve8_ssse3) };
INSTANTIATE_TEST_CASE_P(SSSE3, ConvolveTest,
                        ::testing::ValuesIn(kArrayConvolve8_ssse3));
1332 #if CONFIG_VP9_HIGHBITDEPTH
// High bitdepth AVX2 tables at 8/10/12 bits; the scaled slots fall back to
// the highbd C wrappers.
const ConvolveFunctions convolve8_avx2(
    wrap_convolve_copy_avx2_8, wrap_convolve_avg_avx2_8,
    wrap_convolve8_horiz_avx2_8, wrap_convolve8_avg_horiz_avx2_8,
    wrap_convolve8_vert_avx2_8, wrap_convolve8_avg_vert_avx2_8,
    wrap_convolve8_avx2_8, wrap_convolve8_avg_avx2_8, wrap_convolve8_horiz_c_8,
    wrap_convolve8_avg_horiz_c_8, wrap_convolve8_vert_c_8,
    wrap_convolve8_avg_vert_c_8, wrap_convolve8_c_8, wrap_convolve8_avg_c_8, 8);
const ConvolveFunctions convolve10_avx2(
    wrap_convolve_copy_avx2_10, wrap_convolve_avg_avx2_10,
    wrap_convolve8_horiz_avx2_10, wrap_convolve8_avg_horiz_avx2_10,
    wrap_convolve8_vert_avx2_10, wrap_convolve8_avg_vert_avx2_10,
    wrap_convolve8_avx2_10, wrap_convolve8_avg_avx2_10,
    wrap_convolve8_horiz_c_10, wrap_convolve8_avg_horiz_c_10,
    wrap_convolve8_vert_c_10, wrap_convolve8_avg_vert_c_10, wrap_convolve8_c_10,
    wrap_convolve8_avg_c_10, 10);
const ConvolveFunctions convolve12_avx2(
    wrap_convolve_copy_avx2_12, wrap_convolve_avg_avx2_12,
    wrap_convolve8_horiz_avx2_12, wrap_convolve8_avg_horiz_avx2_12,
    wrap_convolve8_vert_avx2_12, wrap_convolve8_avg_vert_avx2_12,
    wrap_convolve8_avx2_12, wrap_convolve8_avg_avx2_12,
    wrap_convolve8_horiz_c_12, wrap_convolve8_avg_horiz_c_12,
    wrap_convolve8_vert_c_12, wrap_convolve8_avg_vert_c_12, wrap_convolve8_c_12,
    wrap_convolve8_avg_c_12, 12);
const ConvolveParam kArrayConvolve8_avx2[] = { ALL_SIZES(convolve8_avx2),
                                               ALL_SIZES(convolve10_avx2),
                                               ALL_SIZES(convolve12_avx2) };
INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest,
                        ::testing::ValuesIn(kArrayConvolve8_avx2));
1361 #else // !CONFIG_VP9_HIGHBITDEPTH
// Low bitdepth AVX2: copy/average use the C versions (no lowbd AVX2
// variants are listed); scaled slots use the C scaled predictors.
const ConvolveFunctions convolve8_avx2(
    vpx_convolve_copy_c, vpx_convolve_avg_c, vpx_convolve8_horiz_avx2,
    vpx_convolve8_avg_horiz_avx2, vpx_convolve8_vert_avx2,
    vpx_convolve8_avg_vert_avx2, vpx_convolve8_avx2, vpx_convolve8_avg_avx2,
    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
const ConvolveParam kArrayConvolve8_avx2[] = { ALL_SIZES(convolve8_avx2) };
INSTANTIATE_TEST_CASE_P(AVX2, ConvolveTest,
                        ::testing::ValuesIn(kArrayConvolve8_avx2));
1371 #endif // CONFIG_VP9_HIGHBITDEPTH
1375 #if CONFIG_VP9_HIGHBITDEPTH
// High bitdepth NEON tables at 8/10/12 bits; scaled slots reuse the
// unscaled NEON wrappers (no highbd scaled NEON variants are wrapped).
const ConvolveFunctions convolve8_neon(
    wrap_convolve_copy_neon_8, wrap_convolve_avg_neon_8,
    wrap_convolve8_horiz_neon_8, wrap_convolve8_avg_horiz_neon_8,
    wrap_convolve8_vert_neon_8, wrap_convolve8_avg_vert_neon_8,
    wrap_convolve8_neon_8, wrap_convolve8_avg_neon_8,
    wrap_convolve8_horiz_neon_8, wrap_convolve8_avg_horiz_neon_8,
    wrap_convolve8_vert_neon_8, wrap_convolve8_avg_vert_neon_8,
    wrap_convolve8_neon_8, wrap_convolve8_avg_neon_8, 8);
const ConvolveFunctions convolve10_neon(
    wrap_convolve_copy_neon_10, wrap_convolve_avg_neon_10,
    wrap_convolve8_horiz_neon_10, wrap_convolve8_avg_horiz_neon_10,
    wrap_convolve8_vert_neon_10, wrap_convolve8_avg_vert_neon_10,
    wrap_convolve8_neon_10, wrap_convolve8_avg_neon_10,
    wrap_convolve8_horiz_neon_10, wrap_convolve8_avg_horiz_neon_10,
    wrap_convolve8_vert_neon_10, wrap_convolve8_avg_vert_neon_10,
    wrap_convolve8_neon_10, wrap_convolve8_avg_neon_10, 10);
const ConvolveFunctions convolve12_neon(
    wrap_convolve_copy_neon_12, wrap_convolve_avg_neon_12,
    wrap_convolve8_horiz_neon_12, wrap_convolve8_avg_horiz_neon_12,
    wrap_convolve8_vert_neon_12, wrap_convolve8_avg_vert_neon_12,
    wrap_convolve8_neon_12, wrap_convolve8_avg_neon_12,
    wrap_convolve8_horiz_neon_12, wrap_convolve8_avg_horiz_neon_12,
    wrap_convolve8_vert_neon_12, wrap_convolve8_avg_vert_neon_12,
    wrap_convolve8_neon_12, wrap_convolve8_avg_neon_12, 12);
const ConvolveParam kArrayConvolve_neon[] = { ALL_SIZES(convolve8_neon),
                                              ALL_SIZES(convolve10_neon),
                                              ALL_SIZES(convolve12_neon) };
// Low bitdepth NEON table; scaled 2-D uses vpx_scaled_2d_neon, the other
// scaled slots fall back to C.
const ConvolveFunctions convolve8_neon(
    vpx_convolve_copy_neon, vpx_convolve_avg_neon, vpx_convolve8_horiz_neon,
    vpx_convolve8_avg_horiz_neon, vpx_convolve8_vert_neon,
    vpx_convolve8_avg_vert_neon, vpx_convolve8_neon, vpx_convolve8_avg_neon,
    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
    vpx_scaled_avg_vert_c, vpx_scaled_2d_neon, vpx_scaled_avg_2d_c, 0);

const ConvolveParam kArrayConvolve_neon[] = { ALL_SIZES(convolve8_neon) };
1412 #endif // CONFIG_VP9_HIGHBITDEPTH
// Register the NEON configuration (highbd or lowbd, per the build).
INSTANTIATE_TEST_CASE_P(NEON, ConvolveTest,
                        ::testing::ValuesIn(kArrayConvolve_neon));
// MIPS DSPr2 table; scaled slots fall back to the C scaled predictors.
const ConvolveFunctions convolve8_dspr2(
    vpx_convolve_copy_dspr2, vpx_convolve_avg_dspr2, vpx_convolve8_horiz_dspr2,
    vpx_convolve8_avg_horiz_dspr2, vpx_convolve8_vert_dspr2,
    vpx_convolve8_avg_vert_dspr2, vpx_convolve8_dspr2, vpx_convolve8_avg_dspr2,
    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);

const ConvolveParam kArrayConvolve8_dspr2[] = { ALL_SIZES(convolve8_dspr2) };
INSTANTIATE_TEST_CASE_P(DSPR2, ConvolveTest,
                        ::testing::ValuesIn(kArrayConvolve8_dspr2));
1428 #endif // HAVE_DSPR2
// MIPS MSA table; scaled 2-D uses vpx_scaled_2d_msa, the other scaled
// slots fall back to C.
const ConvolveFunctions convolve8_msa(
    vpx_convolve_copy_msa, vpx_convolve_avg_msa, vpx_convolve8_horiz_msa,
    vpx_convolve8_avg_horiz_msa, vpx_convolve8_vert_msa,
    vpx_convolve8_avg_vert_msa, vpx_convolve8_msa, vpx_convolve8_avg_msa,
    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
    vpx_scaled_avg_vert_c, vpx_scaled_2d_msa, vpx_scaled_avg_2d_c, 0);

const ConvolveParam kArrayConvolve8_msa[] = { ALL_SIZES(convolve8_msa) };
INSTANTIATE_TEST_CASE_P(MSA, ConvolveTest,
                        ::testing::ValuesIn(kArrayConvolve8_msa));
// PowerPC VSX table; scaled slots fall back to the C scaled predictors.
const ConvolveFunctions convolve8_vsx(
    vpx_convolve_copy_vsx, vpx_convolve_avg_vsx, vpx_convolve8_horiz_vsx,
    vpx_convolve8_avg_horiz_vsx, vpx_convolve8_vert_vsx,
    vpx_convolve8_avg_vert_vsx, vpx_convolve8_vsx, vpx_convolve8_avg_vsx,
    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
const ConvolveParam kArrayConvolve_vsx[] = { ALL_SIZES(convolve8_vsx) };
INSTANTIATE_TEST_CASE_P(VSX, ConvolveTest,
                        ::testing::ValuesIn(kArrayConvolve_vsx));
// Loongson MMI table; note the copy slot uses the C version (no MMI copy is
// listed) while average and the 8-tap filters use MMI; scaled slots use C.
const ConvolveFunctions convolve8_mmi(
    vpx_convolve_copy_c, vpx_convolve_avg_mmi, vpx_convolve8_horiz_mmi,
    vpx_convolve8_avg_horiz_mmi, vpx_convolve8_vert_mmi,
    vpx_convolve8_avg_vert_mmi, vpx_convolve8_mmi, vpx_convolve8_avg_mmi,
    vpx_scaled_horiz_c, vpx_scaled_avg_horiz_c, vpx_scaled_vert_c,
    vpx_scaled_avg_vert_c, vpx_scaled_2d_c, vpx_scaled_avg_2d_c, 0);
const ConvolveParam kArrayConvolve_mmi[] = { ALL_SIZES(convolve8_mmi) };
INSTANTIATE_TEST_CASE_P(MMI, ConvolveTest,
                        ::testing::ValuesIn(kArrayConvolve_mmi));