/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
11 #include "./vpx_config.h"
12 #include "./vpx_dsp_rtcd.h"
14 #include "vpx_ports/mem.h"
15 #include "vpx/vpx_integer.h"
17 #include "vpx_dsp/variance.h"
// 2-tap bilinear filter kernels for the 8 sub-pel phases. Each pair of taps
// sums to 128 (the filter weight), so the filtered output needs a shift by
// FILTER_BITS (7) to normalize.
static const uint8_t bilinear_filters[8][2] = {
  { 128, 0 }, { 112, 16 }, { 96, 32 }, { 80, 48 },
  { 64, 64 }, { 48, 80 }, { 32, 96 }, { 16, 112 },
};
// Returns the sum of squared differences between a 4x4 block at `a` and a
// 4x4 block at `b`, with the given row strides.
uint32_t vpx_get4x4sse_cs_c(const uint8_t *a, int a_stride, const uint8_t *b,
                            int b_stride) {
  int distortion = 0;
  int r, c;

  for (r = 0; r < 4; ++r) {
    for (c = 0; c < 4; ++c) {
      int diff = a[c] - b[c];
      distortion += diff * diff;
    }

    a += a_stride;
    b += b_stride;
  }

  return distortion;
}
// Returns the sum of squares of a 256-element (16x16 macroblock) residual
// array of int16_t values.
uint32_t vpx_get_mb_ss_c(const int16_t *a) {
  unsigned int i, sum = 0;

  for (i = 0; i < 256; ++i) {
    sum += a[i] * a[i];
  }

  return sum;
}
// Accumulates the sum of differences (*sum) and the sum of squared
// differences (*sse) between a w x h block at `a` and one at `b`.
static void variance(const uint8_t *a, int a_stride, const uint8_t *b,
                     int b_stride, int w, int h, uint32_t *sse, int *sum) {
  int i, j;

  *sum = 0;
  *sse = 0;

  for (i = 0; i < h; ++i) {
    for (j = 0; j < w; ++j) {
      const int diff = a[j] - b[j];
      *sum += diff;
      *sse += diff * diff;
    }

    a += a_stride;
    b += b_stride;
  }
}
71 // Applies a 1-D 2-tap bilinear filter to the source block in either horizontal
72 // or vertical direction to produce the filtered output block. Used to implement
73 // the first-pass of 2-D separable filter.
75 // Produces int16_t output to retain precision for the next pass. Two filter
76 // taps should sum to FILTER_WEIGHT. pixel_step defines whether the filter is
77 // applied horizontally (pixel_step = 1) or vertically (pixel_step = stride).
78 // It defines the offset required to move from one input to the next.
79 static void var_filter_block2d_bil_first_pass(const uint8_t *a, uint16_t *b,
80 unsigned int src_pixels_per_line,
82 unsigned int output_height,
83 unsigned int output_width,
84 const uint8_t *filter) {
87 for (i = 0; i < output_height; ++i) {
88 for (j = 0; j < output_width; ++j) {
89 b[j] = ROUND_POWER_OF_TWO(
90 (int)a[0] * filter[0] + (int)a[pixel_step] * filter[1], FILTER_BITS);
95 a += src_pixels_per_line - output_width;
100 // Applies a 1-D 2-tap bilinear filter to the source block in either horizontal
101 // or vertical direction to produce the filtered output block. Used to implement
102 // the second-pass of 2-D separable filter.
104 // Requires 16-bit input as produced by filter_block2d_bil_first_pass. Two
105 // filter taps should sum to FILTER_WEIGHT. pixel_step defines whether the
106 // filter is applied horizontally (pixel_step = 1) or vertically
107 // (pixel_step = stride). It defines the offset required to move from one input
108 // to the next. Output is 8-bit.
109 static void var_filter_block2d_bil_second_pass(const uint16_t *a, uint8_t *b,
110 unsigned int src_pixels_per_line,
111 unsigned int pixel_step,
112 unsigned int output_height,
113 unsigned int output_width,
114 const uint8_t *filter) {
117 for (i = 0; i < output_height; ++i) {
118 for (j = 0; j < output_width; ++j) {
119 b[j] = ROUND_POWER_OF_TWO(
120 (int)a[0] * filter[0] + (int)a[pixel_step] * filter[1], FILTER_BITS);
124 a += src_pixels_per_line - output_width;
// Defines vpx_variance{W}x{H}_c: returns sse - sum^2 / (W*H) for a W x H
// block and also reports the raw sse through the out-parameter.
#define VAR(W, H)                                                    \
  uint32_t vpx_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
                                     const uint8_t *b, int b_stride, \
                                     uint32_t *sse) {                \
    int sum;                                                         \
    variance(a, a_stride, b, b_stride, W, H, sse, &sum);             \
    return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H));        \
  }
// Defines vpx_sub_pixel_variance{W}x{H}_c: bilinear-filters the source block
// with the (xoffset, yoffset) sub-pel kernels, then computes variance against
// the reference `b`.
#define SUBPIX_VAR(W, H)                                                \
  uint32_t vpx_sub_pixel_variance##W##x##H##_c(                         \
      const uint8_t *a, int a_stride, int xoffset, int yoffset,         \
      const uint8_t *b, int b_stride, uint32_t *sse) {                  \
    uint16_t fdata3[(H + 1) * W];                                       \
    uint8_t temp2[H * W];                                               \
                                                                        \
    var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
                                      bilinear_filters[xoffset]);       \
    var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,       \
                                       bilinear_filters[yoffset]);      \
                                                                        \
    return vpx_variance##W##x##H##_c(temp2, W, b, b_stride, sse);       \
  }
// Defines vpx_sub_pixel_avg_variance{W}x{H}_c: like SUBPIX_VAR, but averages
// the filtered block with `second_pred` before computing variance.
#define SUBPIX_AVG_VAR(W, H)                                            \
  uint32_t vpx_sub_pixel_avg_variance##W##x##H##_c(                     \
      const uint8_t *a, int a_stride, int xoffset, int yoffset,         \
      const uint8_t *b, int b_stride, uint32_t *sse,                    \
      const uint8_t *second_pred) {                                     \
    uint16_t fdata3[(H + 1) * W];                                       \
    uint8_t temp2[H * W];                                               \
    DECLARE_ALIGNED(16, uint8_t, temp3[H * W]);                         \
                                                                        \
    var_filter_block2d_bil_first_pass(a, fdata3, a_stride, 1, H + 1, W, \
                                      bilinear_filters[xoffset]);       \
    var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,       \
                                       bilinear_filters[yoffset]);      \
                                                                        \
    vpx_comp_avg_pred(temp3, second_pred, W, H, temp2, W);              \
                                                                        \
    return vpx_variance##W##x##H##_c(temp3, W, b, b_stride, sse);       \
  }
/* Identical to the variance call except it takes an additional parameter, sum,
 * and returns that value using pass-by-reference instead of returning
 * sse - sum^2 / w*h
 */
#define GET_VAR(W, H)                                                         \
  void vpx_get##W##x##H##var_c(const uint8_t *a, int a_stride,                \
                               const uint8_t *b, int b_stride, uint32_t *sse, \
                               int *sum) {                                    \
    variance(a, a_stride, b, b_stride, W, H, sse, sum);                       \
  }
/* Identical to the variance call except it does not calculate the
 * sse - sum^2 / w*h and returns sse in addition to modifying the passed in
 * variable.
 */
#define MSE(W, H)                                               \
  uint32_t vpx_mse##W##x##H##_c(const uint8_t *a, int a_stride, \
                                const uint8_t *b, int b_stride, \
                                uint32_t *sse) {                \
    int sum;                                                    \
    variance(a, a_stride, b, b_stride, W, H, sse, &sum);        \
    return *sse;                                                \
  }
/* All three forms of the variance are available in the same sizes. */
#define VARIANCES(W, H) \
  VAR(W, H)             \
  SUBPIX_VAR(W, H)      \
  SUBPIX_AVG_VAR(W, H)
// Computes the rounded pixel-wise average of `pred` (stride == width) and
// `ref` (stride == ref_stride), writing the result to `comp_pred`
// (stride == width).
void vpx_comp_avg_pred_c(uint8_t *comp_pred, const uint8_t *pred, int width,
                         int height, const uint8_t *ref, int ref_stride) {
  int i, j;

  for (i = 0; i < height; ++i) {
    for (j = 0; j < width; ++j) {
      const int tmp = pred[j] + ref[j];
      comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1);  // (tmp + 1) >> 1
    }
    comp_pred += width;
    pred += width;
    ref += ref_stride;
  }
}
239 #if CONFIG_VP9_HIGHBITDEPTH
// High-bitdepth analogue of variance(): accumulates 64-bit sse and sum over a
// w x h block. The uint8_t pointers are CONVERT_TO_SHORTPTR-encoded uint16_t
// sample buffers, per libvpx's high-bitdepth pointer convention.
static void highbd_variance64(const uint8_t *a8, int a_stride,
                              const uint8_t *b8, int b_stride, int w, int h,
                              uint64_t *sse, int64_t *sum) {
  int i, j;

  uint16_t *a = CONVERT_TO_SHORTPTR(a8);
  uint16_t *b = CONVERT_TO_SHORTPTR(b8);
  *sum = 0;
  *sse = 0;

  for (i = 0; i < h; ++i) {
    for (j = 0; j < w; ++j) {
      const int diff = a[j] - b[j];
      *sum += diff;
      *sse += diff * diff;
    }
    a += a_stride;
    b += b_stride;
  }
}
// 8-bit-depth wrapper: narrows the 64-bit accumulators to the 32-bit/int
// out-parameters without scaling.
static void highbd_8_variance(const uint8_t *a8, int a_stride,
                              const uint8_t *b8, int b_stride, int w, int h,
                              uint32_t *sse, int *sum) {
  uint64_t sse_long = 0;
  int64_t sum_long = 0;
  highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
  *sse = (uint32_t)sse_long;
  *sum = (int)sum_long;
}
// 10-bit-depth wrapper: rescales to 8-bit-equivalent range — sse is divided
// by 2^4 (two extra bits, squared) and sum by 2^2, with rounding.
static void highbd_10_variance(const uint8_t *a8, int a_stride,
                               const uint8_t *b8, int b_stride, int w, int h,
                               uint32_t *sse, int *sum) {
  uint64_t sse_long = 0;
  int64_t sum_long = 0;
  highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
  *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 4);
  *sum = (int)ROUND_POWER_OF_TWO(sum_long, 2);
}
// 12-bit-depth wrapper: rescales to 8-bit-equivalent range — sse is divided
// by 2^8 (four extra bits, squared) and sum by 2^4, with rounding.
static void highbd_12_variance(const uint8_t *a8, int a_stride,
                               const uint8_t *b8, int b_stride, int w, int h,
                               uint32_t *sse, int *sum) {
  uint64_t sse_long = 0;
  int64_t sum_long = 0;
  highbd_variance64(a8, a_stride, b8, b_stride, w, h, &sse_long, &sum_long);
  *sse = (uint32_t)ROUND_POWER_OF_TWO(sse_long, 8);
  *sum = (int)ROUND_POWER_OF_TWO(sum_long, 4);
}
// Defines the 8/10/12-bit vpx_highbd_*_variance{W}x{H}_c functions. The
// 10/12-bit variants clamp a (theoretically possible after rescaling)
// negative variance to 0.
#define HIGHBD_VAR(W, H)                                                       \
  uint32_t vpx_highbd_8_variance##W##x##H##_c(const uint8_t *a, int a_stride,  \
                                              const uint8_t *b, int b_stride,  \
                                              uint32_t *sse) {                 \
    int sum;                                                                   \
    highbd_8_variance(a, a_stride, b, b_stride, W, H, sse, &sum);              \
    return *sse - (uint32_t)(((int64_t)sum * sum) / (W * H));                  \
  }                                                                            \
                                                                               \
  uint32_t vpx_highbd_10_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
                                               const uint8_t *b, int b_stride, \
                                               uint32_t *sse) {                \
    int sum;                                                                   \
    int64_t var;                                                               \
    highbd_10_variance(a, a_stride, b, b_stride, W, H, sse, &sum);             \
    var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H));                  \
    return (var >= 0) ? (uint32_t)var : 0;                                     \
  }                                                                            \
                                                                               \
  uint32_t vpx_highbd_12_variance##W##x##H##_c(const uint8_t *a, int a_stride, \
                                               const uint8_t *b, int b_stride, \
                                               uint32_t *sse) {                \
    int sum;                                                                   \
    int64_t var;                                                               \
    highbd_12_variance(a, a_stride, b, b_stride, W, H, sse, &sum);             \
    var = (int64_t)(*sse) - (((int64_t)sum * sum) / (W * H));                  \
    return (var >= 0) ? (uint32_t)var : 0;                                     \
  }
// Defines the 8/10/12-bit vpx_highbd_*_get{S}x{S}var_c functions, which
// report both sse and sum through out-parameters.
#define HIGHBD_GET_VAR(S)                                                    \
  void vpx_highbd_8_get##S##x##S##var_c(const uint8_t *src, int src_stride,  \
                                        const uint8_t *ref, int ref_stride,  \
                                        uint32_t *sse, int *sum) {           \
    highbd_8_variance(src, src_stride, ref, ref_stride, S, S, sse, sum);     \
  }                                                                          \
                                                                             \
  void vpx_highbd_10_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
                                         const uint8_t *ref, int ref_stride, \
                                         uint32_t *sse, int *sum) {          \
    highbd_10_variance(src, src_stride, ref, ref_stride, S, S, sse, sum);    \
  }                                                                          \
                                                                             \
  void vpx_highbd_12_get##S##x##S##var_c(const uint8_t *src, int src_stride, \
                                         const uint8_t *ref, int ref_stride, \
                                         uint32_t *sse, int *sum) {          \
    highbd_12_variance(src, src_stride, ref, ref_stride, S, S, sse, sum);    \
  }
// Defines the 8/10/12-bit vpx_highbd_*_mse{W}x{H}_c functions: compute sse
// only (the sum term is discarded).
#define HIGHBD_MSE(W, H)                                                      \
  uint32_t vpx_highbd_8_mse##W##x##H##_c(const uint8_t *src, int src_stride,  \
                                         const uint8_t *ref, int ref_stride,  \
                                         uint32_t *sse) {                     \
    int sum;                                                                  \
    highbd_8_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum);     \
    return *sse;                                                              \
  }                                                                           \
                                                                              \
  uint32_t vpx_highbd_10_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
                                          const uint8_t *ref, int ref_stride, \
                                          uint32_t *sse) {                    \
    int sum;                                                                  \
    highbd_10_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum);    \
    return *sse;                                                              \
  }                                                                           \
                                                                              \
  uint32_t vpx_highbd_12_mse##W##x##H##_c(const uint8_t *src, int src_stride, \
                                          const uint8_t *ref, int ref_stride, \
                                          uint32_t *sse) {                    \
    int sum;                                                                  \
    highbd_12_variance(src, src_stride, ref, ref_stride, W, H, sse, &sum);    \
    return *sse;                                                              \
  }
364 static void highbd_var_filter_block2d_bil_first_pass(
365 const uint8_t *src_ptr8, uint16_t *output_ptr,
366 unsigned int src_pixels_per_line, int pixel_step,
367 unsigned int output_height, unsigned int output_width,
368 const uint8_t *filter) {
370 uint16_t *src_ptr = CONVERT_TO_SHORTPTR(src_ptr8);
371 for (i = 0; i < output_height; ++i) {
372 for (j = 0; j < output_width; ++j) {
373 output_ptr[j] = ROUND_POWER_OF_TWO(
374 (int)src_ptr[0] * filter[0] + (int)src_ptr[pixel_step] * filter[1],
381 src_ptr += src_pixels_per_line - output_width;
382 output_ptr += output_width;
386 static void highbd_var_filter_block2d_bil_second_pass(
387 const uint16_t *src_ptr, uint16_t *output_ptr,
388 unsigned int src_pixels_per_line, unsigned int pixel_step,
389 unsigned int output_height, unsigned int output_width,
390 const uint8_t *filter) {
393 for (i = 0; i < output_height; ++i) {
394 for (j = 0; j < output_width; ++j) {
395 output_ptr[j] = ROUND_POWER_OF_TWO(
396 (int)src_ptr[0] * filter[0] + (int)src_ptr[pixel_step] * filter[1],
401 src_ptr += src_pixels_per_line - output_width;
402 output_ptr += output_width;
// Defines the 8/10/12-bit sub-pixel variance functions: bilinear-filter the
// source with the (xoffset, yoffset) sub-pel kernels, then measure variance
// against dst.
#define HIGHBD_SUBPIX_VAR(W, H)                                              \
  uint32_t vpx_highbd_8_sub_pixel_variance##W##x##H##_c(                     \
      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
      const uint8_t *dst, int dst_stride, uint32_t *sse) {                   \
    uint16_t fdata3[(H + 1) * W];                                            \
    uint16_t temp2[H * W];                                                   \
                                                                             \
    highbd_var_filter_block2d_bil_first_pass(                                \
        src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]);    \
    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,     \
                                              bilinear_filters[yoffset]);    \
                                                                             \
    return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W,  \
                                              dst, dst_stride, sse);         \
  }                                                                          \
                                                                             \
  uint32_t vpx_highbd_10_sub_pixel_variance##W##x##H##_c(                    \
      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
      const uint8_t *dst, int dst_stride, uint32_t *sse) {                   \
    uint16_t fdata3[(H + 1) * W];                                            \
    uint16_t temp2[H * W];                                                   \
                                                                             \
    highbd_var_filter_block2d_bil_first_pass(                                \
        src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]);    \
    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,     \
                                              bilinear_filters[yoffset]);    \
                                                                             \
    return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
                                               dst, dst_stride, sse);        \
  }                                                                          \
                                                                             \
  uint32_t vpx_highbd_12_sub_pixel_variance##W##x##H##_c(                    \
      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
      const uint8_t *dst, int dst_stride, uint32_t *sse) {                   \
    uint16_t fdata3[(H + 1) * W];                                            \
    uint16_t temp2[H * W];                                                   \
                                                                             \
    highbd_var_filter_block2d_bil_first_pass(                                \
        src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]);    \
    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,     \
                                              bilinear_filters[yoffset]);    \
                                                                             \
    return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp2), W, \
                                               dst, dst_stride, sse);        \
  }
// Defines the 8/10/12-bit sub-pixel average-variance functions: like
// HIGHBD_SUBPIX_VAR, but averages the filtered block with second_pred before
// computing variance.
#define HIGHBD_SUBPIX_AVG_VAR(W, H)                                          \
  uint32_t vpx_highbd_8_sub_pixel_avg_variance##W##x##H##_c(                 \
      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
      const uint8_t *dst, int dst_stride, uint32_t *sse,                     \
      const uint8_t *second_pred) {                                          \
    uint16_t fdata3[(H + 1) * W];                                            \
    uint16_t temp2[H * W];                                                   \
    DECLARE_ALIGNED(16, uint16_t, temp3[H * W]);                             \
                                                                             \
    highbd_var_filter_block2d_bil_first_pass(                                \
        src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]);    \
    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,     \
                                              bilinear_filters[yoffset]);    \
                                                                             \
    vpx_highbd_comp_avg_pred(temp3, second_pred, W, H,                       \
                             CONVERT_TO_BYTEPTR(temp2), W);                  \
                                                                             \
    return vpx_highbd_8_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W,  \
                                              dst, dst_stride, sse);         \
  }                                                                          \
                                                                             \
  uint32_t vpx_highbd_10_sub_pixel_avg_variance##W##x##H##_c(                \
      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
      const uint8_t *dst, int dst_stride, uint32_t *sse,                     \
      const uint8_t *second_pred) {                                          \
    uint16_t fdata3[(H + 1) * W];                                            \
    uint16_t temp2[H * W];                                                   \
    DECLARE_ALIGNED(16, uint16_t, temp3[H * W]);                             \
                                                                             \
    highbd_var_filter_block2d_bil_first_pass(                                \
        src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]);    \
    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,     \
                                              bilinear_filters[yoffset]);    \
                                                                             \
    vpx_highbd_comp_avg_pred(temp3, second_pred, W, H,                       \
                             CONVERT_TO_BYTEPTR(temp2), W);                  \
                                                                             \
    return vpx_highbd_10_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
                                               dst, dst_stride, sse);        \
  }                                                                          \
                                                                             \
  uint32_t vpx_highbd_12_sub_pixel_avg_variance##W##x##H##_c(                \
      const uint8_t *src, int src_stride, int xoffset, int yoffset,          \
      const uint8_t *dst, int dst_stride, uint32_t *sse,                     \
      const uint8_t *second_pred) {                                          \
    uint16_t fdata3[(H + 1) * W];                                            \
    uint16_t temp2[H * W];                                                   \
    DECLARE_ALIGNED(16, uint16_t, temp3[H * W]);                             \
                                                                             \
    highbd_var_filter_block2d_bil_first_pass(                                \
        src, fdata3, src_stride, 1, H + 1, W, bilinear_filters[xoffset]);    \
    highbd_var_filter_block2d_bil_second_pass(fdata3, temp2, W, W, H, W,     \
                                              bilinear_filters[yoffset]);    \
                                                                             \
    vpx_highbd_comp_avg_pred(temp3, second_pred, W, H,                       \
                             CONVERT_TO_BYTEPTR(temp2), W);                  \
                                                                             \
    return vpx_highbd_12_variance##W##x##H##_c(CONVERT_TO_BYTEPTR(temp3), W, \
                                               dst, dst_stride, sse);        \
  }
513 /* All three forms of the variance are available in the same sizes. */
514 #define HIGHBD_VARIANCES(W, H) \
516 HIGHBD_SUBPIX_VAR(W, H) \
517 HIGHBD_SUBPIX_AVG_VAR(W, H)
519 HIGHBD_VARIANCES(64, 64)
520 HIGHBD_VARIANCES(64, 32)
521 HIGHBD_VARIANCES(32, 64)
522 HIGHBD_VARIANCES(32, 32)
523 HIGHBD_VARIANCES(32, 16)
524 HIGHBD_VARIANCES(16, 32)
525 HIGHBD_VARIANCES(16, 16)
526 HIGHBD_VARIANCES(16, 8)
527 HIGHBD_VARIANCES(8, 16)
528 HIGHBD_VARIANCES(8, 8)
529 HIGHBD_VARIANCES(8, 4)
530 HIGHBD_VARIANCES(4, 8)
531 HIGHBD_VARIANCES(4, 4)
// High-bitdepth rounded pixel-wise average of pred8 (stride == width) and
// ref8 (stride == ref_stride) into comp_pred (stride == width). Both input
// pointers are CONVERT_TO_SHORTPTR-encoded uint16_t buffers.
void vpx_highbd_comp_avg_pred(uint16_t *comp_pred, const uint8_t *pred8,
                              int width, int height, const uint8_t *ref8,
                              int ref_stride) {
  int i, j;
  uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
  uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
  for (i = 0; i < height; ++i) {
    for (j = 0; j < width; ++j) {
      const int tmp = pred[j] + ref[j];
      comp_pred[j] = ROUND_POWER_OF_TWO(tmp, 1);  // (tmp + 1) >> 1
    }
    comp_pred += width;
    pred += width;
    ref += ref_stride;
  }
}
557 #endif // CONFIG_VP9_HIGHBITDEPTH