/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_dsp_rtcd.h"
#include "./vpx_config.h"

#include "vpx/vpx_integer.h"
#include "vpx_ports/mem.h"
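
// Sum the lanes of a vector into one scalar. The pairwise adds widen
// (16 -> 32 -> 64 bit) so the intermediate sums cannot overflow; the final
// result is taken from the low 32 bits, which is exact as long as the true
// total fits in an int.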
static INLINE int horizontal_add_s16x8(const int16x8_t v_16x8) {
  const int32x4_t a = vpaddlq_s16(v_16x8);
  const int64x2_t b = vpaddlq_s32(a);
  const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
                               vreinterpret_s32_s64(vget_high_s64(b)));
  return vget_lane_s32(c, 0);
}

static INLINE int horizontal_add_s32x4(const int32x4_t v_32x4) {
  const int64x2_t b = vpaddlq_s32(v_32x4);
  const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
                               vreinterpret_s32_s64(vget_high_s64(b)));
  return vget_lane_s32(c, 0);
}

// w * h must be less than 2048 or sum_s16 may overflow.
// Process a block of any size where the width is divisible by 16.
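// (Rationale: each of the eight lanes of sum_s16 accumulates w * h / 8
// differences, each in [-255, 255]. At w * h = 1024 the worst case is
// 255 * 128 = 32640, just inside int16 range; at w * h = 2048 it could reach
// 255 * 256 = 65280 and wrap.)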
static void variance_neon_w16(const uint8_t *a, int a_stride, const uint8_t *b,
                              int b_stride, int w, int h, uint32_t *sse,
                              int *sum) {
  int i, j;
  int16x8_t sum_s16 = vdupq_n_s16(0);
  int32x4_t sse_lo_s32 = vdupq_n_s32(0);
  int32x4_t sse_hi_s32 = vdupq_n_s32(0);

  for (i = 0; i < h; ++i) {
    for (j = 0; j < w; j += 16) {
      const uint8x16_t a_u8 = vld1q_u8(a + j);
      const uint8x16_t b_u8 = vld1q_u8(b + j);

      // The widening subtract wraps modulo 2^16, so reinterpreting the u16
      // result as s16 yields the signed difference in [-255, 255].
      const uint16x8_t diff_lo_u16 =
          vsubl_u8(vget_low_u8(a_u8), vget_low_u8(b_u8));
      const uint16x8_t diff_hi_u16 =
          vsubl_u8(vget_high_u8(a_u8), vget_high_u8(b_u8));

      const int16x8_t diff_lo_s16 = vreinterpretq_s16_u16(diff_lo_u16);
      const int16x8_t diff_hi_s16 = vreinterpretq_s16_u16(diff_hi_u16);

      sum_s16 = vaddq_s16(sum_s16, diff_lo_s16);
      sum_s16 = vaddq_s16(sum_s16, diff_hi_s16);

      sse_lo_s32 = vmlal_s16(sse_lo_s32, vget_low_s16(diff_lo_s16),
                             vget_low_s16(diff_lo_s16));
      sse_lo_s32 = vmlal_s16(sse_lo_s32, vget_high_s16(diff_lo_s16),
                             vget_high_s16(diff_lo_s16));

      sse_hi_s32 = vmlal_s16(sse_hi_s32, vget_low_s16(diff_hi_s16),
                             vget_low_s16(diff_hi_s16));
      sse_hi_s32 = vmlal_s16(sse_hi_s32, vget_high_s16(diff_hi_s16),
                             vget_high_s16(diff_hi_s16));
    }
    a += a_stride;
    b += b_stride;
  }

  *sum = horizontal_add_s16x8(sum_s16);
  *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(sse_lo_s32, sse_hi_s32));
}

// w * h must be less than 2048 or sum_s16 may overflow.
// Process a block of width 8 two rows at a time.
static void variance_neon_w8x2(const uint8_t *a, int a_stride, const uint8_t *b,
                               int b_stride, int h, uint32_t *sse, int *sum) {
  int i = 0;
  int16x8_t sum_s16 = vdupq_n_s16(0);
  int32x4_t sse_lo_s32 = vdupq_n_s32(0);
  int32x4_t sse_hi_s32 = vdupq_n_s32(0);

  do {
    const uint8x8_t a_0_u8 = vld1_u8(a);
    const uint8x8_t a_1_u8 = vld1_u8(a + a_stride);
    const uint8x8_t b_0_u8 = vld1_u8(b);
    const uint8x8_t b_1_u8 = vld1_u8(b + b_stride);
    const uint16x8_t diff_0_u16 = vsubl_u8(a_0_u8, b_0_u8);
    const uint16x8_t diff_1_u16 = vsubl_u8(a_1_u8, b_1_u8);
    const int16x8_t diff_0_s16 = vreinterpretq_s16_u16(diff_0_u16);
    const int16x8_t diff_1_s16 = vreinterpretq_s16_u16(diff_1_u16);
    sum_s16 = vaddq_s16(sum_s16, diff_0_s16);
    sum_s16 = vaddq_s16(sum_s16, diff_1_s16);
    sse_lo_s32 = vmlal_s16(sse_lo_s32, vget_low_s16(diff_0_s16),
                           vget_low_s16(diff_0_s16));
    sse_lo_s32 = vmlal_s16(sse_lo_s32, vget_low_s16(diff_1_s16),
                           vget_low_s16(diff_1_s16));
    sse_hi_s32 = vmlal_s16(sse_hi_s32, vget_high_s16(diff_0_s16),
                           vget_high_s16(diff_0_s16));
    sse_hi_s32 = vmlal_s16(sse_hi_s32, vget_high_s16(diff_1_s16),
                           vget_high_s16(diff_1_s16));
    a += a_stride + a_stride;
    b += b_stride + b_stride;
    i += 2;
  } while (i < h);

  *sum = horizontal_add_s16x8(sum_s16);
  *sse = (uint32_t)horizontal_add_s32x4(vaddq_s32(sse_lo_s32, sse_hi_s32));
}

void vpx_get8x8var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
                        int b_stride, unsigned int *sse, int *sum) {
  variance_neon_w8x2(a, a_stride, b, b_stride, 8, sse, sum);
}

void vpx_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
                          int b_stride, unsigned int *sse, int *sum) {
  variance_neon_w16(a, a_stride, b, b_stride, 16, 16, sse, sum);
}
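
// variance = sse - sum * sum / (n * m); `shift` is log2(n * m), so the
// division becomes a right shift. Blocks smaller than 16x16 hold at most 128
// pixels, so |sum| <= 255 * 128 = 32640 and sum * sum fits in 32 bits; from
// 16x16 up the square can exceed INT32_MAX and is computed in 64 bits.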
#define varianceNxM(n, m, shift)                                            \
  unsigned int vpx_variance##n##x##m##_neon(const uint8_t *a, int a_stride, \
                                            const uint8_t *b, int b_stride, \
                                            unsigned int *sse) {            \
    int sum;                                                                \
    if (n == 8)                                                             \
      variance_neon_w8x2(a, a_stride, b, b_stride, m, sse, &sum);           \
    else                                                                    \
      variance_neon_w16(a, a_stride, b, b_stride, n, m, sse, &sum);         \
    if (n * m < 16 * 16)                                                    \
      return *sse - ((sum * sum) >> shift);                                 \
    else                                                                    \
      return *sse - (uint32_t)(((int64_t)sum * sum) >> shift);              \
  }

varianceNxM(8, 4, 5);
varianceNxM(8, 8, 6);
varianceNxM(8, 16, 7);
varianceNxM(16, 8, 7);
varianceNxM(16, 16, 8);
varianceNxM(16, 32, 9);
varianceNxM(32, 16, 9);
varianceNxM(32, 32, 10);
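
// The remaining sizes have w * h >= 2048, past the overflow limit of the
// helpers above, so they are computed in 32x32 or 64x16 chunks and the
// partial sums and SSEs are combined before the final variance step.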
unsigned int vpx_variance32x64_neon(const uint8_t *a, int a_stride,
                                    const uint8_t *b, int b_stride,
                                    unsigned int *sse) {
  int sum1, sum2;
  uint32_t sse1, sse2;
  variance_neon_w16(a, a_stride, b, b_stride, 32, 32, &sse1, &sum1);
  variance_neon_w16(a + (32 * a_stride), a_stride, b + (32 * b_stride),
                    b_stride, 32, 32, &sse2, &sum2);
  *sse = sse1 + sse2;
  sum1 += sum2;
  return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 11);
}

unsigned int vpx_variance64x32_neon(const uint8_t *a, int a_stride,
                                    const uint8_t *b, int b_stride,
                                    unsigned int *sse) {
  int sum1, sum2;
  uint32_t sse1, sse2;
  variance_neon_w16(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);
  variance_neon_w16(a + (16 * a_stride), a_stride, b + (16 * b_stride),
                    b_stride, 64, 16, &sse2, &sum2);
  *sse = sse1 + sse2;
  sum1 += sum2;
  return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 11);
}

unsigned int vpx_variance64x64_neon(const uint8_t *a, int a_stride,
                                    const uint8_t *b, int b_stride,
                                    unsigned int *sse) {
  int sum1, sum2;
  uint32_t sse1, sse2;

  variance_neon_w16(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);
  variance_neon_w16(a + (16 * a_stride), a_stride, b + (16 * b_stride),
                    b_stride, 64, 16, &sse2, &sum2);
  sse1 += sse2;
  sum1 += sum2;

  variance_neon_w16(a + (16 * 2 * a_stride), a_stride, b + (16 * 2 * b_stride),
                    b_stride, 64, 16, &sse2, &sum2);
  sse1 += sse2;
  sum1 += sum2;

  variance_neon_w16(a + (16 * 3 * a_stride), a_stride, b + (16 * 3 * b_stride),
                    b_stride, 64, 16, &sse2, &sum2);
  *sse = sse1 + sse2;
  sum1 += sum2;
  return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 12);
}
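
// MSE differs from variance only in that the mean is not removed: the raw
// sum of squared differences is both stored to *sse and returned, so no sum
// accumulator is needed.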
unsigned int vpx_mse16x16_neon(const unsigned char *src_ptr, int source_stride,
                               const unsigned char *ref_ptr, int recon_stride,
                               unsigned int *sse) {
  int i;
  int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
  int64x1_t d0s64;
  uint8x16_t q0u8, q1u8, q2u8, q3u8;
  int32x4_t q7s32, q8s32, q9s32, q10s32;
  uint16x8_t q11u16, q12u16, q13u16, q14u16;
  int64x2_t q1s64;

  q7s32 = vdupq_n_s32(0);
  q8s32 = vdupq_n_s32(0);
  q9s32 = vdupq_n_s32(0);
  q10s32 = vdupq_n_s32(0);

  for (i = 0; i < 8; i++) {  // mse16x16_neon_loop
    // Load two source rows and two reference rows per iteration.
    q0u8 = vld1q_u8(src_ptr);
    src_ptr += source_stride;
    q1u8 = vld1q_u8(src_ptr);
    src_ptr += source_stride;
    q2u8 = vld1q_u8(ref_ptr);
    ref_ptr += recon_stride;
    q3u8 = vld1q_u8(ref_ptr);
    ref_ptr += recon_stride;

    q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
    q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
    q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
    q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));

    d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
    d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
    q7s32 = vmlal_s16(q7s32, d22s16, d22s16);
    q8s32 = vmlal_s16(q8s32, d23s16, d23s16);

    d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
    d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
    q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
    q10s32 = vmlal_s16(q10s32, d25s16, d25s16);

    d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
    d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
    q7s32 = vmlal_s16(q7s32, d26s16, d26s16);
    q8s32 = vmlal_s16(q8s32, d27s16, d27s16);

    d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
    d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
    q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
    q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
  }

  q7s32 = vaddq_s32(q7s32, q8s32);
  q9s32 = vaddq_s32(q9s32, q10s32);
  q10s32 = vaddq_s32(q7s32, q9s32);

  q1s64 = vpaddlq_s32(q10s32);
  d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));

  vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d0s64), 0);
  return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
}
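
// Compute the SSE of a 4x4 block. Each vld1_u8 loads eight bytes per row,
// but only the low four lanes of each widened difference are squared and
// accumulated.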
unsigned int vpx_get4x4sse_cs_neon(const unsigned char *src_ptr,
                                   int source_stride,
                                   const unsigned char *ref_ptr,
                                   int recon_stride) {
  int16x4_t d22s16, d24s16, d26s16, d28s16;
  int64x1_t d0s64;
  uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
  int32x4_t q7s32, q8s32, q9s32, q10s32;
  uint16x8_t q11u16, q12u16, q13u16, q14u16;
  int64x2_t q1s64;

  d0u8 = vld1_u8(src_ptr);
  src_ptr += source_stride;
  d4u8 = vld1_u8(ref_ptr);
  ref_ptr += recon_stride;
  d1u8 = vld1_u8(src_ptr);
  src_ptr += source_stride;
  d5u8 = vld1_u8(ref_ptr);
  ref_ptr += recon_stride;
  d2u8 = vld1_u8(src_ptr);
  src_ptr += source_stride;
  d6u8 = vld1_u8(ref_ptr);
  ref_ptr += recon_stride;
  d3u8 = vld1_u8(src_ptr);
  src_ptr += source_stride;
  d7u8 = vld1_u8(ref_ptr);
  ref_ptr += recon_stride;

  q11u16 = vsubl_u8(d0u8, d4u8);
  q12u16 = vsubl_u8(d1u8, d5u8);
  q13u16 = vsubl_u8(d2u8, d6u8);
  q14u16 = vsubl_u8(d3u8, d7u8);

  d22s16 = vget_low_s16(vreinterpretq_s16_u16(q11u16));
  d24s16 = vget_low_s16(vreinterpretq_s16_u16(q12u16));
  d26s16 = vget_low_s16(vreinterpretq_s16_u16(q13u16));
  d28s16 = vget_low_s16(vreinterpretq_s16_u16(q14u16));

  q7s32 = vmull_s16(d22s16, d22s16);
  q8s32 = vmull_s16(d24s16, d24s16);
  q9s32 = vmull_s16(d26s16, d26s16);
  q10s32 = vmull_s16(d28s16, d28s16);

  q7s32 = vaddq_s32(q7s32, q8s32);
  q9s32 = vaddq_s32(q9s32, q10s32);
  q9s32 = vaddq_s32(q7s32, q9s32);

  q1s64 = vpaddlq_s32(q9s32);
  d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));

  return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
}