/*
 *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_dsp_rtcd.h"
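
// One butterfly pass of the 8-point Hadamard transform, applied to eight
// int16x8_t vectors at once: eight rows on the first pass, eight columns
// (after the transpose) on the second. Three stages of paired sums and
// differences compute the transform; the results are written back through
// a0-a7 in a permuted order rather than ascending order.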
static void hadamard8x8_one_pass(int16x8_t *a0, int16x8_t *a1,
                                 int16x8_t *a2, int16x8_t *a3,
                                 int16x8_t *a4, int16x8_t *a5,
                                 int16x8_t *a6, int16x8_t *a7) {
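  // First stage: sums and differences of adjacent input pairs.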
  const int16x8_t b0 = vaddq_s16(*a0, *a1);
  const int16x8_t b1 = vsubq_s16(*a0, *a1);
  const int16x8_t b2 = vaddq_s16(*a2, *a3);
  const int16x8_t b3 = vsubq_s16(*a2, *a3);
  const int16x8_t b4 = vaddq_s16(*a4, *a5);
  const int16x8_t b5 = vsubq_s16(*a4, *a5);
  const int16x8_t b6 = vaddq_s16(*a6, *a7);
  const int16x8_t b7 = vsubq_s16(*a6, *a7);
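
  // Second stage: combine the first-stage terms into 4-point transforms.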
  const int16x8_t c0 = vaddq_s16(b0, b2);
  const int16x8_t c1 = vaddq_s16(b1, b3);
  const int16x8_t c2 = vsubq_s16(b0, b2);
  const int16x8_t c3 = vsubq_s16(b1, b3);
  const int16x8_t c4 = vaddq_s16(b4, b6);
  const int16x8_t c5 = vaddq_s16(b5, b7);
  const int16x8_t c6 = vsubq_s16(b4, b6);
  const int16x8_t c7 = vsubq_s16(b5, b7);
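
  // Final stage: 8-point results, written back in the pass's output order.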
  *a0 = vaddq_s16(c0, c4);
  *a1 = vsubq_s16(c2, c6);
  *a2 = vsubq_s16(c0, c4);
  *a3 = vaddq_s16(c2, c6);
  *a4 = vaddq_s16(c3, c7);
  *a5 = vsubq_s16(c3, c7);
  *a6 = vsubq_s16(c1, c5);
  *a7 = vaddq_s16(c1, c5);
}

// TODO(johannkoenig): Make a transpose library and dedup with idct. Consider
// reversing transpose order which may make it easier for the compiler to
// reconcile the vtrn.64 moves.
static void transpose8x8(int16x8_t *a0, int16x8_t *a1,
                         int16x8_t *a2, int16x8_t *a3,
                         int16x8_t *a4, int16x8_t *a5,
                         int16x8_t *a6, int16x8_t *a7) {
  // Swap 64 bit elements. Goes from:
  // a0: 00 01 02 03 04 05 06 07
  // a1: 08 09 10 11 12 13 14 15
  // a2: 16 17 18 19 20 21 22 23
  // a3: 24 25 26 27 28 29 30 31
  // a4: 32 33 34 35 36 37 38 39
  // a5: 40 41 42 43 44 45 46 47
  // a6: 48 49 50 51 52 53 54 55
  // a7: 56 57 58 59 60 61 62 63
  // to:
  // a04_lo: 00 01 02 03 32 33 34 35
  // a15_lo: 08 09 10 11 40 41 42 43
  // a26_lo: 16 17 18 19 48 49 50 51
  // a37_lo: 24 25 26 27 56 57 58 59
  // a04_hi: 04 05 06 07 36 37 38 39
  // a15_hi: 12 13 14 15 44 45 46 47
  // a26_hi: 20 21 22 23 52 53 54 55
  // a37_hi: 28 29 30 31 60 61 62 63
  const int16x8_t a04_lo = vcombine_s16(vget_low_s16(*a0), vget_low_s16(*a4));
  const int16x8_t a15_lo = vcombine_s16(vget_low_s16(*a1), vget_low_s16(*a5));
  const int16x8_t a26_lo = vcombine_s16(vget_low_s16(*a2), vget_low_s16(*a6));
  const int16x8_t a37_lo = vcombine_s16(vget_low_s16(*a3), vget_low_s16(*a7));
  const int16x8_t a04_hi =
      vcombine_s16(vget_high_s16(*a0), vget_high_s16(*a4));
  const int16x8_t a15_hi =
      vcombine_s16(vget_high_s16(*a1), vget_high_s16(*a5));
  const int16x8_t a26_hi =
      vcombine_s16(vget_high_s16(*a2), vget_high_s16(*a6));
  const int16x8_t a37_hi =
      vcombine_s16(vget_high_s16(*a3), vget_high_s16(*a7));

  // Swap 32 bit elements resulting in:
  // a0246_lo:
  // 00 01 16 17 32 33 48 49
  // 02 03 18 19 34 35 50 51
  // a1357_lo:
  // 08 09 24 25 40 41 56 57
  // 10 11 26 27 42 43 58 59
  // a0246_hi:
  // 04 05 20 21 36 37 52 53
  // 06 07 22 23 38 39 54 55
  // a1357_hi:
  // 12 13 28 29 44 45 60 61
  // 14 15 30 31 46 47 62 63
  const int32x4x2_t a0246_lo = vtrnq_s32(vreinterpretq_s32_s16(a04_lo),
                                         vreinterpretq_s32_s16(a26_lo));
  const int32x4x2_t a1357_lo = vtrnq_s32(vreinterpretq_s32_s16(a15_lo),
                                         vreinterpretq_s32_s16(a37_lo));
  const int32x4x2_t a0246_hi = vtrnq_s32(vreinterpretq_s32_s16(a04_hi),
                                         vreinterpretq_s32_s16(a26_hi));
  const int32x4x2_t a1357_hi = vtrnq_s32(vreinterpretq_s32_s16(a15_hi),
                                         vreinterpretq_s32_s16(a37_hi));

  // Swap 16 bit elements resulting in:
  // b0:
  // 00 08 16 24 32 40 48 56
  // 01 09 17 25 33 41 49 57
  // b1:
  // 02 10 18 26 34 42 50 58
  // 03 11 19 27 35 43 51 59
  // b2:
  // 04 12 20 28 36 44 52 60
  // 05 13 21 29 37 45 53 61
  // b3:
  // 06 14 22 30 38 46 54 62
  // 07 15 23 31 39 47 55 63
  const int16x8x2_t b0 = vtrnq_s16(vreinterpretq_s16_s32(a0246_lo.val[0]),
                                   vreinterpretq_s16_s32(a1357_lo.val[0]));
  const int16x8x2_t b1 = vtrnq_s16(vreinterpretq_s16_s32(a0246_lo.val[1]),
                                   vreinterpretq_s16_s32(a1357_lo.val[1]));
  const int16x8x2_t b2 = vtrnq_s16(vreinterpretq_s16_s32(a0246_hi.val[0]),
                                   vreinterpretq_s16_s32(a1357_hi.val[0]));
  const int16x8x2_t b3 = vtrnq_s16(vreinterpretq_s16_s32(a0246_hi.val[1]),
                                   vreinterpretq_s16_s32(a1357_hi.val[1]));

  *a0 = b0.val[0];
  *a1 = b0.val[1];
  *a2 = b1.val[0];
  *a3 = b1.val[1];
  *a4 = b2.val[0];
  *a5 = b2.val[1];
  *a6 = b3.val[0];
  *a7 = b3.val[1];
}
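
// 8x8 Hadamard transform of the residual block at src_diff: one butterfly
// pass over the rows, a transpose, then a second pass over the columns. The
// 64 outputs are stored contiguously at coeff.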
void vpx_hadamard_8x8_neon(const int16_t *src_diff, int src_stride,
                           int16_t *coeff) {
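  // Load the eight rows of the 8x8 residual block.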
  int16x8_t a0 = vld1q_s16(src_diff);
  int16x8_t a1 = vld1q_s16(src_diff + src_stride);
  int16x8_t a2 = vld1q_s16(src_diff + 2 * src_stride);
  int16x8_t a3 = vld1q_s16(src_diff + 3 * src_stride);
  int16x8_t a4 = vld1q_s16(src_diff + 4 * src_stride);
  int16x8_t a5 = vld1q_s16(src_diff + 5 * src_stride);
  int16x8_t a6 = vld1q_s16(src_diff + 6 * src_stride);
  int16x8_t a7 = vld1q_s16(src_diff + 7 * src_stride);

  hadamard8x8_one_pass(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);

  transpose8x8(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);

  hadamard8x8_one_pass(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);

  // Skip the second transpose because it is not required.

  vst1q_s16(coeff + 0, a0);
  vst1q_s16(coeff + 8, a1);
  vst1q_s16(coeff + 16, a2);
  vst1q_s16(coeff + 24, a3);
  vst1q_s16(coeff + 32, a4);
  vst1q_s16(coeff + 40, a5);
  vst1q_s16(coeff + 48, a6);
  vst1q_s16(coeff + 56, a7);
}
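
// Usage sketch (hypothetical caller, not part of this file): `diff` is an
// 8x8 block of residuals with row stride `stride`:
//   int16_t coeff[64];
//   vpx_hadamard_8x8_neon(diff, stride, coeff);
// In libvpx this function is normally reached through the vpx_hadamard_8x8
// RTCD dispatch rather than called directly.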