/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <arm_neon.h>

#include "./vpx_config.h"
#include "vpx_dsp/txfm_common.h"
/* Load rows `first` and `second` (8 int16 coefficients each) of the
 * transposed coefficient buffer into q14s16/q13s16, the implicit inputs of
 * DO_BUTTERFLY_STD.  `prev` only documents the previously loaded row at the
 * call site and is intentionally unused.  Arguments are parenthesized so
 * that expression arguments expand safely. */
#define LOAD_FROM_TRANSPOSED(prev, first, second) \
  q14s16 = vld1q_s16(trans_buf + (first)*8);      \
  q13s16 = vld1q_s16(trans_buf + (second)*8);
/* Load rows `first` and `second` of the 32x32 intermediate output buffer
 * (32 int16s per row) into qA/qB.  `prev` is documentation-only.
 * Arguments are parenthesized for macro-expansion safety. */
#define LOAD_FROM_OUTPUT(prev, first, second, qA, qB) \
  qA = vld1q_s16(out + (first)*32);                   \
  qB = vld1q_s16(out + (second)*32);
/* Store qA/qB to rows `first` and `second` of the 32x32 intermediate
 * output buffer.  `prev` is documentation-only.  Arguments are
 * parenthesized for macro-expansion safety. */
#define STORE_IN_OUTPUT(prev, first, second, qA, qB) \
  vst1q_s16(out + (first)*32, qA);                   \
  vst1q_s16(out + (second)*32, qB);
/* Round the four "center" result rows, add them to the destination pixels
 * and store them back: p1 covers two rows walking down from the middle,
 * p2 covers two rows walking up.
 * Lane pairing (load row == store row): d8<->q6 (p1), d9<->q7 (p1+stride),
 * d11<->q9 (p2), d10<->q8 (p2-stride). */
#define STORE_COMBINE_CENTER_RESULTS(r10, r9) \
  __STORE_COMBINE_CENTER_RESULTS(r10, r9, stride, q6s16, q7s16, q8s16, q9s16);
static INLINE void __STORE_COMBINE_CENTER_RESULTS(uint8_t *p1, uint8_t *p2,
                                                  int stride, int16x8_t q6s16,
                                                  int16x8_t q7s16,
                                                  int16x8_t q8s16,
                                                  int16x8_t q9s16) {
  int16x4_t d8s16, d9s16, d10s16, d11s16;

  /* Load the four destination rows (8 u8 pixels each, via 64-bit loads). */
  d8s16 = vld1_s16((int16_t *)p1);
  p1 += stride;
  d11s16 = vld1_s16((int16_t *)p2);
  p2 -= stride;
  d9s16 = vld1_s16((int16_t *)p1);
  d10s16 = vld1_s16((int16_t *)p2);

  /* Round and shift the idct results down by 6 bits. */
  q7s16 = vrshrq_n_s16(q7s16, 6);
  q8s16 = vrshrq_n_s16(q8s16, 6);
  q9s16 = vrshrq_n_s16(q9s16, 6);
  q6s16 = vrshrq_n_s16(q6s16, 6);

  /* Widening-add the u8 destination pixels to the s16 results. */
  q7s16 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q7s16), vreinterpret_u8_s16(d9s16)));
  q8s16 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q8s16), vreinterpret_u8_s16(d10s16)));
  q9s16 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q9s16), vreinterpret_u8_s16(d11s16)));
  q6s16 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q6s16), vreinterpret_u8_s16(d8s16)));

  /* Saturate back down to u8. */
  d9s16 = vreinterpret_s16_u8(vqmovun_s16(q7s16));
  d10s16 = vreinterpret_s16_u8(vqmovun_s16(q8s16));
  d11s16 = vreinterpret_s16_u8(vqmovun_s16(q9s16));
  d8s16 = vreinterpret_s16_u8(vqmovun_s16(q6s16));

  /* Store each result to the same row its pixels were loaded from. */
  vst1_s16((int16_t *)p1, d9s16);
  p1 -= stride;
  vst1_s16((int16_t *)p2, d10s16);
  p2 += stride;
  vst1_s16((int16_t *)p1, d8s16);
  vst1_s16((int16_t *)p2, d11s16);
}
/* Round the four "extreme" result rows, add them to the destination pixels
 * and store them back: p1 covers two rows walking down from the top edge,
 * p2 covers two rows walking up from the bottom edge.
 * Lane pairing (load row == store row): d4<->q4 (p1), d5<->q5 (p1+stride),
 * d7<->q7 (p2), d6<->q6 (p2-stride). */
#define STORE_COMBINE_EXTREME_RESULTS(r7, r6) \
  __STORE_COMBINE_EXTREME_RESULTS(r7, r6, stride, q4s16, q5s16, q6s16, q7s16);
static INLINE void __STORE_COMBINE_EXTREME_RESULTS(uint8_t *p1, uint8_t *p2,
                                                   int stride, int16x8_t q4s16,
                                                   int16x8_t q5s16,
                                                   int16x8_t q6s16,
                                                   int16x8_t q7s16) {
  int16x4_t d4s16, d5s16, d6s16, d7s16;

  /* Load the four destination rows (8 u8 pixels each, via 64-bit loads). */
  d4s16 = vld1_s16((int16_t *)p1);
  p1 += stride;
  d7s16 = vld1_s16((int16_t *)p2);
  p2 -= stride;
  d5s16 = vld1_s16((int16_t *)p1);
  d6s16 = vld1_s16((int16_t *)p2);

  /* Round and shift the idct results down by 6 bits. */
  q5s16 = vrshrq_n_s16(q5s16, 6);
  q6s16 = vrshrq_n_s16(q6s16, 6);
  q7s16 = vrshrq_n_s16(q7s16, 6);
  q4s16 = vrshrq_n_s16(q4s16, 6);

  /* Widening-add the u8 destination pixels to the s16 results. */
  q5s16 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q5s16), vreinterpret_u8_s16(d5s16)));
  q6s16 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q6s16), vreinterpret_u8_s16(d6s16)));
  q7s16 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q7s16), vreinterpret_u8_s16(d7s16)));
  q4s16 = vreinterpretq_s16_u16(
      vaddw_u8(vreinterpretq_u16_s16(q4s16), vreinterpret_u8_s16(d4s16)));

  /* Saturate back down to u8. */
  d5s16 = vreinterpret_s16_u8(vqmovun_s16(q5s16));
  d6s16 = vreinterpret_s16_u8(vqmovun_s16(q6s16));
  d7s16 = vreinterpret_s16_u8(vqmovun_s16(q7s16));
  d4s16 = vreinterpret_s16_u8(vqmovun_s16(q4s16));

  /* Store each result to the same row its pixels were loaded from. */
  vst1_s16((int16_t *)p1, d5s16);
  p1 -= stride;
  vst1_s16((int16_t *)p2, d6s16);
  p2 += stride;
  vst1_s16((int16_t *)p2, d7s16);
  vst1_s16((int16_t *)p1, d4s16);
}
/* Butterfly on the implicitly-named q14s16/q13s16 registers loaded by
 * LOAD_FROM_TRANSPOSED. */
#define DO_BUTTERFLY_STD(const_1, const_2, qA, qB) \
  DO_BUTTERFLY(q14s16, q13s16, const_1, const_2, qA, qB);
/* Rotation butterfly with 14-bit (DCT_CONST_BITS) rounding narrows:
 *   *qAs16 = round((q14s16 * first_const - q13s16 * second_const) >> 14)
 *   *qBs16 = round((q14s16 * second_const + q13s16 * first_const) >> 14) */
static INLINE void DO_BUTTERFLY(int16x8_t q14s16, int16x8_t q13s16,
                                int16_t first_const, int16_t second_const,
                                int16x8_t *qAs16, int16x8_t *qBs16) {
  int16x4_t d30s16, d31s16;
  int32x4_t q8s32, q9s32, q10s32, q11s32, q12s32, q15s32;
  int16x4_t dCs16, dDs16, dAs16, dBs16;

  dCs16 = vget_low_s16(q14s16);
  dDs16 = vget_high_s16(q14s16);
  dAs16 = vget_low_s16(q13s16);
  dBs16 = vget_high_s16(q13s16);

  d30s16 = vdup_n_s16(first_const);
  d31s16 = vdup_n_s16(second_const);

  /* q14 * first_const - q13 * second_const, low (q8) and high (q9) halves. */
  q8s32 = vmull_s16(dCs16, d30s16);
  q10s32 = vmull_s16(dAs16, d31s16);
  q9s32 = vmull_s16(dDs16, d30s16);
  q11s32 = vmull_s16(dBs16, d31s16);
  q12s32 = vmull_s16(dCs16, d31s16);

  q8s32 = vsubq_s32(q8s32, q10s32);
  q9s32 = vsubq_s32(q9s32, q11s32);

  /* q14 * second_const + q13 * first_const, low (q11) and high (q10). */
  q10s32 = vmull_s16(dDs16, d31s16);
  q11s32 = vmull_s16(dAs16, d30s16);
  q15s32 = vmull_s16(dBs16, d30s16);

  q11s32 = vaddq_s32(q12s32, q11s32);
  q10s32 = vaddq_s32(q10s32, q15s32);

  /* Saturating rounding narrow by 14 bits back to s16. */
  *qAs16 = vcombine_s16(vqrshrn_n_s32(q8s32, 14), vqrshrn_n_s32(q9s32, 14));
  *qBs16 = vcombine_s16(vqrshrn_n_s32(q11s32, 14), vqrshrn_n_s32(q10s32, 14));
}
/* Transpose an 8x32 strip of the input (8 rows of the 32-wide coefficient
 * array) into t_buf as 32 rows of 8, one 8x8 tile per loop iteration. */
static INLINE void idct32_transpose_pair(int16_t *input, int16_t *t_buf) {
  int i;
  int16_t *in;
  const int stride = 32;  /* input row pitch in int16 units */
  int16x4_t d16s16, d17s16, d18s16, d19s16, d20s16, d21s16, d22s16, d23s16;
  int16x4_t d24s16, d25s16, d26s16, d27s16, d28s16, d29s16, d30s16, d31s16;
  int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
  int32x4x2_t q0x2s32, q1x2s32, q2x2s32, q3x2s32;
  int16x8x2_t q0x2s16, q1x2s16, q2x2s16, q3x2s16;

  for (i = 0; i < 4; i++, input += 8) {  /* one 8x8 tile per iteration */
    /* Load eight rows of eight coefficients. */
    in = input;
    q8s16 = vld1q_s16(in);
    in += stride;
    q9s16 = vld1q_s16(in);
    in += stride;
    q10s16 = vld1q_s16(in);
    in += stride;
    q11s16 = vld1q_s16(in);
    in += stride;
    q12s16 = vld1q_s16(in);
    in += stride;
    q13s16 = vld1q_s16(in);
    in += stride;
    q14s16 = vld1q_s16(in);
    in += stride;
    q15s16 = vld1q_s16(in);

    d16s16 = vget_low_s16(q8s16);
    d17s16 = vget_high_s16(q8s16);
    d18s16 = vget_low_s16(q9s16);
    d19s16 = vget_high_s16(q9s16);
    d20s16 = vget_low_s16(q10s16);
    d21s16 = vget_high_s16(q10s16);
    d22s16 = vget_low_s16(q11s16);
    d23s16 = vget_high_s16(q11s16);
    d24s16 = vget_low_s16(q12s16);
    d25s16 = vget_high_s16(q12s16);
    d26s16 = vget_low_s16(q13s16);
    d27s16 = vget_high_s16(q13s16);
    d28s16 = vget_low_s16(q14s16);
    d29s16 = vget_high_s16(q14s16);
    d30s16 = vget_low_s16(q15s16);
    d31s16 = vget_high_s16(q15s16);

    /* Swap 64-bit halves (intrinsic equivalent of the asm vswp step). */
    q8s16 = vcombine_s16(d16s16, d24s16);   // vswp d17, d24
    q9s16 = vcombine_s16(d18s16, d26s16);   // vswp d19, d26
    q10s16 = vcombine_s16(d20s16, d28s16);  // vswp d21, d28
    q11s16 = vcombine_s16(d22s16, d30s16);  // vswp d23, d30
    q12s16 = vcombine_s16(d17s16, d25s16);
    q13s16 = vcombine_s16(d19s16, d27s16);
    q14s16 = vcombine_s16(d21s16, d29s16);
    q15s16 = vcombine_s16(d23s16, d31s16);

    /* Transpose 2x2 blocks of 32-bit lanes... */
    q0x2s32 =
        vtrnq_s32(vreinterpretq_s32_s16(q8s16), vreinterpretq_s32_s16(q10s16));
    q1x2s32 =
        vtrnq_s32(vreinterpretq_s32_s16(q9s16), vreinterpretq_s32_s16(q11s16));
    q2x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q12s16),
                        vreinterpretq_s32_s16(q14s16));
    q3x2s32 = vtrnq_s32(vreinterpretq_s32_s16(q13s16),
                        vreinterpretq_s32_s16(q15s16));

    /* ...then 16-bit lanes, completing the 8x8 transpose. */
    q0x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[0]),   // q8
                        vreinterpretq_s16_s32(q1x2s32.val[0]));  // q9
    q1x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q0x2s32.val[1]),   // q10
                        vreinterpretq_s16_s32(q1x2s32.val[1]));  // q11
    q2x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[0]),   // q12
                        vreinterpretq_s16_s32(q3x2s32.val[0]));  // q13
    q3x2s16 = vtrnq_s16(vreinterpretq_s16_s32(q2x2s32.val[1]),   // q14
                        vreinterpretq_s16_s32(q3x2s32.val[1]));  // q15

    /* Store the transposed tile contiguously, 8 values per row. */
    vst1q_s16(t_buf, q0x2s16.val[0]);
    t_buf += 8;
    vst1q_s16(t_buf, q0x2s16.val[1]);
    t_buf += 8;
    vst1q_s16(t_buf, q1x2s16.val[0]);
    t_buf += 8;
    vst1q_s16(t_buf, q1x2s16.val[1]);
    t_buf += 8;
    vst1q_s16(t_buf, q2x2s16.val[0]);
    t_buf += 8;
    vst1q_s16(t_buf, q2x2s16.val[1]);
    t_buf += 8;
    vst1q_s16(t_buf, q3x2s16.val[0]);
    t_buf += 8;
    vst1q_s16(t_buf, q3x2s16.val[1]);
    t_buf += 8;
  }
}
/* Final recombination stage of pass 1: combine the band results held in
 * q2..q15 with the rows already written to the 32x32 intermediate buffer,
 * then store all 32 rows of this column block back to `out`. */
static INLINE void idct32_bands_end_1st_pass(int16_t *out, int16x8_t q2s16,
                                             int16x8_t q3s16, int16x8_t q6s16,
                                             int16x8_t q7s16, int16x8_t q8s16,
                                             int16x8_t q9s16, int16x8_t q10s16,
                                             int16x8_t q11s16, int16x8_t q12s16,
                                             int16x8_t q13s16, int16x8_t q14s16,
                                             int16x8_t q15s16) {
  int16x8_t q0s16, q1s16, q4s16, q5s16;

  STORE_IN_OUTPUT(17, 16, 17, q6s16, q7s16);
  STORE_IN_OUTPUT(17, 14, 15, q8s16, q9s16);

  /* Combine with rows 30/31 -> rows 0/1 and 30/31. */
  LOAD_FROM_OUTPUT(15, 30, 31, q0s16, q1s16);
  q4s16 = vaddq_s16(q2s16, q1s16);
  q5s16 = vaddq_s16(q3s16, q0s16);
  q6s16 = vsubq_s16(q3s16, q0s16);
  q7s16 = vsubq_s16(q2s16, q1s16);
  STORE_IN_OUTPUT(31, 30, 31, q6s16, q7s16);
  STORE_IN_OUTPUT(31, 0, 1, q4s16, q5s16);

  /* Combine with rows 12/13 and 18/19. */
  LOAD_FROM_OUTPUT(1, 12, 13, q0s16, q1s16);
  q2s16 = vaddq_s16(q10s16, q1s16);
  q3s16 = vaddq_s16(q11s16, q0s16);
  q4s16 = vsubq_s16(q11s16, q0s16);
  q5s16 = vsubq_s16(q10s16, q1s16);

  LOAD_FROM_OUTPUT(13, 18, 19, q0s16, q1s16);
  q8s16 = vaddq_s16(q4s16, q1s16);
  q9s16 = vaddq_s16(q5s16, q0s16);
  q6s16 = vsubq_s16(q5s16, q0s16);
  q7s16 = vsubq_s16(q4s16, q1s16);
  STORE_IN_OUTPUT(19, 18, 19, q6s16, q7s16);
  STORE_IN_OUTPUT(19, 12, 13, q8s16, q9s16);

  /* Combine with rows 28/29 -> rows 2/3 and 28/29. */
  LOAD_FROM_OUTPUT(13, 28, 29, q0s16, q1s16);
  q4s16 = vaddq_s16(q2s16, q1s16);
  q5s16 = vaddq_s16(q3s16, q0s16);
  q6s16 = vsubq_s16(q3s16, q0s16);
  q7s16 = vsubq_s16(q2s16, q1s16);
  STORE_IN_OUTPUT(29, 28, 29, q6s16, q7s16);
  STORE_IN_OUTPUT(29, 2, 3, q4s16, q5s16);

  /* Combine with rows 10/11 and 20/21. */
  LOAD_FROM_OUTPUT(3, 10, 11, q0s16, q1s16);
  q2s16 = vaddq_s16(q12s16, q1s16);
  q3s16 = vaddq_s16(q13s16, q0s16);
  q4s16 = vsubq_s16(q13s16, q0s16);
  q5s16 = vsubq_s16(q12s16, q1s16);

  LOAD_FROM_OUTPUT(11, 20, 21, q0s16, q1s16);
  q8s16 = vaddq_s16(q4s16, q1s16);
  q9s16 = vaddq_s16(q5s16, q0s16);
  q6s16 = vsubq_s16(q5s16, q0s16);
  q7s16 = vsubq_s16(q4s16, q1s16);
  STORE_IN_OUTPUT(21, 20, 21, q6s16, q7s16);
  STORE_IN_OUTPUT(21, 10, 11, q8s16, q9s16);

  /* Combine with rows 26/27 -> rows 4/5 and 26/27. */
  LOAD_FROM_OUTPUT(11, 26, 27, q0s16, q1s16);
  q4s16 = vaddq_s16(q2s16, q1s16);
  q5s16 = vaddq_s16(q3s16, q0s16);
  q6s16 = vsubq_s16(q3s16, q0s16);
  q7s16 = vsubq_s16(q2s16, q1s16);
  STORE_IN_OUTPUT(27, 26, 27, q6s16, q7s16);
  STORE_IN_OUTPUT(27, 4, 5, q4s16, q5s16);

  /* Combine with rows 8/9 and 22/23. */
  LOAD_FROM_OUTPUT(5, 8, 9, q0s16, q1s16);
  q2s16 = vaddq_s16(q14s16, q1s16);
  q3s16 = vaddq_s16(q15s16, q0s16);
  q4s16 = vsubq_s16(q15s16, q0s16);
  q5s16 = vsubq_s16(q14s16, q1s16);

  LOAD_FROM_OUTPUT(9, 22, 23, q0s16, q1s16);
  q8s16 = vaddq_s16(q4s16, q1s16);
  q9s16 = vaddq_s16(q5s16, q0s16);
  q6s16 = vsubq_s16(q5s16, q0s16);
  q7s16 = vsubq_s16(q4s16, q1s16);
  STORE_IN_OUTPUT(23, 22, 23, q6s16, q7s16);
  STORE_IN_OUTPUT(23, 8, 9, q8s16, q9s16);

  /* Combine with rows 24/25 -> rows 6/7 and 24/25. */
  LOAD_FROM_OUTPUT(9, 24, 25, q0s16, q1s16);
  q4s16 = vaddq_s16(q2s16, q1s16);
  q5s16 = vaddq_s16(q3s16, q0s16);
  q6s16 = vsubq_s16(q3s16, q0s16);
  q7s16 = vsubq_s16(q2s16, q1s16);
  STORE_IN_OUTPUT(25, 24, 25, q6s16, q7s16);
  STORE_IN_OUTPUT(25, 6, 7, q4s16, q5s16);
}
/* Final recombination stage of pass 2: same dataflow as
 * idct32_bands_end_1st_pass, but the 32 result rows are rounded, added to
 * the destination pixels and stored via four pointers walking toward the
 * centre: r7 down from row 0, r6 up from row 31, r10 down from row 16,
 * r9 up from row 15 (each advanced/retired by two rows per store). */
static INLINE void idct32_bands_end_2nd_pass(
    int16_t *out, uint8_t *dest, int stride, int16x8_t q2s16, int16x8_t q3s16,
    int16x8_t q6s16, int16x8_t q7s16, int16x8_t q8s16, int16x8_t q9s16,
    int16x8_t q10s16, int16x8_t q11s16, int16x8_t q12s16, int16x8_t q13s16,
    int16x8_t q14s16, int16x8_t q15s16) {
  uint8_t *r6 = dest + 31 * stride;
  uint8_t *r7 = dest /* + 0 * stride*/;
  uint8_t *r9 = dest + 15 * stride;
  uint8_t *r10 = dest + 16 * stride;
  int str2 = stride << 1;  /* two destination rows */
  int16x8_t q0s16, q1s16, q4s16, q5s16;

  STORE_COMBINE_CENTER_RESULTS(r10, r9);
  r10 += str2;
  r9 -= str2;

  LOAD_FROM_OUTPUT(17, 30, 31, q0s16, q1s16)
  q4s16 = vaddq_s16(q2s16, q1s16);
  q5s16 = vaddq_s16(q3s16, q0s16);
  q6s16 = vsubq_s16(q3s16, q0s16);
  q7s16 = vsubq_s16(q2s16, q1s16);
  STORE_COMBINE_EXTREME_RESULTS(r7, r6);
  r7 += str2;
  r6 -= str2;

  LOAD_FROM_OUTPUT(31, 12, 13, q0s16, q1s16)
  q2s16 = vaddq_s16(q10s16, q1s16);
  q3s16 = vaddq_s16(q11s16, q0s16);
  q4s16 = vsubq_s16(q11s16, q0s16);
  q5s16 = vsubq_s16(q10s16, q1s16);

  LOAD_FROM_OUTPUT(13, 18, 19, q0s16, q1s16)
  q8s16 = vaddq_s16(q4s16, q1s16);
  q9s16 = vaddq_s16(q5s16, q0s16);
  q6s16 = vsubq_s16(q5s16, q0s16);
  q7s16 = vsubq_s16(q4s16, q1s16);
  STORE_COMBINE_CENTER_RESULTS(r10, r9);
  r10 += str2;
  r9 -= str2;

  LOAD_FROM_OUTPUT(19, 28, 29, q0s16, q1s16)
  q4s16 = vaddq_s16(q2s16, q1s16);
  q5s16 = vaddq_s16(q3s16, q0s16);
  q6s16 = vsubq_s16(q3s16, q0s16);
  q7s16 = vsubq_s16(q2s16, q1s16);
  STORE_COMBINE_EXTREME_RESULTS(r7, r6);
  r7 += str2;
  r6 -= str2;

  LOAD_FROM_OUTPUT(29, 10, 11, q0s16, q1s16)
  q2s16 = vaddq_s16(q12s16, q1s16);
  q3s16 = vaddq_s16(q13s16, q0s16);
  q4s16 = vsubq_s16(q13s16, q0s16);
  q5s16 = vsubq_s16(q12s16, q1s16);

  LOAD_FROM_OUTPUT(11, 20, 21, q0s16, q1s16)
  q8s16 = vaddq_s16(q4s16, q1s16);
  q9s16 = vaddq_s16(q5s16, q0s16);
  q6s16 = vsubq_s16(q5s16, q0s16);
  q7s16 = vsubq_s16(q4s16, q1s16);
  STORE_COMBINE_CENTER_RESULTS(r10, r9);
  r10 += str2;
  r9 -= str2;

  LOAD_FROM_OUTPUT(21, 26, 27, q0s16, q1s16)
  q4s16 = vaddq_s16(q2s16, q1s16);
  q5s16 = vaddq_s16(q3s16, q0s16);
  q6s16 = vsubq_s16(q3s16, q0s16);
  q7s16 = vsubq_s16(q2s16, q1s16);
  STORE_COMBINE_EXTREME_RESULTS(r7, r6);
  r7 += str2;
  r6 -= str2;

  LOAD_FROM_OUTPUT(27, 8, 9, q0s16, q1s16)
  q2s16 = vaddq_s16(q14s16, q1s16);
  q3s16 = vaddq_s16(q15s16, q0s16);
  q4s16 = vsubq_s16(q15s16, q0s16);
  q5s16 = vsubq_s16(q14s16, q1s16);

  LOAD_FROM_OUTPUT(9, 22, 23, q0s16, q1s16)
  q8s16 = vaddq_s16(q4s16, q1s16);
  q9s16 = vaddq_s16(q5s16, q0s16);
  q6s16 = vsubq_s16(q5s16, q0s16);
  q7s16 = vsubq_s16(q4s16, q1s16);
  STORE_COMBINE_CENTER_RESULTS(r10, r9);

  LOAD_FROM_OUTPUT(23, 24, 25, q0s16, q1s16)
  q4s16 = vaddq_s16(q2s16, q1s16);
  q5s16 = vaddq_s16(q3s16, q0s16);
  q6s16 = vsubq_s16(q3s16, q0s16);
  q7s16 = vsubq_s16(q2s16, q1s16);
  STORE_COMBINE_EXTREME_RESULTS(r7, r6);
}
429 void vpx_idct32x32_1024_add_neon(int16_t *input, uint8_t *dest, int stride) {
430 int i, idct32_pass_loop;
431 int16_t trans_buf[32 * 8];
432 int16_t pass1[32 * 32];
433 int16_t pass2[32 * 32];
435 int16x8_t q0s16, q1s16, q2s16, q3s16, q4s16, q5s16, q6s16, q7s16;
436 int16x8_t q8s16, q9s16, q10s16, q11s16, q12s16, q13s16, q14s16, q15s16;
438 for (idct32_pass_loop = 0, out = pass1; idct32_pass_loop < 2;
440 input = pass1, // the input of pass2 is the result of pass1
442 for (i = 0; i < 4; i++, input += 32 * 8, out += 8) { // idct32_bands_loop
443 idct32_transpose_pair(input, trans_buf);
445 // -----------------------------------------
446 // BLOCK A: 16-19,28-31
447 // -----------------------------------------
448 // generate 16,17,30,31
450 LOAD_FROM_TRANSPOSED(0, 1, 31)
451 DO_BUTTERFLY_STD(cospi_31_64, cospi_1_64, &q0s16, &q2s16)
452 LOAD_FROM_TRANSPOSED(31, 17, 15)
453 DO_BUTTERFLY_STD(cospi_15_64, cospi_17_64, &q1s16, &q3s16)
455 q4s16 = vaddq_s16(q0s16, q1s16);
456 q13s16 = vsubq_s16(q0s16, q1s16);
457 q6s16 = vaddq_s16(q2s16, q3s16);
458 q14s16 = vsubq_s16(q2s16, q3s16);
460 DO_BUTTERFLY_STD(cospi_28_64, cospi_4_64, &q5s16, &q7s16)
462 // generate 18,19,28,29
464 LOAD_FROM_TRANSPOSED(15, 9, 23)
465 DO_BUTTERFLY_STD(cospi_23_64, cospi_9_64, &q0s16, &q2s16)
466 LOAD_FROM_TRANSPOSED(23, 25, 7)
467 DO_BUTTERFLY_STD(cospi_7_64, cospi_25_64, &q1s16, &q3s16)
469 q13s16 = vsubq_s16(q3s16, q2s16);
470 q3s16 = vaddq_s16(q3s16, q2s16);
471 q14s16 = vsubq_s16(q1s16, q0s16);
472 q2s16 = vaddq_s16(q1s16, q0s16);
474 DO_BUTTERFLY_STD(-cospi_4_64, -cospi_28_64, &q1s16, &q0s16)
476 q8s16 = vaddq_s16(q4s16, q2s16);
477 q9s16 = vaddq_s16(q5s16, q0s16);
478 q10s16 = vaddq_s16(q7s16, q1s16);
479 q15s16 = vaddq_s16(q6s16, q3s16);
480 q13s16 = vsubq_s16(q5s16, q0s16);
481 q14s16 = vsubq_s16(q7s16, q1s16);
482 STORE_IN_OUTPUT(0, 16, 31, q8s16, q15s16)
483 STORE_IN_OUTPUT(31, 17, 30, q9s16, q10s16)
485 DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q0s16, &q1s16)
486 STORE_IN_OUTPUT(30, 29, 18, q1s16, q0s16)
488 q13s16 = vsubq_s16(q4s16, q2s16);
489 q14s16 = vsubq_s16(q6s16, q3s16);
491 DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q4s16, &q6s16)
492 STORE_IN_OUTPUT(18, 19, 28, q4s16, q6s16)
494 // -----------------------------------------
495 // BLOCK B: 20-23,24-27
496 // -----------------------------------------
497 // generate 20,21,26,27
499 LOAD_FROM_TRANSPOSED(7, 5, 27)
500 DO_BUTTERFLY_STD(cospi_27_64, cospi_5_64, &q0s16, &q2s16)
501 LOAD_FROM_TRANSPOSED(27, 21, 11)
502 DO_BUTTERFLY_STD(cospi_11_64, cospi_21_64, &q1s16, &q3s16)
504 q13s16 = vsubq_s16(q0s16, q1s16);
505 q0s16 = vaddq_s16(q0s16, q1s16);
506 q14s16 = vsubq_s16(q2s16, q3s16);
507 q2s16 = vaddq_s16(q2s16, q3s16);
509 DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16)
511 // generate 22,23,24,25
513 LOAD_FROM_TRANSPOSED(11, 13, 19)
514 DO_BUTTERFLY_STD(cospi_19_64, cospi_13_64, &q5s16, &q7s16)
515 LOAD_FROM_TRANSPOSED(19, 29, 3)
516 DO_BUTTERFLY_STD(cospi_3_64, cospi_29_64, &q4s16, &q6s16)
518 q14s16 = vsubq_s16(q4s16, q5s16);
519 q5s16 = vaddq_s16(q4s16, q5s16);
520 q13s16 = vsubq_s16(q6s16, q7s16);
521 q6s16 = vaddq_s16(q6s16, q7s16);
523 DO_BUTTERFLY_STD(-cospi_20_64, -cospi_12_64, &q4s16, &q7s16)
525 q10s16 = vaddq_s16(q7s16, q1s16);
526 q11s16 = vaddq_s16(q5s16, q0s16);
527 q12s16 = vaddq_s16(q6s16, q2s16);
528 q15s16 = vaddq_s16(q4s16, q3s16);
530 LOAD_FROM_OUTPUT(28, 16, 17, q14s16, q13s16)
531 q8s16 = vaddq_s16(q14s16, q11s16);
532 q9s16 = vaddq_s16(q13s16, q10s16);
533 q13s16 = vsubq_s16(q13s16, q10s16);
534 q11s16 = vsubq_s16(q14s16, q11s16);
535 STORE_IN_OUTPUT(17, 17, 16, q9s16, q8s16)
536 LOAD_FROM_OUTPUT(16, 30, 31, q14s16, q9s16)
537 q8s16 = vsubq_s16(q9s16, q12s16);
538 q10s16 = vaddq_s16(q14s16, q15s16);
539 q14s16 = vsubq_s16(q14s16, q15s16);
540 q12s16 = vaddq_s16(q9s16, q12s16);
541 STORE_IN_OUTPUT(31, 30, 31, q10s16, q12s16)
543 DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)
544 STORE_IN_OUTPUT(31, 25, 22, q14s16, q13s16)
547 DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)
548 STORE_IN_OUTPUT(22, 24, 23, q14s16, q13s16)
550 q14s16 = vsubq_s16(q5s16, q0s16);
551 q13s16 = vsubq_s16(q6s16, q2s16);
552 DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q5s16, &q6s16);
553 q14s16 = vsubq_s16(q7s16, q1s16);
554 q13s16 = vsubq_s16(q4s16, q3s16);
555 DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q0s16, &q1s16);
557 LOAD_FROM_OUTPUT(23, 18, 19, q14s16, q13s16)
558 q8s16 = vaddq_s16(q14s16, q1s16);
559 q9s16 = vaddq_s16(q13s16, q6s16);
560 q13s16 = vsubq_s16(q13s16, q6s16);
561 q1s16 = vsubq_s16(q14s16, q1s16);
562 STORE_IN_OUTPUT(19, 18, 19, q8s16, q9s16)
563 LOAD_FROM_OUTPUT(19, 28, 29, q8s16, q9s16)
564 q14s16 = vsubq_s16(q8s16, q5s16);
565 q10s16 = vaddq_s16(q8s16, q5s16);
566 q11s16 = vaddq_s16(q9s16, q0s16);
567 q0s16 = vsubq_s16(q9s16, q0s16);
568 STORE_IN_OUTPUT(29, 28, 29, q10s16, q11s16)
570 DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q13s16, &q14s16)
571 STORE_IN_OUTPUT(29, 20, 27, q13s16, q14s16)
572 DO_BUTTERFLY(q0s16, q1s16, cospi_16_64, cospi_16_64, &q1s16, &q0s16);
573 STORE_IN_OUTPUT(27, 21, 26, q1s16, q0s16)
575 // -----------------------------------------
576 // BLOCK C: 8-10,11-15
577 // -----------------------------------------
578 // generate 8,9,14,15
580 LOAD_FROM_TRANSPOSED(3, 2, 30)
581 DO_BUTTERFLY_STD(cospi_30_64, cospi_2_64, &q0s16, &q2s16)
582 LOAD_FROM_TRANSPOSED(30, 18, 14)
583 DO_BUTTERFLY_STD(cospi_14_64, cospi_18_64, &q1s16, &q3s16)
585 q13s16 = vsubq_s16(q0s16, q1s16);
586 q0s16 = vaddq_s16(q0s16, q1s16);
587 q14s16 = vsubq_s16(q2s16, q3s16);
588 q2s16 = vaddq_s16(q2s16, q3s16);
590 DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q1s16, &q3s16)
592 // generate 10,11,12,13
594 LOAD_FROM_TRANSPOSED(14, 10, 22)
595 DO_BUTTERFLY_STD(cospi_22_64, cospi_10_64, &q5s16, &q7s16)
596 LOAD_FROM_TRANSPOSED(22, 26, 6)
597 DO_BUTTERFLY_STD(cospi_6_64, cospi_26_64, &q4s16, &q6s16)
599 q14s16 = vsubq_s16(q4s16, q5s16);
600 q5s16 = vaddq_s16(q4s16, q5s16);
601 q13s16 = vsubq_s16(q6s16, q7s16);
602 q6s16 = vaddq_s16(q6s16, q7s16);
604 DO_BUTTERFLY_STD(-cospi_8_64, -cospi_24_64, &q4s16, &q7s16)
606 q8s16 = vaddq_s16(q0s16, q5s16);
607 q9s16 = vaddq_s16(q1s16, q7s16);
608 q13s16 = vsubq_s16(q1s16, q7s16);
609 q14s16 = vsubq_s16(q3s16, q4s16);
610 q10s16 = vaddq_s16(q3s16, q4s16);
611 q15s16 = vaddq_s16(q2s16, q6s16);
612 STORE_IN_OUTPUT(26, 8, 15, q8s16, q15s16)
613 STORE_IN_OUTPUT(15, 9, 14, q9s16, q10s16)
615 DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)
616 STORE_IN_OUTPUT(14, 13, 10, q3s16, q1s16)
617 q13s16 = vsubq_s16(q0s16, q5s16);
618 q14s16 = vsubq_s16(q2s16, q6s16);
619 DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)
620 STORE_IN_OUTPUT(10, 11, 12, q1s16, q3s16)
622 // -----------------------------------------
624 // -----------------------------------------
627 LOAD_FROM_TRANSPOSED(6, 4, 28)
628 DO_BUTTERFLY_STD(cospi_28_64, cospi_4_64, &q0s16, &q2s16)
629 LOAD_FROM_TRANSPOSED(28, 20, 12)
630 DO_BUTTERFLY_STD(cospi_12_64, cospi_20_64, &q1s16, &q3s16)
632 q13s16 = vsubq_s16(q0s16, q1s16);
633 q0s16 = vaddq_s16(q0s16, q1s16);
634 q14s16 = vsubq_s16(q2s16, q3s16);
635 q2s16 = vaddq_s16(q2s16, q3s16);
637 DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q1s16, &q3s16)
641 LOAD_FROM_TRANSPOSED(12, 0, 16)
642 DO_BUTTERFLY_STD(cospi_16_64, cospi_16_64, &q5s16, &q7s16)
643 LOAD_FROM_TRANSPOSED(16, 8, 24)
644 DO_BUTTERFLY_STD(cospi_24_64, cospi_8_64, &q14s16, &q6s16)
646 q4s16 = vaddq_s16(q7s16, q6s16);
647 q7s16 = vsubq_s16(q7s16, q6s16);
648 q6s16 = vsubq_s16(q5s16, q14s16);
649 q5s16 = vaddq_s16(q5s16, q14s16);
651 q8s16 = vaddq_s16(q4s16, q2s16);
652 q9s16 = vaddq_s16(q5s16, q3s16);
653 q10s16 = vaddq_s16(q6s16, q1s16);
654 q11s16 = vaddq_s16(q7s16, q0s16);
655 q12s16 = vsubq_s16(q7s16, q0s16);
656 q13s16 = vsubq_s16(q6s16, q1s16);
657 q14s16 = vsubq_s16(q5s16, q3s16);
658 q15s16 = vsubq_s16(q4s16, q2s16);
660 LOAD_FROM_OUTPUT(12, 14, 15, q0s16, q1s16)
661 q2s16 = vaddq_s16(q8s16, q1s16);
662 q3s16 = vaddq_s16(q9s16, q0s16);
663 q4s16 = vsubq_s16(q9s16, q0s16);
664 q5s16 = vsubq_s16(q8s16, q1s16);
665 LOAD_FROM_OUTPUT(15, 16, 17, q0s16, q1s16)
666 q8s16 = vaddq_s16(q4s16, q1s16);
667 q9s16 = vaddq_s16(q5s16, q0s16);
668 q6s16 = vsubq_s16(q5s16, q0s16);
669 q7s16 = vsubq_s16(q4s16, q1s16);
671 if (idct32_pass_loop == 0) {
672 idct32_bands_end_1st_pass(out, q2s16, q3s16, q6s16, q7s16, q8s16, q9s16,
673 q10s16, q11s16, q12s16, q13s16, q14s16,
676 idct32_bands_end_2nd_pass(out, dest, stride, q2s16, q3s16, q6s16, q7s16,
677 q8s16, q9s16, q10s16, q11s16, q12s16, q13s16,