/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/txfm_common.h"
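
// This file implements the 32x32 inverse DCT in two passes over a 32x32
// int16_t intermediate buffer: pass 1 transforms the input coefficients into
// pass1[], pass 2 transforms that result and adds it into the destination
// frame. Each pass walks four bands of eight columns, transposing 8x8
// sub-blocks into trans_buf so that rows map onto 8-lane NEON vectors.
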
static INLINE void load_from_transformed(const int16_t *const trans_buf,
                                         const int first, const int second,
                                         int16x8_t *const q0,
                                         int16x8_t *const q1) {
  *q0 = vld1q_s16(trans_buf + first * 8);
  *q1 = vld1q_s16(trans_buf + second * 8);
}

static INLINE void load_from_output(const int16_t *const out, const int first,
                                    const int second, int16x8_t *const q0,
                                    int16x8_t *const q1) {
  *q0 = vld1q_s16(out + first * 32);
  *q1 = vld1q_s16(out + second * 32);
}

static INLINE void store_in_output(int16_t *const out, const int first,
                                   const int second, const int16x8_t q0,
                                   const int16x8_t q1) {
  vst1q_s16(out + first * 32, q0);
  vst1q_s16(out + second * 32, q1);
}
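
// Add the second-pass results q0..q3, round-shifted down by 64, into two
// pairs of 8-bit destination rows (at p1 going down, at p2 going up),
// saturating the sums to the [0, 255] pixel range.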
static INLINE void store_combine_results(uint8_t *p1, uint8_t *p2,
                                         const int stride, int16x8_t q0,
                                         int16x8_t q1, int16x8_t q2,
                                         int16x8_t q3) {
  uint8x8_t d[4];

  d[0] = vld1_u8(p1);
  p1 += stride;
  d[1] = vld1_u8(p1);
  d[3] = vld1_u8(p2);
  p2 -= stride;
  d[2] = vld1_u8(p2);

  q0 = vrshrq_n_s16(q0, 6);
  q1 = vrshrq_n_s16(q1, 6);
  q2 = vrshrq_n_s16(q2, 6);
  q3 = vrshrq_n_s16(q3, 6);

  q0 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q0), d[0]));
  q1 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q1), d[1]));
  q2 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q2), d[2]));
  q3 = vreinterpretq_s16_u16(vaddw_u8(vreinterpretq_u16_s16(q3), d[3]));

  d[0] = vqmovun_s16(q0);
  d[1] = vqmovun_s16(q1);
  d[2] = vqmovun_s16(q2);
  d[3] = vqmovun_s16(q3);

  vst1_u8(p1, d[1]);
  p1 -= stride;
  vst1_u8(p1, d[0]);
  vst1_u8(p2, d[2]);
  p2 += stride;
  vst1_u8(p2, d[3]);
}
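
// High-bit-depth counterpart of store_combine_results() for 8-bit-depth
// content: the destination rows are uint16_t, so pixels are loaded and stored
// as 16-bit values and re-widened after the 8-bit saturation.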
static INLINE void highbd_store_combine_results_bd8(uint16_t *p1, uint16_t *p2,
                                                    const int stride,
                                                    int16x8_t q0, int16x8_t q1,
                                                    int16x8_t q2,
                                                    int16x8_t q3) {
  uint16x8_t d[4];

  d[0] = vld1q_u16(p1);
  p1 += stride;
  d[1] = vld1q_u16(p1);
  d[3] = vld1q_u16(p2);
  p2 -= stride;
  d[2] = vld1q_u16(p2);

  q0 = vrshrq_n_s16(q0, 6);
  q1 = vrshrq_n_s16(q1, 6);
  q2 = vrshrq_n_s16(q2, 6);
  q3 = vrshrq_n_s16(q3, 6);

  q0 = vaddq_s16(q0, vreinterpretq_s16_u16(d[0]));
  q1 = vaddq_s16(q1, vreinterpretq_s16_u16(d[1]));
  q2 = vaddq_s16(q2, vreinterpretq_s16_u16(d[2]));
  q3 = vaddq_s16(q3, vreinterpretq_s16_u16(d[3]));

  d[0] = vmovl_u8(vqmovun_s16(q0));
  d[1] = vmovl_u8(vqmovun_s16(q1));
  d[2] = vmovl_u8(vqmovun_s16(q2));
  d[3] = vmovl_u8(vqmovun_s16(q3));

  vst1q_u16(p1, d[1]);
  p1 -= stride;
  vst1q_u16(p1, d[0]);
  vst1q_u16(p2, d[2]);
  p2 += stride;
  vst1q_u16(p2, d[3]);
}
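
// Butterfly/rotation stage used throughout the transform:
//   *qOut0 = round_shift(qIn0 * first_const - qIn1 * second_const)
//   *qOut1 = round_shift(qIn0 * second_const + qIn1 * first_const)
// where round_shift() narrows the 32-bit products back to 16 bits with
// rounding by DCT_CONST_BITS.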
static INLINE void do_butterfly(const int16x8_t qIn0, const int16x8_t qIn1,
                                const int16_t first_const,
                                const int16_t second_const,
                                int16x8_t *const qOut0,
                                int16x8_t *const qOut1) {
  int32x4_t q[4];
  int16x4_t d[6];

  d[0] = vget_low_s16(qIn0);
  d[1] = vget_high_s16(qIn0);
  d[2] = vget_low_s16(qIn1);
  d[3] = vget_high_s16(qIn1);

  // Note: using v{mul, mla, mls}l_n_s16 here slows down 35% with gcc 4.9.
  d[4] = vdup_n_s16(first_const);
  d[5] = vdup_n_s16(second_const);

  q[0] = vmull_s16(d[0], d[4]);
  q[1] = vmull_s16(d[1], d[4]);
  q[0] = vmlsl_s16(q[0], d[2], d[5]);
  q[1] = vmlsl_s16(q[1], d[3], d[5]);

  q[2] = vmull_s16(d[0], d[5]);
  q[3] = vmull_s16(d[1], d[5]);
  q[2] = vmlal_s16(q[2], d[2], d[4]);
  q[3] = vmlal_s16(q[3], d[3], d[4]);

  *qOut0 = vcombine_s16(vrshrn_n_s32(q[0], DCT_CONST_BITS),
                        vrshrn_n_s32(q[1], DCT_CONST_BITS));
  *qOut1 = vcombine_s16(vrshrn_n_s32(q[2], DCT_CONST_BITS),
                        vrshrn_n_s32(q[3], DCT_CONST_BITS));
}
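
// Read one 8x8 sub-block of the 32x32 intermediate buffer, i.e. eight rows of
// eight coefficients with a row pitch of 32.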
static INLINE void load_s16x8q(const int16_t *in, int16x8_t *const s0,
                               int16x8_t *const s1, int16x8_t *const s2,
                               int16x8_t *const s3, int16x8_t *const s4,
                               int16x8_t *const s5, int16x8_t *const s6,
                               int16x8_t *const s7) {
  *s0 = vld1q_s16(in);
  in += 32;
  *s1 = vld1q_s16(in);
  in += 32;
  *s2 = vld1q_s16(in);
  in += 32;
  *s3 = vld1q_s16(in);
  in += 32;
  *s4 = vld1q_s16(in);
  in += 32;
  *s5 = vld1q_s16(in);
  in += 32;
  *s6 = vld1q_s16(in);
  in += 32;
  *s7 = vld1q_s16(in);
}

static INLINE void transpose_and_store_s16_8x8(int16x8_t a0, int16x8_t a1,
                                               int16x8_t a2, int16x8_t a3,
                                               int16x8_t a4, int16x8_t a5,
                                               int16x8_t a6, int16x8_t a7,
                                               int16_t **out) {
  transpose_s16_8x8(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);

  vst1q_s16(*out, a0);
  *out += 8;
  vst1q_s16(*out, a1);
  *out += 8;
  vst1q_s16(*out, a2);
  *out += 8;
  vst1q_s16(*out, a3);
  *out += 8;
  vst1q_s16(*out, a4);
  *out += 8;
  vst1q_s16(*out, a5);
  *out += 8;
  vst1q_s16(*out, a6);
  *out += 8;
  vst1q_s16(*out, a7);
  *out += 8;
}

static INLINE void idct32_transpose_pair(const int16_t *input, int16_t *t_buf) {
  int i;
  int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;

  for (i = 0; i < 4; i++, input += 8) {
    load_s16x8q(input, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
    transpose_and_store_s16_8x8(s0, s1, s2, s3, s4, s5, s6, s7, &t_buf);
  }
}
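
// With CONFIG_VP9_HIGHBITDEPTH, tran_low_t is wider than int16_t, so the
// first-pass input needs a narrowing load; without it, tran_low_t is already
// int16_t and the plain transpose pair can be aliased directly.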
#if CONFIG_VP9_HIGHBITDEPTH
static INLINE void load_s16x8q_tran_low(
    const tran_low_t *in, int16x8_t *const s0, int16x8_t *const s1,
    int16x8_t *const s2, int16x8_t *const s3, int16x8_t *const s4,
    int16x8_t *const s5, int16x8_t *const s6, int16x8_t *const s7) {
  *s0 = load_tran_low_to_s16q(in);
  in += 32;
  *s1 = load_tran_low_to_s16q(in);
  in += 32;
  *s2 = load_tran_low_to_s16q(in);
  in += 32;
  *s3 = load_tran_low_to_s16q(in);
  in += 32;
  *s4 = load_tran_low_to_s16q(in);
  in += 32;
  *s5 = load_tran_low_to_s16q(in);
  in += 32;
  *s6 = load_tran_low_to_s16q(in);
  in += 32;
  *s7 = load_tran_low_to_s16q(in);
}

static INLINE void idct32_transpose_pair_tran_low(const tran_low_t *input,
                                                  int16_t *t_buf) {
  int i;
  int16x8_t s0, s1, s2, s3, s4, s5, s6, s7;

  for (i = 0; i < 4; i++, input += 8) {
    load_s16x8q_tran_low(input, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);
    transpose_and_store_s16_8x8(s0, s1, s2, s3, s4, s5, s6, s7, &t_buf);
  }
}
#else  // !CONFIG_VP9_HIGHBITDEPTH
#define idct32_transpose_pair_tran_low idct32_transpose_pair
#endif  // CONFIG_VP9_HIGHBITDEPTH
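
// End of pass 1: butterfly-combine the band outputs held in q[] with rows
// already stored in the intermediate buffer, writing all 32 rows of the band
// back to that buffer.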
static INLINE void idct32_bands_end_1st_pass(int16_t *const out,
                                             int16x8_t *const q) {
  store_in_output(out, 16, 17, q[6], q[7]);
  store_in_output(out, 14, 15, q[8], q[9]);

  load_from_output(out, 30, 31, &q[0], &q[1]);
  q[4] = vaddq_s16(q[2], q[1]);
  q[5] = vaddq_s16(q[3], q[0]);
  q[6] = vsubq_s16(q[3], q[0]);
  q[7] = vsubq_s16(q[2], q[1]);
  store_in_output(out, 30, 31, q[6], q[7]);
  store_in_output(out, 0, 1, q[4], q[5]);

  load_from_output(out, 12, 13, &q[0], &q[1]);
  q[2] = vaddq_s16(q[10], q[1]);
  q[3] = vaddq_s16(q[11], q[0]);
  q[4] = vsubq_s16(q[11], q[0]);
  q[5] = vsubq_s16(q[10], q[1]);

  load_from_output(out, 18, 19, &q[0], &q[1]);
  q[8] = vaddq_s16(q[4], q[1]);
  q[9] = vaddq_s16(q[5], q[0]);
  q[6] = vsubq_s16(q[5], q[0]);
  q[7] = vsubq_s16(q[4], q[1]);
  store_in_output(out, 18, 19, q[6], q[7]);
  store_in_output(out, 12, 13, q[8], q[9]);

  load_from_output(out, 28, 29, &q[0], &q[1]);
  q[4] = vaddq_s16(q[2], q[1]);
  q[5] = vaddq_s16(q[3], q[0]);
  q[6] = vsubq_s16(q[3], q[0]);
  q[7] = vsubq_s16(q[2], q[1]);
  store_in_output(out, 28, 29, q[6], q[7]);
  store_in_output(out, 2, 3, q[4], q[5]);

  load_from_output(out, 10, 11, &q[0], &q[1]);
  q[2] = vaddq_s16(q[12], q[1]);
  q[3] = vaddq_s16(q[13], q[0]);
  q[4] = vsubq_s16(q[13], q[0]);
  q[5] = vsubq_s16(q[12], q[1]);

  load_from_output(out, 20, 21, &q[0], &q[1]);
  q[8] = vaddq_s16(q[4], q[1]);
  q[9] = vaddq_s16(q[5], q[0]);
  q[6] = vsubq_s16(q[5], q[0]);
  q[7] = vsubq_s16(q[4], q[1]);
  store_in_output(out, 20, 21, q[6], q[7]);
  store_in_output(out, 10, 11, q[8], q[9]);

  load_from_output(out, 26, 27, &q[0], &q[1]);
  q[4] = vaddq_s16(q[2], q[1]);
  q[5] = vaddq_s16(q[3], q[0]);
  q[6] = vsubq_s16(q[3], q[0]);
  q[7] = vsubq_s16(q[2], q[1]);
  store_in_output(out, 26, 27, q[6], q[7]);
  store_in_output(out, 4, 5, q[4], q[5]);

  load_from_output(out, 8, 9, &q[0], &q[1]);
  q[2] = vaddq_s16(q[14], q[1]);
  q[3] = vaddq_s16(q[15], q[0]);
  q[4] = vsubq_s16(q[15], q[0]);
  q[5] = vsubq_s16(q[14], q[1]);

  load_from_output(out, 22, 23, &q[0], &q[1]);
  q[8] = vaddq_s16(q[4], q[1]);
  q[9] = vaddq_s16(q[5], q[0]);
  q[6] = vsubq_s16(q[5], q[0]);
  q[7] = vsubq_s16(q[4], q[1]);
  store_in_output(out, 22, 23, q[6], q[7]);
  store_in_output(out, 8, 9, q[8], q[9]);

  load_from_output(out, 24, 25, &q[0], &q[1]);
  q[4] = vaddq_s16(q[2], q[1]);
  q[5] = vaddq_s16(q[3], q[0]);
  q[6] = vsubq_s16(q[3], q[0]);
  q[7] = vsubq_s16(q[2], q[1]);
  store_in_output(out, 24, 25, q[6], q[7]);
  store_in_output(out, 6, 7, q[4], q[5]);
}
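
// End of pass 2: the same combine as above, but the results are added to the
// destination frame instead of being stored back. dest0/dest1 start at rows 0
// and 31 and step toward the middle; dest2/dest3 start at rows 16 and 15 and
// step outward, so the 32 rows are written in mirrored pairs.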
static INLINE void idct32_bands_end_2nd_pass(const int16_t *const out,
                                             uint8_t *const dest,
                                             const int stride,
                                             int16x8_t *const q) {
  uint8_t *dest0 = dest + 0 * stride;
  uint8_t *dest1 = dest + 31 * stride;
  uint8_t *dest2 = dest + 16 * stride;
  uint8_t *dest3 = dest + 15 * stride;
  const int str2 = stride << 1;

  store_combine_results(dest2, dest3, stride, q[6], q[7], q[8], q[9]);
  dest2 += str2;
  dest3 -= str2;

  load_from_output(out, 30, 31, &q[0], &q[1]);
  q[4] = final_add(q[2], q[1]);
  q[5] = final_add(q[3], q[0]);
  q[6] = final_sub(q[3], q[0]);
  q[7] = final_sub(q[2], q[1]);
  store_combine_results(dest0, dest1, stride, q[4], q[5], q[6], q[7]);
  dest0 += str2;
  dest1 -= str2;

  load_from_output(out, 12, 13, &q[0], &q[1]);
  q[2] = vaddq_s16(q[10], q[1]);
  q[3] = vaddq_s16(q[11], q[0]);
  q[4] = vsubq_s16(q[11], q[0]);
  q[5] = vsubq_s16(q[10], q[1]);

  load_from_output(out, 18, 19, &q[0], &q[1]);
  q[8] = final_add(q[4], q[1]);
  q[9] = final_add(q[5], q[0]);
  q[6] = final_sub(q[5], q[0]);
  q[7] = final_sub(q[4], q[1]);
  store_combine_results(dest2, dest3, stride, q[6], q[7], q[8], q[9]);
  dest2 += str2;
  dest3 -= str2;

  load_from_output(out, 28, 29, &q[0], &q[1]);
  q[4] = final_add(q[2], q[1]);
  q[5] = final_add(q[3], q[0]);
  q[6] = final_sub(q[3], q[0]);
  q[7] = final_sub(q[2], q[1]);
  store_combine_results(dest0, dest1, stride, q[4], q[5], q[6], q[7]);
  dest0 += str2;
  dest1 -= str2;

  load_from_output(out, 10, 11, &q[0], &q[1]);
  q[2] = vaddq_s16(q[12], q[1]);
  q[3] = vaddq_s16(q[13], q[0]);
  q[4] = vsubq_s16(q[13], q[0]);
  q[5] = vsubq_s16(q[12], q[1]);

  load_from_output(out, 20, 21, &q[0], &q[1]);
  q[8] = final_add(q[4], q[1]);
  q[9] = final_add(q[5], q[0]);
  q[6] = final_sub(q[5], q[0]);
  q[7] = final_sub(q[4], q[1]);
  store_combine_results(dest2, dest3, stride, q[6], q[7], q[8], q[9]);
  dest2 += str2;
  dest3 -= str2;

  load_from_output(out, 26, 27, &q[0], &q[1]);
  q[4] = final_add(q[2], q[1]);
  q[5] = final_add(q[3], q[0]);
  q[6] = final_sub(q[3], q[0]);
  q[7] = final_sub(q[2], q[1]);
  store_combine_results(dest0, dest1, stride, q[4], q[5], q[6], q[7]);
  dest0 += str2;
  dest1 -= str2;

  load_from_output(out, 8, 9, &q[0], &q[1]);
  q[2] = vaddq_s16(q[14], q[1]);
  q[3] = vaddq_s16(q[15], q[0]);
  q[4] = vsubq_s16(q[15], q[0]);
  q[5] = vsubq_s16(q[14], q[1]);

  load_from_output(out, 22, 23, &q[0], &q[1]);
  q[8] = final_add(q[4], q[1]);
  q[9] = final_add(q[5], q[0]);
  q[6] = final_sub(q[5], q[0]);
  q[7] = final_sub(q[4], q[1]);
  store_combine_results(dest2, dest3, stride, q[6], q[7], q[8], q[9]);

  load_from_output(out, 24, 25, &q[0], &q[1]);
  q[4] = final_add(q[2], q[1]);
  q[5] = final_add(q[3], q[0]);
  q[6] = final_sub(q[3], q[0]);
  q[7] = final_sub(q[2], q[1]);
  store_combine_results(dest0, dest1, stride, q[4], q[5], q[6], q[7]);
}
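
// As idct32_bands_end_2nd_pass(), but combining into a high-bit-depth
// (uint16_t) destination that holds 8-bit-depth content.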
static INLINE void highbd_idct32_bands_end_2nd_pass_bd8(
    const int16_t *const out, uint16_t *const dest, const int stride,
    int16x8_t *const q) {
  uint16_t *dest0 = dest + 0 * stride;
  uint16_t *dest1 = dest + 31 * stride;
  uint16_t *dest2 = dest + 16 * stride;
  uint16_t *dest3 = dest + 15 * stride;
  const int str2 = stride << 1;

  highbd_store_combine_results_bd8(dest2, dest3, stride, q[6], q[7], q[8],
                                   q[9]);
  dest2 += str2;
  dest3 -= str2;

  load_from_output(out, 30, 31, &q[0], &q[1]);
  q[4] = final_add(q[2], q[1]);
  q[5] = final_add(q[3], q[0]);
  q[6] = final_sub(q[3], q[0]);
  q[7] = final_sub(q[2], q[1]);
  highbd_store_combine_results_bd8(dest0, dest1, stride, q[4], q[5], q[6],
                                   q[7]);
  dest0 += str2;
  dest1 -= str2;

  load_from_output(out, 12, 13, &q[0], &q[1]);
  q[2] = vaddq_s16(q[10], q[1]);
  q[3] = vaddq_s16(q[11], q[0]);
  q[4] = vsubq_s16(q[11], q[0]);
  q[5] = vsubq_s16(q[10], q[1]);

  load_from_output(out, 18, 19, &q[0], &q[1]);
  q[8] = final_add(q[4], q[1]);
  q[9] = final_add(q[5], q[0]);
  q[6] = final_sub(q[5], q[0]);
  q[7] = final_sub(q[4], q[1]);
  highbd_store_combine_results_bd8(dest2, dest3, stride, q[6], q[7], q[8],
                                   q[9]);
  dest2 += str2;
  dest3 -= str2;

  load_from_output(out, 28, 29, &q[0], &q[1]);
  q[4] = final_add(q[2], q[1]);
  q[5] = final_add(q[3], q[0]);
  q[6] = final_sub(q[3], q[0]);
  q[7] = final_sub(q[2], q[1]);
  highbd_store_combine_results_bd8(dest0, dest1, stride, q[4], q[5], q[6],
                                   q[7]);
  dest0 += str2;
  dest1 -= str2;

  load_from_output(out, 10, 11, &q[0], &q[1]);
  q[2] = vaddq_s16(q[12], q[1]);
  q[3] = vaddq_s16(q[13], q[0]);
  q[4] = vsubq_s16(q[13], q[0]);
  q[5] = vsubq_s16(q[12], q[1]);

  load_from_output(out, 20, 21, &q[0], &q[1]);
  q[8] = final_add(q[4], q[1]);
  q[9] = final_add(q[5], q[0]);
  q[6] = final_sub(q[5], q[0]);
  q[7] = final_sub(q[4], q[1]);
  highbd_store_combine_results_bd8(dest2, dest3, stride, q[6], q[7], q[8],
                                   q[9]);
  dest2 += str2;
  dest3 -= str2;

  load_from_output(out, 26, 27, &q[0], &q[1]);
  q[4] = final_add(q[2], q[1]);
  q[5] = final_add(q[3], q[0]);
  q[6] = final_sub(q[3], q[0]);
  q[7] = final_sub(q[2], q[1]);
  highbd_store_combine_results_bd8(dest0, dest1, stride, q[4], q[5], q[6],
                                   q[7]);
  dest0 += str2;
  dest1 -= str2;

  load_from_output(out, 8, 9, &q[0], &q[1]);
  q[2] = vaddq_s16(q[14], q[1]);
  q[3] = vaddq_s16(q[15], q[0]);
  q[4] = vsubq_s16(q[15], q[0]);
  q[5] = vsubq_s16(q[14], q[1]);

  load_from_output(out, 22, 23, &q[0], &q[1]);
  q[8] = final_add(q[4], q[1]);
  q[9] = final_add(q[5], q[0]);
  q[6] = final_sub(q[5], q[0]);
  q[7] = final_sub(q[4], q[1]);
  highbd_store_combine_results_bd8(dest2, dest3, stride, q[6], q[7], q[8],
                                   q[9]);

  load_from_output(out, 24, 25, &q[0], &q[1]);
  q[4] = final_add(q[2], q[1]);
  q[5] = final_add(q[3], q[0]);
  q[6] = final_sub(q[3], q[0]);
  q[7] = final_sub(q[2], q[1]);
  highbd_store_combine_results_bd8(dest0, dest1, stride, q[4], q[5], q[6],
                                   q[7]);
}
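
// Two-pass 32x32 inverse DCT. When highbd_flag is set, dest actually points
// to a uint16_t frame buffer (hence CAST_TO_SHORTPTR) containing 8-bit-depth
// content.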
void vpx_idct32_32_neon(const tran_low_t *input, uint8_t *dest,
                        const int stride, const int highbd_flag) {
  int i, idct32_pass_loop;
  int16_t trans_buf[32 * 8];
  int16_t pass1[32 * 32];
  int16_t pass2[32 * 32];
  const int16_t *input_pass2 = pass1;  // input of pass2 is the result of pass1
  int16_t *out;
  int16x8_t q[16];
  uint16_t *dst = CAST_TO_SHORTPTR(dest);

  for (idct32_pass_loop = 0, out = pass1; idct32_pass_loop < 2;
       idct32_pass_loop++, out = pass2) {
    for (i = 0; i < 4; i++, out += 8) {  // idct32_bands_loop
      if (idct32_pass_loop == 0) {
        idct32_transpose_pair_tran_low(input, trans_buf);
        input += 32 * 8;
      } else {
        idct32_transpose_pair(input_pass2, trans_buf);
        input_pass2 += 32 * 8;
      }

      // -----------------------------------------
      // BLOCK A: 16-19,28-31
      // -----------------------------------------
      // generate 16,17,30,31

      load_from_transformed(trans_buf, 1, 31, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_31_64, cospi_1_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 17, 15, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_15_64, cospi_17_64, &q[1], &q[3]);

      q[4] = vaddq_s16(q[0], q[1]);
      q[13] = vsubq_s16(q[0], q[1]);
      q[6] = vaddq_s16(q[2], q[3]);
      q[14] = vsubq_s16(q[2], q[3]);

      do_butterfly(q[14], q[13], cospi_28_64, cospi_4_64, &q[5], &q[7]);

      // generate 18,19,28,29

      load_from_transformed(trans_buf, 9, 23, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_23_64, cospi_9_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 25, 7, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_7_64, cospi_25_64, &q[1], &q[3]);

      q[13] = vsubq_s16(q[3], q[2]);
      q[3] = vaddq_s16(q[3], q[2]);
      q[14] = vsubq_s16(q[1], q[0]);
      q[2] = vaddq_s16(q[1], q[0]);

      do_butterfly(q[14], q[13], -cospi_4_64, -cospi_28_64, &q[1], &q[0]);

      q[8] = vaddq_s16(q[4], q[2]);
      q[9] = vaddq_s16(q[5], q[0]);
      q[10] = vaddq_s16(q[7], q[1]);
      q[15] = vaddq_s16(q[6], q[3]);
      q[13] = vsubq_s16(q[5], q[0]);
      q[14] = vsubq_s16(q[7], q[1]);
      store_in_output(out, 16, 31, q[8], q[15]);
      store_in_output(out, 17, 30, q[9], q[10]);

      do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[0], &q[1]);
      store_in_output(out, 29, 18, q[1], q[0]);

      q[13] = vsubq_s16(q[4], q[2]);
      q[14] = vsubq_s16(q[6], q[3]);

      do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[4], &q[6]);
      store_in_output(out, 19, 28, q[4], q[6]);

      // -----------------------------------------
      // BLOCK B: 20-23,24-27
      // -----------------------------------------
      // generate 20,21,26,27

      load_from_transformed(trans_buf, 5, 27, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_27_64, cospi_5_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 21, 11, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_11_64, cospi_21_64, &q[1], &q[3]);

      q[13] = vsubq_s16(q[0], q[1]);
      q[0] = vaddq_s16(q[0], q[1]);
      q[14] = vsubq_s16(q[2], q[3]);
      q[2] = vaddq_s16(q[2], q[3]);

      do_butterfly(q[14], q[13], cospi_12_64, cospi_20_64, &q[1], &q[3]);

      // generate 22,23,24,25

      load_from_transformed(trans_buf, 13, 19, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_19_64, cospi_13_64, &q[5], &q[7]);
      load_from_transformed(trans_buf, 29, 3, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_3_64, cospi_29_64, &q[4], &q[6]);

      q[14] = vsubq_s16(q[4], q[5]);
      q[5] = vaddq_s16(q[4], q[5]);
      q[13] = vsubq_s16(q[6], q[7]);
      q[6] = vaddq_s16(q[6], q[7]);

      do_butterfly(q[14], q[13], -cospi_20_64, -cospi_12_64, &q[4], &q[7]);

      q[10] = vaddq_s16(q[7], q[1]);
      q[11] = vaddq_s16(q[5], q[0]);
      q[12] = vaddq_s16(q[6], q[2]);
      q[15] = vaddq_s16(q[4], q[3]);

      load_from_output(out, 16, 17, &q[14], &q[13]);
      q[8] = vaddq_s16(q[14], q[11]);
      q[9] = vaddq_s16(q[13], q[10]);
      q[13] = vsubq_s16(q[13], q[10]);
      q[11] = vsubq_s16(q[14], q[11]);
      store_in_output(out, 17, 16, q[9], q[8]);
      load_from_output(out, 30, 31, &q[14], &q[9]);
      q[8] = vsubq_s16(q[9], q[12]);
      q[10] = vaddq_s16(q[14], q[15]);
      q[14] = vsubq_s16(q[14], q[15]);
      q[12] = vaddq_s16(q[9], q[12]);
      store_in_output(out, 30, 31, q[10], q[12]);

      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[13], &q[14]);
      store_in_output(out, 25, 22, q[14], q[13]);
      do_butterfly(q[8], q[11], cospi_16_64, cospi_16_64, &q[13], &q[14]);
      store_in_output(out, 24, 23, q[14], q[13]);

      q[14] = vsubq_s16(q[5], q[0]);
      q[13] = vsubq_s16(q[6], q[2]);
      do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[5], &q[6]);
      q[14] = vsubq_s16(q[7], q[1]);
      q[13] = vsubq_s16(q[4], q[3]);
      do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[0], &q[1]);

      load_from_output(out, 18, 19, &q[14], &q[13]);
      q[8] = vaddq_s16(q[14], q[1]);
      q[9] = vaddq_s16(q[13], q[6]);
      q[13] = vsubq_s16(q[13], q[6]);
      q[1] = vsubq_s16(q[14], q[1]);
      store_in_output(out, 18, 19, q[8], q[9]);
      load_from_output(out, 28, 29, &q[8], &q[9]);
      q[14] = vsubq_s16(q[8], q[5]);
      q[10] = vaddq_s16(q[8], q[5]);
      q[11] = vaddq_s16(q[9], q[0]);
      q[0] = vsubq_s16(q[9], q[0]);
      store_in_output(out, 28, 29, q[10], q[11]);

      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[13], &q[14]);
      store_in_output(out, 20, 27, q[13], q[14]);
      do_butterfly(q[0], q[1], cospi_16_64, cospi_16_64, &q[1], &q[0]);
      store_in_output(out, 21, 26, q[1], q[0]);

      // -----------------------------------------
      // BLOCK C: 8-10,11-15
      // -----------------------------------------
      // generate 8,9,14,15

      load_from_transformed(trans_buf, 2, 30, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_30_64, cospi_2_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 18, 14, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_14_64, cospi_18_64, &q[1], &q[3]);

      q[13] = vsubq_s16(q[0], q[1]);
      q[0] = vaddq_s16(q[0], q[1]);
      q[14] = vsubq_s16(q[2], q[3]);
      q[2] = vaddq_s16(q[2], q[3]);

      do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[1], &q[3]);

      // generate 10,11,12,13

      load_from_transformed(trans_buf, 10, 22, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_22_64, cospi_10_64, &q[5], &q[7]);
      load_from_transformed(trans_buf, 26, 6, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_6_64, cospi_26_64, &q[4], &q[6]);

      q[14] = vsubq_s16(q[4], q[5]);
      q[5] = vaddq_s16(q[4], q[5]);
      q[13] = vsubq_s16(q[6], q[7]);
      q[6] = vaddq_s16(q[6], q[7]);

      do_butterfly(q[14], q[13], -cospi_8_64, -cospi_24_64, &q[4], &q[7]);

      q[8] = vaddq_s16(q[0], q[5]);
      q[9] = vaddq_s16(q[1], q[7]);
      q[13] = vsubq_s16(q[1], q[7]);
      q[14] = vsubq_s16(q[3], q[4]);
      q[10] = vaddq_s16(q[3], q[4]);
      q[15] = vaddq_s16(q[2], q[6]);
      store_in_output(out, 8, 15, q[8], q[15]);
      store_in_output(out, 9, 14, q[9], q[10]);

      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]);
      store_in_output(out, 13, 10, q[3], q[1]);
      q[13] = vsubq_s16(q[0], q[5]);
      q[14] = vsubq_s16(q[2], q[6]);
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]);
      store_in_output(out, 11, 12, q[1], q[3]);

      // -----------------------------------------
      // BLOCK D: 0-3,4-7
      // -----------------------------------------
      // generate 4,5,6,7

      load_from_transformed(trans_buf, 4, 28, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_28_64, cospi_4_64, &q[0], &q[2]);
      load_from_transformed(trans_buf, 20, 12, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_12_64, cospi_20_64, &q[1], &q[3]);

      q[13] = vsubq_s16(q[0], q[1]);
      q[0] = vaddq_s16(q[0], q[1]);
      q[14] = vsubq_s16(q[2], q[3]);
      q[2] = vaddq_s16(q[2], q[3]);

      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[1], &q[3]);

      // generate 0,1,2,3

      load_from_transformed(trans_buf, 0, 16, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_16_64, cospi_16_64, &q[5], &q[7]);
      load_from_transformed(trans_buf, 8, 24, &q[14], &q[13]);
      do_butterfly(q[14], q[13], cospi_24_64, cospi_8_64, &q[14], &q[6]);

      q[4] = vaddq_s16(q[7], q[6]);
      q[7] = vsubq_s16(q[7], q[6]);
      q[6] = vsubq_s16(q[5], q[14]);
      q[5] = vaddq_s16(q[5], q[14]);

      q[8] = vaddq_s16(q[4], q[2]);
      q[9] = vaddq_s16(q[5], q[3]);
      q[10] = vaddq_s16(q[6], q[1]);
      q[11] = vaddq_s16(q[7], q[0]);
      q[12] = vsubq_s16(q[7], q[0]);
      q[13] = vsubq_s16(q[6], q[1]);
      q[14] = vsubq_s16(q[5], q[3]);
      q[15] = vsubq_s16(q[4], q[2]);

      load_from_output(out, 14, 15, &q[0], &q[1]);
      q[2] = vaddq_s16(q[8], q[1]);
      q[3] = vaddq_s16(q[9], q[0]);
      q[4] = vsubq_s16(q[9], q[0]);
      q[5] = vsubq_s16(q[8], q[1]);
      load_from_output(out, 16, 17, &q[0], &q[1]);
      q[8] = final_add(q[4], q[1]);
      q[9] = final_add(q[5], q[0]);
      q[6] = final_sub(q[5], q[0]);
      q[7] = final_sub(q[4], q[1]);

      if (idct32_pass_loop == 0) {
        idct32_bands_end_1st_pass(out, q);
      } else {
        if (highbd_flag) {
          highbd_idct32_bands_end_2nd_pass_bd8(out, dst, stride, q);
          dst += 8;
        } else {
          idct32_bands_end_2nd_pass(out, dest, stride, q);
          dest += 8;
        }
      }
    }
  }
}
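
// Full-size (all 1024 coefficients) 32x32 inverse transform for standard
// bit depth frames; a thin wrapper that runs both passes with highbd_flag = 0.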
void vpx_idct32x32_1024_add_neon(const tran_low_t *input, uint8_t *dest,
                                 int stride) {
  vpx_idct32_32_neon(input, dest, stride, 0);
}