/*
 *  Copyright (c) 2014 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include <arm_neon.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/arm/idct_neon.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/txfm_common.h"
18 static INLINE void wrap_low_4x2(const int32x4_t *const t32, int16x4_t *const d0,
19 int16x4_t *const d1) {
20 *d0 = vrshrn_n_s32(t32[0], DCT_CONST_BITS);
21 *d1 = vrshrn_n_s32(t32[1], DCT_CONST_BITS);
24 static INLINE void idct_cospi_8_24_d_kernel(const int16x4_t s0,
26 const int16x4_t cospi_0_8_16_24,
27 int32x4_t *const t32) {
28 t32[0] = vmull_lane_s16(s0, cospi_0_8_16_24, 3);
29 t32[1] = vmull_lane_s16(s1, cospi_0_8_16_24, 3);
30 t32[0] = vmlsl_lane_s16(t32[0], s1, cospi_0_8_16_24, 1);
31 t32[1] = vmlal_lane_s16(t32[1], s0, cospi_0_8_16_24, 1);
34 static INLINE void idct_cospi_8_24_d(const int16x4_t s0, const int16x4_t s1,
35 const int16x4_t cospi_0_8_16_24,
36 int16x4_t *const d0, int16x4_t *const d1) {
39 idct_cospi_8_24_d_kernel(s0, s1, cospi_0_8_16_24, t32);
40 wrap_low_4x2(t32, d0, d1);
43 static INLINE void idct_cospi_8_24_neg_d(const int16x4_t s0, const int16x4_t s1,
44 const int16x4_t cospi_0_8_16_24,
46 int16x4_t *const d1) {
49 idct_cospi_8_24_d_kernel(s0, s1, cospi_0_8_16_24, t32);
50 t32[1] = vnegq_s32(t32[1]);
51 wrap_low_4x2(t32, d0, d1);
54 static INLINE void idct_cospi_16_16_d(const int16x4_t s0, const int16x4_t s1,
55 const int16x4_t cospi_0_8_16_24,
57 int16x4_t *const d1) {
60 t32[2] = vmull_lane_s16(s1, cospi_0_8_16_24, 2);
61 t32[0] = vmlsl_lane_s16(t32[2], s0, cospi_0_8_16_24, 2);
62 t32[1] = vmlal_lane_s16(t32[2], s0, cospi_0_8_16_24, 2);
63 wrap_low_4x2(t32, d0, d1);
66 static INLINE void idct16x16_add_store(const int16x8_t *const out,
67 uint8_t *dest, const int stride) {
68 // Add the result to dest
69 idct16x16_add8x1(out[0], &dest, stride);
70 idct16x16_add8x1(out[1], &dest, stride);
71 idct16x16_add8x1(out[2], &dest, stride);
72 idct16x16_add8x1(out[3], &dest, stride);
73 idct16x16_add8x1(out[4], &dest, stride);
74 idct16x16_add8x1(out[5], &dest, stride);
75 idct16x16_add8x1(out[6], &dest, stride);
76 idct16x16_add8x1(out[7], &dest, stride);
77 idct16x16_add8x1(out[8], &dest, stride);
78 idct16x16_add8x1(out[9], &dest, stride);
79 idct16x16_add8x1(out[10], &dest, stride);
80 idct16x16_add8x1(out[11], &dest, stride);
81 idct16x16_add8x1(out[12], &dest, stride);
82 idct16x16_add8x1(out[13], &dest, stride);
83 idct16x16_add8x1(out[14], &dest, stride);
84 idct16x16_add8x1(out[15], &dest, stride);
87 static INLINE void idct16x16_add_store_bd8(int16x8_t *const out, uint16_t *dest,
89 // Add the result to dest
90 const int16x8_t max = vdupq_n_s16((1 << 8) - 1);
91 out[0] = vrshrq_n_s16(out[0], 6);
92 out[1] = vrshrq_n_s16(out[1], 6);
93 out[2] = vrshrq_n_s16(out[2], 6);
94 out[3] = vrshrq_n_s16(out[3], 6);
95 out[4] = vrshrq_n_s16(out[4], 6);
96 out[5] = vrshrq_n_s16(out[5], 6);
97 out[6] = vrshrq_n_s16(out[6], 6);
98 out[7] = vrshrq_n_s16(out[7], 6);
99 out[8] = vrshrq_n_s16(out[8], 6);
100 out[9] = vrshrq_n_s16(out[9], 6);
101 out[10] = vrshrq_n_s16(out[10], 6);
102 out[11] = vrshrq_n_s16(out[11], 6);
103 out[12] = vrshrq_n_s16(out[12], 6);
104 out[13] = vrshrq_n_s16(out[13], 6);
105 out[14] = vrshrq_n_s16(out[14], 6);
106 out[15] = vrshrq_n_s16(out[15], 6);
107 highbd_idct16x16_add8x1(out[0], max, &dest, stride);
108 highbd_idct16x16_add8x1(out[1], max, &dest, stride);
109 highbd_idct16x16_add8x1(out[2], max, &dest, stride);
110 highbd_idct16x16_add8x1(out[3], max, &dest, stride);
111 highbd_idct16x16_add8x1(out[4], max, &dest, stride);
112 highbd_idct16x16_add8x1(out[5], max, &dest, stride);
113 highbd_idct16x16_add8x1(out[6], max, &dest, stride);
114 highbd_idct16x16_add8x1(out[7], max, &dest, stride);
115 highbd_idct16x16_add8x1(out[8], max, &dest, stride);
116 highbd_idct16x16_add8x1(out[9], max, &dest, stride);
117 highbd_idct16x16_add8x1(out[10], max, &dest, stride);
118 highbd_idct16x16_add8x1(out[11], max, &dest, stride);
119 highbd_idct16x16_add8x1(out[12], max, &dest, stride);
120 highbd_idct16x16_add8x1(out[13], max, &dest, stride);
121 highbd_idct16x16_add8x1(out[14], max, &dest, stride);
122 highbd_idct16x16_add8x1(out[15], max, &dest, stride);
125 void vpx_idct16x16_256_add_half1d(const void *const input, int16_t *output,
126 void *const dest, const int stride,
127 const int highbd_flag) {
128 const int16x8_t cospis0 = vld1q_s16(kCospi);
129 const int16x8_t cospis1 = vld1q_s16(kCospi + 8);
130 const int16x4_t cospi_0_8_16_24 = vget_low_s16(cospis0);
131 const int16x4_t cospi_4_12_20N_28 = vget_high_s16(cospis0);
132 const int16x4_t cospi_2_30_10_22 = vget_low_s16(cospis1);
133 const int16x4_t cospi_6_26N_14_18N = vget_high_s16(cospis1);
134 int16x8_t in[16], step1[16], step2[16], out[16];
138 const tran_low_t *inputT = (const tran_low_t *)input;
139 in[0] = load_tran_low_to_s16q(inputT);
141 in[8] = load_tran_low_to_s16q(inputT);
143 in[1] = load_tran_low_to_s16q(inputT);
145 in[9] = load_tran_low_to_s16q(inputT);
147 in[2] = load_tran_low_to_s16q(inputT);
149 in[10] = load_tran_low_to_s16q(inputT);
151 in[3] = load_tran_low_to_s16q(inputT);
153 in[11] = load_tran_low_to_s16q(inputT);
155 in[4] = load_tran_low_to_s16q(inputT);
157 in[12] = load_tran_low_to_s16q(inputT);
159 in[5] = load_tran_low_to_s16q(inputT);
161 in[13] = load_tran_low_to_s16q(inputT);
163 in[6] = load_tran_low_to_s16q(inputT);
165 in[14] = load_tran_low_to_s16q(inputT);
167 in[7] = load_tran_low_to_s16q(inputT);
169 in[15] = load_tran_low_to_s16q(inputT);
171 const int16_t *inputT = (const int16_t *)input;
172 in[0] = vld1q_s16(inputT);
174 in[8] = vld1q_s16(inputT);
176 in[1] = vld1q_s16(inputT);
178 in[9] = vld1q_s16(inputT);
180 in[2] = vld1q_s16(inputT);
182 in[10] = vld1q_s16(inputT);
184 in[3] = vld1q_s16(inputT);
186 in[11] = vld1q_s16(inputT);
188 in[4] = vld1q_s16(inputT);
190 in[12] = vld1q_s16(inputT);
192 in[5] = vld1q_s16(inputT);
194 in[13] = vld1q_s16(inputT);
196 in[6] = vld1q_s16(inputT);
198 in[14] = vld1q_s16(inputT);
200 in[7] = vld1q_s16(inputT);
202 in[15] = vld1q_s16(inputT);
206 transpose_s16_8x8(&in[0], &in[1], &in[2], &in[3], &in[4], &in[5], &in[6],
208 transpose_s16_8x8(&in[8], &in[9], &in[10], &in[11], &in[12], &in[13], &in[14],
212 step1[0] = in[0 / 2];
213 step1[1] = in[16 / 2];
214 step1[2] = in[8 / 2];
215 step1[3] = in[24 / 2];
216 step1[4] = in[4 / 2];
217 step1[5] = in[20 / 2];
218 step1[6] = in[12 / 2];
219 step1[7] = in[28 / 2];
220 step1[8] = in[2 / 2];
221 step1[9] = in[18 / 2];
222 step1[10] = in[10 / 2];
223 step1[11] = in[26 / 2];
224 step1[12] = in[6 / 2];
225 step1[13] = in[22 / 2];
226 step1[14] = in[14 / 2];
227 step1[15] = in[30 / 2];
238 idct_cospi_2_30(step1[8], step1[15], cospi_2_30_10_22, &step2[8], &step2[15]);
239 idct_cospi_14_18(step1[9], step1[14], cospi_6_26N_14_18N, &step2[9],
241 idct_cospi_10_22(step1[10], step1[13], cospi_2_30_10_22, &step2[10],
243 idct_cospi_6_26(step1[11], step1[12], cospi_6_26N_14_18N, &step2[11],
251 idct_cospi_4_28(step2[4], step2[7], cospi_4_12_20N_28, &step1[4], &step1[7]);
252 idct_cospi_12_20(step2[5], step2[6], cospi_4_12_20N_28, &step1[5], &step1[6]);
253 step1[8] = vaddq_s16(step2[8], step2[9]);
254 step1[9] = vsubq_s16(step2[8], step2[9]);
255 step1[10] = vsubq_s16(step2[11], step2[10]);
256 step1[11] = vaddq_s16(step2[11], step2[10]);
257 step1[12] = vaddq_s16(step2[12], step2[13]);
258 step1[13] = vsubq_s16(step2[12], step2[13]);
259 step1[14] = vsubq_s16(step2[15], step2[14]);
260 step1[15] = vaddq_s16(step2[15], step2[14]);
263 idct_cospi_16_16_q(step1[1], step1[0], cospi_0_8_16_24, &step2[1], &step2[0]);
264 idct_cospi_8_24_q(step1[2], step1[3], cospi_0_8_16_24, &step2[2], &step2[3]);
265 step2[4] = vaddq_s16(step1[4], step1[5]);
266 step2[5] = vsubq_s16(step1[4], step1[5]);
267 step2[6] = vsubq_s16(step1[7], step1[6]);
268 step2[7] = vaddq_s16(step1[7], step1[6]);
270 idct_cospi_8_24_q(step1[14], step1[9], cospi_0_8_16_24, &step2[9],
272 idct_cospi_8_24_neg_q(step1[13], step1[10], cospi_0_8_16_24, &step2[13],
274 step2[11] = step1[11];
275 step2[12] = step1[12];
276 step2[15] = step1[15];
279 step1[0] = vaddq_s16(step2[0], step2[3]);
280 step1[1] = vaddq_s16(step2[1], step2[2]);
281 step1[2] = vsubq_s16(step2[1], step2[2]);
282 step1[3] = vsubq_s16(step2[0], step2[3]);
284 idct_cospi_16_16_q(step2[5], step2[6], cospi_0_8_16_24, &step1[5], &step1[6]);
286 step1[8] = vaddq_s16(step2[8], step2[11]);
287 step1[9] = vaddq_s16(step2[9], step2[10]);
288 step1[10] = vsubq_s16(step2[9], step2[10]);
289 step1[11] = vsubq_s16(step2[8], step2[11]);
290 step1[12] = vsubq_s16(step2[15], step2[12]);
291 step1[13] = vsubq_s16(step2[14], step2[13]);
292 step1[14] = vaddq_s16(step2[14], step2[13]);
293 step1[15] = vaddq_s16(step2[15], step2[12]);
296 step2[0] = vaddq_s16(step1[0], step1[7]);
297 step2[1] = vaddq_s16(step1[1], step1[6]);
298 step2[2] = vaddq_s16(step1[2], step1[5]);
299 step2[3] = vaddq_s16(step1[3], step1[4]);
300 step2[4] = vsubq_s16(step1[3], step1[4]);
301 step2[5] = vsubq_s16(step1[2], step1[5]);
302 step2[6] = vsubq_s16(step1[1], step1[6]);
303 step2[7] = vsubq_s16(step1[0], step1[7]);
304 idct_cospi_16_16_q(step1[10], step1[13], cospi_0_8_16_24, &step2[10],
306 idct_cospi_16_16_q(step1[11], step1[12], cospi_0_8_16_24, &step2[11],
310 step2[14] = step1[14];
311 step2[15] = step1[15];
314 idct16x16_add_stage7(step2, out);
317 idct16x16_store_pass1(out, output);
320 idct16x16_add_store_bd8(out, dest, stride);
322 idct16x16_add_store(out, dest, stride);
327 void vpx_idct16x16_38_add_half1d(const void *const input, int16_t *const output,
328 void *const dest, const int stride,
329 const int highbd_flag) {
330 const int16x8_t cospis0 = vld1q_s16(kCospi);
331 const int16x8_t cospis1 = vld1q_s16(kCospi + 8);
332 const int16x8_t cospisd0 = vaddq_s16(cospis0, cospis0);
333 const int16x8_t cospisd1 = vaddq_s16(cospis1, cospis1);
334 const int16x4_t cospi_0_8_16_24 = vget_low_s16(cospis0);
335 const int16x4_t cospid_0_8_16_24 = vget_low_s16(cospisd0);
336 const int16x4_t cospid_4_12_20N_28 = vget_high_s16(cospisd0);
337 const int16x4_t cospid_2_30_10_22 = vget_low_s16(cospisd1);
338 const int16x4_t cospid_6_26_14_18N = vget_high_s16(cospisd1);
339 int16x8_t in[8], step1[16], step2[16], out[16];
343 const tran_low_t *inputT = (const tran_low_t *)input;
344 in[0] = load_tran_low_to_s16q(inputT);
346 in[1] = load_tran_low_to_s16q(inputT);
348 in[2] = load_tran_low_to_s16q(inputT);
350 in[3] = load_tran_low_to_s16q(inputT);
352 in[4] = load_tran_low_to_s16q(inputT);
354 in[5] = load_tran_low_to_s16q(inputT);
356 in[6] = load_tran_low_to_s16q(inputT);
358 in[7] = load_tran_low_to_s16q(inputT);
360 const int16_t *inputT = (const int16_t *)input;
361 in[0] = vld1q_s16(inputT);
363 in[1] = vld1q_s16(inputT);
365 in[2] = vld1q_s16(inputT);
367 in[3] = vld1q_s16(inputT);
369 in[4] = vld1q_s16(inputT);
371 in[5] = vld1q_s16(inputT);
373 in[6] = vld1q_s16(inputT);
375 in[7] = vld1q_s16(inputT);
379 transpose_s16_8x8(&in[0], &in[1], &in[2], &in[3], &in[4], &in[5], &in[6],
383 step1[0] = in[0 / 2];
384 step1[2] = in[8 / 2];
385 step1[4] = in[4 / 2];
386 step1[6] = in[12 / 2];
387 step1[8] = in[2 / 2];
388 step1[10] = in[10 / 2];
389 step1[12] = in[6 / 2];
390 step1[14] = in[14 / 2]; // 0 in pass 1
397 step2[8] = vqrdmulhq_lane_s16(step1[8], cospid_2_30_10_22, 1);
398 step2[9] = vqrdmulhq_lane_s16(step1[14], cospid_6_26_14_18N, 3);
399 step2[10] = vqrdmulhq_lane_s16(step1[10], cospid_2_30_10_22, 3);
400 step2[11] = vqrdmulhq_lane_s16(step1[12], cospid_6_26_14_18N, 1);
401 step2[12] = vqrdmulhq_lane_s16(step1[12], cospid_6_26_14_18N, 0);
402 step2[13] = vqrdmulhq_lane_s16(step1[10], cospid_2_30_10_22, 2);
403 step2[14] = vqrdmulhq_lane_s16(step1[14], cospid_6_26_14_18N, 2);
404 step2[15] = vqrdmulhq_lane_s16(step1[8], cospid_2_30_10_22, 0);
409 step1[4] = vqrdmulhq_lane_s16(step2[4], cospid_4_12_20N_28, 3);
410 step1[5] = vqrdmulhq_lane_s16(step2[6], cospid_4_12_20N_28, 2);
411 step1[6] = vqrdmulhq_lane_s16(step2[6], cospid_4_12_20N_28, 1);
412 step1[7] = vqrdmulhq_lane_s16(step2[4], cospid_4_12_20N_28, 0);
413 step1[8] = vaddq_s16(step2[8], step2[9]);
414 step1[9] = vsubq_s16(step2[8], step2[9]);
415 step1[10] = vsubq_s16(step2[11], step2[10]);
416 step1[11] = vaddq_s16(step2[11], step2[10]);
417 step1[12] = vaddq_s16(step2[12], step2[13]);
418 step1[13] = vsubq_s16(step2[12], step2[13]);
419 step1[14] = vsubq_s16(step2[15], step2[14]);
420 step1[15] = vaddq_s16(step2[15], step2[14]);
423 step2[0] = step2[1] = vqrdmulhq_lane_s16(step1[0], cospid_0_8_16_24, 2);
424 step2[2] = vqrdmulhq_lane_s16(step1[2], cospid_0_8_16_24, 3);
425 step2[3] = vqrdmulhq_lane_s16(step1[2], cospid_0_8_16_24, 1);
426 step2[4] = vaddq_s16(step1[4], step1[5]);
427 step2[5] = vsubq_s16(step1[4], step1[5]);
428 step2[6] = vsubq_s16(step1[7], step1[6]);
429 step2[7] = vaddq_s16(step1[7], step1[6]);
431 idct_cospi_8_24_q(step1[14], step1[9], cospi_0_8_16_24, &step2[9],
433 idct_cospi_8_24_neg_q(step1[13], step1[10], cospi_0_8_16_24, &step2[13],
435 step2[11] = step1[11];
436 step2[12] = step1[12];
437 step2[15] = step1[15];
440 step1[0] = vaddq_s16(step2[0], step2[3]);
441 step1[1] = vaddq_s16(step2[1], step2[2]);
442 step1[2] = vsubq_s16(step2[1], step2[2]);
443 step1[3] = vsubq_s16(step2[0], step2[3]);
445 idct_cospi_16_16_q(step2[5], step2[6], cospi_0_8_16_24, &step1[5], &step1[6]);
447 step1[8] = vaddq_s16(step2[8], step2[11]);
448 step1[9] = vaddq_s16(step2[9], step2[10]);
449 step1[10] = vsubq_s16(step2[9], step2[10]);
450 step1[11] = vsubq_s16(step2[8], step2[11]);
451 step1[12] = vsubq_s16(step2[15], step2[12]);
452 step1[13] = vsubq_s16(step2[14], step2[13]);
453 step1[14] = vaddq_s16(step2[14], step2[13]);
454 step1[15] = vaddq_s16(step2[15], step2[12]);
457 step2[0] = vaddq_s16(step1[0], step1[7]);
458 step2[1] = vaddq_s16(step1[1], step1[6]);
459 step2[2] = vaddq_s16(step1[2], step1[5]);
460 step2[3] = vaddq_s16(step1[3], step1[4]);
461 step2[4] = vsubq_s16(step1[3], step1[4]);
462 step2[5] = vsubq_s16(step1[2], step1[5]);
463 step2[6] = vsubq_s16(step1[1], step1[6]);
464 step2[7] = vsubq_s16(step1[0], step1[7]);
465 idct_cospi_16_16_q(step1[10], step1[13], cospi_0_8_16_24, &step2[10],
467 idct_cospi_16_16_q(step1[11], step1[12], cospi_0_8_16_24, &step2[11],
471 step2[14] = step1[14];
472 step2[15] = step1[15];
475 idct16x16_add_stage7(step2, out);
478 idct16x16_store_pass1(out, output);
481 idct16x16_add_store_bd8(out, dest, stride);
483 idct16x16_add_store(out, dest, stride);
488 void vpx_idct16x16_10_add_half1d_pass1(const tran_low_t *input,
490 const int16x8_t cospis0 = vld1q_s16(kCospi);
491 const int16x8_t cospis1 = vld1q_s16(kCospi + 8);
492 const int16x8_t cospisd0 = vaddq_s16(cospis0, cospis0);
493 const int16x8_t cospisd1 = vaddq_s16(cospis1, cospis1);
494 const int16x4_t cospi_0_8_16_24 = vget_low_s16(cospis0);
495 const int16x4_t cospid_0_8_16_24 = vget_low_s16(cospisd0);
496 const int16x4_t cospid_4_12_20N_28 = vget_high_s16(cospisd0);
497 const int16x4_t cospid_2_30_10_22 = vget_low_s16(cospisd1);
498 const int16x4_t cospid_6_26_14_18N = vget_high_s16(cospisd1);
499 int16x4_t in[4], step1[16], step2[16], out[16];
502 in[0] = load_tran_low_to_s16d(input);
504 in[1] = load_tran_low_to_s16d(input);
506 in[2] = load_tran_low_to_s16d(input);
508 in[3] = load_tran_low_to_s16d(input);
511 transpose_s16_4x4d(&in[0], &in[1], &in[2], &in[3]);
514 step1[0] = in[0 / 2];
515 step1[4] = in[4 / 2];
516 step1[8] = in[2 / 2];
517 step1[12] = in[6 / 2];
522 step2[8] = vqrdmulh_lane_s16(step1[8], cospid_2_30_10_22, 1);
523 step2[11] = vqrdmulh_lane_s16(step1[12], cospid_6_26_14_18N, 1);
524 step2[12] = vqrdmulh_lane_s16(step1[12], cospid_6_26_14_18N, 0);
525 step2[15] = vqrdmulh_lane_s16(step1[8], cospid_2_30_10_22, 0);
529 step1[4] = vqrdmulh_lane_s16(step2[4], cospid_4_12_20N_28, 3);
530 step1[7] = vqrdmulh_lane_s16(step2[4], cospid_4_12_20N_28, 0);
533 step1[10] = step2[11];
534 step1[11] = step2[11];
535 step1[12] = step2[12];
536 step1[13] = step2[12];
537 step1[14] = step2[15];
538 step1[15] = step2[15];
541 step2[0] = step2[1] = vqrdmulh_lane_s16(step1[0], cospid_0_8_16_24, 2);
547 idct_cospi_8_24_d(step1[14], step1[9], cospi_0_8_16_24, &step2[9],
549 idct_cospi_8_24_neg_d(step1[13], step1[10], cospi_0_8_16_24, &step2[13],
551 step2[11] = step1[11];
552 step2[12] = step1[12];
553 step2[15] = step1[15];
561 idct_cospi_16_16_d(step2[5], step2[6], cospi_0_8_16_24, &step1[5], &step1[6]);
563 step1[8] = vadd_s16(step2[8], step2[11]);
564 step1[9] = vadd_s16(step2[9], step2[10]);
565 step1[10] = vsub_s16(step2[9], step2[10]);
566 step1[11] = vsub_s16(step2[8], step2[11]);
567 step1[12] = vsub_s16(step2[15], step2[12]);
568 step1[13] = vsub_s16(step2[14], step2[13]);
569 step1[14] = vadd_s16(step2[14], step2[13]);
570 step1[15] = vadd_s16(step2[15], step2[12]);
573 step2[0] = vadd_s16(step1[0], step1[7]);
574 step2[1] = vadd_s16(step1[1], step1[6]);
575 step2[2] = vadd_s16(step1[2], step1[5]);
576 step2[3] = vadd_s16(step1[3], step1[4]);
577 step2[4] = vsub_s16(step1[3], step1[4]);
578 step2[5] = vsub_s16(step1[2], step1[5]);
579 step2[6] = vsub_s16(step1[1], step1[6]);
580 step2[7] = vsub_s16(step1[0], step1[7]);
581 idct_cospi_16_16_d(step1[10], step1[13], cospi_0_8_16_24, &step2[10],
583 idct_cospi_16_16_d(step1[11], step1[12], cospi_0_8_16_24, &step2[11],
587 step2[14] = step1[14];
588 step2[15] = step1[15];
591 out[0] = vadd_s16(step2[0], step2[15]);
592 out[1] = vadd_s16(step2[1], step2[14]);
593 out[2] = vadd_s16(step2[2], step2[13]);
594 out[3] = vadd_s16(step2[3], step2[12]);
595 out[4] = vadd_s16(step2[4], step2[11]);
596 out[5] = vadd_s16(step2[5], step2[10]);
597 out[6] = vadd_s16(step2[6], step2[9]);
598 out[7] = vadd_s16(step2[7], step2[8]);
599 out[8] = vsub_s16(step2[7], step2[8]);
600 out[9] = vsub_s16(step2[6], step2[9]);
601 out[10] = vsub_s16(step2[5], step2[10]);
602 out[11] = vsub_s16(step2[4], step2[11]);
603 out[12] = vsub_s16(step2[3], step2[12]);
604 out[13] = vsub_s16(step2[2], step2[13]);
605 out[14] = vsub_s16(step2[1], step2[14]);
606 out[15] = vsub_s16(step2[0], step2[15]);
608 // pass 1: save the result into output
609 vst1_s16(output, out[0]);
611 vst1_s16(output, out[1]);
613 vst1_s16(output, out[2]);
615 vst1_s16(output, out[3]);
617 vst1_s16(output, out[4]);
619 vst1_s16(output, out[5]);
621 vst1_s16(output, out[6]);
623 vst1_s16(output, out[7]);
625 vst1_s16(output, out[8]);
627 vst1_s16(output, out[9]);
629 vst1_s16(output, out[10]);
631 vst1_s16(output, out[11]);
633 vst1_s16(output, out[12]);
635 vst1_s16(output, out[13]);
637 vst1_s16(output, out[14]);
639 vst1_s16(output, out[15]);
642 void vpx_idct16x16_10_add_half1d_pass2(const int16_t *input,
643 int16_t *const output, void *const dest,
645 const int highbd_flag) {
646 const int16x8_t cospis0 = vld1q_s16(kCospi);
647 const int16x8_t cospis1 = vld1q_s16(kCospi + 8);
648 const int16x8_t cospisd0 = vaddq_s16(cospis0, cospis0);
649 const int16x8_t cospisd1 = vaddq_s16(cospis1, cospis1);
650 const int16x4_t cospi_0_8_16_24 = vget_low_s16(cospis0);
651 const int16x4_t cospid_0_8_16_24 = vget_low_s16(cospisd0);
652 const int16x4_t cospid_4_12_20N_28 = vget_high_s16(cospisd0);
653 const int16x4_t cospid_2_30_10_22 = vget_low_s16(cospisd1);
654 const int16x4_t cospid_6_26_14_18N = vget_high_s16(cospisd1);
656 int16x8_t in[4], step1[16], step2[16], out[16];
659 ind[0] = vld1_s16(input);
661 ind[1] = vld1_s16(input);
663 ind[2] = vld1_s16(input);
665 ind[3] = vld1_s16(input);
667 ind[4] = vld1_s16(input);
669 ind[5] = vld1_s16(input);
671 ind[6] = vld1_s16(input);
673 ind[7] = vld1_s16(input);
676 transpose_s16_4x8(ind[0], ind[1], ind[2], ind[3], ind[4], ind[5], ind[6],
677 ind[7], &in[0], &in[1], &in[2], &in[3]);
680 step1[0] = in[0 / 2];
681 step1[4] = in[4 / 2];
682 step1[8] = in[2 / 2];
683 step1[12] = in[6 / 2];
688 step2[8] = vqrdmulhq_lane_s16(step1[8], cospid_2_30_10_22, 1);
689 step2[11] = vqrdmulhq_lane_s16(step1[12], cospid_6_26_14_18N, 1);
690 step2[12] = vqrdmulhq_lane_s16(step1[12], cospid_6_26_14_18N, 0);
691 step2[15] = vqrdmulhq_lane_s16(step1[8], cospid_2_30_10_22, 0);
695 step1[4] = vqrdmulhq_lane_s16(step2[4], cospid_4_12_20N_28, 3);
696 step1[7] = vqrdmulhq_lane_s16(step2[4], cospid_4_12_20N_28, 0);
699 step1[10] = step2[11];
700 step1[11] = step2[11];
701 step1[12] = step2[12];
702 step1[13] = step2[12];
703 step1[14] = step2[15];
704 step1[15] = step2[15];
707 step2[0] = step2[1] = vqrdmulhq_lane_s16(step1[0], cospid_0_8_16_24, 2);
713 idct_cospi_8_24_q(step1[14], step1[9], cospi_0_8_16_24, &step2[9],
715 idct_cospi_8_24_neg_q(step1[13], step1[10], cospi_0_8_16_24, &step2[13],
717 step2[11] = step1[11];
718 step2[12] = step1[12];
719 step2[15] = step1[15];
727 idct_cospi_16_16_q(step2[5], step2[6], cospi_0_8_16_24, &step1[5], &step1[6]);
729 step1[8] = vaddq_s16(step2[8], step2[11]);
730 step1[9] = vaddq_s16(step2[9], step2[10]);
731 step1[10] = vsubq_s16(step2[9], step2[10]);
732 step1[11] = vsubq_s16(step2[8], step2[11]);
733 step1[12] = vsubq_s16(step2[15], step2[12]);
734 step1[13] = vsubq_s16(step2[14], step2[13]);
735 step1[14] = vaddq_s16(step2[14], step2[13]);
736 step1[15] = vaddq_s16(step2[15], step2[12]);
739 step2[0] = vaddq_s16(step1[0], step1[7]);
740 step2[1] = vaddq_s16(step1[1], step1[6]);
741 step2[2] = vaddq_s16(step1[2], step1[5]);
742 step2[3] = vaddq_s16(step1[3], step1[4]);
743 step2[4] = vsubq_s16(step1[3], step1[4]);
744 step2[5] = vsubq_s16(step1[2], step1[5]);
745 step2[6] = vsubq_s16(step1[1], step1[6]);
746 step2[7] = vsubq_s16(step1[0], step1[7]);
747 idct_cospi_16_16_q(step1[10], step1[13], cospi_0_8_16_24, &step2[10],
749 idct_cospi_16_16_q(step1[11], step1[12], cospi_0_8_16_24, &step2[11],
753 step2[14] = step1[14];
754 step2[15] = step1[15];
757 idct16x16_add_stage7(step2, out);
760 idct16x16_store_pass1(out, output);
763 idct16x16_add_store_bd8(out, dest, stride);
765 idct16x16_add_store(out, dest, stride);
770 void vpx_idct16x16_256_add_neon(const tran_low_t *input, uint8_t *dest,
772 int16_t row_idct_output[16 * 16];
775 // Parallel idct on the upper 8 rows
776 vpx_idct16x16_256_add_half1d(input, row_idct_output, dest, stride, 0);
778 // Parallel idct on the lower 8 rows
779 vpx_idct16x16_256_add_half1d(input + 8 * 16, row_idct_output + 8, dest,
783 // Parallel idct to get the left 8 columns
784 vpx_idct16x16_256_add_half1d(row_idct_output, NULL, dest, stride, 0);
786 // Parallel idct to get the right 8 columns
787 vpx_idct16x16_256_add_half1d(row_idct_output + 16 * 8, NULL, dest + 8, stride,
791 void vpx_idct16x16_38_add_neon(const tran_low_t *input, uint8_t *dest,
793 int16_t row_idct_output[16 * 16];
796 // Parallel idct on the upper 8 rows
797 vpx_idct16x16_38_add_half1d(input, row_idct_output, dest, stride, 0);
800 // Parallel idct to get the left 8 columns
801 vpx_idct16x16_38_add_half1d(row_idct_output, NULL, dest, stride, 0);
803 // Parallel idct to get the right 8 columns
804 vpx_idct16x16_38_add_half1d(row_idct_output + 16 * 8, NULL, dest + 8, stride,
808 void vpx_idct16x16_10_add_neon(const tran_low_t *input, uint8_t *dest,
810 int16_t row_idct_output[4 * 16];
813 // Parallel idct on the upper 8 rows
814 vpx_idct16x16_10_add_half1d_pass1(input, row_idct_output);
817 // Parallel idct to get the left 8 columns
818 vpx_idct16x16_10_add_half1d_pass2(row_idct_output, NULL, dest, stride, 0);
820 // Parallel idct to get the right 8 columns
821 vpx_idct16x16_10_add_half1d_pass2(row_idct_output + 4 * 8, NULL, dest + 8,