/*
 *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_DSP_ARM_IDCT_NEON_H_
#define VPX_DSP_ARM_IDCT_NEON_H_

#include <arm_neon.h>

#include "./vpx_config.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/txfm_common.h"
#include "vpx_dsp/vpx_dsp_common.h"

DECLARE_ALIGNED(16, static const int16_t, kCospi[16]) = {
  16384 /*  cospi_0_64  */, 15137 /*  cospi_8_64  */,
  11585 /*  cospi_16_64 */, 6270 /*  cospi_24_64 */,
  16069 /*  cospi_4_64  */, 13623 /*  cospi_12_64 */,
  -9102 /* -cospi_20_64 */, 3196 /*  cospi_28_64 */,
  16305 /*  cospi_2_64  */, 1606 /*  cospi_30_64 */,
  14449 /*  cospi_10_64 */, 7723 /*  cospi_22_64 */,
  15679 /*  cospi_6_64  */, -4756 /* -cospi_26_64 */,
  12665 /*  cospi_14_64 */, -10394 /* -cospi_18_64 */
};

DECLARE_ALIGNED(16, static const int32_t, kCospi32[16]) = {
  16384 /*  cospi_0_64  */, 15137 /*  cospi_8_64  */,
  11585 /*  cospi_16_64 */, 6270 /*  cospi_24_64 */,
  16069 /*  cospi_4_64  */, 13623 /*  cospi_12_64 */,
  -9102 /* -cospi_20_64 */, 3196 /*  cospi_28_64 */,
  16305 /*  cospi_2_64  */, 1606 /*  cospi_30_64 */,
  14449 /*  cospi_10_64 */, 7723 /*  cospi_22_64 */,
  15679 /*  cospi_6_64  */, -4756 /* -cospi_26_64 */,
  12665 /*  cospi_14_64 */, -10394 /* -cospi_18_64 */
};
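
// For reference: each entry in the two tables above is the 14-bit fixed-point
// form of a cosine, round(cos(n * pi / 64) * (1 << 14)) for the angle n named
// in the trailing comment (the cospi_*_64 constants from
// vpx_dsp/txfm_common.h). A minimal offline sketch of how such a table could
// be regenerated; make_cospi is a hypothetical helper, not part of the
// library:
//
//   #include <math.h>
//   static int16_t make_cospi(int n) {
//     return (int16_t)lround(cos(n * M_PI / 64.0) * 16384.0);
//   }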

//------------------------------------------------------------------------------
// Helper functions used to load tran_low_t into int16, narrowing if necessary.

static INLINE int16x8x2_t load_tran_low_to_s16x2q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4x2_t v0 = vld2q_s32(buf);
  const int32x4x2_t v1 = vld2q_s32(buf + 8);
  const int16x4_t s0 = vmovn_s32(v0.val[0]);
  const int16x4_t s1 = vmovn_s32(v0.val[1]);
  const int16x4_t s2 = vmovn_s32(v1.val[0]);
  const int16x4_t s3 = vmovn_s32(v1.val[1]);
  int16x8x2_t res;
  res.val[0] = vcombine_s16(s0, s2);
  res.val[1] = vcombine_s16(s1, s3);
  return res;
#else
  return vld2q_s16(buf);
#endif
}

static INLINE int16x8_t load_tran_low_to_s16q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vld1q_s32(buf);
  const int32x4_t v1 = vld1q_s32(buf + 4);
  const int16x4_t s0 = vmovn_s32(v0);
  const int16x4_t s1 = vmovn_s32(v1);
  return vcombine_s16(s0, s1);
#else
  return vld1q_s16(buf);
#endif
}

static INLINE int16x4_t load_tran_low_to_s16d(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vld1q_s32(buf);
  return vmovn_s32(v0);
#else
  return vld1_s16(buf);
#endif
}

static INLINE void store_s16q_to_tran_low(tran_low_t *buf, const int16x8_t a) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vmovl_s16(vget_low_s16(a));
  const int32x4_t v1 = vmovl_s16(vget_high_s16(a));
  vst1q_s32(buf, v0);
  vst1q_s32(buf + 4, v1);
#else
  vst1q_s16(buf, a);
#endif
}
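
// Usage sketch (hypothetical caller, not part of the library): the helpers
// above let idct kernels operate on int16x8_t rows regardless of whether
// tran_low_t is int16_t or int32_t (CONFIG_VP9_HIGHBITDEPTH):
//
//   static INLINE void copy_row8(const tran_low_t *in, tran_low_t *out) {
//     const int16x8_t row = load_tran_low_to_s16q(in);
//     store_s16q_to_tran_low(out, row);
//   }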

//------------------------------------------------------------------------------
// Use saturating add/sub to avoid overflow in 2nd pass in high bit-depth
static INLINE int16x8_t final_add(const int16x8_t a, const int16x8_t b) {
#if CONFIG_VP9_HIGHBITDEPTH
  return vqaddq_s16(a, b);
#else
  return vaddq_s16(a, b);
#endif
}

static INLINE int16x8_t final_sub(const int16x8_t a, const int16x8_t b) {
#if CONFIG_VP9_HIGHBITDEPTH
  return vqsubq_s16(a, b);
#else
  return vsubq_s16(a, b);
#endif
}

//------------------------------------------------------------------------------

static INLINE int32x4x2_t highbd_idct_add_dual(const int32x4x2_t s0,
                                               const int32x4x2_t s1) {
  int32x4x2_t t;
  t.val[0] = vaddq_s32(s0.val[0], s1.val[0]);
  t.val[1] = vaddq_s32(s0.val[1], s1.val[1]);
  return t;
}

static INLINE int32x4x2_t highbd_idct_sub_dual(const int32x4x2_t s0,
                                               const int32x4x2_t s1) {
  int32x4x2_t t;
  t.val[0] = vsubq_s32(s0.val[0], s1.val[0]);
  t.val[1] = vsubq_s32(s0.val[1], s1.val[1]);
  return t;
}

//------------------------------------------------------------------------------

// Multiply a by a_const. Saturate, shift and narrow by DCT_CONST_BITS.
static INLINE int16x8_t multiply_shift_and_narrow_s16(const int16x8_t a,
                                                      const int16_t a_const) {
  // Shift by DCT_CONST_BITS + rounding will be within 16 bits for well-formed
  // streams. See WRAPLOW and dct_const_round_shift for details.
  // This instruction doubles the result and returns the high half, essentially
  // resulting in a right shift by 15. By doubling the constant first, this
  // becomes a right shift by DCT_CONST_BITS.
  // The largest possible value used here is
  // vpx_dsp/txfm_common.h:cospi_1_64 = 16364 (* 2 = 32728), which falls *just*
  // within the range of int16_t (+32767 / -32768) even when negated.
  return vqrdmulhq_n_s16(a, a_const * 2);
}
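
// A scalar model of the trick above, assuming DCT_CONST_BITS == 14 as defined
// in vpx_dsp/txfm_common.h: vqrdmulh computes sat16((2 * a * b + (1 << 15)) >>
// 16), so with b == a_const * 2 each lane evaluates to
//
//   (int16_t)(((int32_t)a * a_const + (1 << 13)) >> 14)
//
// i.e. a rounded right shift by DCT_CONST_BITS, matching the scalar
// dct_const_round_shift().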

// Add a and b, then multiply by ab_const. Shift and narrow by DCT_CONST_BITS.
static INLINE int16x8_t add_multiply_shift_and_narrow_s16(
    const int16x8_t a, const int16x8_t b, const int16_t ab_const) {
  // In both add_ and its pair, sub_, the input for well-formed streams will be
  // well within 16 bits (input to the idct is the difference between two
  // frames and will be within -255 to 255, or 9 bits).
  // However, for inputs over about 25,000 (valid for int16_t, but not for idct
  // input) this function cannot use vaddq_s16.
  // In order to match existing behavior and intentionally out-of-range tests,
  // expand the addition up to 32 bits to prevent truncation.
  int32x4_t temp_low = vaddl_s16(vget_low_s16(a), vget_low_s16(b));
  int32x4_t temp_high = vaddl_s16(vget_high_s16(a), vget_high_s16(b));
  temp_low = vmulq_n_s32(temp_low, ab_const);
  temp_high = vmulq_n_s32(temp_high, ab_const);
  return vcombine_s16(vrshrn_n_s32(temp_low, DCT_CONST_BITS),
                      vrshrn_n_s32(temp_high, DCT_CONST_BITS));
}

// Subtract b from a, then multiply by ab_const. Shift and narrow by
// DCT_CONST_BITS.
static INLINE int16x8_t sub_multiply_shift_and_narrow_s16(
    const int16x8_t a, const int16x8_t b, const int16_t ab_const) {
  int32x4_t temp_low = vsubl_s16(vget_low_s16(a), vget_low_s16(b));
  int32x4_t temp_high = vsubl_s16(vget_high_s16(a), vget_high_s16(b));
  temp_low = vmulq_n_s32(temp_low, ab_const);
  temp_high = vmulq_n_s32(temp_high, ab_const);
  return vcombine_s16(vrshrn_n_s32(temp_low, DCT_CONST_BITS),
                      vrshrn_n_s32(temp_high, DCT_CONST_BITS));
}

// Multiply a by a_const and b by b_const, then accumulate. Shift and narrow
// by DCT_CONST_BITS.
static INLINE int16x8_t multiply_accumulate_shift_and_narrow_s16(
    const int16x8_t a, const int16_t a_const, const int16x8_t b,
    const int16_t b_const) {
  int32x4_t temp_low = vmull_n_s16(vget_low_s16(a), a_const);
  int32x4_t temp_high = vmull_n_s16(vget_high_s16(a), a_const);
  temp_low = vmlal_n_s16(temp_low, vget_low_s16(b), b_const);
  temp_high = vmlal_n_s16(temp_high, vget_high_s16(b), b_const);
  return vcombine_s16(vrshrn_n_s32(temp_low, DCT_CONST_BITS),
                      vrshrn_n_s32(temp_high, DCT_CONST_BITS));
}
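
// All four s16 helpers above end in the same rounded narrowing,
// vrshrn_n_s32(x, DCT_CONST_BITS), which corresponds to the scalar
// dct_const_round_shift(x) = ROUND_POWER_OF_TWO(x, DCT_CONST_BITS) used by
// the C reference inverse transforms.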

//------------------------------------------------------------------------------

// Note: The following 4 functions could use 32-bit operations for bit-depth
// 10. However, although it's 20% faster with gcc, it's 20% slower with clang.
// Use 64-bit operations for now.

// Multiply a by a_const. Saturate, shift and narrow by DCT_CONST_BITS.
static INLINE int32x4x2_t
multiply_shift_and_narrow_s32_dual(const int32x4x2_t a, const int32_t a_const) {
  int64x2_t b[4];
  int32x4x2_t c;
  b[0] = vmull_n_s32(vget_low_s32(a.val[0]), a_const);
  b[1] = vmull_n_s32(vget_high_s32(a.val[0]), a_const);
  b[2] = vmull_n_s32(vget_low_s32(a.val[1]), a_const);
  b[3] = vmull_n_s32(vget_high_s32(a.val[1]), a_const);
  c.val[0] = vcombine_s32(vrshrn_n_s64(b[0], DCT_CONST_BITS),
                          vrshrn_n_s64(b[1], DCT_CONST_BITS));
  c.val[1] = vcombine_s32(vrshrn_n_s64(b[2], DCT_CONST_BITS),
                          vrshrn_n_s64(b[3], DCT_CONST_BITS));
  return c;
}

// Add a and b, then multiply by ab_const. Shift and narrow by DCT_CONST_BITS.
static INLINE int32x4x2_t add_multiply_shift_and_narrow_s32_dual(
    const int32x4x2_t a, const int32x4x2_t b, const int32_t ab_const) {
  const int32x4_t temp_low = vaddq_s32(a.val[0], b.val[0]);
  const int32x4_t temp_high = vaddq_s32(a.val[1], b.val[1]);
  int64x2_t c[4];
  int32x4x2_t d;
  c[0] = vmull_n_s32(vget_low_s32(temp_low), ab_const);
  c[1] = vmull_n_s32(vget_high_s32(temp_low), ab_const);
  c[2] = vmull_n_s32(vget_low_s32(temp_high), ab_const);
  c[3] = vmull_n_s32(vget_high_s32(temp_high), ab_const);
  d.val[0] = vcombine_s32(vrshrn_n_s64(c[0], DCT_CONST_BITS),
                          vrshrn_n_s64(c[1], DCT_CONST_BITS));
  d.val[1] = vcombine_s32(vrshrn_n_s64(c[2], DCT_CONST_BITS),
                          vrshrn_n_s64(c[3], DCT_CONST_BITS));
  return d;
}

// Subtract b from a, then multiply by ab_const. Shift and narrow by
// DCT_CONST_BITS.
static INLINE int32x4x2_t sub_multiply_shift_and_narrow_s32_dual(
    const int32x4x2_t a, const int32x4x2_t b, const int32_t ab_const) {
  const int32x4_t temp_low = vsubq_s32(a.val[0], b.val[0]);
  const int32x4_t temp_high = vsubq_s32(a.val[1], b.val[1]);
  int64x2_t c[4];
  int32x4x2_t d;
  c[0] = vmull_n_s32(vget_low_s32(temp_low), ab_const);
  c[1] = vmull_n_s32(vget_high_s32(temp_low), ab_const);
  c[2] = vmull_n_s32(vget_low_s32(temp_high), ab_const);
  c[3] = vmull_n_s32(vget_high_s32(temp_high), ab_const);
  d.val[0] = vcombine_s32(vrshrn_n_s64(c[0], DCT_CONST_BITS),
                          vrshrn_n_s64(c[1], DCT_CONST_BITS));
  d.val[1] = vcombine_s32(vrshrn_n_s64(c[2], DCT_CONST_BITS),
                          vrshrn_n_s64(c[3], DCT_CONST_BITS));
  return d;
}

// Multiply a by a_const and b by b_const, then accumulate. Shift and narrow
// by DCT_CONST_BITS.
static INLINE int32x4x2_t multiply_accumulate_shift_and_narrow_s32_dual(
    const int32x4x2_t a, const int32_t a_const, const int32x4x2_t b,
    const int32_t b_const) {
  int64x2_t c[4];
  int32x4x2_t d;
  c[0] = vmull_n_s32(vget_low_s32(a.val[0]), a_const);
  c[1] = vmull_n_s32(vget_high_s32(a.val[0]), a_const);
  c[2] = vmull_n_s32(vget_low_s32(a.val[1]), a_const);
  c[3] = vmull_n_s32(vget_high_s32(a.val[1]), a_const);
  c[0] = vmlal_n_s32(c[0], vget_low_s32(b.val[0]), b_const);
  c[1] = vmlal_n_s32(c[1], vget_high_s32(b.val[0]), b_const);
  c[2] = vmlal_n_s32(c[2], vget_low_s32(b.val[1]), b_const);
  c[3] = vmlal_n_s32(c[3], vget_high_s32(b.val[1]), b_const);
  d.val[0] = vcombine_s32(vrshrn_n_s64(c[0], DCT_CONST_BITS),
                          vrshrn_n_s64(c[1], DCT_CONST_BITS));
  d.val[1] = vcombine_s32(vrshrn_n_s64(c[2], DCT_CONST_BITS),
                          vrshrn_n_s64(c[3], DCT_CONST_BITS));
  return d;
}

// Shift the output down by 6 and add it to the destination buffer.
static INLINE void add_and_store_u8_s16(const int16x8_t a0, const int16x8_t a1,
                                        const int16x8_t a2, const int16x8_t a3,
                                        const int16x8_t a4, const int16x8_t a5,
                                        const int16x8_t a6, const int16x8_t a7,
                                        uint8_t *b, const int b_stride) {
  uint8x8_t b0, b1, b2, b3, b4, b5, b6, b7;
  int16x8_t c0, c1, c2, c3, c4, c5, c6, c7;

  b0 = vld1_u8(b);
  b += b_stride;
  b1 = vld1_u8(b);
  b += b_stride;
  b2 = vld1_u8(b);
  b += b_stride;
  b3 = vld1_u8(b);
  b += b_stride;
  b4 = vld1_u8(b);
  b += b_stride;
  b5 = vld1_u8(b);
  b += b_stride;
  b6 = vld1_u8(b);
  b += b_stride;
  b7 = vld1_u8(b);
  b -= (7 * b_stride);

  // c = b + (a >> 6)
  c0 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b0)), a0, 6);
  c1 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b1)), a1, 6);
  c2 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b2)), a2, 6);
  c3 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b3)), a3, 6);
  c4 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b4)), a4, 6);
  c5 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b5)), a5, 6);
  c6 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b6)), a6, 6);
  c7 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b7)), a7, 6);

  b0 = vqmovun_s16(c0);
  b1 = vqmovun_s16(c1);
  b2 = vqmovun_s16(c2);
  b3 = vqmovun_s16(c3);
  b4 = vqmovun_s16(c4);
  b5 = vqmovun_s16(c5);
  b6 = vqmovun_s16(c6);
  b7 = vqmovun_s16(c7);

  vst1_u8(b, b0);
  b += b_stride;
  vst1_u8(b, b1);
  b += b_stride;
  vst1_u8(b, b2);
  b += b_stride;
  vst1_u8(b, b3);
  b += b_stride;
  vst1_u8(b, b4);
  b += b_stride;
  vst1_u8(b, b5);
  b += b_stride;
  vst1_u8(b, b6);
  b += b_stride;
  vst1_u8(b, b7);
}

static INLINE uint8x16_t create_dcq(const int16_t dc) {
  // Clip to [0, 255]; gcc may compile this to a 'usat' instruction.
  const int16_t t = (dc < 0) ? 0 : ((dc > 255) ? 255 : dc);
  return vdupq_n_u8((uint8_t)t);
}
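
// Usage sketch (paraphrased from the dc-only idct add paths): the caller
// splits on the sign of the dc term so the clipped splat can be applied with
// saturating unsigned arithmetic, e.g.
//
//   if (a1 >= 0) {
//     const uint8x16_t dc = create_dcq(a1);   // rows: vqaddq_u8(d, dc)
//   } else {
//     const uint8x16_t dc = create_dcq(-a1);  // rows: vqsubq_u8(d, dc)
//   }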

static INLINE void idct4x4_16_kernel_bd8(const int16x4_t cospis,
                                         int16x8_t *const a0,
                                         int16x8_t *const a1) {
  int16x4_t b0, b1, b2, b3;
  int32x4_t c0, c1, c2, c3;
  int16x8_t d0, d1;

  transpose_s16_4x4q(a0, a1);
  b0 = vget_low_s16(*a0);
  b1 = vget_high_s16(*a0);
  b2 = vget_low_s16(*a1);
  b3 = vget_high_s16(*a1);
  c0 = vmull_lane_s16(b0, cospis, 2);
  c2 = vmull_lane_s16(b1, cospis, 2);
  c1 = vsubq_s32(c0, c2);
  c0 = vaddq_s32(c0, c2);
  c2 = vmull_lane_s16(b2, cospis, 3);
  c3 = vmull_lane_s16(b2, cospis, 1);
  c2 = vmlsl_lane_s16(c2, b3, cospis, 1);
  c3 = vmlal_lane_s16(c3, b3, cospis, 3);
  b0 = vrshrn_n_s32(c0, DCT_CONST_BITS);
  b1 = vrshrn_n_s32(c1, DCT_CONST_BITS);
  b2 = vrshrn_n_s32(c2, DCT_CONST_BITS);
  b3 = vrshrn_n_s32(c3, DCT_CONST_BITS);
  d0 = vcombine_s16(b0, b1);
  d1 = vcombine_s16(b3, b2);
  *a0 = vaddq_s16(d0, d1);
  *a1 = vsubq_s16(d0, d1);
}
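
// Usage sketch: each call transposes its two registers and then runs one 1-D
// idct pass, so a complete 4x4 inverse transform simply applies the kernel
// twice on the same registers (rows, then columns); the caller handles the
// surrounding load, final rounding and store.
//
//   idct4x4_16_kernel_bd8(cospis, &a0, &a1);  // pass 1
//   idct4x4_16_kernel_bd8(cospis, &a0, &a1);  // pass 2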

static INLINE void idct8x8_12_pass1_bd8(
    const int16x4_t cospis0, const int16x4_t cospisd0, const int16x4_t cospisd1,
    int16x4_t *const io0, int16x4_t *const io1, int16x4_t *const io2,
    int16x4_t *const io3, int16x4_t *const io4, int16x4_t *const io5,
    int16x4_t *const io6, int16x4_t *const io7) {
  int16x4_t step1[8], step2[8];
  int32x4_t t32[2];

  transpose_s16_4x4d(io0, io1, io2, io3);

  // stage 1
  step1[4] = vqrdmulh_lane_s16(*io1, cospisd1, 3);
  step1[5] = vqrdmulh_lane_s16(*io3, cospisd1, 2);
  step1[6] = vqrdmulh_lane_s16(*io3, cospisd1, 1);
  step1[7] = vqrdmulh_lane_s16(*io1, cospisd1, 0);

  // stage 2
  step2[1] = vqrdmulh_lane_s16(*io0, cospisd0, 2);
  step2[2] = vqrdmulh_lane_s16(*io2, cospisd0, 3);
  step2[3] = vqrdmulh_lane_s16(*io2, cospisd0, 1);

  step2[4] = vadd_s16(step1[4], step1[5]);
  step2[5] = vsub_s16(step1[4], step1[5]);
  step2[6] = vsub_s16(step1[7], step1[6]);
  step2[7] = vadd_s16(step1[7], step1[6]);

  // stage 3
  step1[0] = vadd_s16(step2[1], step2[3]);
  step1[1] = vadd_s16(step2[1], step2[2]);
  step1[2] = vsub_s16(step2[1], step2[2]);
  step1[3] = vsub_s16(step2[1], step2[3]);

  t32[1] = vmull_lane_s16(step2[6], cospis0, 2);
  t32[0] = vmlsl_lane_s16(t32[1], step2[5], cospis0, 2);
  t32[1] = vmlal_lane_s16(t32[1], step2[5], cospis0, 2);
  step1[5] = vrshrn_n_s32(t32[0], DCT_CONST_BITS);
  step1[6] = vrshrn_n_s32(t32[1], DCT_CONST_BITS);

  // stage 4
  *io0 = vadd_s16(step1[0], step2[7]);
  *io1 = vadd_s16(step1[1], step1[6]);
  *io2 = vadd_s16(step1[2], step1[5]);
  *io3 = vadd_s16(step1[3], step2[4]);
  *io4 = vsub_s16(step1[3], step2[4]);
  *io5 = vsub_s16(step1[2], step1[5]);
  *io6 = vsub_s16(step1[1], step1[6]);
  *io7 = vsub_s16(step1[0], step2[7]);
}

static INLINE void idct8x8_12_pass2_bd8(
    const int16x4_t cospis0, const int16x4_t cospisd0, const int16x4_t cospisd1,
    const int16x4_t input0, const int16x4_t input1, const int16x4_t input2,
    const int16x4_t input3, const int16x4_t input4, const int16x4_t input5,
    const int16x4_t input6, const int16x4_t input7, int16x8_t *const output0,
    int16x8_t *const output1, int16x8_t *const output2,
    int16x8_t *const output3, int16x8_t *const output4,
    int16x8_t *const output5, int16x8_t *const output6,
    int16x8_t *const output7) {
  int16x8_t in[4];
  int16x8_t step1[8], step2[8];
  int32x4_t t32[8];
  int16x4_t t16[8];

  transpose_s16_4x8(input0, input1, input2, input3, input4, input5, input6,
                    input7, &in[0], &in[1], &in[2], &in[3]);

  // stage 1
  step1[4] = vqrdmulhq_lane_s16(in[1], cospisd1, 3);
  step1[5] = vqrdmulhq_lane_s16(in[3], cospisd1, 2);
  step1[6] = vqrdmulhq_lane_s16(in[3], cospisd1, 1);
  step1[7] = vqrdmulhq_lane_s16(in[1], cospisd1, 0);

  // stage 2
  step2[1] = vqrdmulhq_lane_s16(in[0], cospisd0, 2);
  step2[2] = vqrdmulhq_lane_s16(in[2], cospisd0, 3);
  step2[3] = vqrdmulhq_lane_s16(in[2], cospisd0, 1);

  step2[4] = vaddq_s16(step1[4], step1[5]);
  step2[5] = vsubq_s16(step1[4], step1[5]);
  step2[6] = vsubq_s16(step1[7], step1[6]);
  step2[7] = vaddq_s16(step1[7], step1[6]);

  // stage 3
  step1[0] = vaddq_s16(step2[1], step2[3]);
  step1[1] = vaddq_s16(step2[1], step2[2]);
  step1[2] = vsubq_s16(step2[1], step2[2]);
  step1[3] = vsubq_s16(step2[1], step2[3]);

  t32[2] = vmull_lane_s16(vget_low_s16(step2[6]), cospis0, 2);
  t32[3] = vmull_lane_s16(vget_high_s16(step2[6]), cospis0, 2);
  t32[0] = vmlsl_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
  t32[1] = vmlsl_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
  t16[0] = vrshrn_n_s32(t32[0], DCT_CONST_BITS);
  t16[1] = vrshrn_n_s32(t32[1], DCT_CONST_BITS);
  t16[2] = vrshrn_n_s32(t32[2], DCT_CONST_BITS);
  t16[3] = vrshrn_n_s32(t32[3], DCT_CONST_BITS);
  step1[5] = vcombine_s16(t16[0], t16[1]);
  step1[6] = vcombine_s16(t16[2], t16[3]);

  // stage 4
  *output0 = vaddq_s16(step1[0], step2[7]);
  *output1 = vaddq_s16(step1[1], step1[6]);
  *output2 = vaddq_s16(step1[2], step1[5]);
  *output3 = vaddq_s16(step1[3], step2[4]);
  *output4 = vsubq_s16(step1[3], step2[4]);
  *output5 = vsubq_s16(step1[2], step1[5]);
  *output6 = vsubq_s16(step1[1], step1[6]);
  *output7 = vsubq_s16(step1[0], step2[7]);
}

static INLINE void idct8x8_64_1d_bd8(const int16x4_t cospis0,
                                     const int16x4_t cospis1,
                                     int16x8_t *const io0, int16x8_t *const io1,
                                     int16x8_t *const io2, int16x8_t *const io3,
                                     int16x8_t *const io4, int16x8_t *const io5,
                                     int16x8_t *const io6,
                                     int16x8_t *const io7) {
  int16x4_t input_1l, input_1h, input_3l, input_3h, input_5l, input_5h,
      input_7l, input_7h;
  int16x4_t step1l[4], step1h[4];
  int16x8_t step1[8], step2[8];
  int32x4_t t32[8];
  int16x4_t t16[8];

  transpose_s16_8x8(io0, io1, io2, io3, io4, io5, io6, io7);

  // stage 1
  input_1l = vget_low_s16(*io1);
  input_1h = vget_high_s16(*io1);
  input_3l = vget_low_s16(*io3);
  input_3h = vget_high_s16(*io3);
  input_5l = vget_low_s16(*io5);
  input_5h = vget_high_s16(*io5);
  input_7l = vget_low_s16(*io7);
  input_7h = vget_high_s16(*io7);
  step1l[0] = vget_low_s16(*io0);
  step1h[0] = vget_high_s16(*io0);
  step1l[1] = vget_low_s16(*io2);
  step1h[1] = vget_high_s16(*io2);
  step1l[2] = vget_low_s16(*io4);
  step1h[2] = vget_high_s16(*io4);
  step1l[3] = vget_low_s16(*io6);
  step1h[3] = vget_high_s16(*io6);

  t32[0] = vmull_lane_s16(input_1l, cospis1, 3);
  t32[1] = vmull_lane_s16(input_1h, cospis1, 3);
  t32[2] = vmull_lane_s16(input_3l, cospis1, 2);
  t32[3] = vmull_lane_s16(input_3h, cospis1, 2);
  t32[4] = vmull_lane_s16(input_3l, cospis1, 1);
  t32[5] = vmull_lane_s16(input_3h, cospis1, 1);
  t32[6] = vmull_lane_s16(input_1l, cospis1, 0);
  t32[7] = vmull_lane_s16(input_1h, cospis1, 0);
  t32[0] = vmlsl_lane_s16(t32[0], input_7l, cospis1, 0);
  t32[1] = vmlsl_lane_s16(t32[1], input_7h, cospis1, 0);
  t32[2] = vmlal_lane_s16(t32[2], input_5l, cospis1, 1);
  t32[3] = vmlal_lane_s16(t32[3], input_5h, cospis1, 1);
  t32[4] = vmlsl_lane_s16(t32[4], input_5l, cospis1, 2);
  t32[5] = vmlsl_lane_s16(t32[5], input_5h, cospis1, 2);
  t32[6] = vmlal_lane_s16(t32[6], input_7l, cospis1, 3);
  t32[7] = vmlal_lane_s16(t32[7], input_7h, cospis1, 3);
  t16[0] = vrshrn_n_s32(t32[0], DCT_CONST_BITS);
  t16[1] = vrshrn_n_s32(t32[1], DCT_CONST_BITS);
  t16[2] = vrshrn_n_s32(t32[2], DCT_CONST_BITS);
  t16[3] = vrshrn_n_s32(t32[3], DCT_CONST_BITS);
  t16[4] = vrshrn_n_s32(t32[4], DCT_CONST_BITS);
  t16[5] = vrshrn_n_s32(t32[5], DCT_CONST_BITS);
  t16[6] = vrshrn_n_s32(t32[6], DCT_CONST_BITS);
  t16[7] = vrshrn_n_s32(t32[7], DCT_CONST_BITS);
  step1[4] = vcombine_s16(t16[0], t16[1]);
  step1[5] = vcombine_s16(t16[2], t16[3]);
  step1[6] = vcombine_s16(t16[4], t16[5]);
  step1[7] = vcombine_s16(t16[6], t16[7]);

  // stage 2
  t32[2] = vmull_lane_s16(step1l[0], cospis0, 2);
  t32[3] = vmull_lane_s16(step1h[0], cospis0, 2);
  t32[4] = vmull_lane_s16(step1l[1], cospis0, 3);
  t32[5] = vmull_lane_s16(step1h[1], cospis0, 3);
  t32[6] = vmull_lane_s16(step1l[1], cospis0, 1);
  t32[7] = vmull_lane_s16(step1h[1], cospis0, 1);
  t32[0] = vmlal_lane_s16(t32[2], step1l[2], cospis0, 2);
  t32[1] = vmlal_lane_s16(t32[3], step1h[2], cospis0, 2);
  t32[2] = vmlsl_lane_s16(t32[2], step1l[2], cospis0, 2);
  t32[3] = vmlsl_lane_s16(t32[3], step1h[2], cospis0, 2);
  t32[4] = vmlsl_lane_s16(t32[4], step1l[3], cospis0, 1);
  t32[5] = vmlsl_lane_s16(t32[5], step1h[3], cospis0, 1);
  t32[6] = vmlal_lane_s16(t32[6], step1l[3], cospis0, 3);
  t32[7] = vmlal_lane_s16(t32[7], step1h[3], cospis0, 3);
  t16[0] = vrshrn_n_s32(t32[0], DCT_CONST_BITS);
  t16[1] = vrshrn_n_s32(t32[1], DCT_CONST_BITS);
  t16[2] = vrshrn_n_s32(t32[2], DCT_CONST_BITS);
  t16[3] = vrshrn_n_s32(t32[3], DCT_CONST_BITS);
  t16[4] = vrshrn_n_s32(t32[4], DCT_CONST_BITS);
  t16[5] = vrshrn_n_s32(t32[5], DCT_CONST_BITS);
  t16[6] = vrshrn_n_s32(t32[6], DCT_CONST_BITS);
  t16[7] = vrshrn_n_s32(t32[7], DCT_CONST_BITS);
  step2[0] = vcombine_s16(t16[0], t16[1]);
  step2[1] = vcombine_s16(t16[2], t16[3]);
  step2[2] = vcombine_s16(t16[4], t16[5]);
  step2[3] = vcombine_s16(t16[6], t16[7]);

  step2[4] = vaddq_s16(step1[4], step1[5]);
  step2[5] = vsubq_s16(step1[4], step1[5]);
  step2[6] = vsubq_s16(step1[7], step1[6]);
  step2[7] = vaddq_s16(step1[7], step1[6]);

  // stage 3
  step1[0] = vaddq_s16(step2[0], step2[3]);
  step1[1] = vaddq_s16(step2[1], step2[2]);
  step1[2] = vsubq_s16(step2[1], step2[2]);
  step1[3] = vsubq_s16(step2[0], step2[3]);

  t32[2] = vmull_lane_s16(vget_low_s16(step2[6]), cospis0, 2);
  t32[3] = vmull_lane_s16(vget_high_s16(step2[6]), cospis0, 2);
  t32[0] = vmlsl_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
  t32[1] = vmlsl_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
  t16[0] = vrshrn_n_s32(t32[0], DCT_CONST_BITS);
  t16[1] = vrshrn_n_s32(t32[1], DCT_CONST_BITS);
  t16[2] = vrshrn_n_s32(t32[2], DCT_CONST_BITS);
  t16[3] = vrshrn_n_s32(t32[3], DCT_CONST_BITS);
  step1[5] = vcombine_s16(t16[0], t16[1]);
  step1[6] = vcombine_s16(t16[2], t16[3]);

  // stage 4
  *io0 = vaddq_s16(step1[0], step2[7]);
  *io1 = vaddq_s16(step1[1], step1[6]);
  *io2 = vaddq_s16(step1[2], step1[5]);
  *io3 = vaddq_s16(step1[3], step2[4]);
  *io4 = vsubq_s16(step1[3], step2[4]);
  *io5 = vsubq_s16(step1[2], step1[5]);
  *io6 = vsubq_s16(step1[1], step1[6]);
  *io7 = vsubq_s16(step1[0], step2[7]);
}
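
// Note: this is one 1-D pass over an 8x8 block held in eight registers; a
// full 2-D inverse transform runs it twice (the transpose on entry makes the
// second call operate on columns) before rounding and adding to the
// destination.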

static INLINE void idct16x16_add_wrap_low_8x2(const int32x4_t *const t32,
                                              int16x8_t *const d0,
                                              int16x8_t *const d1) {
  int16x4_t t16[4];

  t16[0] = vrshrn_n_s32(t32[0], DCT_CONST_BITS);
  t16[1] = vrshrn_n_s32(t32[1], DCT_CONST_BITS);
  t16[2] = vrshrn_n_s32(t32[2], DCT_CONST_BITS);
  t16[3] = vrshrn_n_s32(t32[3], DCT_CONST_BITS);
  *d0 = vcombine_s16(t16[0], t16[1]);
  *d1 = vcombine_s16(t16[2], t16[3]);
}

static INLINE void idct_cospi_8_24_q_kernel(const int16x8_t s0,
                                            const int16x8_t s1,
                                            const int16x4_t cospi_0_8_16_24,
                                            int32x4_t *const t32) {
  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_0_8_16_24, 3);
  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_0_8_16_24, 3);
  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_0_8_16_24, 3);
  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_0_8_16_24, 3);
  t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_0_8_16_24, 1);
  t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_0_8_16_24, 1);
  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_0_8_16_24, 1);
  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_0_8_16_24, 1);
}

static INLINE void idct_cospi_8_24_q(const int16x8_t s0, const int16x8_t s1,
                                     const int16x4_t cospi_0_8_16_24,
                                     int16x8_t *const d0, int16x8_t *const d1) {
  int32x4_t t32[4];

  idct_cospi_8_24_q_kernel(s0, s1, cospi_0_8_16_24, t32);
  idct16x16_add_wrap_low_8x2(t32, d0, d1);
}

static INLINE void idct_cospi_8_24_neg_q(const int16x8_t s0, const int16x8_t s1,
                                         const int16x4_t cospi_0_8_16_24,
                                         int16x8_t *const d0,
                                         int16x8_t *const d1) {
  int32x4_t t32[4];

  idct_cospi_8_24_q_kernel(s0, s1, cospi_0_8_16_24, t32);
  t32[2] = vnegq_s32(t32[2]);
  t32[3] = vnegq_s32(t32[3]);
  idct16x16_add_wrap_low_8x2(t32, d0, d1);
}

static INLINE void idct_cospi_16_16_q(const int16x8_t s0, const int16x8_t s1,
                                      const int16x4_t cospi_0_8_16_24,
                                      int16x8_t *const d0,
                                      int16x8_t *const d1) {
  int32x4_t t32[6];

  t32[4] = vmull_lane_s16(vget_low_s16(s1), cospi_0_8_16_24, 2);
  t32[5] = vmull_lane_s16(vget_high_s16(s1), cospi_0_8_16_24, 2);
  t32[0] = vmlsl_lane_s16(t32[4], vget_low_s16(s0), cospi_0_8_16_24, 2);
  t32[1] = vmlsl_lane_s16(t32[5], vget_high_s16(s0), cospi_0_8_16_24, 2);
  t32[2] = vmlal_lane_s16(t32[4], vget_low_s16(s0), cospi_0_8_16_24, 2);
  t32[3] = vmlal_lane_s16(t32[5], vget_high_s16(s0), cospi_0_8_16_24, 2);
  idct16x16_add_wrap_low_8x2(t32, d0, d1);
}

static INLINE void idct_cospi_2_30(const int16x8_t s0, const int16x8_t s1,
                                   const int16x4_t cospi_2_30_10_22,
                                   int16x8_t *const d0, int16x8_t *const d1) {
  int32x4_t t32[4];

  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_2_30_10_22, 1);
  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_2_30_10_22, 1);
  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_2_30_10_22, 1);
  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_2_30_10_22, 1);
  t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_2_30_10_22, 0);
  t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_2_30_10_22, 0);
  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_2_30_10_22, 0);
  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_2_30_10_22, 0);
  idct16x16_add_wrap_low_8x2(t32, d0, d1);
}

static INLINE void idct_cospi_4_28(const int16x8_t s0, const int16x8_t s1,
                                   const int16x4_t cospi_4_12_20N_28,
                                   int16x8_t *const d0, int16x8_t *const d1) {
  int32x4_t t32[4];

  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_4_12_20N_28, 3);
  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_4_12_20N_28, 3);
  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_4_12_20N_28, 3);
  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_4_12_20N_28, 3);
  t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_4_12_20N_28, 0);
  t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_4_12_20N_28, 0);
  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_4_12_20N_28, 0);
  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_4_12_20N_28, 0);
  idct16x16_add_wrap_low_8x2(t32, d0, d1);
}

static INLINE void idct_cospi_6_26(const int16x8_t s0, const int16x8_t s1,
                                   const int16x4_t cospi_6_26N_14_18N,
                                   int16x8_t *const d0, int16x8_t *const d1) {
  int32x4_t t32[4];

  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_6_26N_14_18N, 0);
  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_6_26N_14_18N, 0);
  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_6_26N_14_18N, 0);
  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_6_26N_14_18N, 0);
  t32[0] = vmlal_lane_s16(t32[0], vget_low_s16(s1), cospi_6_26N_14_18N, 1);
  t32[1] = vmlal_lane_s16(t32[1], vget_high_s16(s1), cospi_6_26N_14_18N, 1);
  t32[2] = vmlsl_lane_s16(t32[2], vget_low_s16(s0), cospi_6_26N_14_18N, 1);
  t32[3] = vmlsl_lane_s16(t32[3], vget_high_s16(s0), cospi_6_26N_14_18N, 1);
  idct16x16_add_wrap_low_8x2(t32, d0, d1);
}

static INLINE void idct_cospi_10_22(const int16x8_t s0, const int16x8_t s1,
                                    const int16x4_t cospi_2_30_10_22,
                                    int16x8_t *const d0, int16x8_t *const d1) {
  int32x4_t t32[4];

  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_2_30_10_22, 3);
  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_2_30_10_22, 3);
  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_2_30_10_22, 3);
  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_2_30_10_22, 3);
  t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_2_30_10_22, 2);
  t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_2_30_10_22, 2);
  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_2_30_10_22, 2);
  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_2_30_10_22, 2);
  idct16x16_add_wrap_low_8x2(t32, d0, d1);
}

static INLINE void idct_cospi_12_20(const int16x8_t s0, const int16x8_t s1,
                                    const int16x4_t cospi_4_12_20N_28,
                                    int16x8_t *const d0, int16x8_t *const d1) {
  int32x4_t t32[4];

  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_4_12_20N_28, 1);
  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_4_12_20N_28, 1);
  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_4_12_20N_28, 1);
  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_4_12_20N_28, 1);
  t32[0] = vmlal_lane_s16(t32[0], vget_low_s16(s1), cospi_4_12_20N_28, 2);
  t32[1] = vmlal_lane_s16(t32[1], vget_high_s16(s1), cospi_4_12_20N_28, 2);
  t32[2] = vmlsl_lane_s16(t32[2], vget_low_s16(s0), cospi_4_12_20N_28, 2);
  t32[3] = vmlsl_lane_s16(t32[3], vget_high_s16(s0), cospi_4_12_20N_28, 2);
  idct16x16_add_wrap_low_8x2(t32, d0, d1);
}

static INLINE void idct_cospi_14_18(const int16x8_t s0, const int16x8_t s1,
                                    const int16x4_t cospi_6_26N_14_18N,
                                    int16x8_t *const d0, int16x8_t *const d1) {
  int32x4_t t32[4];

  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_6_26N_14_18N, 2);
  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_6_26N_14_18N, 2);
  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_6_26N_14_18N, 2);
  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_6_26N_14_18N, 2);
  t32[0] = vmlal_lane_s16(t32[0], vget_low_s16(s1), cospi_6_26N_14_18N, 3);
  t32[1] = vmlal_lane_s16(t32[1], vget_high_s16(s1), cospi_6_26N_14_18N, 3);
  t32[2] = vmlsl_lane_s16(t32[2], vget_low_s16(s0), cospi_6_26N_14_18N, 3);
  t32[3] = vmlsl_lane_s16(t32[3], vget_high_s16(s0), cospi_6_26N_14_18N, 3);
  idct16x16_add_wrap_low_8x2(t32, d0, d1);
}
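
// Each idct_cospi_* helper above is one butterfly rotation of the 16-point
// idct; writing the two packed constants as cospi_lo/cospi_hi (illustrative
// names), each computes, up to the sign convention of the pre-negated lanes
// (the *N suffixes):
//
//   *d0 = dct_const_round_shift(s0 * cospi_hi - s1 * cospi_lo);
//   *d1 = dct_const_round_shift(s1 * cospi_hi + s0 * cospi_lo);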

static INLINE void idct16x16_add_stage7(const int16x8_t *const step2,
                                        int16x8_t *const out) {
#if CONFIG_VP9_HIGHBITDEPTH
  // Use saturating add/sub to avoid overflow in 2nd pass
  out[0] = vqaddq_s16(step2[0], step2[15]);
  out[1] = vqaddq_s16(step2[1], step2[14]);
  out[2] = vqaddq_s16(step2[2], step2[13]);
  out[3] = vqaddq_s16(step2[3], step2[12]);
  out[4] = vqaddq_s16(step2[4], step2[11]);
  out[5] = vqaddq_s16(step2[5], step2[10]);
  out[6] = vqaddq_s16(step2[6], step2[9]);
  out[7] = vqaddq_s16(step2[7], step2[8]);
  out[8] = vqsubq_s16(step2[7], step2[8]);
  out[9] = vqsubq_s16(step2[6], step2[9]);
  out[10] = vqsubq_s16(step2[5], step2[10]);
  out[11] = vqsubq_s16(step2[4], step2[11]);
  out[12] = vqsubq_s16(step2[3], step2[12]);
  out[13] = vqsubq_s16(step2[2], step2[13]);
  out[14] = vqsubq_s16(step2[1], step2[14]);
  out[15] = vqsubq_s16(step2[0], step2[15]);
#else
  out[0] = vaddq_s16(step2[0], step2[15]);
  out[1] = vaddq_s16(step2[1], step2[14]);
  out[2] = vaddq_s16(step2[2], step2[13]);
  out[3] = vaddq_s16(step2[3], step2[12]);
  out[4] = vaddq_s16(step2[4], step2[11]);
  out[5] = vaddq_s16(step2[5], step2[10]);
  out[6] = vaddq_s16(step2[6], step2[9]);
  out[7] = vaddq_s16(step2[7], step2[8]);
  out[8] = vsubq_s16(step2[7], step2[8]);
  out[9] = vsubq_s16(step2[6], step2[9]);
  out[10] = vsubq_s16(step2[5], step2[10]);
  out[11] = vsubq_s16(step2[4], step2[11]);
  out[12] = vsubq_s16(step2[3], step2[12]);
  out[13] = vsubq_s16(step2[2], step2[13]);
  out[14] = vsubq_s16(step2[1], step2[14]);
  out[15] = vsubq_s16(step2[0], step2[15]);
#endif
}

static INLINE void idct16x16_store_pass1(const int16x8_t *const out,
                                         int16_t *output) {
  // Save the result into output
  vst1q_s16(output, out[0]);
  output += 16;
  vst1q_s16(output, out[1]);
  output += 16;
  vst1q_s16(output, out[2]);
  output += 16;
  vst1q_s16(output, out[3]);
  output += 16;
  vst1q_s16(output, out[4]);
  output += 16;
  vst1q_s16(output, out[5]);
  output += 16;
  vst1q_s16(output, out[6]);
  output += 16;
  vst1q_s16(output, out[7]);
  output += 16;
  vst1q_s16(output, out[8]);
  output += 16;
  vst1q_s16(output, out[9]);
  output += 16;
  vst1q_s16(output, out[10]);
  output += 16;
  vst1q_s16(output, out[11]);
  output += 16;
  vst1q_s16(output, out[12]);
  output += 16;
  vst1q_s16(output, out[13]);
  output += 16;
  vst1q_s16(output, out[14]);
  output += 16;
  vst1q_s16(output, out[15]);
}

static INLINE void idct16x16_add8x1(int16x8_t res, uint8_t **dest,
                                    const int stride) {
  uint8x8_t d = vld1_u8(*dest);
  uint16x8_t q;

  res = vrshrq_n_s16(res, 6);
  q = vaddw_u8(vreinterpretq_u16_s16(res), d);
  d = vqmovun_s16(vreinterpretq_s16_u16(q));
  vst1_u8(*dest, d);
  *dest += stride;
}
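
// Note on the reinterpret casts above: the signed, rounded residual is added
// to the zero-extended pixels in modular 16-bit arithmetic; reinterpreting
// back to signed and narrowing with the saturating vqmovun_s16 then clamps
// the result to [0, 255], which resolves any wraparound from negative
// residuals.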

static INLINE void highbd_idct16x16_add8x1(int16x8_t res, const int16x8_t max,
                                           uint16_t **dest, const int stride) {
  uint16x8_t d = vld1q_u16(*dest);

  res = vqaddq_s16(res, vreinterpretq_s16_u16(d));
  res = vminq_s16(res, max);
  d = vqshluq_n_s16(res, 0);
  vst1q_u16(*dest, d);
  *dest += stride;
}

static INLINE void highbd_idct16x16_add8x1_bd8(int16x8_t res, uint16_t **dest,
                                               const int stride) {
  uint16x8_t d = vld1q_u16(*dest);

  res = vrsraq_n_s16(vreinterpretq_s16_u16(d), res, 6);
  d = vmovl_u8(vqmovun_s16(res));
  vst1q_u16(*dest, d);
  *dest += stride;
}

static INLINE void highbd_add_and_store_bd8(const int16x8_t *const a,
                                            uint16_t *out, const int b_stride) {
  highbd_idct16x16_add8x1_bd8(a[0], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[1], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[2], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[3], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[4], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[5], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[6], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[7], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[8], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[9], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[10], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[11], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[12], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[13], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[14], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[15], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[16], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[17], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[18], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[19], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[20], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[21], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[22], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[23], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[24], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[25], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[26], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[27], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[28], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[29], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[30], &out, b_stride);
  highbd_idct16x16_add8x1_bd8(a[31], &out, b_stride);
}

static INLINE void highbd_idct16x16_add_store(const int32x4x2_t *const out,
                                              uint16_t *dest, const int stride,
                                              const int bd) {
  // Add the result to dest
  const int16x8_t max = vdupq_n_s16((1 << bd) - 1);
  int16x8_t o[16];
  o[0] = vcombine_s16(vrshrn_n_s32(out[0].val[0], 6),
                      vrshrn_n_s32(out[0].val[1], 6));
  o[1] = vcombine_s16(vrshrn_n_s32(out[1].val[0], 6),
                      vrshrn_n_s32(out[1].val[1], 6));
  o[2] = vcombine_s16(vrshrn_n_s32(out[2].val[0], 6),
                      vrshrn_n_s32(out[2].val[1], 6));
  o[3] = vcombine_s16(vrshrn_n_s32(out[3].val[0], 6),
                      vrshrn_n_s32(out[3].val[1], 6));
  o[4] = vcombine_s16(vrshrn_n_s32(out[4].val[0], 6),
                      vrshrn_n_s32(out[4].val[1], 6));
  o[5] = vcombine_s16(vrshrn_n_s32(out[5].val[0], 6),
                      vrshrn_n_s32(out[5].val[1], 6));
  o[6] = vcombine_s16(vrshrn_n_s32(out[6].val[0], 6),
                      vrshrn_n_s32(out[6].val[1], 6));
  o[7] = vcombine_s16(vrshrn_n_s32(out[7].val[0], 6),
                      vrshrn_n_s32(out[7].val[1], 6));
  o[8] = vcombine_s16(vrshrn_n_s32(out[8].val[0], 6),
                      vrshrn_n_s32(out[8].val[1], 6));
  o[9] = vcombine_s16(vrshrn_n_s32(out[9].val[0], 6),
                      vrshrn_n_s32(out[9].val[1], 6));
  o[10] = vcombine_s16(vrshrn_n_s32(out[10].val[0], 6),
                       vrshrn_n_s32(out[10].val[1], 6));
  o[11] = vcombine_s16(vrshrn_n_s32(out[11].val[0], 6),
                       vrshrn_n_s32(out[11].val[1], 6));
  o[12] = vcombine_s16(vrshrn_n_s32(out[12].val[0], 6),
                       vrshrn_n_s32(out[12].val[1], 6));
  o[13] = vcombine_s16(vrshrn_n_s32(out[13].val[0], 6),
                       vrshrn_n_s32(out[13].val[1], 6));
  o[14] = vcombine_s16(vrshrn_n_s32(out[14].val[0], 6),
                       vrshrn_n_s32(out[14].val[1], 6));
  o[15] = vcombine_s16(vrshrn_n_s32(out[15].val[0], 6),
                       vrshrn_n_s32(out[15].val[1], 6));
  highbd_idct16x16_add8x1(o[0], max, &dest, stride);
  highbd_idct16x16_add8x1(o[1], max, &dest, stride);
  highbd_idct16x16_add8x1(o[2], max, &dest, stride);
  highbd_idct16x16_add8x1(o[3], max, &dest, stride);
  highbd_idct16x16_add8x1(o[4], max, &dest, stride);
  highbd_idct16x16_add8x1(o[5], max, &dest, stride);
  highbd_idct16x16_add8x1(o[6], max, &dest, stride);
  highbd_idct16x16_add8x1(o[7], max, &dest, stride);
  highbd_idct16x16_add8x1(o[8], max, &dest, stride);
  highbd_idct16x16_add8x1(o[9], max, &dest, stride);
  highbd_idct16x16_add8x1(o[10], max, &dest, stride);
  highbd_idct16x16_add8x1(o[11], max, &dest, stride);
  highbd_idct16x16_add8x1(o[12], max, &dest, stride);
  highbd_idct16x16_add8x1(o[13], max, &dest, stride);
  highbd_idct16x16_add8x1(o[14], max, &dest, stride);
  highbd_idct16x16_add8x1(o[15], max, &dest, stride);
}

void vpx_idct16x16_256_add_half1d(const void *const input, int16_t *output,
                                  void *const dest, const int stride,
                                  const int highbd_flag);

void vpx_idct16x16_38_add_half1d(const void *const input, int16_t *const output,
                                 void *const dest, const int stride,
                                 const int highbd_flag);

void vpx_idct16x16_10_add_half1d_pass1(const tran_low_t *input,
                                       int16_t *output);

void vpx_idct16x16_10_add_half1d_pass2(const int16_t *input,
                                       int16_t *const output, void *const dest,
                                       const int stride, const int highbd_flag);

void vpx_idct32_32_neon(const tran_low_t *input, uint8_t *dest,
                        const int stride, const int highbd_flag);

void vpx_idct32_12_neon(const tran_low_t *const input, int16_t *output);
void vpx_idct32_16_neon(const int16_t *const input, void *const output,
                        const int stride, const int highbd_flag);

void vpx_idct32_6_neon(const tran_low_t *input, int16_t *output);
void vpx_idct32_8_neon(const int16_t *input, void *const output, int stride,
                       const int highbd_flag);

#endif  // VPX_DSP_ARM_IDCT_NEON_H_