/*
 *  Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_DSP_ARM_IDCT_NEON_H_
#define VPX_DSP_ARM_IDCT_NEON_H_

#include <arm_neon.h>

#include "./vpx_config.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/vpx_dsp_common.h"

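// The cospi_N_64 constants below are cos(N * pi / 64) scaled by 2^14 and
// rounded (see vpx_dsp/txfm_common.h). Some entries are stored pre-negated.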
DECLARE_ALIGNED(16, static const int16_t, kCospi[16]) = {
  16384 /*  cospi_0_64  */, 15137 /*  cospi_8_64  */,
  11585 /*  cospi_16_64 */, 6270 /*  cospi_24_64 */,
  16069 /*  cospi_4_64  */, 13623 /*  cospi_12_64 */,
  -9102 /* -cospi_20_64 */, 3196 /*  cospi_28_64 */,
  16305 /*  cospi_2_64  */, 1606 /*  cospi_30_64 */,
  14449 /*  cospi_10_64 */, 7723 /*  cospi_22_64 */,
  15679 /*  cospi_6_64  */, -4756 /* -cospi_26_64 */,
  12665 /*  cospi_14_64 */, -10394 /* -cospi_18_64 */
};

DECLARE_ALIGNED(16, static const int32_t, kCospi32[8]) = {
  16384 /*  cospi_0_64  */, 15137 /*  cospi_8_64  */,
  11585 /*  cospi_16_64 */, 6270 /*  cospi_24_64 */,
  16069 /*  cospi_4_64  */, 13623 /*  cospi_12_64 */,
  -9102 /* -cospi_20_64 */, 3196 /*  cospi_28_64 */
};

//------------------------------------------------------------------------------
// Helper functions used to load tran_low_t into int16, narrowing if necessary.
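// With CONFIG_VP9_HIGHBITDEPTH enabled, tran_low_t is 32 bits wide, so these
// helpers narrow to int16_t on load (and widen again on store).
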
static INLINE int16x8x2_t load_tran_low_to_s16x2q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4x2_t v0 = vld2q_s32(buf);
  const int32x4x2_t v1 = vld2q_s32(buf + 8);
  const int16x4_t s0 = vmovn_s32(v0.val[0]);
  const int16x4_t s1 = vmovn_s32(v0.val[1]);
  const int16x4_t s2 = vmovn_s32(v1.val[0]);
  const int16x4_t s3 = vmovn_s32(v1.val[1]);
  int16x8x2_t res;
  res.val[0] = vcombine_s16(s0, s2);
  res.val[1] = vcombine_s16(s1, s3);
  return res;
#else
  return vld2q_s16(buf);
#endif
}

static INLINE int16x8_t load_tran_low_to_s16q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vld1q_s32(buf);
  const int32x4_t v1 = vld1q_s32(buf + 4);
  const int16x4_t s0 = vmovn_s32(v0);
  const int16x4_t s1 = vmovn_s32(v1);
  return vcombine_s16(s0, s1);
#else
  return vld1q_s16(buf);
#endif
}

static INLINE int16x4_t load_tran_low_to_s16d(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vld1q_s32(buf);
  return vmovn_s32(v0);
#else
  return vld1_s16(buf);
#endif
}

static INLINE void store_s16q_to_tran_low(tran_low_t *buf, const int16x8_t a) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vmovl_s16(vget_low_s16(a));
  const int32x4_t v1 = vmovl_s16(vget_high_s16(a));
  vst1q_s32(buf, v0);
  vst1q_s32(buf + 4, v1);
#else
  vst1q_s16(buf, a);
#endif
}

//------------------------------------------------------------------------------

// Multiply a by a_const. Saturate, shift and narrow by 14.
static INLINE int16x8_t multiply_shift_and_narrow_s16(const int16x8_t a,
                                                      const int16_t a_const) {
  // Shift by 14 + rounding will be within 16 bits for well-formed streams.
  // See WRAPLOW and dct_const_round_shift for details.
  // This instruction doubles the result and returns the high half, essentially
  // resulting in a right shift by 15. Doubling the constant beforehand makes
  // this an effective right shift by 14.
  // The largest possible value used here is
  // vpx_dsp/txfm_common.h:cospi_1_64 = 16364 (* 2 = 32728), which falls *just*
  // within the range of int16_t (+32767 / -32768) even when negated.
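  // For example, a_const = cospi_16_64 (11585) makes each lane effectively
  // (a * 11585 + (1 << 13)) >> 14, i.e. dct_const_round_shift(a * cospi_16_64)
  // with saturation.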
  return vqrdmulhq_n_s16(a, a_const * 2);
}

// Add a and b, then multiply by ab_const. Shift and narrow by 14.
static INLINE int16x8_t add_multiply_shift_and_narrow_s16(
    const int16x8_t a, const int16x8_t b, const int16_t ab_const) {
  // In both add_ and its pair, sub_, the input for well-formed streams will be
  // well within 16 bits (the input to the idct is the difference between two
  // frames and will be within -255 to 255, or 9 bits).
  // However, for inputs over about 25,000 (valid for int16_t, but not for idct
  // input) this function cannot use vaddq_s16.
  // In order to match existing behavior and intentionally out-of-range tests,
  // expand the addition up to 32 bits to prevent truncation.
  int32x4_t temp_low = vaddl_s16(vget_low_s16(a), vget_low_s16(b));
  int32x4_t temp_high = vaddl_s16(vget_high_s16(a), vget_high_s16(b));
  temp_low = vmulq_n_s32(temp_low, ab_const);
  temp_high = vmulq_n_s32(temp_high, ab_const);
  return vcombine_s16(vrshrn_n_s32(temp_low, 14), vrshrn_n_s32(temp_high, 14));
}

// Subtract b from a, then multiply by ab_const. Shift and narrow by 14.
static INLINE int16x8_t sub_multiply_shift_and_narrow_s16(
    const int16x8_t a, const int16x8_t b, const int16_t ab_const) {
  int32x4_t temp_low = vsubl_s16(vget_low_s16(a), vget_low_s16(b));
  int32x4_t temp_high = vsubl_s16(vget_high_s16(a), vget_high_s16(b));
  temp_low = vmulq_n_s32(temp_low, ab_const);
  temp_high = vmulq_n_s32(temp_high, ab_const);
  return vcombine_s16(vrshrn_n_s32(temp_low, 14), vrshrn_n_s32(temp_high, 14));
}

// Multiply a by a_const and b by b_const, then accumulate. Shift and narrow by
// 14.
static INLINE int16x8_t multiply_accumulate_shift_and_narrow_s16(
    const int16x8_t a, const int16_t a_const, const int16x8_t b,
    const int16_t b_const) {
  int32x4_t temp_low = vmull_n_s16(vget_low_s16(a), a_const);
  int32x4_t temp_high = vmull_n_s16(vget_high_s16(a), a_const);
  temp_low = vmlal_n_s16(temp_low, vget_low_s16(b), b_const);
  temp_high = vmlal_n_s16(temp_high, vget_high_s16(b), b_const);
  return vcombine_s16(vrshrn_n_s32(temp_low, 14), vrshrn_n_s32(temp_high, 14));
}

// Shift the output down by 6 and add it to the destination buffer.
static INLINE void add_and_store_u8_s16(const int16x8_t a0, const int16x8_t a1,
                                        const int16x8_t a2, const int16x8_t a3,
                                        const int16x8_t a4, const int16x8_t a5,
                                        const int16x8_t a6, const int16x8_t a7,
                                        uint8_t *b, const int b_stride) {
  uint8x8_t b0, b1, b2, b3, b4, b5, b6, b7;
  int16x8_t c0, c1, c2, c3, c4, c5, c6, c7;

  b0 = vld1_u8(b);
  b += b_stride;
  b1 = vld1_u8(b);
  b += b_stride;
  b2 = vld1_u8(b);
  b += b_stride;
  b3 = vld1_u8(b);
  b += b_stride;
  b4 = vld1_u8(b);
  b += b_stride;
  b5 = vld1_u8(b);
  b += b_stride;
  b6 = vld1_u8(b);
  b += b_stride;
  b7 = vld1_u8(b);
  b -= (7 * b_stride);

  c0 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b0)), a0, 6);
  c1 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b1)), a1, 6);
  c2 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b2)), a2, 6);
  c3 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b3)), a3, 6);
  c4 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b4)), a4, 6);
  c5 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b5)), a5, 6);
  c6 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b6)), a6, 6);
  c7 = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b7)), a7, 6);

  b0 = vqmovun_s16(c0);
  b1 = vqmovun_s16(c1);
  b2 = vqmovun_s16(c2);
  b3 = vqmovun_s16(c3);
  b4 = vqmovun_s16(c4);
  b5 = vqmovun_s16(c5);
  b6 = vqmovun_s16(c6);
  b7 = vqmovun_s16(c7);

  vst1_u8(b, b0);
  b += b_stride;
  vst1_u8(b, b1);
  b += b_stride;
  vst1_u8(b, b2);
  b += b_stride;
  vst1_u8(b, b3);
  b += b_stride;
  vst1_u8(b, b4);
  b += b_stride;
  vst1_u8(b, b5);
  b += b_stride;
  vst1_u8(b, b6);
  b += b_stride;
  vst1_u8(b, b7);
}

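// Duplicate the clamped DC value across all 16 lanes of a uint8 vector, for
// the DC-only shortcut paths.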
static INLINE uint8x16_t create_dcq(const int16_t dc) {
  // Clip both sides; gcc may compile this to a 'usat' instruction.
  const int16_t t = (dc < 0) ? 0 : ((dc > 255) ? 255 : dc);
  return vdupq_n_u8((uint8_t)t);
}

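// One pass (rows or columns) of the 4x4 iDCT: *a0 and *a1 each hold two rows
// of four coefficients, which are transposed and then transformed in place.
// cospis holds { cospi_0, cospi_8, cospi_16, cospi_24 } (the first four
// kCospi entries).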
static INLINE void idct4x4_16_kernel_bd8(const int16x4_t cospis,
                                         int16x8_t *const a0,
                                         int16x8_t *const a1) {
  int16x4_t b0, b1, b2, b3;
  int32x4_t c0, c1, c2, c3;
  int16x8_t d0, d1;

  transpose_s16_4x4q(a0, a1);
  b0 = vget_low_s16(*a0);
  b1 = vget_high_s16(*a0);
  b2 = vget_low_s16(*a1);
  b3 = vget_high_s16(*a1);
  c0 = vmull_lane_s16(b0, cospis, 2);
  c2 = vmull_lane_s16(b1, cospis, 2);
  c1 = vsubq_s32(c0, c2);
  c0 = vaddq_s32(c0, c2);
  c2 = vmull_lane_s16(b2, cospis, 3);
  c3 = vmull_lane_s16(b2, cospis, 1);
  c2 = vmlsl_lane_s16(c2, b3, cospis, 1);
  c3 = vmlal_lane_s16(c3, b3, cospis, 3);
  b0 = vrshrn_n_s32(c0, 14);
  b1 = vrshrn_n_s32(c1, 14);
  b2 = vrshrn_n_s32(c2, 14);
  b3 = vrshrn_n_s32(c3, 14);
  d0 = vcombine_s16(b0, b1);
  d1 = vcombine_s16(b3, b2);
  *a0 = vaddq_s16(d0, d1);
  *a1 = vsubq_s16(d0, d1);
}

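// First pass of the 8x8 iDCT when at most the 12 lowest-frequency
// coefficients are nonzero: only io0-io3 are read (the rest are assumed
// zero), so the stage 1 and stage 2 multiplies reduce to single vqrdmulh
// operations against the pre-doubled constants in cospisd0 and cospisd1.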
static INLINE void idct8x8_12_pass1_bd8(
    const int16x4_t cospis0, const int16x4_t cospisd0, const int16x4_t cospisd1,
    int16x4_t *const io0, int16x4_t *const io1, int16x4_t *const io2,
    int16x4_t *const io3, int16x4_t *const io4, int16x4_t *const io5,
    int16x4_t *const io6, int16x4_t *const io7) {
  int16x4_t step1[8], step2[8];
  int32x4_t t32[2];

  transpose_s16_4x4d(io0, io1, io2, io3);

  // stage 1
  step1[4] = vqrdmulh_lane_s16(*io1, cospisd1, 3);
  step1[5] = vqrdmulh_lane_s16(*io3, cospisd1, 2);
  step1[6] = vqrdmulh_lane_s16(*io3, cospisd1, 1);
  step1[7] = vqrdmulh_lane_s16(*io1, cospisd1, 0);

  // stage 2
  step2[1] = vqrdmulh_lane_s16(*io0, cospisd0, 2);
  step2[2] = vqrdmulh_lane_s16(*io2, cospisd0, 3);
  step2[3] = vqrdmulh_lane_s16(*io2, cospisd0, 1);

  step2[4] = vadd_s16(step1[4], step1[5]);
  step2[5] = vsub_s16(step1[4], step1[5]);
  step2[6] = vsub_s16(step1[7], step1[6]);
  step2[7] = vadd_s16(step1[7], step1[6]);

  // stage 3
  step1[0] = vadd_s16(step2[1], step2[3]);
  step1[1] = vadd_s16(step2[1], step2[2]);
  step1[2] = vsub_s16(step2[1], step2[2]);
  step1[3] = vsub_s16(step2[1], step2[3]);

  t32[1] = vmull_lane_s16(step2[6], cospis0, 2);
  t32[0] = vmlsl_lane_s16(t32[1], step2[5], cospis0, 2);
  t32[1] = vmlal_lane_s16(t32[1], step2[5], cospis0, 2);
  step1[5] = vrshrn_n_s32(t32[0], 14);
  step1[6] = vrshrn_n_s32(t32[1], 14);

  // stage 4
  *io0 = vadd_s16(step1[0], step2[7]);
  *io1 = vadd_s16(step1[1], step1[6]);
  *io2 = vadd_s16(step1[2], step1[5]);
  *io3 = vadd_s16(step1[3], step2[4]);
  *io4 = vsub_s16(step1[3], step2[4]);
  *io5 = vsub_s16(step1[2], step1[5]);
  *io6 = vsub_s16(step1[1], step1[6]);
  *io7 = vsub_s16(step1[0], step2[7]);
}

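// Second pass for the 12-coefficient 8x8 iDCT: the int16x4_t columns produced
// by pass 1 are transposed into four 8-wide registers and run through the
// same butterfly structure.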
static INLINE void idct8x8_12_pass2_bd8(
    const int16x4_t cospis0, const int16x4_t cospisd0, const int16x4_t cospisd1,
    const int16x4_t input0, const int16x4_t input1, const int16x4_t input2,
    const int16x4_t input3, const int16x4_t input4, const int16x4_t input5,
    const int16x4_t input6, const int16x4_t input7, int16x8_t *const output0,
    int16x8_t *const output1, int16x8_t *const output2,
    int16x8_t *const output3, int16x8_t *const output4,
    int16x8_t *const output5, int16x8_t *const output6,
    int16x8_t *const output7) {
  int16x8_t in[4];
  int16x8_t step1[8], step2[8];
  int32x4_t t32[4];
  int16x4_t t16[4];

  transpose_s16_4x8(input0, input1, input2, input3, input4, input5, input6,
                    input7, &in[0], &in[1], &in[2], &in[3]);

  // stage 1
  step1[4] = vqrdmulhq_lane_s16(in[1], cospisd1, 3);
  step1[5] = vqrdmulhq_lane_s16(in[3], cospisd1, 2);
  step1[6] = vqrdmulhq_lane_s16(in[3], cospisd1, 1);
  step1[7] = vqrdmulhq_lane_s16(in[1], cospisd1, 0);

  // stage 2
  step2[1] = vqrdmulhq_lane_s16(in[0], cospisd0, 2);
  step2[2] = vqrdmulhq_lane_s16(in[2], cospisd0, 3);
  step2[3] = vqrdmulhq_lane_s16(in[2], cospisd0, 1);

  step2[4] = vaddq_s16(step1[4], step1[5]);
  step2[5] = vsubq_s16(step1[4], step1[5]);
  step2[6] = vsubq_s16(step1[7], step1[6]);
  step2[7] = vaddq_s16(step1[7], step1[6]);

  // stage 3
  step1[0] = vaddq_s16(step2[1], step2[3]);
  step1[1] = vaddq_s16(step2[1], step2[2]);
  step1[2] = vsubq_s16(step2[1], step2[2]);
  step1[3] = vsubq_s16(step2[1], step2[3]);

  t32[2] = vmull_lane_s16(vget_low_s16(step2[6]), cospis0, 2);
  t32[3] = vmull_lane_s16(vget_high_s16(step2[6]), cospis0, 2);
  t32[0] = vmlsl_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
  t32[1] = vmlsl_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
  t16[0] = vrshrn_n_s32(t32[0], 14);
  t16[1] = vrshrn_n_s32(t32[1], 14);
  t16[2] = vrshrn_n_s32(t32[2], 14);
  t16[3] = vrshrn_n_s32(t32[3], 14);
  step1[5] = vcombine_s16(t16[0], t16[1]);
  step1[6] = vcombine_s16(t16[2], t16[3]);

  // stage 4
  *output0 = vaddq_s16(step1[0], step2[7]);
  *output1 = vaddq_s16(step1[1], step1[6]);
  *output2 = vaddq_s16(step1[2], step1[5]);
  *output3 = vaddq_s16(step1[3], step2[4]);
  *output4 = vsubq_s16(step1[3], step2[4]);
  *output5 = vsubq_s16(step1[2], step1[5]);
  *output6 = vsubq_s16(step1[1], step1[6]);
  *output7 = vsubq_s16(step1[0], step2[7]);
}

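// One full 8x8 iDCT pass for the general case where all 64 coefficients may
// be nonzero; the 8x8 block is transposed before the butterflies, so this is
// called once per direction.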
static INLINE void idct8x8_64_1d_bd8(const int16x4_t cospis0,
                                     const int16x4_t cospis1,
                                     int16x8_t *const io0, int16x8_t *const io1,
                                     int16x8_t *const io2, int16x8_t *const io3,
                                     int16x8_t *const io4, int16x8_t *const io5,
                                     int16x8_t *const io6,
                                     int16x8_t *const io7) {
  int16x4_t input_1l, input_1h, input_3l, input_3h, input_5l, input_5h,
      input_7l, input_7h;
  int16x4_t step1l[4], step1h[4];
  int16x8_t step1[8], step2[8];
  int32x4_t t32[8];
  int16x4_t t16[8];

  transpose_s16_8x8(io0, io1, io2, io3, io4, io5, io6, io7);

  // stage 1
  input_1l = vget_low_s16(*io1);
  input_1h = vget_high_s16(*io1);
  input_3l = vget_low_s16(*io3);
  input_3h = vget_high_s16(*io3);
  input_5l = vget_low_s16(*io5);
  input_5h = vget_high_s16(*io5);
  input_7l = vget_low_s16(*io7);
  input_7h = vget_high_s16(*io7);
  step1l[0] = vget_low_s16(*io0);
  step1h[0] = vget_high_s16(*io0);
  step1l[1] = vget_low_s16(*io2);
  step1h[1] = vget_high_s16(*io2);
  step1l[2] = vget_low_s16(*io4);
  step1h[2] = vget_high_s16(*io4);
  step1l[3] = vget_low_s16(*io6);
  step1h[3] = vget_high_s16(*io6);

  t32[0] = vmull_lane_s16(input_1l, cospis1, 3);
  t32[1] = vmull_lane_s16(input_1h, cospis1, 3);
  t32[2] = vmull_lane_s16(input_3l, cospis1, 2);
  t32[3] = vmull_lane_s16(input_3h, cospis1, 2);
  t32[4] = vmull_lane_s16(input_3l, cospis1, 1);
  t32[5] = vmull_lane_s16(input_3h, cospis1, 1);
  t32[6] = vmull_lane_s16(input_1l, cospis1, 0);
  t32[7] = vmull_lane_s16(input_1h, cospis1, 0);
  t32[0] = vmlsl_lane_s16(t32[0], input_7l, cospis1, 0);
  t32[1] = vmlsl_lane_s16(t32[1], input_7h, cospis1, 0);
  t32[2] = vmlal_lane_s16(t32[2], input_5l, cospis1, 1);
  t32[3] = vmlal_lane_s16(t32[3], input_5h, cospis1, 1);
  t32[4] = vmlsl_lane_s16(t32[4], input_5l, cospis1, 2);
  t32[5] = vmlsl_lane_s16(t32[5], input_5h, cospis1, 2);
  t32[6] = vmlal_lane_s16(t32[6], input_7l, cospis1, 3);
  t32[7] = vmlal_lane_s16(t32[7], input_7h, cospis1, 3);
  t16[0] = vrshrn_n_s32(t32[0], 14);
  t16[1] = vrshrn_n_s32(t32[1], 14);
  t16[2] = vrshrn_n_s32(t32[2], 14);
  t16[3] = vrshrn_n_s32(t32[3], 14);
  t16[4] = vrshrn_n_s32(t32[4], 14);
  t16[5] = vrshrn_n_s32(t32[5], 14);
  t16[6] = vrshrn_n_s32(t32[6], 14);
  t16[7] = vrshrn_n_s32(t32[7], 14);
  step1[4] = vcombine_s16(t16[0], t16[1]);
  step1[5] = vcombine_s16(t16[2], t16[3]);
  step1[6] = vcombine_s16(t16[4], t16[5]);
  step1[7] = vcombine_s16(t16[6], t16[7]);

  // stage 2
  t32[2] = vmull_lane_s16(step1l[0], cospis0, 2);
  t32[3] = vmull_lane_s16(step1h[0], cospis0, 2);
  t32[4] = vmull_lane_s16(step1l[1], cospis0, 3);
  t32[5] = vmull_lane_s16(step1h[1], cospis0, 3);
  t32[6] = vmull_lane_s16(step1l[1], cospis0, 1);
  t32[7] = vmull_lane_s16(step1h[1], cospis0, 1);
  t32[0] = vmlal_lane_s16(t32[2], step1l[2], cospis0, 2);
  t32[1] = vmlal_lane_s16(t32[3], step1h[2], cospis0, 2);
  t32[2] = vmlsl_lane_s16(t32[2], step1l[2], cospis0, 2);
  t32[3] = vmlsl_lane_s16(t32[3], step1h[2], cospis0, 2);
  t32[4] = vmlsl_lane_s16(t32[4], step1l[3], cospis0, 1);
  t32[5] = vmlsl_lane_s16(t32[5], step1h[3], cospis0, 1);
  t32[6] = vmlal_lane_s16(t32[6], step1l[3], cospis0, 3);
  t32[7] = vmlal_lane_s16(t32[7], step1h[3], cospis0, 3);
  t16[0] = vrshrn_n_s32(t32[0], 14);
  t16[1] = vrshrn_n_s32(t32[1], 14);
  t16[2] = vrshrn_n_s32(t32[2], 14);
  t16[3] = vrshrn_n_s32(t32[3], 14);
  t16[4] = vrshrn_n_s32(t32[4], 14);
  t16[5] = vrshrn_n_s32(t32[5], 14);
  t16[6] = vrshrn_n_s32(t32[6], 14);
  t16[7] = vrshrn_n_s32(t32[7], 14);
  step2[0] = vcombine_s16(t16[0], t16[1]);
  step2[1] = vcombine_s16(t16[2], t16[3]);
  step2[2] = vcombine_s16(t16[4], t16[5]);
  step2[3] = vcombine_s16(t16[6], t16[7]);

  step2[4] = vaddq_s16(step1[4], step1[5]);
  step2[5] = vsubq_s16(step1[4], step1[5]);
  step2[6] = vsubq_s16(step1[7], step1[6]);
  step2[7] = vaddq_s16(step1[7], step1[6]);

  // stage 3
  step1[0] = vaddq_s16(step2[0], step2[3]);
  step1[1] = vaddq_s16(step2[1], step2[2]);
  step1[2] = vsubq_s16(step2[1], step2[2]);
  step1[3] = vsubq_s16(step2[0], step2[3]);

  t32[2] = vmull_lane_s16(vget_low_s16(step2[6]), cospis0, 2);
  t32[3] = vmull_lane_s16(vget_high_s16(step2[6]), cospis0, 2);
  t32[0] = vmlsl_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
  t32[1] = vmlsl_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
  t16[0] = vrshrn_n_s32(t32[0], 14);
  t16[1] = vrshrn_n_s32(t32[1], 14);
  t16[2] = vrshrn_n_s32(t32[2], 14);
  t16[3] = vrshrn_n_s32(t32[3], 14);
  step1[5] = vcombine_s16(t16[0], t16[1]);
  step1[6] = vcombine_s16(t16[2], t16[3]);

  // stage 4
  *io0 = vaddq_s16(step1[0], step2[7]);
  *io1 = vaddq_s16(step1[1], step1[6]);
  *io2 = vaddq_s16(step1[2], step1[5]);
  *io3 = vaddq_s16(step1[3], step2[4]);
  *io4 = vsubq_s16(step1[3], step2[4]);
  *io5 = vsubq_s16(step1[2], step1[5]);
  *io6 = vsubq_s16(step1[1], step1[6]);
  *io7 = vsubq_s16(step1[0], step2[7]);
}

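// Narrow the four 32-bit accumulators with rounding (shift right by 14) and
// pack them into two int16x8_t outputs.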
static INLINE void idct16x16_add_wrap_low_8x2(const int32x4_t *const t32,
                                              int16x8_t *const d0,
                                              int16x8_t *const d1) {
  int16x4_t t16[4];

  t16[0] = vrshrn_n_s32(t32[0], 14);
  t16[1] = vrshrn_n_s32(t32[1], 14);
  t16[2] = vrshrn_n_s32(t32[2], 14);
  t16[3] = vrshrn_n_s32(t32[3], 14);
  *d0 = vcombine_s16(t16[0], t16[1]);
  *d1 = vcombine_s16(t16[2], t16[3]);
}

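// Partial butterfly for the cospi_8/cospi_24 pair: t32[0..1] gets
// s0 * cospi_24_64 - s1 * cospi_8_64 and t32[2..3] gets
// s1 * cospi_24_64 + s0 * cospi_8_64 (low/high halves), to be rounded and
// narrowed by the callers below.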
static INLINE void idct_cospi_8_24_q_kernel(const int16x8_t s0,
                                            const int16x8_t s1,
                                            const int16x4_t cospi_0_8_16_24,
                                            int32x4_t *const t32) {
  t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_0_8_16_24, 3);
  t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_0_8_16_24, 3);
  t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_0_8_16_24, 3);
  t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_0_8_16_24, 3);
  t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_0_8_16_24, 1);
  t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_0_8_16_24, 1);
  t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_0_8_16_24, 1);
  t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_0_8_16_24, 1);
}

static INLINE void idct_cospi_8_24_q(const int16x8_t s0, const int16x8_t s1,
                                     const int16x4_t cospi_0_8_16_24,
                                     int16x8_t *const d0, int16x8_t *const d1) {
  int32x4_t t32[4];

  idct_cospi_8_24_q_kernel(s0, s1, cospi_0_8_16_24, t32);
  idct16x16_add_wrap_low_8x2(t32, d0, d1);
}

static INLINE void idct_cospi_8_24_neg_q(const int16x8_t s0, const int16x8_t s1,
                                         const int16x4_t cospi_0_8_16_24,
                                         int16x8_t *const d0,
                                         int16x8_t *const d1) {
  int32x4_t t32[4];

  idct_cospi_8_24_q_kernel(s0, s1, cospi_0_8_16_24, t32);
  t32[2] = vnegq_s32(t32[2]);
  t32[3] = vnegq_s32(t32[3]);
  idct16x16_add_wrap_low_8x2(t32, d0, d1);
}

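// d0 = (s1 - s0) * cospi_16_64 and d1 = (s1 + s0) * cospi_16_64, rounded and
// narrowed by 14.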
static INLINE void idct_cospi_16_16_q(const int16x8_t s0, const int16x8_t s1,
                                      const int16x4_t cospi_0_8_16_24,
                                      int16x8_t *const d0,
                                      int16x8_t *const d1) {
  int32x4_t t32[6];

  t32[4] = vmull_lane_s16(vget_low_s16(s1), cospi_0_8_16_24, 2);
  t32[5] = vmull_lane_s16(vget_high_s16(s1), cospi_0_8_16_24, 2);
  t32[0] = vmlsl_lane_s16(t32[4], vget_low_s16(s0), cospi_0_8_16_24, 2);
  t32[1] = vmlsl_lane_s16(t32[5], vget_high_s16(s0), cospi_0_8_16_24, 2);
  t32[2] = vmlal_lane_s16(t32[4], vget_low_s16(s0), cospi_0_8_16_24, 2);
  t32[3] = vmlal_lane_s16(t32[5], vget_high_s16(s0), cospi_0_8_16_24, 2);
  idct16x16_add_wrap_low_8x2(t32, d0, d1);
}

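// Round res down by 6, add it to one row of eight destination pixels, clamp,
// store, and advance *dest by stride.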
static INLINE void idct16x16_add8x1(int16x8_t res, uint8_t **dest,
                                    const int stride) {
  uint8x8_t d = vld1_u8(*dest);
  uint16x8_t q;

  res = vrshrq_n_s16(res, 6);
  q = vaddw_u8(vreinterpretq_u16_s16(res), d);
  d = vqmovun_s16(vreinterpretq_s16_u16(q));
  vst1_u8(*dest, d);
  *dest += stride;
}

#endif  // VPX_DSP_ARM_IDCT_NEON_H_