]> granicus.if.org Git - libvpx/blob - third_party/libyuv/source/scale_neon64.cc
fb68b67d29c7c8bc1e7b84048824c138ef24aee1
[libvpx] / third_party / libyuv / source / scale_neon64.cc
1 /*
2  *  Copyright 2014 The LibYuv Project Authors. All rights reserved.
3  *
4  *  Use of this source code is governed by a BSD-style license
5  *  that can be found in the LICENSE file in the root of the source
6  *  tree. An additional intellectual property rights grant can be found
7  *  in the file PATENTS. All contributing project authors may
8  *  be found in the AUTHORS file in the root of the source tree.
9  */
10
11 #include "libyuv/scale.h"
12 #include "libyuv/row.h"
13 #include "libyuv/scale_row.h"
14
15 #ifdef __cplusplus
16 namespace libyuv {
17 extern "C" {
18 #endif
19
20 // This module is for GCC Neon armv8 64 bit.
21 #if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
22
23 // Read 32x1 throw away even pixels, and write 16x1.
24 void ScaleRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
25                         uint8* dst, int dst_width) {
26   asm volatile (
27   "1:                                          \n"
28     // load even pixels into v0, odd into v1
29     MEMACCESS(0)
30     "ld2        {v0.16b,v1.16b}, [%0], #32    \n"
31     "subs       %2, %2, #16                    \n"  // 16 processed per loop
32     MEMACCESS(1)
33     "st1        {v1.16b}, [%1], #16            \n"  // store odd pixels
34     "b.gt       1b                             \n"
35   : "+r"(src_ptr),          // %0
36     "+r"(dst),              // %1
37     "+r"(dst_width)         // %2
38   :
39   : "v0", "v1"              // Clobber List
40   );
41 }
42
43 // Read 32x2 average down and write 16x1.
44 void ScaleRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
45                            uint8* dst, int dst_width) {
46   asm volatile (
47     // change the stride to row 2 pointer
48     "add        %1, %1, %0                     \n"
49   "1:                                          \n"
50     MEMACCESS(0)
51     "ld1        {v0.16b,v1.16b}, [%0], #32    \n"  // load row 1 and post inc
52     MEMACCESS(1)
53     "ld1        {v2.16b, v3.16b}, [%1], #32    \n"  // load row 2 and post inc
54     "subs       %3, %3, #16                    \n"  // 16 processed per loop
55     "uaddlp     v0.8h, v0.16b                  \n"  // row 1 add adjacent
56     "uaddlp     v1.8h, v1.16b                  \n"
57     "uadalp     v0.8h, v2.16b                  \n"  // row 2 add adjacent + row1
58     "uadalp     v1.8h, v3.16b                  \n"
59     "rshrn      v0.8b, v0.8h, #2               \n"  // downshift, round and pack
60     "rshrn2     v0.16b, v1.8h, #2              \n"
61     MEMACCESS(2)
62     "st1        {v0.16b}, [%2], #16            \n"
63     "b.gt       1b                             \n"
64   : "+r"(src_ptr),          // %0
65     "+r"(src_stride),       // %1
66     "+r"(dst),              // %2
67     "+r"(dst_width)         // %3
68   :
69   : "v0", "v1", "v2", "v3"     // Clobber List
70   );
71 }
72
// Point-samples 32 source pixels down to 8: the ld4 de-interleave puts
// every 4th pixel in a separate register and only lane set v2
// (pixel index 2 of each group of 4) is stored.
// src_stride is unused (single-row point sampling).
void ScaleRowDown4_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
                        uint8* dst_ptr, int dst_width) {
  asm volatile (
  "1:                                          \n"
    MEMACCESS(0)
    "ld4     {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32          \n"  // src line 0
    "subs       %2, %2, #8                     \n"  // 8 processed per loop
    MEMACCESS(1)
    "st1     {v2.8b}, [%1], #8                 \n"  // keep every 4th pixel
    "b.gt       1b                             \n"
  : "+r"(src_ptr),          // %0
    "+r"(dst_ptr),          // %1
    "+r"(dst_width)         // %2
  :
  : "v0", "v1", "v2", "v3", "memory", "cc"
  );
}
90
// Box-filters 4x4 blocks down to single pixels: 16 bytes are read from
// each of 4 rows, summed pairwise twice (16 bytes per output), then
// rounded and divided by 16. Produces 4 output pixels per iteration.
void ScaleRowDown4Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
                           uint8* dst_ptr, int dst_width) {
  const uint8* src_ptr1 = src_ptr + src_stride;      // row 1
  const uint8* src_ptr2 = src_ptr + src_stride * 2;  // row 2
  const uint8* src_ptr3 = src_ptr + src_stride * 3;  // row 3
asm volatile (
  "1:                                          \n"
    MEMACCESS(0)
    "ld1     {v0.16b}, [%0], #16               \n"   // load up 16x4
    MEMACCESS(3)
    "ld1     {v1.16b}, [%2], #16               \n"
    MEMACCESS(4)
    "ld1     {v2.16b}, [%3], #16               \n"
    MEMACCESS(5)
    "ld1     {v3.16b}, [%4], #16               \n"
    "subs    %5, %5, #4                        \n"
    "uaddlp  v0.8h, v0.16b                     \n"   // row 0: add adjacent pairs
    "uadalp  v0.8h, v1.16b                     \n"   // accumulate rows 1..3
    "uadalp  v0.8h, v2.16b                     \n"
    "uadalp  v0.8h, v3.16b                     \n"
    "addp    v0.8h, v0.8h, v0.8h               \n"   // pairwise add -> sums of 16
    "rshrn   v0.8b, v0.8h, #4                  \n"   // divide by 16 w/rounding
    MEMACCESS(1)
    "st1    {v0.s}[0], [%1], #4                \n"   // store 4 output pixels
    "b.gt       1b                             \n"
  : "+r"(src_ptr),   // %0
    "+r"(dst_ptr),   // %1
    "+r"(src_ptr1),  // %2
    "+r"(src_ptr2),  // %3
    "+r"(src_ptr3),  // %4
    "+r"(dst_width)  // %5
  :
  : "v0", "v1", "v2", "v3", "memory", "cc"
  );
}
126
// Down scale from 4 to 3 pixels. Use the neon multilane read/write
// to load up the every 4th pixel into a 4 different registers.
// Point samples 32 pixels to 24 pixels.
// src_stride is unused (single-row point sampling).
void ScaleRowDown34_NEON(const uint8* src_ptr,
                         ptrdiff_t src_stride,
                         uint8* dst_ptr, int dst_width) {
  asm volatile (
  "1:                                                  \n"
    MEMACCESS(0)
    "ld4       {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32                \n"  // src line 0
    "subs      %2, %2, #24                             \n"
    // Copy v3 into v2 so the st3 below writes pixels 0, 1 and 3 of
    // each group of 4 (pixel 2 is the one discarded).
    "orr       v2.16b, v3.16b, v3.16b                  \n"  // order v0, v1, v2
    MEMACCESS(1)
    "st3       {v0.8b,v1.8b,v2.8b}, [%1], #24                \n"
    "b.gt      1b                                      \n"
  : "+r"(src_ptr),          // %0
    "+r"(dst_ptr),          // %1
    "+r"(dst_width)         // %2
  :
  : "v0", "v1", "v2", "v3", "memory", "cc"
  );
}
149
// Down scale 32 pixels to 24 with box filtering, vertically weighted
// 3:1 toward src line 0 (the variant used for the output row nearer
// line 0). Rows are blended first, then 4 pixels are filtered to 3.
void ScaleRowDown34_0_Box_NEON(const uint8* src_ptr,
                               ptrdiff_t src_stride,
                               uint8* dst_ptr, int dst_width) {
  asm volatile (
    "movi      v20.8b, #3                              \n"  // constant multiplier
    "add       %3, %3, %0                              \n"  // %3 = src line 1
  "1:                                                  \n"
    MEMACCESS(0)
    "ld4       {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32                \n"  // src line 0
    MEMACCESS(3)
    "ld4       {v4.8b,v5.8b,v6.8b,v7.8b}, [%3], #32                \n"  // src line 1
    "subs         %2, %2, #24                          \n"

    // filter src line 0 with src line 1
    // expand chars to shorts to allow for room
    // when adding lines together
    "ushll     v16.8h, v4.8b, #0                       \n"
    "ushll     v17.8h, v5.8b, #0                       \n"
    "ushll     v18.8h, v6.8b, #0                       \n"
    "ushll     v19.8h, v7.8b, #0                       \n"

    // 3 * line_0 + line_1
    "umlal     v16.8h, v0.8b, v20.8b                   \n"
    "umlal     v17.8h, v1.8b, v20.8b                   \n"
    "umlal     v18.8h, v2.8b, v20.8b                   \n"
    "umlal     v19.8h, v3.8b, v20.8b                   \n"

    // (3 * line_0 + line_1) >> 2
    "uqrshrn   v0.8b, v16.8h, #2                       \n"
    "uqrshrn   v1.8b, v17.8h, #2                       \n"
    "uqrshrn   v2.8b, v18.8h, #2                       \n"
    "uqrshrn   v3.8b, v19.8h, #2                       \n"

    // a0 = (src[0] * 3 + s[1] * 1) >> 2
    "ushll     v16.8h, v1.8b, #0                       \n"
    "umlal     v16.8h, v0.8b, v20.8b                   \n"
    "uqrshrn   v0.8b, v16.8h, #2                       \n"

    // a1 = (src[1] * 1 + s[2] * 1) >> 1
    "urhadd    v1.8b, v1.8b, v2.8b                     \n"

    // a2 = (src[2] * 1 + s[3] * 3) >> 2
    "ushll     v16.8h, v2.8b, #0                       \n"
    "umlal     v16.8h, v3.8b, v20.8b                   \n"
    "uqrshrn   v2.8b, v16.8h, #2                       \n"

    MEMACCESS(1)
    "st3       {v0.8b,v1.8b,v2.8b}, [%1], #24                \n"

    "b.gt      1b                                      \n"
  : "+r"(src_ptr),          // %0
    "+r"(dst_ptr),          // %1
    "+r"(dst_width),        // %2
    "+r"(src_stride)        // %3
  :
  : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19",
    "v20", "memory", "cc"
  );
}
209
// Down scale 32 pixels to 24 with box filtering, vertically weighted
// 1:1 (simple average of the two source rows, then the same 4-to-3
// horizontal filter as ScaleRowDown34_0_Box_NEON).
void ScaleRowDown34_1_Box_NEON(const uint8* src_ptr,
                               ptrdiff_t src_stride,
                               uint8* dst_ptr, int dst_width) {
  asm volatile (
    "movi      v20.8b, #3                              \n"  // constant multiplier
    "add       %3, %3, %0                              \n"  // %3 = src line 1
  "1:                                                  \n"
    MEMACCESS(0)
    "ld4       {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32                \n"  // src line 0
    MEMACCESS(3)
    "ld4       {v4.8b,v5.8b,v6.8b,v7.8b}, [%3], #32                \n"  // src line 1
    "subs         %2, %2, #24                          \n"
    // average src line 0 with src line 1
    "urhadd    v0.8b, v0.8b, v4.8b                     \n"
    "urhadd    v1.8b, v1.8b, v5.8b                     \n"
    "urhadd    v2.8b, v2.8b, v6.8b                     \n"
    "urhadd    v3.8b, v3.8b, v7.8b                     \n"

    // a0 = (src[0] * 3 + s[1] * 1) >> 2
    "ushll     v4.8h, v1.8b, #0                        \n"
    "umlal     v4.8h, v0.8b, v20.8b                    \n"
    "uqrshrn   v0.8b, v4.8h, #2                        \n"

    // a1 = (src[1] * 1 + s[2] * 1) >> 1
    "urhadd    v1.8b, v1.8b, v2.8b                     \n"

    // a2 = (src[2] * 1 + s[3] * 3) >> 2
    "ushll     v4.8h, v2.8b, #0                        \n"
    "umlal     v4.8h, v3.8b, v20.8b                    \n"
    "uqrshrn   v2.8b, v4.8h, #2                        \n"

    MEMACCESS(1)
    "st3       {v0.8b,v1.8b,v2.8b}, [%1], #24                \n"
    "b.gt      1b                                      \n"
  : "+r"(src_ptr),          // %0
    "+r"(dst_ptr),          // %1
    "+r"(dst_width),        // %2
    "+r"(src_stride)        // %3
  :
  : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "memory", "cc"
  );
}
252
// Shuffle mask for ScaleRowDown38_NEON: tbl indices selecting 12 of
// every 32 input bytes (3/8 point sampling); last 4 lanes are unused.
static uvec8 kShuf38 =
  { 0, 3, 6, 8, 11, 14, 16, 19, 22, 24, 27, 30, 0, 0, 0, 0 };
// Shuffle mask for the 38_2/38_3 box filters: interleaves lanes from
// the three partial-result registers of a 3-register tbl (indices
// 0-15 = v0, 16-31 = v1, 32-47 = v2) into output pixel order.
static uvec8 kShuf38_2 =
  { 0, 16, 32, 2, 18, 33, 4, 20, 34, 6, 22, 35, 0, 0, 0, 0 };
// sqrdmulh computes roughly (a * b * 2) >> 16, so multiplying a sum by
// 65536/12 divides it by 6 (for 6-pixel box sums).
static vec16 kMult38_Div6 =
  { 65536 / 12, 65536 / 12, 65536 / 12, 65536 / 12,
    65536 / 12, 65536 / 12, 65536 / 12, 65536 / 12 };
// Likewise 65536/18 divides a 9-pixel box sum by 9.
static vec16 kMult38_Div9 =
  { 65536 / 18, 65536 / 18, 65536 / 18, 65536 / 18,
    65536 / 18, 65536 / 18, 65536 / 18, 65536 / 18 };
263
// 32 -> 12
// Point-samples via a table lookup with kShuf38; the 12 result bytes
// are stored as an 8-byte st1 followed by a 4-byte lane store.
// src_stride is unused (single-row point sampling).
void ScaleRowDown38_NEON(const uint8* src_ptr,
                         ptrdiff_t src_stride,
                         uint8* dst_ptr, int dst_width) {
  asm volatile (
    MEMACCESS(3)
    "ld1       {v3.16b}, [%3]                          \n"  // load shuffle mask
  "1:                                                  \n"
    MEMACCESS(0)
    "ld1       {v0.16b,v1.16b}, [%0], #32             \n"
    "subs      %2, %2, #12                             \n"
    "tbl       v2.16b, {v0.16b,v1.16b}, v3.16b        \n"
    MEMACCESS(1)
    "st1       {v2.8b}, [%1], #8                       \n"  // store 8 + 4 = 12 pixels
    MEMACCESS(1)
    "st1       {v2.s}[2], [%1], #4                     \n"
    "b.gt      1b                                      \n"
  : "+r"(src_ptr),          // %0
    "+r"(dst_ptr),          // %1
    "+r"(dst_width)         // %2
  : "r"(&kShuf38)           // %3
  : "v0", "v1", "v2", "v3", "memory", "cc"
  );
}
288
// 32x3 -> 12x1
// Box-filters 3 source rows down to one 3/8-width row: most output
// pixels average a 3x3 block (divide by 9 via kMult38_Div9); every 4th
// output averages only a 2x3 block (divide by 6 via kMult38_Div6).
void OMITFP ScaleRowDown38_3_Box_NEON(const uint8* src_ptr,
                                      ptrdiff_t src_stride,
                                      uint8* dst_ptr, int dst_width) {
  const uint8* src_ptr1 = src_ptr + src_stride * 2;  // row 2
  ptrdiff_t tmp_src_stride = src_stride;             // becomes row 1 pointer

  asm volatile (
    MEMACCESS(5)
    "ld1       {v29.8h}, [%5]                          \n"  // v29 = 1/6 multiplier
    MEMACCESS(6)
    "ld1       {v30.16b}, [%6]                         \n"  // v30 = shuffle mask
    MEMACCESS(7)
    "ld1       {v31.8h}, [%7]                          \n"  // v31 = 1/9 multiplier
    "add       %2, %2, %0                              \n"  // %2 = src row 1
  "1:                                                  \n"

    // 00 40 01 41 02 42 03 43
    // 10 50 11 51 12 52 13 53
    // 20 60 21 61 22 62 23 63
    // 30 70 31 71 32 72 33 73
    // NOTE(review): the MEMACCESS(3)/(4) annotation indices below do not
    // match operand numbers %2/%3; harmless where MEMACCESS expands to
    // nothing -- TODO confirm against row.h.
    MEMACCESS(0)
    "ld4       {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32                \n"
    MEMACCESS(3)
    "ld4       {v4.8b,v5.8b,v6.8b,v7.8b}, [%2], #32                \n"
    MEMACCESS(4)
    "ld4       {v16.8b,v17.8b,v18.8b,v19.8b}, [%3], #32              \n"
    "subs      %4, %4, #12                             \n"

    // Shuffle the input data around to get align the data
    //  so adjacent data can be added. 0,1 - 2,3 - 4,5 - 6,7
    // 00 10 01 11 02 12 03 13
    // 40 50 41 51 42 52 43 53
    "trn1      v20.8b, v0.8b, v1.8b                    \n"
    "trn2      v21.8b, v0.8b, v1.8b                    \n"
    "trn1      v22.8b, v4.8b, v5.8b                    \n"
    "trn2      v23.8b, v4.8b, v5.8b                    \n"
    "trn1      v24.8b, v16.8b, v17.8b                  \n"
    "trn2      v25.8b, v16.8b, v17.8b                  \n"

    // 20 30 21 31 22 32 23 33
    // 60 70 61 71 62 72 63 73
    "trn1      v0.8b, v2.8b, v3.8b                     \n"
    "trn2      v1.8b, v2.8b, v3.8b                     \n"
    "trn1      v4.8b, v6.8b, v7.8b                     \n"
    "trn2      v5.8b, v6.8b, v7.8b                     \n"
    "trn1      v16.8b, v18.8b, v19.8b                  \n"
    "trn2      v17.8b, v18.8b, v19.8b                  \n"

    // 00+10 01+11 02+12 03+13
    // 40+50 41+51 42+52 43+53
    "uaddlp    v20.4h, v20.8b                          \n"
    "uaddlp    v21.4h, v21.8b                          \n"
    "uaddlp    v22.4h, v22.8b                          \n"
    "uaddlp    v23.4h, v23.8b                          \n"
    "uaddlp    v24.4h, v24.8b                          \n"
    "uaddlp    v25.4h, v25.8b                          \n"

    // 60+70 61+71 62+72 63+73
    "uaddlp    v1.4h, v1.8b                            \n"
    "uaddlp    v5.4h, v5.8b                            \n"
    "uaddlp    v17.4h, v17.8b                          \n"

    // combine source lines
    "add       v20.4h, v20.4h, v22.4h                  \n"
    "add       v21.4h, v21.4h, v23.4h                  \n"
    "add       v20.4h, v20.4h, v24.4h                  \n"
    "add       v21.4h, v21.4h, v25.4h                  \n"
    "add       v2.4h, v1.4h, v5.4h                     \n"
    "add       v2.4h, v2.4h, v17.4h                    \n"

    // dst_ptr[3] = (s[6 + st * 0] + s[7 + st * 0]
    //             + s[6 + st * 1] + s[7 + st * 1]
    //             + s[6 + st * 2] + s[7 + st * 2]) / 6
    "sqrdmulh  v2.8h, v2.8h, v29.8h                    \n"
    "xtn       v2.8b,  v2.8h                           \n"

    // Shuffle 2,3 reg around so that 2 can be added to the
    //  0,1 reg and 3 can be added to the 4,5 reg. This
    //  requires expanding from u8 to u16 as the 0,1 and 4,5
    //  registers are already expanded. Then do transposes
    //  to get aligned.
    // xx 20 xx 30 xx 21 xx 31 xx 22 xx 32 xx 23 xx 33
    "ushll     v16.8h, v16.8b, #0                      \n"
    "uaddl     v0.8h, v0.8b, v4.8b                     \n"

    // combine source lines
    "add       v0.8h, v0.8h, v16.8h                    \n"

    // xx 20 xx 21 xx 22 xx 23
    // xx 30 xx 31 xx 32 xx 33
    "trn1      v1.8h, v0.8h, v0.8h                     \n"
    "trn2      v4.8h, v0.8h, v0.8h                     \n"
    "xtn       v0.4h, v1.4s                            \n"
    "xtn       v4.4h, v4.4s                            \n"

    // 0+1+2, 3+4+5
    "add       v20.8h, v20.8h, v0.8h                   \n"
    "add       v21.8h, v21.8h, v4.8h                   \n"

    // Need to divide, but can't downshift as the value
    //  isn't a power of 2. So multiply by 65536 / n
    //  and take the upper 16 bits.
    "sqrdmulh  v0.8h, v20.8h, v31.8h                   \n"
    "sqrdmulh  v1.8h, v21.8h, v31.8h                   \n"

    // Align for table lookup, vtbl requires registers to
    //  be adjacent
    "tbl       v3.16b, {v0.16b, v1.16b, v2.16b}, v30.16b \n"

    MEMACCESS(1)
    "st1       {v3.8b}, [%1], #8                       \n"
    MEMACCESS(1)
    "st1       {v3.s}[2], [%1], #4                     \n"
    "b.gt      1b                                      \n"
  : "+r"(src_ptr),          // %0
    "+r"(dst_ptr),          // %1
    "+r"(tmp_src_stride),   // %2
    "+r"(src_ptr1),         // %3
    "+r"(dst_width)         // %4
  : "r"(&kMult38_Div6),     // %5
    "r"(&kShuf38_2),        // %6
    "r"(&kMult38_Div9)      // %7
  : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17",
    "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v29",
    "v30", "v31", "memory", "cc"
  );
}
417
// 32x2 -> 12x1
// Box-filters 2 source rows down to one 3/8-width row: most outputs
// average a 3x2 block (divide by 6 via kMult38_Div6); every 4th output
// averages only a 2x2 block (divide by 4 via uqrshrn #2).
void ScaleRowDown38_2_Box_NEON(const uint8* src_ptr,
                               ptrdiff_t src_stride,
                               uint8* dst_ptr, int dst_width) {
  // TODO(fbarchard): use src_stride directly for clang 3.5+.
  ptrdiff_t tmp_src_stride = src_stride;
  asm volatile (
    MEMACCESS(4)
    "ld1       {v30.8h}, [%4]                          \n"  // v30 = 1/6 multiplier
    MEMACCESS(5)
    "ld1       {v31.16b}, [%5]                         \n"  // v31 = shuffle mask
    "add       %2, %2, %0                              \n"  // %2 = src row 1
  "1:                                                  \n"

    // 00 40 01 41 02 42 03 43
    // 10 50 11 51 12 52 13 53
    // 20 60 21 61 22 62 23 63
    // 30 70 31 71 32 72 33 73
    MEMACCESS(0)
    "ld4       {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32                \n"
    MEMACCESS(3)
    "ld4       {v4.8b,v5.8b,v6.8b,v7.8b}, [%2], #32                \n"
    "subs      %3, %3, #12                             \n"

    // Shuffle the input data around to get align the data
    //  so adjacent data can be added. 0,1 - 2,3 - 4,5 - 6,7
    // 00 10 01 11 02 12 03 13
    // 40 50 41 51 42 52 43 53
    "trn1      v16.8b, v0.8b, v1.8b                    \n"
    "trn2      v17.8b, v0.8b, v1.8b                    \n"
    "trn1      v18.8b, v4.8b, v5.8b                    \n"
    "trn2      v19.8b, v4.8b, v5.8b                    \n"

    // 20 30 21 31 22 32 23 33
    // 60 70 61 71 62 72 63 73
    "trn1      v0.8b, v2.8b, v3.8b                     \n"
    "trn2      v1.8b, v2.8b, v3.8b                     \n"
    "trn1      v4.8b, v6.8b, v7.8b                     \n"
    "trn2      v5.8b, v6.8b, v7.8b                     \n"

    // 00+10 01+11 02+12 03+13
    // 40+50 41+51 42+52 43+53
    "uaddlp    v16.4h, v16.8b                          \n"
    "uaddlp    v17.4h, v17.8b                          \n"
    "uaddlp    v18.4h, v18.8b                          \n"
    "uaddlp    v19.4h, v19.8b                          \n"

    // 60+70 61+71 62+72 63+73
    "uaddlp    v1.4h, v1.8b                            \n"
    "uaddlp    v5.4h, v5.8b                            \n"

    // combine source lines
    "add       v16.4h, v16.4h, v18.4h                  \n"
    "add       v17.4h, v17.4h, v19.4h                  \n"
    "add       v2.4h, v1.4h, v5.4h                     \n"

    // dst_ptr[3] = (s[6] + s[7] + s[6+st] + s[7+st]) / 4
    "uqrshrn   v2.8b, v2.8h, #2                        \n"

    // Shuffle 2,3 reg around so that 2 can be added to the
    //  0,1 reg and 3 can be added to the 4,5 reg. This
    //  requires expanding from u8 to u16 as the 0,1 and 4,5
    //  registers are already expanded. Then do transposes
    //  to get aligned.
    // xx 20 xx 30 xx 21 xx 31 xx 22 xx 32 xx 23 xx 33

    // combine source lines
    "uaddl     v0.8h, v0.8b, v4.8b                     \n"

    // xx 20 xx 21 xx 22 xx 23
    // xx 30 xx 31 xx 32 xx 33
    "trn1      v1.8h, v0.8h, v0.8h                     \n"
    "trn2      v4.8h, v0.8h, v0.8h                     \n"
    "xtn       v0.4h, v1.4s                            \n"
    "xtn       v4.4h, v4.4s                            \n"

    // 0+1+2, 3+4+5
    "add       v16.8h, v16.8h, v0.8h                   \n"
    "add       v17.8h, v17.8h, v4.8h                   \n"

    // Need to divide, but can't downshift as the value
    //  isn't a power of 2. So multiply by 65536 / n
    //  and take the upper 16 bits.
    "sqrdmulh  v0.8h, v16.8h, v30.8h                   \n"
    "sqrdmulh  v1.8h, v17.8h, v30.8h                   \n"

    // Align for table lookup, vtbl requires registers to
    //  be adjacent

    "tbl       v3.16b, {v0.16b, v1.16b, v2.16b}, v31.16b \n"

    MEMACCESS(1)
    "st1       {v3.8b}, [%1], #8                       \n"
    MEMACCESS(1)
    "st1       {v3.s}[2], [%1], #4                     \n"
    "b.gt      1b                                      \n"
  : "+r"(src_ptr),         // %0
    "+r"(dst_ptr),         // %1
    "+r"(tmp_src_stride),  // %2
    "+r"(dst_width)        // %3
  : "r"(&kMult38_Div6),    // %4
    "r"(&kShuf38_2)        // %5
  : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17",
    "v18", "v19", "v30", "v31", "memory", "cc"
  );
}
524
// 16x2 -> 16x1
// Blends two source rows into one output row: row 1 is weighted by
// source_y_fraction/256 and row 0 by (256 - source_y_fraction)/256.
// Fractions 0, 64, 128 and 192 take specialized fast paths.
void ScaleFilterRows_NEON(uint8* dst_ptr,
                          const uint8* src_ptr, ptrdiff_t src_stride,
                          int dst_width, int source_y_fraction) {
    int y_fraction = 256 - source_y_fraction;  // weight of row 0
  asm volatile (
    "cmp          %4, #0                       \n"
    "b.eq         100f                         \n"  // 0: copy row 0 only
    "add          %2, %2, %1                   \n"  // %2 = pointer to row 1
    "cmp          %4, #64                      \n"
    "b.eq         75f                          \n"  // 64: 75% row 0 / 25% row 1
    "cmp          %4, #128                     \n"
    "b.eq         50f                          \n"  // 128: 50/50 average
    "cmp          %4, #192                     \n"
    "b.eq         25f                          \n"  // 192: 25% row 0 / 75% row 1

    "dup          v5.8b, %w4                   \n"  // weight of row 1
    "dup          v4.8b, %w5                   \n"  // weight of row 0
    // General purpose row blend.
  "1:                                          \n"
    MEMACCESS(1)
    "ld1          {v0.16b}, [%1], #16          \n"
    MEMACCESS(2)
    "ld1          {v1.16b}, [%2], #16          \n"
    "subs         %3, %3, #16                  \n"  // 16 pixels per loop
    "umull        v6.8h, v0.8b, v4.8b          \n"
    "umull2       v7.8h, v0.16b, v4.16b        \n"
    "umlal        v6.8h, v1.8b, v5.8b          \n"
    "umlal2       v7.8h, v1.16b, v5.16b        \n"
    "rshrn        v0.8b, v6.8h, #8             \n"  // /256 with rounding
    "rshrn2       v0.16b, v7.8h, #8            \n"
    MEMACCESS(0)
    "st1          {v0.16b}, [%0], #16          \n"
    "b.gt         1b                           \n"
    "b            99f                          \n"

    // Blend 25 / 75.
  "25:                                         \n"
    MEMACCESS(1)
    "ld1          {v0.16b}, [%1], #16          \n"
    MEMACCESS(2)
    "ld1          {v1.16b}, [%2], #16          \n"
    "subs         %3, %3, #16                  \n"
    // Averaging twice against row 1 yields ~(row0 + 3*row1) / 4.
    "urhadd       v0.16b, v0.16b, v1.16b       \n"
    "urhadd       v0.16b, v0.16b, v1.16b       \n"
    MEMACCESS(0)
    "st1          {v0.16b}, [%0], #16          \n"
    "b.gt         25b                          \n"
    "b            99f                          \n"

    // Blend 50 / 50.
  "50:                                         \n"
    MEMACCESS(1)
    "ld1          {v0.16b}, [%1], #16          \n"
    MEMACCESS(2)
    "ld1          {v1.16b}, [%2], #16          \n"
    "subs         %3, %3, #16                  \n"
    "urhadd       v0.16b, v0.16b, v1.16b       \n"
    MEMACCESS(0)
    "st1          {v0.16b}, [%0], #16          \n"
    "b.gt         50b                          \n"
    "b            99f                          \n"

    // Blend 75 / 25.
  "75:                                         \n"
    MEMACCESS(1)
    "ld1          {v1.16b}, [%1], #16          \n"  // rows swapped vs 25/75 path
    MEMACCESS(2)
    "ld1          {v0.16b}, [%2], #16          \n"
    "subs         %3, %3, #16                  \n"
    "urhadd       v0.16b, v0.16b, v1.16b       \n"
    "urhadd       v0.16b, v0.16b, v1.16b       \n"
    MEMACCESS(0)
    "st1          {v0.16b}, [%0], #16          \n"
    "b.gt         75b                          \n"
    "b            99f                          \n"

    // Blend 100 / 0 - Copy row unchanged.
  "100:                                        \n"
    MEMACCESS(1)
    "ld1          {v0.16b}, [%1], #16          \n"
    "subs         %3, %3, #16                  \n"
    MEMACCESS(0)
    "st1          {v0.16b}, [%0], #16          \n"
    "b.gt         100b                         \n"

  "99:                                         \n"
    // NOTE(review): stores one extra byte, a copy of the last output
    // pixel, at dst_ptr[dst_width]; callers presumably allocate room
    // for it (used by horizontal filtering) -- confirm at call sites.
    MEMACCESS(0)
    "st1          {v0.b}[15], [%0]             \n"
  : "+r"(dst_ptr),          // %0
    "+r"(src_ptr),          // %1
    "+r"(src_stride),       // %2
    "+r"(dst_width),        // %3
    "+r"(source_y_fraction),// %4
    "+r"(y_fraction)        // %5
  :
  : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory", "cc"
  );
}
624
// Point-samples 8 ARGB (32-bit) pixels per loop: the ld2 .4s
// de-interleave separates even/odd pixels and only the odd ones are
// stored. src_stride is unused (single-row point sampling).
void ScaleARGBRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
                            uint8* dst, int dst_width) {
  asm volatile (
  "1:                                          \n"
    // load even pixels into q0, odd into q1
    MEMACCESS (0)
    "ld2        {v0.4s, v1.4s}, [%0], #32      \n"
    MEMACCESS (0)
    "ld2        {v2.4s, v3.4s}, [%0], #32      \n"
    "subs       %2, %2, #8                     \n"  // 8 processed per loop
    MEMACCESS (1)
    "st1        {v1.16b}, [%1], #16            \n"  // store odd pixels
    MEMACCESS (1)
    "st1        {v3.16b}, [%1], #16            \n"
    "b.gt       1b                             \n"
  : "+r" (src_ptr),          // %0
    "+r" (dst),              // %1
    "+r" (dst_width)         // %2
  :
  : "memory", "cc", "v0", "v1", "v2", "v3"  // Clobber List
  );
}
647
// Box-filters 2x2 blocks of ARGB pixels: each channel (B,G,R,A) is
// summed across the 2x2 block, then rounded and divided by 4.
// Produces 8 output ARGB pixels per loop.
void ScaleARGBRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
                               uint8* dst, int dst_width) {
  asm volatile (
    // change the stride to row 2 pointer
    "add        %1, %1, %0                     \n"
  "1:                                          \n"
    MEMACCESS (0)
    "ld4        {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64   \n"  // load 8 ARGB pixels.
    "subs       %3, %3, #8                     \n"  // 8 processed per loop.
    "uaddlp     v0.8h, v0.16b                  \n"  // B 16 bytes -> 8 shorts.
    "uaddlp     v1.8h, v1.16b                  \n"  // G 16 bytes -> 8 shorts.
    "uaddlp     v2.8h, v2.16b                  \n"  // R 16 bytes -> 8 shorts.
    "uaddlp     v3.8h, v3.16b                  \n"  // A 16 bytes -> 8 shorts.
    MEMACCESS (1)
    "ld4        {v16.16b,v17.16b,v18.16b,v19.16b}, [%1], #64 \n"  // load 8 more ARGB pixels.
    "uadalp     v0.8h, v16.16b                 \n"  // B 16 bytes -> 8 shorts.
    "uadalp     v1.8h, v17.16b                 \n"  // G 16 bytes -> 8 shorts.
    "uadalp     v2.8h, v18.16b                 \n"  // R 16 bytes -> 8 shorts.
    "uadalp     v3.8h, v19.16b                 \n"  // A 16 bytes -> 8 shorts.
    "rshrn      v0.8b, v0.8h, #2               \n"  // downshift, round and pack
    "rshrn      v1.8b, v1.8h, #2               \n"
    "rshrn      v2.8b, v2.8h, #2               \n"
    "rshrn      v3.8b, v3.8h, #2               \n"
    MEMACCESS (2)
    "st4        {v0.8b,v1.8b,v2.8b,v3.8b}, [%2], #32     \n"
    "b.gt       1b                             \n"
  : "+r" (src_ptr),          // %0
    "+r" (src_stride),       // %1
    "+r" (dst),              // %2
    "+r" (dst_width)         // %3
  :
  : "memory", "cc", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19"
  );
}
682
// Reads 4 pixels at a time.
// Alignment requirement: src_argb 4 byte aligned.
// Point-samples ARGB pixels with an arbitrary horizontal step:
// gathers one 32-bit pixel every src_stepx pixels into the four lanes
// of v0, then stores them contiguously. src_stride is unused.
void ScaleARGBRowDownEven_NEON(const uint8* src_argb,  ptrdiff_t src_stride,
                               int src_stepx, uint8* dst_argb, int dst_width) {
  asm volatile (
  "1:                                          \n"
    MEMACCESS(0)
    "ld1        {v0.s}[0], [%0], %3            \n"
    MEMACCESS(0)
    "ld1        {v0.s}[1], [%0], %3            \n"
    MEMACCESS(0)
    "ld1        {v0.s}[2], [%0], %3            \n"
    MEMACCESS(0)
    "ld1        {v0.s}[3], [%0], %3            \n"
    "subs       %2, %2, #4                     \n"  // 4 pixels per loop.
    MEMACCESS(1)
    "st1        {v0.16b}, [%1], #16            \n"
    "b.gt       1b                             \n"
  : "+r"(src_argb),    // %0
    "+r"(dst_argb),    // %1
    "+r"(dst_width)    // %2
  : "r"(static_cast<ptrdiff_t>(src_stepx * 4)) // %3 = byte step (64-bit for post-index)
  : "memory", "cc", "v0"
  );
}
708
709 // Reads 4 pixels at a time.
710 // Alignment requirement: src_argb 4 byte aligned.
711 // TODO, might be worth another optimization pass in future.
712 // It could be upgraded to 8 pixels at a time to start with.
713 void ScaleARGBRowDownEvenBox_NEON(const uint8* src_argb, ptrdiff_t src_stride,
714                                   int src_stepx,
715                                   uint8* dst_argb, int dst_width) {
716   asm volatile (
717     "add        %1, %1, %0                     \n"
718   "1:                                          \n"
719     MEMACCESS(0)
720     "ld1     {v0.8b}, [%0], %4                 \n"  // Read 4 2x2 blocks -> 2x1
721     MEMACCESS(1)
722     "ld1     {v1.8b}, [%1], %4                 \n"
723     MEMACCESS(0)
724     "ld1     {v2.8b}, [%0], %4                 \n"
725     MEMACCESS(1)
726     "ld1     {v3.8b}, [%1], %4                 \n"
727     MEMACCESS(0)
728     "ld1     {v4.8b}, [%0], %4                 \n"
729     MEMACCESS(1)
730     "ld1     {v5.8b}, [%1], %4                 \n"
731     MEMACCESS(0)
732     "ld1     {v6.8b}, [%0], %4                 \n"
733     MEMACCESS(1)
734     "ld1     {v7.8b}, [%1], %4                 \n"
735     "uaddl   v0.8h, v0.8b, v1.8b               \n"
736     "uaddl   v2.8h, v2.8b, v3.8b               \n"
737     "uaddl   v4.8h, v4.8b, v5.8b               \n"
738     "uaddl   v6.8h, v6.8b, v7.8b               \n"
739     "mov     v16.d[1], v0.d[1]                 \n"  // ab_cd -> ac_bd
740     "mov     v0.d[1], v2.d[0]                  \n"
741     "mov     v2.d[0], v16.d[1]                 \n"
742     "mov     v16.d[1], v4.d[1]                 \n"  // ef_gh -> eg_fh
743     "mov     v4.d[1], v6.d[0]                  \n"
744     "mov     v6.d[0], v16.d[1]                 \n"
745     "add     v0.8h, v0.8h, v2.8h               \n"  // (a+b)_(c+d)
746     "add     v4.8h, v4.8h, v6.8h               \n"  // (e+f)_(g+h)
747     "rshrn   v0.8b, v0.8h, #2                  \n"  // first 2 pixels.
748     "rshrn2  v0.16b, v4.8h, #2                 \n"  // next 2 pixels.
749     "subs       %3, %3, #4                     \n"  // 4 pixels per loop.
750     MEMACCESS(2)
751     "st1     {v0.16b}, [%2], #16               \n"
752     "b.gt       1b                             \n"
753   : "+r"(src_argb),    // %0
754     "+r"(src_stride),  // %1
755     "+r"(dst_argb),    // %2
756     "+r"(dst_width)    // %3
757   : "r"(src_stepx * 4) // %4
758   : "memory", "cc", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16"
759   );
760 }
761 #endif  // !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
762
763 #ifdef __cplusplus
764 }  // extern "C"
765 }  // namespace libyuv
766 #endif