/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <immintrin.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/convolve.h"
#include "vpx_ports/mem.h"

// filters for 16_h8 and 16_v8
DECLARE_ALIGNED(32, static const uint8_t, filt1_global_avx2[32]) = {
  0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8,
  0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
};

DECLARE_ALIGNED(32, static const uint8_t, filt2_global_avx2[32]) = {
  2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10,
  2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
};

DECLARE_ALIGNED(32, static const uint8_t, filt3_global_avx2[32]) = {
  4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12,
  4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12
};

DECLARE_ALIGNED(32, static const uint8_t, filt4_global_avx2[32]) = {
  6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14,
  6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14
};
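
// Each filtN mask above interleaves the source bytes into overlapping
// (pixel, pixel + 1) pairs so that _mm256_maddubs_epi16 with a duplicated
// pair of taps can multiply and sum each pair in one step; the pattern is
// repeated so both 128 bit lanes are shuffled identically.
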
#if defined(__clang__)
#if (__clang_major__ > 0 && __clang_major__ < 3) ||            \
    (__clang_major__ == 3 && __clang_minor__ <= 3) ||          \
    (defined(__APPLE__) && defined(__apple_build_version__) && \
     ((__clang_major__ == 4 && __clang_minor__ <= 2) ||        \
      (__clang_major__ == 5 && __clang_minor__ == 0)))
#define MM256_BROADCASTSI128_SI256(x) \
  _mm_broadcastsi128_si256((__m128i const *)&(x))
#else  // clang > 3.3, and not 5.0 on macosx.
#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
#endif  // clang <= 3.3
#elif defined(__GNUC__)
#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ <= 6)
#define MM256_BROADCASTSI128_SI256(x) \
  _mm_broadcastsi128_si256((__m128i const *)&(x))
#elif __GNUC__ == 4 && __GNUC_MINOR__ == 7
#define MM256_BROADCASTSI128_SI256(x) _mm_broadcastsi128_si256(x)
#else  // gcc > 4.7
#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
#endif  // gcc <= 4.6
#else  // !(gcc || clang)
#define MM256_BROADCASTSI128_SI256(x) _mm256_broadcastsi128_si256(x)
#endif  // __clang__

static INLINE void vpx_filter_block1d16_h8_X_avx2(
    const uint8_t *src_ptr, ptrdiff_t src_pixels_per_line, uint8_t *output_ptr,
    ptrdiff_t output_pitch, uint32_t output_height, const int16_t *filter,
    const int avg) {
  __m128i filtersReg, outReg1, outReg2;
  __m256i addFilterReg64, filt1Reg, filt2Reg, filt3Reg, filt4Reg;
  __m256i firstFilters, secondFilters, thirdFilters, forthFilters;
  __m256i srcRegFilt32b1_1, srcRegFilt32b2_1, srcRegFilt32b2, srcRegFilt32b3;
  __m256i srcReg32b1, srcReg32b2, filtersReg32;
  unsigned int i;
  ptrdiff_t src_stride, dst_stride;

  // create a register with 0,64,0,64,0,64,0,64,0,64,0,64,0,64,0,64
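  // (64 == 1 << 6 is the rounding term added before the final arithmetic
  // shift right by 7)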
  addFilterReg64 = _mm256_set1_epi32((int)0x0400040u);
  filtersReg = _mm_loadu_si128((const __m128i *)filter);
  // convert the 16 bit (short) taps to 8 bit (byte) and duplicate the same
  // data in both halves of the 128 bit register.
  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);
  // have the same data in both lanes of a 256 bit register
  filtersReg32 = MM256_BROADCASTSI128_SI256(filtersReg);

  // duplicate only the first 16 bits (first and second byte)
  // across 256 bit register
  firstFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x100u));
  // duplicate only the second 16 bits (third and fourth byte)
  // across 256 bit register
  secondFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x302u));
  // duplicate only the third 16 bits (fifth and sixth byte)
  // across 256 bit register
  thirdFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x504u));
  // duplicate only the fourth 16 bits (seventh and eighth byte)
  // across 256 bit register
  forthFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x706u));
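
  // The 8 tap kernel is thus split into four 2 tap sub-filters; each maddubs
  // below applies one sub-filter to pairs of adjacent source bytes.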
  filt1Reg = _mm256_load_si256((__m256i const *)filt1_global_avx2);
  filt2Reg = _mm256_load_si256((__m256i const *)filt2_global_avx2);
  filt3Reg = _mm256_load_si256((__m256i const *)filt3_global_avx2);
  filt4Reg = _mm256_load_si256((__m256i const *)filt4_global_avx2);

  // multiply the size of the source and destination stride by two
  src_stride = src_pixels_per_line << 1;
  dst_stride = output_pitch << 1;
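  // each loop iteration below produces two output rows: one row in the low
  // 128 bit lane and the next row in the high lane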
  for (i = output_height; i > 1; i -= 2) {
    // load the 2 strides of source
    srcReg32b1 = _mm256_castsi128_si256(
        _mm_loadu_si128((const __m128i *)(src_ptr - 3)));
    srcReg32b1 = _mm256_inserti128_si256(
        srcReg32b1,
        _mm_loadu_si128((const __m128i *)(src_ptr + src_pixels_per_line - 3)),
        1);

    // filter the source buffer
    srcRegFilt32b1_1 = _mm256_shuffle_epi8(srcReg32b1, filt1Reg);
    srcRegFilt32b2 = _mm256_shuffle_epi8(srcReg32b1, filt4Reg);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt32b1_1 = _mm256_maddubs_epi16(srcRegFilt32b1_1, firstFilters);
    srcRegFilt32b2 = _mm256_maddubs_epi16(srcRegFilt32b2, forthFilters);

    // add and saturate the results together
    srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1, srcRegFilt32b2);

    // filter the source buffer
    srcRegFilt32b3 = _mm256_shuffle_epi8(srcReg32b1, filt2Reg);
    srcRegFilt32b2 = _mm256_shuffle_epi8(srcReg32b1, filt3Reg);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt32b3 = _mm256_maddubs_epi16(srcRegFilt32b3, secondFilters);
    srcRegFilt32b2 = _mm256_maddubs_epi16(srcRegFilt32b2, thirdFilters);

    // add and saturate the results together
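    // (min(a, b) + max(a, b) == a + b, so both middle filter products are
    // accumulated; adding the smaller one first keeps the intermediate sum
    // as small as possible under the saturating adds)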
    srcRegFilt32b1_1 = _mm256_adds_epi16(
        srcRegFilt32b1_1, _mm256_min_epi16(srcRegFilt32b3, srcRegFilt32b2));

    // reading 2 strides of the next 16 bytes
    // (part of it was being read by earlier read)
    srcReg32b2 = _mm256_castsi128_si256(
        _mm_loadu_si128((const __m128i *)(src_ptr + 5)));
    srcReg32b2 = _mm256_inserti128_si256(
        srcReg32b2,
        _mm_loadu_si128((const __m128i *)(src_ptr + src_pixels_per_line + 5)),
        1);

    // add and saturate the results together
    srcRegFilt32b1_1 = _mm256_adds_epi16(
        srcRegFilt32b1_1, _mm256_max_epi16(srcRegFilt32b3, srcRegFilt32b2));

    // filter the source buffer
    srcRegFilt32b2_1 = _mm256_shuffle_epi8(srcReg32b2, filt1Reg);
    srcRegFilt32b2 = _mm256_shuffle_epi8(srcReg32b2, filt4Reg);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt32b2_1 = _mm256_maddubs_epi16(srcRegFilt32b2_1, firstFilters);
    srcRegFilt32b2 = _mm256_maddubs_epi16(srcRegFilt32b2, forthFilters);

    // add and saturate the results together
    srcRegFilt32b2_1 = _mm256_adds_epi16(srcRegFilt32b2_1, srcRegFilt32b2);

    // filter the source buffer
    srcRegFilt32b3 = _mm256_shuffle_epi8(srcReg32b2, filt2Reg);
    srcRegFilt32b2 = _mm256_shuffle_epi8(srcReg32b2, filt3Reg);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt32b3 = _mm256_maddubs_epi16(srcRegFilt32b3, secondFilters);
    srcRegFilt32b2 = _mm256_maddubs_epi16(srcRegFilt32b2, thirdFilters);

    // add and saturate the results together
    srcRegFilt32b2_1 = _mm256_adds_epi16(
        srcRegFilt32b2_1, _mm256_min_epi16(srcRegFilt32b3, srcRegFilt32b2));
    srcRegFilt32b2_1 = _mm256_adds_epi16(
        srcRegFilt32b2_1, _mm256_max_epi16(srcRegFilt32b3, srcRegFilt32b2));

    srcRegFilt32b1_1 = _mm256_adds_epi16(srcRegFilt32b1_1, addFilterReg64);
    srcRegFilt32b2_1 = _mm256_adds_epi16(srcRegFilt32b2_1, addFilterReg64);

    // shift by 7 bit each 16 bit
    srcRegFilt32b1_1 = _mm256_srai_epi16(srcRegFilt32b1_1, 7);
    srcRegFilt32b2_1 = _mm256_srai_epi16(srcRegFilt32b2_1, 7);

    // shrink to 8 bit each 16 bits, the first lane contains the first
    // convolve result and the second lane contains the second convolve
    // result
    srcRegFilt32b1_1 = _mm256_packus_epi16(srcRegFilt32b1_1, srcRegFilt32b2_1);

    src_ptr += src_stride;

    // average if necessary
    outReg1 = _mm256_castsi256_si128(srcRegFilt32b1_1);
    outReg2 = _mm256_extractf128_si256(srcRegFilt32b1_1, 1);
    if (avg) {
      outReg1 = _mm_avg_epu8(outReg1, _mm_load_si128((__m128i *)output_ptr));
      outReg2 = _mm_avg_epu8(
          outReg2, _mm_load_si128((__m128i *)(output_ptr + output_pitch)));
    }

    // save 16 bytes
    _mm_store_si128((__m128i *)output_ptr, outReg1);

    // save the next 16 bytes
    _mm_store_si128((__m128i *)(output_ptr + output_pitch), outReg2);

    output_ptr += dst_stride;
  }

  // if the number of strides is odd,
  // process only 16 bytes
  if (i > 0) {
    __m128i srcReg1, srcReg2, srcRegFilt1_1, srcRegFilt2_1;
    __m128i srcRegFilt2, srcRegFilt3;

    srcReg1 = _mm_loadu_si128((const __m128i *)(src_ptr - 3));

    // filter the source buffer
    srcRegFilt1_1 = _mm_shuffle_epi8(srcReg1, _mm256_castsi256_si128(filt1Reg));
    srcRegFilt2 = _mm_shuffle_epi8(srcReg1, _mm256_castsi256_si128(filt4Reg));

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt1_1 =
        _mm_maddubs_epi16(srcRegFilt1_1, _mm256_castsi256_si128(firstFilters));
    srcRegFilt2 =
        _mm_maddubs_epi16(srcRegFilt2, _mm256_castsi256_si128(forthFilters));

    // add and saturate the results together
    srcRegFilt1_1 = _mm_adds_epi16(srcRegFilt1_1, srcRegFilt2);

    // filter the source buffer
    srcRegFilt3 = _mm_shuffle_epi8(srcReg1, _mm256_castsi256_si128(filt2Reg));
    srcRegFilt2 = _mm_shuffle_epi8(srcReg1, _mm256_castsi256_si128(filt3Reg));

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt3 =
        _mm_maddubs_epi16(srcRegFilt3, _mm256_castsi256_si128(secondFilters));
    srcRegFilt2 =
        _mm_maddubs_epi16(srcRegFilt2, _mm256_castsi256_si128(thirdFilters));

    // add and saturate the results together
    srcRegFilt1_1 =
        _mm_adds_epi16(srcRegFilt1_1, _mm_min_epi16(srcRegFilt3, srcRegFilt2));

    // reading the next 16 bytes
    // (part of it was being read by earlier read)
    srcReg2 = _mm_loadu_si128((const __m128i *)(src_ptr + 5));

    // add and saturate the results together
    srcRegFilt1_1 =
        _mm_adds_epi16(srcRegFilt1_1, _mm_max_epi16(srcRegFilt3, srcRegFilt2));

    // filter the source buffer
    srcRegFilt2_1 = _mm_shuffle_epi8(srcReg2, _mm256_castsi256_si128(filt1Reg));
    srcRegFilt2 = _mm_shuffle_epi8(srcReg2, _mm256_castsi256_si128(filt4Reg));

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt2_1 =
        _mm_maddubs_epi16(srcRegFilt2_1, _mm256_castsi256_si128(firstFilters));
    srcRegFilt2 =
        _mm_maddubs_epi16(srcRegFilt2, _mm256_castsi256_si128(forthFilters));

    // add and saturate the results together
    srcRegFilt2_1 = _mm_adds_epi16(srcRegFilt2_1, srcRegFilt2);

    // filter the source buffer
    srcRegFilt3 = _mm_shuffle_epi8(srcReg2, _mm256_castsi256_si128(filt2Reg));
    srcRegFilt2 = _mm_shuffle_epi8(srcReg2, _mm256_castsi256_si128(filt3Reg));

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt3 =
        _mm_maddubs_epi16(srcRegFilt3, _mm256_castsi256_si128(secondFilters));
    srcRegFilt2 =
        _mm_maddubs_epi16(srcRegFilt2, _mm256_castsi256_si128(thirdFilters));

    // add and saturate the results together
    srcRegFilt2_1 =
        _mm_adds_epi16(srcRegFilt2_1, _mm_min_epi16(srcRegFilt3, srcRegFilt2));
    srcRegFilt2_1 =
        _mm_adds_epi16(srcRegFilt2_1, _mm_max_epi16(srcRegFilt3, srcRegFilt2));

    srcRegFilt1_1 =
        _mm_adds_epi16(srcRegFilt1_1, _mm256_castsi256_si128(addFilterReg64));
    srcRegFilt2_1 =
        _mm_adds_epi16(srcRegFilt2_1, _mm256_castsi256_si128(addFilterReg64));

    // shift by 7 bit each 16 bit
    srcRegFilt1_1 = _mm_srai_epi16(srcRegFilt1_1, 7);
    srcRegFilt2_1 = _mm_srai_epi16(srcRegFilt2_1, 7);

    // shrink to 8 bit each 16 bits, the first lane contains the first
    // convolve result and the second lane contains the second convolve
    // result
    outReg1 = _mm_packus_epi16(srcRegFilt1_1, srcRegFilt2_1);

    // average if necessary
    if (avg) {
      outReg1 = _mm_avg_epu8(outReg1, _mm_load_si128((__m128i *)output_ptr));
    }

    // save 16 bytes
    _mm_store_si128((__m128i *)output_ptr, outReg1);
  }
}

static void vpx_filter_block1d16_h8_avx2(
    const uint8_t *src_ptr, ptrdiff_t src_stride, uint8_t *output_ptr,
    ptrdiff_t dst_stride, uint32_t output_height, const int16_t *filter) {
  vpx_filter_block1d16_h8_X_avx2(src_ptr, src_stride, output_ptr, dst_stride,
                                 output_height, filter, 0);
}

static void vpx_filter_block1d16_h8_avg_avx2(
    const uint8_t *src_ptr, ptrdiff_t src_stride, uint8_t *output_ptr,
    ptrdiff_t dst_stride, uint32_t output_height, const int16_t *filter) {
  vpx_filter_block1d16_h8_X_avx2(src_ptr, src_stride, output_ptr, dst_stride,
                                 output_height, filter, 1);
}

static INLINE void vpx_filter_block1d16_v8_X_avx2(
    const uint8_t *src_ptr, ptrdiff_t src_pitch, uint8_t *output_ptr,
    ptrdiff_t out_pitch, uint32_t output_height, const int16_t *filter,
    const int avg) {
  __m128i filtersReg, outReg1, outReg2;
  __m256i addFilterReg64;
  __m256i srcReg32b1, srcReg32b2, srcReg32b3, srcReg32b4, srcReg32b5;
  __m256i srcReg32b6, srcReg32b7, srcReg32b8, srcReg32b9, srcReg32b10;
  __m256i srcReg32b11, srcReg32b12, filtersReg32;
  __m256i firstFilters, secondFilters, thirdFilters, forthFilters;
  unsigned int i;
  ptrdiff_t src_stride, dst_stride;

  // create a register with 0,64,0,64,0,64,0,64,0,64,0,64,0,64,0,64
  addFilterReg64 = _mm256_set1_epi32((int)0x0400040u);
  filtersReg = _mm_loadu_si128((const __m128i *)filter);
  // convert the 16 bit (short) taps to 8 bit (byte) and duplicate the
  // same data in both halves of the 128 bit register.
  filtersReg = _mm_packs_epi16(filtersReg, filtersReg);
  // have the same data in both lanes of a 256 bit register
  filtersReg32 = MM256_BROADCASTSI128_SI256(filtersReg);

  // duplicate only the first 16 bits (first and second byte)
  // across 256 bit register
  firstFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x100u));
  // duplicate only the second 16 bits (third and fourth byte)
  // across 256 bit register
  secondFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x302u));
  // duplicate only the third 16 bits (fifth and sixth byte)
  // across 256 bit register
  thirdFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x504u));
  // duplicate only the fourth 16 bits (seventh and eighth byte)
  // across 256 bit register
  forthFilters = _mm256_shuffle_epi8(filtersReg32, _mm256_set1_epi16(0x706u));

  // multiply the size of the source and destination stride by two
  src_stride = src_pitch << 1;
  dst_stride = out_pitch << 1;

  // load 16 bytes 7 times in stride of src_pitch
  srcReg32b1 =
      _mm256_castsi128_si256(_mm_loadu_si128((const __m128i *)(src_ptr)));
  srcReg32b2 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch)));
  srcReg32b3 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 2)));
  srcReg32b4 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 3)));
  srcReg32b5 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 4)));
  srcReg32b6 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 5)));
  srcReg32b7 = _mm256_castsi128_si256(
      _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 6)));
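
  // the 8 tap vertical filter needs 8 source rows per output row; rows 0..6
  // are preloaded here and one new row per output row is loaded in the loop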
  // have every two consecutive loads in the same 256 bit register
  srcReg32b1 = _mm256_inserti128_si256(srcReg32b1,
                                       _mm256_castsi256_si128(srcReg32b2), 1);
  srcReg32b2 = _mm256_inserti128_si256(srcReg32b2,
                                       _mm256_castsi256_si128(srcReg32b3), 1);
  srcReg32b3 = _mm256_inserti128_si256(srcReg32b3,
                                       _mm256_castsi256_si128(srcReg32b4), 1);
  srcReg32b4 = _mm256_inserti128_si256(srcReg32b4,
                                       _mm256_castsi256_si128(srcReg32b5), 1);
  srcReg32b5 = _mm256_inserti128_si256(srcReg32b5,
                                       _mm256_castsi256_si128(srcReg32b6), 1);
  srcReg32b6 = _mm256_inserti128_si256(srcReg32b6,
                                       _mm256_castsi256_si128(srcReg32b7), 1);

  // merge every two consecutive registers except the last one
  srcReg32b10 = _mm256_unpacklo_epi8(srcReg32b1, srcReg32b2);
  srcReg32b1 = _mm256_unpackhi_epi8(srcReg32b1, srcReg32b2);
  srcReg32b11 = _mm256_unpacklo_epi8(srcReg32b3, srcReg32b4);
  srcReg32b3 = _mm256_unpackhi_epi8(srcReg32b3, srcReg32b4);
  srcReg32b2 = _mm256_unpacklo_epi8(srcReg32b5, srcReg32b6);
  srcReg32b5 = _mm256_unpackhi_epi8(srcReg32b5, srcReg32b6);
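
  // after the byte interleaves, each 16 bit lane holds a pair of vertically
  // adjacent pixels, the operand layout _mm256_maddubs_epi16 expects
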
  for (i = output_height; i > 1; i -= 2) {
    // load the last 2 loads of 16 bytes and have every two
    // consecutive loads in the same 256 bit register
    srcReg32b8 = _mm256_castsi128_si256(
        _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 7)));
    srcReg32b7 = _mm256_inserti128_si256(srcReg32b7,
                                         _mm256_castsi256_si128(srcReg32b8), 1);
    srcReg32b9 = _mm256_castsi128_si256(
        _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 8)));
    srcReg32b8 = _mm256_inserti128_si256(srcReg32b8,
                                         _mm256_castsi256_si128(srcReg32b9), 1);

    // merge every two consecutive registers
    srcReg32b4 = _mm256_unpacklo_epi8(srcReg32b7, srcReg32b8);
    srcReg32b7 = _mm256_unpackhi_epi8(srcReg32b7, srcReg32b8);

    // multiply 2 adjacent elements with the filter and add the result
    srcReg32b10 = _mm256_maddubs_epi16(srcReg32b10, firstFilters);
    srcReg32b6 = _mm256_maddubs_epi16(srcReg32b4, forthFilters);

    // add and saturate the results together
    srcReg32b10 = _mm256_adds_epi16(srcReg32b10, srcReg32b6);

    // multiply 2 adjacent elements with the filter and add the result
    srcReg32b8 = _mm256_maddubs_epi16(srcReg32b11, secondFilters);
    srcReg32b12 = _mm256_maddubs_epi16(srcReg32b2, thirdFilters);

    // add and saturate the results together
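    // (same min/max split of the two middle products as in the horizontal
    // filter above)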
    srcReg32b10 = _mm256_adds_epi16(srcReg32b10,
                                    _mm256_min_epi16(srcReg32b8, srcReg32b12));
    srcReg32b10 = _mm256_adds_epi16(srcReg32b10,
                                    _mm256_max_epi16(srcReg32b8, srcReg32b12));

    // multiply 2 adjacent elements with the filter and add the result
    srcReg32b1 = _mm256_maddubs_epi16(srcReg32b1, firstFilters);
    srcReg32b6 = _mm256_maddubs_epi16(srcReg32b7, forthFilters);

    // add and saturate the results together
    srcReg32b1 = _mm256_adds_epi16(srcReg32b1, srcReg32b6);

    // multiply 2 adjacent elements with the filter and add the result
    srcReg32b8 = _mm256_maddubs_epi16(srcReg32b3, secondFilters);
    srcReg32b12 = _mm256_maddubs_epi16(srcReg32b5, thirdFilters);

    // add and saturate the results together
    srcReg32b1 = _mm256_adds_epi16(srcReg32b1,
                                   _mm256_min_epi16(srcReg32b8, srcReg32b12));
    srcReg32b1 = _mm256_adds_epi16(srcReg32b1,
                                   _mm256_max_epi16(srcReg32b8, srcReg32b12));

    srcReg32b10 = _mm256_adds_epi16(srcReg32b10, addFilterReg64);
    srcReg32b1 = _mm256_adds_epi16(srcReg32b1, addFilterReg64);

    // shift by 7 bit each 16 bit
    srcReg32b10 = _mm256_srai_epi16(srcReg32b10, 7);
    srcReg32b1 = _mm256_srai_epi16(srcReg32b1, 7);

    // shrink to 8 bit each 16 bits, the first lane contains the first
    // convolve result and the second lane contains the second convolve
    // result
    srcReg32b1 = _mm256_packus_epi16(srcReg32b10, srcReg32b1);

    src_ptr += src_stride;

    // average if necessary
    outReg1 = _mm256_castsi256_si128(srcReg32b1);
    outReg2 = _mm256_extractf128_si256(srcReg32b1, 1);
    if (avg) {
      outReg1 = _mm_avg_epu8(outReg1, _mm_load_si128((__m128i *)output_ptr));
      outReg2 = _mm_avg_epu8(
          outReg2, _mm_load_si128((__m128i *)(output_ptr + out_pitch)));
    }

    // save 16 bytes
    _mm_store_si128((__m128i *)output_ptr, outReg1);

    // save the next 16 bytes
    _mm_store_si128((__m128i *)(output_ptr + out_pitch), outReg2);

    output_ptr += dst_stride;

    // save part of the registers for next strides
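    // (the rotation implements a sliding 8 row window, so only one new row
    // has to be loaded per output row)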
    srcReg32b10 = srcReg32b11;
    srcReg32b1 = srcReg32b3;
    srcReg32b11 = srcReg32b2;
    srcReg32b3 = srcReg32b5;
    srcReg32b2 = srcReg32b4;
    srcReg32b5 = srcReg32b7;
    srcReg32b7 = srcReg32b9;
  }
  if (i > 0) {
    __m128i srcRegFilt1, srcRegFilt3, srcRegFilt4, srcRegFilt5;
    __m128i srcRegFilt6, srcRegFilt7, srcRegFilt8;
    // load the last 16 bytes
    srcRegFilt8 = _mm_loadu_si128((const __m128i *)(src_ptr + src_pitch * 7));

    // merge the last 2 results together
    srcRegFilt4 =
        _mm_unpacklo_epi8(_mm256_castsi256_si128(srcReg32b7), srcRegFilt8);
    srcRegFilt7 =
        _mm_unpackhi_epi8(_mm256_castsi256_si128(srcReg32b7), srcRegFilt8);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt1 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b10),
                                    _mm256_castsi256_si128(firstFilters));
    srcRegFilt4 =
        _mm_maddubs_epi16(srcRegFilt4, _mm256_castsi256_si128(forthFilters));
    srcRegFilt3 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b1),
                                    _mm256_castsi256_si128(firstFilters));
    srcRegFilt7 =
        _mm_maddubs_epi16(srcRegFilt7, _mm256_castsi256_si128(forthFilters));

    // add and saturate the results together
    srcRegFilt1 = _mm_adds_epi16(srcRegFilt1, srcRegFilt4);
    srcRegFilt3 = _mm_adds_epi16(srcRegFilt3, srcRegFilt7);

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt4 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b11),
                                    _mm256_castsi256_si128(secondFilters));
    srcRegFilt5 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b3),
                                    _mm256_castsi256_si128(secondFilters));

    // multiply 2 adjacent elements with the filter and add the result
    srcRegFilt6 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b2),
                                    _mm256_castsi256_si128(thirdFilters));
    srcRegFilt7 = _mm_maddubs_epi16(_mm256_castsi256_si128(srcReg32b5),
                                    _mm256_castsi256_si128(thirdFilters));

    // add and saturate the results together
    srcRegFilt1 =
        _mm_adds_epi16(srcRegFilt1, _mm_min_epi16(srcRegFilt4, srcRegFilt6));
    srcRegFilt3 =
        _mm_adds_epi16(srcRegFilt3, _mm_min_epi16(srcRegFilt5, srcRegFilt7));

    // add and saturate the results together
    srcRegFilt1 =
        _mm_adds_epi16(srcRegFilt1, _mm_max_epi16(srcRegFilt4, srcRegFilt6));
    srcRegFilt3 =
        _mm_adds_epi16(srcRegFilt3, _mm_max_epi16(srcRegFilt5, srcRegFilt7));

    srcRegFilt1 =
        _mm_adds_epi16(srcRegFilt1, _mm256_castsi256_si128(addFilterReg64));
    srcRegFilt3 =
        _mm_adds_epi16(srcRegFilt3, _mm256_castsi256_si128(addFilterReg64));

    // shift by 7 bit each 16 bit
    srcRegFilt1 = _mm_srai_epi16(srcRegFilt1, 7);
    srcRegFilt3 = _mm_srai_epi16(srcRegFilt3, 7);

    // shrink to 8 bit each 16 bits, the first lane contains the first
    // convolve result and the second lane contains the second convolve
    // result
    outReg1 = _mm_packus_epi16(srcRegFilt1, srcRegFilt3);

    // average if necessary
    if (avg) {
      outReg1 = _mm_avg_epu8(outReg1, _mm_load_si128((__m128i *)output_ptr));
    }

    // save 16 bytes
    _mm_store_si128((__m128i *)output_ptr, outReg1);
  }
}

static void vpx_filter_block1d16_v8_avx2(const uint8_t *src_ptr,
                                         ptrdiff_t src_stride, uint8_t *dst_ptr,
                                         ptrdiff_t dst_stride, uint32_t height,
                                         const int16_t *filter) {
  vpx_filter_block1d16_v8_X_avx2(src_ptr, src_stride, dst_ptr, dst_stride,
                                 height, filter, 0);
}

static void vpx_filter_block1d16_v8_avg_avx2(
    const uint8_t *src_ptr, ptrdiff_t src_stride, uint8_t *dst_ptr,
    ptrdiff_t dst_stride, uint32_t height, const int16_t *filter) {
  vpx_filter_block1d16_v8_X_avx2(src_ptr, src_stride, dst_ptr, dst_stride,
                                 height, filter, 1);
}

#if HAVE_AVX2 && HAVE_SSSE3
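// Only the 16 wide 8 tap paths are implemented with AVX2 above; the narrower
// and 2 tap (bilinear) paths below fall back to their SSSE3 counterparts.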
filter8_1dfunction vpx_filter_block1d4_v8_ssse3;
#if ARCH_X86_64
filter8_1dfunction vpx_filter_block1d8_v8_intrin_ssse3;
filter8_1dfunction vpx_filter_block1d8_h8_intrin_ssse3;
filter8_1dfunction vpx_filter_block1d4_h8_intrin_ssse3;
#define vpx_filter_block1d8_v8_avx2 vpx_filter_block1d8_v8_intrin_ssse3
#define vpx_filter_block1d8_h8_avx2 vpx_filter_block1d8_h8_intrin_ssse3
#define vpx_filter_block1d4_h8_avx2 vpx_filter_block1d4_h8_intrin_ssse3
#else  // ARCH_X86
filter8_1dfunction vpx_filter_block1d8_v8_ssse3;
filter8_1dfunction vpx_filter_block1d8_h8_ssse3;
filter8_1dfunction vpx_filter_block1d4_h8_ssse3;
#define vpx_filter_block1d8_v8_avx2 vpx_filter_block1d8_v8_ssse3
#define vpx_filter_block1d8_h8_avx2 vpx_filter_block1d8_h8_ssse3
#define vpx_filter_block1d4_h8_avx2 vpx_filter_block1d4_h8_ssse3
#endif  // ARCH_X86_64
filter8_1dfunction vpx_filter_block1d8_v8_avg_ssse3;
filter8_1dfunction vpx_filter_block1d8_h8_avg_ssse3;
filter8_1dfunction vpx_filter_block1d4_v8_avg_ssse3;
filter8_1dfunction vpx_filter_block1d4_h8_avg_ssse3;
#define vpx_filter_block1d8_v8_avg_avx2 vpx_filter_block1d8_v8_avg_ssse3
#define vpx_filter_block1d8_h8_avg_avx2 vpx_filter_block1d8_h8_avg_ssse3
#define vpx_filter_block1d4_v8_avg_avx2 vpx_filter_block1d4_v8_avg_ssse3
#define vpx_filter_block1d4_h8_avg_avx2 vpx_filter_block1d4_h8_avg_ssse3
filter8_1dfunction vpx_filter_block1d16_v2_ssse3;
filter8_1dfunction vpx_filter_block1d16_h2_ssse3;
filter8_1dfunction vpx_filter_block1d8_v2_ssse3;
filter8_1dfunction vpx_filter_block1d8_h2_ssse3;
filter8_1dfunction vpx_filter_block1d4_v2_ssse3;
filter8_1dfunction vpx_filter_block1d4_h2_ssse3;
#define vpx_filter_block1d4_v8_avx2 vpx_filter_block1d4_v8_ssse3
#define vpx_filter_block1d16_v2_avx2 vpx_filter_block1d16_v2_ssse3
#define vpx_filter_block1d16_h2_avx2 vpx_filter_block1d16_h2_ssse3
#define vpx_filter_block1d8_v2_avx2 vpx_filter_block1d8_v2_ssse3
#define vpx_filter_block1d8_h2_avx2 vpx_filter_block1d8_h2_ssse3
#define vpx_filter_block1d4_v2_avx2 vpx_filter_block1d4_v2_ssse3
#define vpx_filter_block1d4_h2_avx2 vpx_filter_block1d4_h2_ssse3
filter8_1dfunction vpx_filter_block1d16_v2_avg_ssse3;
filter8_1dfunction vpx_filter_block1d16_h2_avg_ssse3;
filter8_1dfunction vpx_filter_block1d8_v2_avg_ssse3;
filter8_1dfunction vpx_filter_block1d8_h2_avg_ssse3;
filter8_1dfunction vpx_filter_block1d4_v2_avg_ssse3;
filter8_1dfunction vpx_filter_block1d4_h2_avg_ssse3;
#define vpx_filter_block1d16_v2_avg_avx2 vpx_filter_block1d16_v2_avg_ssse3
#define vpx_filter_block1d16_h2_avg_avx2 vpx_filter_block1d16_h2_avg_ssse3
#define vpx_filter_block1d8_v2_avg_avx2 vpx_filter_block1d8_v2_avg_ssse3
#define vpx_filter_block1d8_h2_avg_avx2 vpx_filter_block1d8_h2_avg_ssse3
#define vpx_filter_block1d4_v2_avg_avx2 vpx_filter_block1d4_v2_avg_ssse3
#define vpx_filter_block1d4_h2_avg_avx2 vpx_filter_block1d4_h2_avg_ssse3

// void vpx_convolve8_horiz_avx2(const uint8_t *src, ptrdiff_t src_stride,
//                               uint8_t *dst, ptrdiff_t dst_stride,
//                               const InterpKernel *filter, int x0_q4,
//                               int32_t x_step_q4, int y0_q4, int y_step_q4,
//                               int w, int h);
// void vpx_convolve8_vert_avx2(const uint8_t *src, ptrdiff_t src_stride,
//                              uint8_t *dst, ptrdiff_t dst_stride,
//                              const InterpKernel *filter, int x0_q4,
//                              int32_t x_step_q4, int y0_q4, int y_step_q4,
//                              int w, int h);
// void vpx_convolve8_avg_horiz_avx2(const uint8_t *src, ptrdiff_t src_stride,
//                                   uint8_t *dst, ptrdiff_t dst_stride,
//                                   const InterpKernel *filter, int x0_q4,
//                                   int32_t x_step_q4, int y0_q4,
//                                   int y_step_q4, int w, int h);
// void vpx_convolve8_avg_vert_avx2(const uint8_t *src, ptrdiff_t src_stride,
//                                  uint8_t *dst, ptrdiff_t dst_stride,
//                                  const InterpKernel *filter, int x0_q4,
//                                  int32_t x_step_q4, int y0_q4,
//                                  int y_step_q4, int w, int h);
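// Note: the vertical wrappers pass src - src_stride * 3 so that the 8 tap
// window covers 3 rows above and 4 rows below the output row.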
FUN_CONV_1D(horiz, x0_q4, x_step_q4, h, src, , avx2);
FUN_CONV_1D(vert, y0_q4, y_step_q4, v, src - src_stride * 3, , avx2);
FUN_CONV_1D(avg_horiz, x0_q4, x_step_q4, h, src, avg_, avx2);
FUN_CONV_1D(avg_vert, y0_q4, y_step_q4, v, src - src_stride * 3, avg_, avx2);

// void vpx_convolve8_avx2(const uint8_t *src, ptrdiff_t src_stride,
//                         uint8_t *dst, ptrdiff_t dst_stride,
//                         const InterpKernel *filter, int x0_q4,
//                         int32_t x_step_q4, int y0_q4, int y_step_q4,
//                         int w, int h);
// void vpx_convolve8_avg_avx2(const uint8_t *src, ptrdiff_t src_stride,
//                             uint8_t *dst, ptrdiff_t dst_stride,
//                             const InterpKernel *filter, int x0_q4,
//                             int32_t x_step_q4, int y0_q4, int y_step_q4,
//                             int w, int h);
FUN_CONV_2D(, avx2);
FUN_CONV_2D(avg_, avx2);
#endif  // HAVE_AVX2 && HAVE_SSSE3