/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_DSP_X86_INV_TXFM_SSE2_H_
#define VPX_DSP_X86_INV_TXFM_SSE2_H_

#include <emmintrin.h>  // SSE2
#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/inv_txfm.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"

// Transposes an 8x8 block of 16-bit elements from in[0..7] into res[0..7]
// (res may alias in). Three interleave stages: 16-bit, 32-bit, then 64-bit
// unpacks.
static INLINE void array_transpose_8x8(__m128i *in, __m128i *res) {
  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
  const __m128i tr0_2 = _mm_unpackhi_epi16(in[0], in[1]);
  const __m128i tr0_3 = _mm_unpackhi_epi16(in[2], in[3]);
  const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
  const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);
  const __m128i tr0_6 = _mm_unpackhi_epi16(in[4], in[5]);
  const __m128i tr0_7 = _mm_unpackhi_epi16(in[6], in[7]);

  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_4, tr0_5);
  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
  const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_4, tr0_5);
  const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_2, tr0_3);
  const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);
  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_2, tr0_3);
  const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);

  res[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
  res[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
  res[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
  res[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
  res[4] = _mm_unpacklo_epi64(tr1_4, tr1_5);
  res[5] = _mm_unpackhi_epi64(tr1_4, tr1_5);
  res[6] = _mm_unpacklo_epi64(tr1_6, tr1_7);
  res[7] = _mm_unpackhi_epi64(tr1_6, tr1_7);
}
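
// Usage sketch (illustrative only; `buf` is a hypothetical 16-byte-aligned
// int16_t array holding a row-major 8x8 block):
//   __m128i rows[8];
//   for (int i = 0; i < 8; ++i)
//     rows[i] = _mm_load_si128((const __m128i *)(buf + i * 8));
//   array_transpose_8x8(rows, rows);  // in-place transpose

// Macro form of the same three-stage transpose. All intermediates are
// computed before any output is written, so the out registers may safely
// alias the in registers.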
#define TRANSPOSE_8X8(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, \
                      out2, out3, out4, out5, out6, out7)                 \
  {                                                                       \
    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);                   \
    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);                   \
    const __m128i tr0_2 = _mm_unpackhi_epi16(in0, in1);                   \
    const __m128i tr0_3 = _mm_unpackhi_epi16(in2, in3);                   \
    const __m128i tr0_4 = _mm_unpacklo_epi16(in4, in5);                   \
    const __m128i tr0_5 = _mm_unpacklo_epi16(in6, in7);                   \
    const __m128i tr0_6 = _mm_unpackhi_epi16(in4, in5);                   \
    const __m128i tr0_7 = _mm_unpackhi_epi16(in6, in7);                   \
                                                                          \
    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);               \
    const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);               \
    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);               \
    const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);               \
    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);               \
    const __m128i tr1_5 = _mm_unpacklo_epi32(tr0_6, tr0_7);               \
    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);               \
    const __m128i tr1_7 = _mm_unpackhi_epi32(tr0_6, tr0_7);               \
                                                                          \
    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4);                              \
    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4);                              \
    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6);                              \
    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6);                              \
    out4 = _mm_unpacklo_epi64(tr1_1, tr1_5);                              \
    out5 = _mm_unpackhi_epi64(tr1_1, tr1_5);                              \
    out6 = _mm_unpacklo_epi64(tr1_3, tr1_7);                              \
    out7 = _mm_unpackhi_epi64(tr1_3, tr1_7);                              \
  }

#define TRANSPOSE_8X4(in0, in1, in2, in3, out0, out1)   \
  {                                                     \
    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1); \
    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3); \
                                                        \
    in0 = _mm_unpacklo_epi32(tr0_0, tr0_1); /* i1 i0 */ \
    in1 = _mm_unpackhi_epi32(tr0_0, tr0_1); /* i3 i2 */ \
  }

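// Transposes an 8x4 block (the low four 16-bit elements of each of the
// eight input registers) into four 8-element output rows.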
static INLINE void array_transpose_4X8(__m128i *in, __m128i *out) {
  const __m128i tr0_0 = _mm_unpacklo_epi16(in[0], in[1]);
  const __m128i tr0_1 = _mm_unpacklo_epi16(in[2], in[3]);
  const __m128i tr0_4 = _mm_unpacklo_epi16(in[4], in[5]);
  const __m128i tr0_5 = _mm_unpacklo_epi16(in[6], in[7]);

  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
  const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);
  const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);

  out[0] = _mm_unpacklo_epi64(tr1_0, tr1_4);
  out[1] = _mm_unpackhi_epi64(tr1_0, tr1_4);
  out[2] = _mm_unpacklo_epi64(tr1_2, tr1_6);
  out[3] = _mm_unpackhi_epi64(tr1_2, tr1_6);
}

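// Transposes a 16x16 block stored as four 8x8 quadrants: res0 holds columns
// 0-7 and res1 columns 8-15, sixteen rows per half. The diagonal quadrants
// are transposed in place; the off-diagonal quadrants are transposed and
// swapped, with tbuf keeping the top-right block from being clobbered.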
static INLINE void array_transpose_16x16(__m128i *res0, __m128i *res1) {
  __m128i tbuf[8];
  array_transpose_8x8(res0, res0);
  array_transpose_8x8(res1, tbuf);
  array_transpose_8x8(res0 + 8, res1);
  array_transpose_8x8(res1 + 8, res1 + 8);

  res0[8] = tbuf[0];
  res0[9] = tbuf[1];
  res0[10] = tbuf[2];
  res0[11] = tbuf[3];
  res0[12] = tbuf[4];
  res0[13] = tbuf[5];
  res0[14] = tbuf[6];
  res0[15] = tbuf[7];
}

// Allows the 8-bit optimizations to be used for profile 0 even when the
// build has high bit depth enabled: with CONFIG_VP9_HIGHBITDEPTH,
// tran_low_t is 32 bits wide, so eight coefficients are narrowed to 16 bits
// on load; otherwise tran_low_t is 16 bits and a single aligned load
// suffices.
static INLINE __m128i load_input_data(const tran_low_t *data) {
#if CONFIG_VP9_HIGHBITDEPTH
  return octa_set_epi16(data[0], data[1], data[2], data[3], data[4], data[5],
                        data[6], data[7]);
#else
  return _mm_load_si128((const __m128i *)data);
#endif
}

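// Loads the left half (columns 0-7) of each of the 16 rows of a row-major
// 16x16 coefficient block.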
static INLINE void load_buffer_8x16(const tran_low_t *input, __m128i *in) {
  in[0] = load_input_data(input + 0 * 16);
  in[1] = load_input_data(input + 1 * 16);
  in[2] = load_input_data(input + 2 * 16);
  in[3] = load_input_data(input + 3 * 16);
  in[4] = load_input_data(input + 4 * 16);
  in[5] = load_input_data(input + 5 * 16);
  in[6] = load_input_data(input + 6 * 16);
  in[7] = load_input_data(input + 7 * 16);

  in[8] = load_input_data(input + 8 * 16);
  in[9] = load_input_data(input + 9 * 16);
  in[10] = load_input_data(input + 10 * 16);
  in[11] = load_input_data(input + 11 * 16);
  in[12] = load_input_data(input + 12 * 16);
  in[13] = load_input_data(input + 13 * 16);
  in[14] = load_input_data(input + 14 * 16);
  in[15] = load_input_data(input + 15 * 16);
}

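// Adds the 16-bit residual row in_x to eight 8-bit prediction pixels at
// dest, saturates back to 8 bits, and stores the result. A zeroed __m128i
// named `zero` must be in scope at the expansion site.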
#define RECON_AND_STORE(dest, in_x)                  \
  {                                                  \
    __m128i d0 = _mm_loadl_epi64((__m128i *)(dest)); \
    d0 = _mm_unpacklo_epi8(d0, zero);                \
    d0 = _mm_add_epi16(in_x, d0);                    \
    d0 = _mm_packus_epi16(d0, d0);                   \
    _mm_storel_epi64((__m128i *)(dest), d0);         \
  }

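// Writes out a reconstructed 8x16 strip. Adding 1 << 5 and then shifting
// right by 6 implements ROUND_POWER_OF_TWO(x, 6), the final scaling of the
// 16x16 inverse transform; each scaled row is then blended into dest.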
static INLINE void write_buffer_8x16(uint8_t *dest, __m128i *in, int stride) {
  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
  const __m128i zero = _mm_setzero_si128();
  // Final rounding and shift
  in[0] = _mm_adds_epi16(in[0], final_rounding);
  in[1] = _mm_adds_epi16(in[1], final_rounding);
  in[2] = _mm_adds_epi16(in[2], final_rounding);
  in[3] = _mm_adds_epi16(in[3], final_rounding);
  in[4] = _mm_adds_epi16(in[4], final_rounding);
  in[5] = _mm_adds_epi16(in[5], final_rounding);
  in[6] = _mm_adds_epi16(in[6], final_rounding);
  in[7] = _mm_adds_epi16(in[7], final_rounding);
  in[8] = _mm_adds_epi16(in[8], final_rounding);
  in[9] = _mm_adds_epi16(in[9], final_rounding);
  in[10] = _mm_adds_epi16(in[10], final_rounding);
  in[11] = _mm_adds_epi16(in[11], final_rounding);
  in[12] = _mm_adds_epi16(in[12], final_rounding);
  in[13] = _mm_adds_epi16(in[13], final_rounding);
  in[14] = _mm_adds_epi16(in[14], final_rounding);
  in[15] = _mm_adds_epi16(in[15], final_rounding);

  in[0] = _mm_srai_epi16(in[0], 6);
  in[1] = _mm_srai_epi16(in[1], 6);
  in[2] = _mm_srai_epi16(in[2], 6);
  in[3] = _mm_srai_epi16(in[3], 6);
  in[4] = _mm_srai_epi16(in[4], 6);
  in[5] = _mm_srai_epi16(in[5], 6);
  in[6] = _mm_srai_epi16(in[6], 6);
  in[7] = _mm_srai_epi16(in[7], 6);
  in[8] = _mm_srai_epi16(in[8], 6);
  in[9] = _mm_srai_epi16(in[9], 6);
  in[10] = _mm_srai_epi16(in[10], 6);
  in[11] = _mm_srai_epi16(in[11], 6);
  in[12] = _mm_srai_epi16(in[12], 6);
  in[13] = _mm_srai_epi16(in[13], 6);
  in[14] = _mm_srai_epi16(in[14], 6);
  in[15] = _mm_srai_epi16(in[15], 6);

  RECON_AND_STORE(dest + 0 * stride, in[0]);
  RECON_AND_STORE(dest + 1 * stride, in[1]);
  RECON_AND_STORE(dest + 2 * stride, in[2]);
  RECON_AND_STORE(dest + 3 * stride, in[3]);
  RECON_AND_STORE(dest + 4 * stride, in[4]);
  RECON_AND_STORE(dest + 5 * stride, in[5]);
  RECON_AND_STORE(dest + 6 * stride, in[6]);
  RECON_AND_STORE(dest + 7 * stride, in[7]);
  RECON_AND_STORE(dest + 8 * stride, in[8]);
  RECON_AND_STORE(dest + 9 * stride, in[9]);
  RECON_AND_STORE(dest + 10 * stride, in[10]);
  RECON_AND_STORE(dest + 11 * stride, in[11]);
  RECON_AND_STORE(dest + 12 * stride, in[12]);
  RECON_AND_STORE(dest + 13 * stride, in[13]);
  RECON_AND_STORE(dest + 14 * stride, in[14]);
  RECON_AND_STORE(dest + 15 * stride, in[15]);
}

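// The _10 transpose variants below serve the reduced inverse-transform
// paths (e.g. vpx_idct16x16_10_add_sse2), where only a few upper-left
// coefficients are nonzero, so only partial rows need transposing.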
#define TRANSPOSE_4X8_10(tmp0, tmp1, tmp2, tmp3, out0, out1, out2, out3) \
  {                                                                      \
    const __m128i tr0_0 = _mm_unpackhi_epi16(tmp0, tmp1);                \
    const __m128i tr0_1 = _mm_unpacklo_epi16(tmp1, tmp0);                \
    const __m128i tr0_4 = _mm_unpacklo_epi16(tmp2, tmp3);                \
    const __m128i tr0_5 = _mm_unpackhi_epi16(tmp3, tmp2);                \
                                                                         \
    const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);              \
    const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);              \
    const __m128i tr1_4 = _mm_unpacklo_epi32(tr0_4, tr0_5);              \
    const __m128i tr1_6 = _mm_unpackhi_epi32(tr0_4, tr0_5);              \
                                                                         \
    out0 = _mm_unpacklo_epi64(tr1_0, tr1_4);                             \
    out1 = _mm_unpackhi_epi64(tr1_0, tr1_4);                             \
    out2 = _mm_unpacklo_epi64(tr1_2, tr1_6);                             \
    out3 = _mm_unpackhi_epi64(tr1_2, tr1_6);                             \
  }

#define TRANSPOSE_8X8_10(in0, in1, in2, in3, out0, out1) \
  {                                                      \
    const __m128i tr0_0 = _mm_unpacklo_epi16(in0, in1);  \
    const __m128i tr0_1 = _mm_unpacklo_epi16(in2, in3);  \
    out0 = _mm_unpacklo_epi32(tr0_0, tr0_1);             \
    out1 = _mm_unpackhi_epi32(tr0_0, tr0_1);             \
  }

// Multiplies paired 16-bit elements by interleaved cosine constants, rounds,
// and narrows back to 16 bits: the butterfly step of the inverse DCT.
// tmp0..tmp7 and a `rounding` vector (DCT_CONST_ROUNDING in each 32-bit
// lane) must be declared at the expansion site.
#define MULTIPLICATION_AND_ADD(lo_0, hi_0, lo_1, hi_1, cst0, cst1, cst2, cst3, \
                               res0, res1, res2, res3)                         \
  {                                                                            \
    tmp0 = _mm_madd_epi16(lo_0, cst0);                                         \
    tmp1 = _mm_madd_epi16(hi_0, cst0);                                         \
    tmp2 = _mm_madd_epi16(lo_0, cst1);                                         \
    tmp3 = _mm_madd_epi16(hi_0, cst1);                                         \
    tmp4 = _mm_madd_epi16(lo_1, cst2);                                         \
    tmp5 = _mm_madd_epi16(hi_1, cst2);                                         \
    tmp6 = _mm_madd_epi16(lo_1, cst3);                                         \
    tmp7 = _mm_madd_epi16(hi_1, cst3);                                         \
                                                                               \
    tmp0 = _mm_add_epi32(tmp0, rounding);                                      \
    tmp1 = _mm_add_epi32(tmp1, rounding);                                      \
    tmp2 = _mm_add_epi32(tmp2, rounding);                                      \
    tmp3 = _mm_add_epi32(tmp3, rounding);                                      \
    tmp4 = _mm_add_epi32(tmp4, rounding);                                      \
    tmp5 = _mm_add_epi32(tmp5, rounding);                                      \
    tmp6 = _mm_add_epi32(tmp6, rounding);                                      \
    tmp7 = _mm_add_epi32(tmp7, rounding);                                      \
                                                                               \
    tmp0 = _mm_srai_epi32(tmp0, DCT_CONST_BITS);                               \
    tmp1 = _mm_srai_epi32(tmp1, DCT_CONST_BITS);                               \
    tmp2 = _mm_srai_epi32(tmp2, DCT_CONST_BITS);                               \
    tmp3 = _mm_srai_epi32(tmp3, DCT_CONST_BITS);                               \
    tmp4 = _mm_srai_epi32(tmp4, DCT_CONST_BITS);                               \
    tmp5 = _mm_srai_epi32(tmp5, DCT_CONST_BITS);                               \
    tmp6 = _mm_srai_epi32(tmp6, DCT_CONST_BITS);                               \
    tmp7 = _mm_srai_epi32(tmp7, DCT_CONST_BITS);                               \
                                                                               \
    res0 = _mm_packs_epi32(tmp0, tmp1);                                        \
    res1 = _mm_packs_epi32(tmp2, tmp3);                                        \
    res2 = _mm_packs_epi32(tmp4, tmp5);                                        \
    res3 = _mm_packs_epi32(tmp6, tmp7);                                        \
  }
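// Scalar model of one output lane of the macro above (a sketch for
// reference; x0/x1 are the two interleaved inputs, c0/c1 the paired
// constants):
//   res = saturate_int16(ROUND_POWER_OF_TWO(x0 * c0 + x1 * c1,
//                                           DCT_CONST_BITS));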
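// Reconstructs a 4x4 block: in[0] carries residual rows 0-1 and in[1] rows
// 2-3. The four 4-byte prediction rows are gathered, widened to 16 bits,
// summed with the residual, saturated, and stored back a row at a time.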
static INLINE void recon_and_store4x4_sse2(const __m128i *const in,
                                           uint8_t *const dest,
                                           const int stride) {
  const __m128i zero = _mm_setzero_si128();
  __m128i d[2];

  // Reconstruction and Store
  d[0] = _mm_cvtsi32_si128(*(const int *)(dest));
  d[1] = _mm_cvtsi32_si128(*(const int *)(dest + stride * 3));
  d[0] = _mm_unpacklo_epi32(d[0],
                            _mm_cvtsi32_si128(*(const int *)(dest + stride)));
  d[1] = _mm_unpacklo_epi32(
      _mm_cvtsi32_si128(*(const int *)(dest + stride * 2)), d[1]);
  d[0] = _mm_unpacklo_epi8(d[0], zero);
  d[1] = _mm_unpacklo_epi8(d[1], zero);
  d[0] = _mm_add_epi16(d[0], in[0]);
  d[1] = _mm_add_epi16(d[1], in[1]);
  d[0] = _mm_packus_epi16(d[0], d[1]);

  *(int *)dest = _mm_cvtsi128_si32(d[0]);
  d[0] = _mm_srli_si128(d[0], 4);
  *(int *)(dest + stride) = _mm_cvtsi128_si32(d[0]);
  d[0] = _mm_srli_si128(d[0], 4);
  *(int *)(dest + stride * 2) = _mm_cvtsi128_si32(d[0]);
  d[0] = _mm_srli_si128(d[0], 4);
  *(int *)(dest + stride * 3) = _mm_cvtsi128_si32(d[0]);
}

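// In-place transform stages operating on rows of coefficients; definitions
// live in the companion inv_txfm_sse2.c.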
void idct4_sse2(__m128i *in);
void idct8_sse2(__m128i *in);
void idct16_sse2(__m128i *in0, __m128i *in1);
void iadst4_sse2(__m128i *in);
void iadst8_sse2(__m128i *in);
void iadst16_sse2(__m128i *in0, __m128i *in1);

#endif  // VPX_DSP_X86_INV_TXFM_SSE2_H_