/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
11 #include "./vpx_dsp_rtcd.h"
12 #include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
13 #include "vpx_dsp/x86/inv_txfm_sse2.h"
14 #include "vpx_dsp/x86/transpose_sse2.h"
15 #include "vpx_dsp/x86/txfm_common_sse2.h"
17 static INLINE __m128i dct_const_round_shift_4_sse2(const __m128i in0,
19 const __m128i t0 = _mm_unpacklo_epi32(in0, in1); // 0, 1
20 const __m128i t1 = _mm_unpackhi_epi32(in0, in1); // 2, 3
21 const __m128i t2 = _mm_unpacklo_epi64(t0, t1); // 0, 1, 2, 3
22 return dct_const_round_shift_sse2(t2);
25 static INLINE __m128i wraplow_16bit_sse2(const __m128i in0, const __m128i in1,
26 const __m128i rounding) {
28 temp[0] = _mm_add_epi32(in0, rounding);
29 temp[1] = _mm_add_epi32(in1, rounding);
30 temp[0] = _mm_srai_epi32(temp[0], 4);
31 temp[1] = _mm_srai_epi32(temp[1], 4);
32 return _mm_packs_epi32(temp[0], temp[1]);
35 static INLINE void highbd_idct4_small_sse2(__m128i *const io) {
36 const __m128i cospi_p16_p16 = _mm_setr_epi32(cospi_16_64, 0, cospi_16_64, 0);
37 const __m128i cospi_p08_p08 = _mm_setr_epi32(cospi_8_64, 0, cospi_8_64, 0);
38 const __m128i cospi_p24_p24 = _mm_setr_epi32(cospi_24_64, 0, cospi_24_64, 0);
39 __m128i temp1[4], temp2[4], step[4];
41 transpose_32bit_4x4(&io[0], &io[1], &io[2], &io[3]);
43 // Note: There is no 32-bit signed multiply SIMD instruction in SSE2.
44 // _mm_mul_epu32() is used which can only guarantee the lower 32-bit
45 // (signed) result is meaningful, which is enough in this function.
48 temp1[0] = _mm_add_epi32(io[0], io[2]); // input[0] + input[2]
49 temp2[0] = _mm_sub_epi32(io[0], io[2]); // input[0] - input[2]
50 temp1[1] = _mm_srli_si128(temp1[0], 4); // 1, 3
51 temp2[1] = _mm_srli_si128(temp2[0], 4); // 1, 3
52 temp1[0] = _mm_mul_epu32(temp1[0], cospi_p16_p16); // ([0] + [2])*cospi_16_64
53 temp1[1] = _mm_mul_epu32(temp1[1], cospi_p16_p16); // ([0] + [2])*cospi_16_64
54 temp2[0] = _mm_mul_epu32(temp2[0], cospi_p16_p16); // ([0] - [2])*cospi_16_64
55 temp2[1] = _mm_mul_epu32(temp2[1], cospi_p16_p16); // ([0] - [2])*cospi_16_64
56 step[0] = dct_const_round_shift_4_sse2(temp1[0], temp1[1]);
57 step[1] = dct_const_round_shift_4_sse2(temp2[0], temp2[1]);
59 temp1[3] = _mm_srli_si128(io[1], 4);
60 temp2[3] = _mm_srli_si128(io[3], 4);
61 temp1[0] = _mm_mul_epu32(io[1], cospi_p24_p24); // input[1] * cospi_24_64
62 temp1[1] = _mm_mul_epu32(temp1[3], cospi_p24_p24); // input[1] * cospi_24_64
63 temp2[0] = _mm_mul_epu32(io[1], cospi_p08_p08); // input[1] * cospi_8_64
64 temp2[1] = _mm_mul_epu32(temp1[3], cospi_p08_p08); // input[1] * cospi_8_64
65 temp1[2] = _mm_mul_epu32(io[3], cospi_p08_p08); // input[3] * cospi_8_64
66 temp1[3] = _mm_mul_epu32(temp2[3], cospi_p08_p08); // input[3] * cospi_8_64
67 temp2[2] = _mm_mul_epu32(io[3], cospi_p24_p24); // input[3] * cospi_24_64
68 temp2[3] = _mm_mul_epu32(temp2[3], cospi_p24_p24); // input[3] * cospi_24_64
69 temp1[0] = _mm_sub_epi64(temp1[0], temp1[2]); // [1]*cospi_24 - [3]*cospi_8
70 temp1[1] = _mm_sub_epi64(temp1[1], temp1[3]); // [1]*cospi_24 - [3]*cospi_8
71 temp2[0] = _mm_add_epi64(temp2[0], temp2[2]); // [1]*cospi_8 + [3]*cospi_24
72 temp2[1] = _mm_add_epi64(temp2[1], temp2[3]); // [1]*cospi_8 + [3]*cospi_24
73 step[2] = dct_const_round_shift_4_sse2(temp1[0], temp1[1]);
74 step[3] = dct_const_round_shift_4_sse2(temp2[0], temp2[1]);
77 io[0] = _mm_add_epi32(step[0], step[3]); // step[0] + step[3]
78 io[1] = _mm_add_epi32(step[1], step[2]); // step[1] + step[2]
79 io[2] = _mm_sub_epi32(step[1], step[2]); // step[1] - step[2]
80 io[3] = _mm_sub_epi32(step[0], step[3]); // step[0] - step[3]
83 static INLINE void abs_extend_64bit_sse2(const __m128i in,
84 __m128i *const out /*out[2]*/,
85 __m128i *const sign /*sign[2]*/) {
86 sign[0] = _mm_srai_epi32(in, 31);
87 out[0] = _mm_xor_si128(in, sign[0]);
88 out[0] = _mm_sub_epi32(out[0], sign[0]);
89 sign[1] = _mm_unpackhi_epi32(sign[0], sign[0]); // 64-bit sign of 2, 3
90 sign[0] = _mm_unpacklo_epi32(sign[0], sign[0]); // 64-bit sign of 0, 1
91 out[1] = _mm_unpackhi_epi32(out[0], out[0]); // 2, 3
92 out[0] = _mm_unpacklo_epi32(out[0], out[0]); // 0, 1
95 static INLINE __m128i multiply_apply_sign_sse2(const __m128i in,
97 const __m128i cospi) {
98 __m128i out = _mm_mul_epu32(in, cospi);
99 out = _mm_xor_si128(out, sign);
100 return _mm_sub_epi64(out, sign);
103 static INLINE __m128i dct_const_round_shift_64bit_sse2(const __m128i in) {
104 const __m128i t = _mm_add_epi64(
106 _mm_setr_epi32(DCT_CONST_ROUNDING << 2, 0, DCT_CONST_ROUNDING << 2, 0));
107 return _mm_srli_si128(t, 2);
110 static INLINE __m128i pack_4_sse2(const __m128i in0, const __m128i in1) {
111 const __m128i t0 = _mm_unpacklo_epi32(in0, in1); // 0, 2
112 const __m128i t1 = _mm_unpackhi_epi32(in0, in1); // 1, 3
113 return _mm_unpacklo_epi32(t0, t1); // 0, 1, 2, 3
116 static INLINE void highbd_idct4_large_sse2(__m128i *const io) {
117 const __m128i cospi_p16_p16 =
118 _mm_setr_epi32(cospi_16_64 << 2, 0, cospi_16_64 << 2, 0);
119 const __m128i cospi_p08_p08 =
120 _mm_setr_epi32(cospi_8_64 << 2, 0, cospi_8_64 << 2, 0);
121 const __m128i cospi_p24_p24 =
122 _mm_setr_epi32(cospi_24_64 << 2, 0, cospi_24_64 << 2, 0);
123 __m128i temp1[4], temp2[4], step[4], sign1[4], sign2[4];
125 transpose_32bit_4x4(&io[0], &io[1], &io[2], &io[3]);
128 temp1[0] = _mm_add_epi32(io[0], io[2]); // input[0] + input[2]
129 temp2[0] = _mm_sub_epi32(io[0], io[2]); // input[0] - input[2]
130 abs_extend_64bit_sse2(temp1[0], temp1, sign1);
131 abs_extend_64bit_sse2(temp2[0], temp2, sign2);
132 temp1[0] = multiply_apply_sign_sse2(temp1[0], sign1[0], cospi_p16_p16);
133 temp1[1] = multiply_apply_sign_sse2(temp1[1], sign1[1], cospi_p16_p16);
134 temp2[0] = multiply_apply_sign_sse2(temp2[0], sign2[0], cospi_p16_p16);
135 temp2[1] = multiply_apply_sign_sse2(temp2[1], sign2[1], cospi_p16_p16);
136 temp1[0] = dct_const_round_shift_64bit_sse2(temp1[0]);
137 temp1[1] = dct_const_round_shift_64bit_sse2(temp1[1]);
138 temp2[0] = dct_const_round_shift_64bit_sse2(temp2[0]);
139 temp2[1] = dct_const_round_shift_64bit_sse2(temp2[1]);
140 step[0] = pack_4_sse2(temp1[0], temp1[1]);
141 step[1] = pack_4_sse2(temp2[0], temp2[1]);
143 abs_extend_64bit_sse2(io[1], temp1, sign1);
144 abs_extend_64bit_sse2(io[3], temp2, sign2);
145 temp1[2] = multiply_apply_sign_sse2(temp1[0], sign1[0], cospi_p08_p08);
146 temp1[3] = multiply_apply_sign_sse2(temp1[1], sign1[1], cospi_p08_p08);
147 temp1[0] = multiply_apply_sign_sse2(temp1[0], sign1[0], cospi_p24_p24);
148 temp1[1] = multiply_apply_sign_sse2(temp1[1], sign1[1], cospi_p24_p24);
149 temp2[2] = multiply_apply_sign_sse2(temp2[0], sign2[0], cospi_p24_p24);
150 temp2[3] = multiply_apply_sign_sse2(temp2[1], sign2[1], cospi_p24_p24);
151 temp2[0] = multiply_apply_sign_sse2(temp2[0], sign2[0], cospi_p08_p08);
152 temp2[1] = multiply_apply_sign_sse2(temp2[1], sign2[1], cospi_p08_p08);
153 temp1[0] = _mm_sub_epi64(temp1[0], temp2[0]); // [1]*cospi_24 - [3]*cospi_8
154 temp1[1] = _mm_sub_epi64(temp1[1], temp2[1]); // [1]*cospi_24 - [3]*cospi_8
155 temp2[0] = _mm_add_epi64(temp1[2], temp2[2]); // [1]*cospi_8 + [3]*cospi_24
156 temp2[1] = _mm_add_epi64(temp1[3], temp2[3]); // [1]*cospi_8 + [3]*cospi_24
157 temp1[0] = dct_const_round_shift_64bit_sse2(temp1[0]);
158 temp1[1] = dct_const_round_shift_64bit_sse2(temp1[1]);
159 temp2[0] = dct_const_round_shift_64bit_sse2(temp2[0]);
160 temp2[1] = dct_const_round_shift_64bit_sse2(temp2[1]);
161 step[2] = pack_4_sse2(temp1[0], temp1[1]);
162 step[3] = pack_4_sse2(temp2[0], temp2[1]);
165 io[0] = _mm_add_epi32(step[0], step[3]); // step[0] + step[3]
166 io[1] = _mm_add_epi32(step[1], step[2]); // step[1] + step[2]
167 io[2] = _mm_sub_epi32(step[1], step[2]); // step[1] - step[2]
168 io[3] = _mm_sub_epi32(step[0], step[3]); // step[0] - step[3]
171 void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint16_t *dest,
172 int stride, int bd) {
173 int16_t max = 0, min = 0;
174 __m128i io[4], io_short[2];
176 io[0] = _mm_load_si128((const __m128i *)(input + 0));
177 io[1] = _mm_load_si128((const __m128i *)(input + 4));
178 io[2] = _mm_load_si128((const __m128i *)(input + 8));
179 io[3] = _mm_load_si128((const __m128i *)(input + 12));
181 io_short[0] = _mm_packs_epi32(io[0], io[1]);
182 io_short[1] = _mm_packs_epi32(io[2], io[3]);
185 __m128i max_input, min_input;
187 max_input = _mm_max_epi16(io_short[0], io_short[1]);
188 min_input = _mm_min_epi16(io_short[0], io_short[1]);
189 max_input = _mm_max_epi16(max_input, _mm_srli_si128(max_input, 8));
190 min_input = _mm_min_epi16(min_input, _mm_srli_si128(min_input, 8));
191 max_input = _mm_max_epi16(max_input, _mm_srli_si128(max_input, 4));
192 min_input = _mm_min_epi16(min_input, _mm_srli_si128(min_input, 4));
193 max_input = _mm_max_epi16(max_input, _mm_srli_si128(max_input, 2));
194 min_input = _mm_min_epi16(min_input, _mm_srli_si128(min_input, 2));
195 max = _mm_extract_epi16(max_input, 0);
196 min = _mm_extract_epi16(min_input, 0);
199 if (bd == 8 || (max < 4096 && min >= -4096)) {
200 idct4_sse2(io_short);
201 idct4_sse2(io_short);
202 io_short[0] = _mm_add_epi16(io_short[0], _mm_set1_epi16(8));
203 io_short[1] = _mm_add_epi16(io_short[1], _mm_set1_epi16(8));
204 io[0] = _mm_srai_epi16(io_short[0], 4);
205 io[1] = _mm_srai_epi16(io_short[1], 4);
207 if (max < 32767 && min > -32768) {
208 highbd_idct4_small_sse2(io);
209 highbd_idct4_small_sse2(io);
211 highbd_idct4_large_sse2(io);
212 highbd_idct4_large_sse2(io);
214 io[0] = wraplow_16bit_sse2(io[0], io[1], _mm_set1_epi32(8));
215 io[1] = wraplow_16bit_sse2(io[2], io[3], _mm_set1_epi32(8));
218 // Reconstruction and Store
220 __m128i d0 = _mm_loadl_epi64((const __m128i *)dest);
221 __m128i d2 = _mm_loadl_epi64((const __m128i *)(dest + stride * 2));
222 d0 = _mm_unpacklo_epi64(d0,
223 _mm_loadl_epi64((const __m128i *)(dest + stride)));
224 d2 = _mm_unpacklo_epi64(
225 d2, _mm_loadl_epi64((const __m128i *)(dest + stride * 3)));
226 d0 = clamp_high_sse2(_mm_adds_epi16(d0, io[0]), bd);
227 d2 = clamp_high_sse2(_mm_adds_epi16(d2, io[1]), bd);
229 _mm_storel_epi64((__m128i *)dest, d0);
231 d0 = _mm_srli_si128(d0, 8);
232 _mm_storel_epi64((__m128i *)(dest + stride), d0);
234 _mm_storel_epi64((__m128i *)(dest + stride * 2), d2);
236 d2 = _mm_srli_si128(d2, 8);
237 _mm_storel_epi64((__m128i *)(dest + stride * 3), d2);
241 void vpx_highbd_idct4x4_1_add_sse2(const tran_low_t *input, uint16_t *dest,
242 int stride, int bd) {
243 const __m128i zero = _mm_setzero_si128();
244 // Faster than _mm_set1_epi16((1 << bd) - 1).
245 const __m128i one = _mm_set1_epi16(1);
246 const __m128i max = _mm_sub_epi16(_mm_slli_epi16(one, bd), one);
251 out = HIGHBD_WRAPLOW(dct_const_round_shift(input[0] * cospi_16_64), bd);
252 out = HIGHBD_WRAPLOW(dct_const_round_shift(out * cospi_16_64), bd);
253 a1 = ROUND_POWER_OF_TWO(out, 4);
254 dc = _mm_set1_epi16(a1);
256 for (i = 0; i < 4; ++i) {
257 d = _mm_loadl_epi64((const __m128i *)dest);
258 d = add_dc_clamp(&zero, &max, &dc, &d);
259 _mm_storel_epi64((__m128i *)dest, d);