1 /*****************************************************************************
2 * util.h: x86 inline asm
3 *****************************************************************************
4 * Copyright (C) 2008-2020 x264 project
6 * Authors: Fiona Glaser <fiona@x264.com>
7 * Loren Merritt <lorenm@u.washington.edu>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
23 * This program is also available under a commercial proprietary license.
24 * For more information, contact us at licensing@x264.com.
25 *****************************************************************************/
27 #ifndef X264_X86_UTIL_H
28 #define X264_X86_UTIL_H
31 #include <xmmintrin.h>
/* All-zero 128-bit SSE vector constant (compound literal). */
34 #define M128_ZERO ((__m128){0,0,0,0})
/* Generic 128-bit union name maps to the SSE variant in this configuration. */
35 #define x264_union128_t x264_union128_sse_t
/* Type-punning view of one 128-bit value at 8/16/32/64-bit granularity.
 * NOTE(review): MAY_ALIAS presumably expands to __attribute__((may_alias))
 * (declared elsewhere in the project) so these accesses are exempt from
 * strict-aliasing — confirm against the project's common headers. */
36 typedef union { __m128 i; uint64_t q[2]; uint32_t d[4]; uint16_t w[8]; uint8_t b[16]; } MAY_ALIAS x264_union128_sse_t;
/* GCC generic vector type: four 32-bit lanes, 16 bytes total. */
38 typedef uint32_t v4si __attribute__((vector_size (16)));
42 #if HAVE_X86_INLINE_ASM && HAVE_MMX
/* Branchless median of three packed int16 motion vectors using the classic
 * min/max network: median(a,b,c) = max( min(a,b), min( max(a,b), c ) ).
 * Writes the 32-bit {mvx,mvy} result through dst.
 *
 * NOTE(review): this excerpt is missing lines (the asm( opener, the initial
 * movd loads of *a/*b/*c into mm0/mm1/mm2, the store of the result, and the
 * enclosing braces). The comments below describe only the visible
 * instructions; register/input pairing (mm0=a, mm1=b, mm2=c) is inferred
 * from the operand order and should be confirmed against the full file. */
44 #define x264_median_mv x264_median_mv_mmx2
45 static ALWAYS_INLINE void x264_median_mv_mmx2( int16_t *dst, int16_t *a, int16_t *b, int16_t *c )
50 "movq %%mm0, %%mm3 \n" /* mm3 = copy of mm0 (first input) */
52 "pmaxsw %%mm1, %%mm0 \n" /* mm0 = max(a,b) per int16 lane */
53 "pminsw %%mm3, %%mm1 \n" /* mm1 = min(a,b) */
54 "pminsw %%mm2, %%mm0 \n" /* mm0 = min(max(a,b), c) */
55 "pmaxsw %%mm1, %%mm0 \n" /* mm0 = median(a,b,c) */
57 :"=m"(*(x264_union32_t*)dst)
58 :"m"(M32( a )), "m"(M32( b )), "m"(M32( c ))
59 :"mm0", "mm1", "mm2", "mm3"
/* Sum of absolute differences between consecutive motion-vector predictors:
 * returns sum over i of |mvc[i]-mvc[i+1]| (x and y components), accumulated
 * with saturating 16-bit adds and reduced to a scalar via pmaddwd with a
 * vector of ones plus a high/low dword add.
 *
 * NOTE(review): this excerpt is missing lines (function braces, the local
 * `sum` declaration, the asm( opener, loop labels/branches, and the final
 * movd of mm4 into `sum` / return). Comments cover visible code only. */
63 #define x264_predictor_difference x264_predictor_difference_mmx2
64 static ALWAYS_INLINE int x264_predictor_difference_mmx2( int16_t (*mvc)[2], intptr_t i_mvc )
/* 4 x int16 ones: multiplier for the pmaddwd horizontal reduction below. */
67 static const uint64_t pw_1 = 0x0001000100010001ULL;
70 "pxor %%mm4, %%mm4 \n" /* mm4 = 0 (running accumulator) */
73 "movd -8(%2,%1,4), %%mm0 \n" /* 32-bit load: one mv pair (tail/odd case) */
74 "movd -4(%2,%1,4), %%mm3 \n" /* next mv pair */
75 "psubw %%mm3, %%mm0 \n" /* component-wise difference */
80 "movq -8(%2,%1,4), %%mm0 \n" /* 64-bit load: two mv pairs at once */
81 "psubw -4(%2,%1,4), %%mm0 \n" /* difference against the overlapping pair */
84 "pxor %%mm2, %%mm2 \n"
85 "psubw %%mm0, %%mm2 \n" /* mm2 = -diff */
86 "pmaxsw %%mm2, %%mm0 \n" /* mm0 = |diff| = max(diff, -diff) */
87 "paddusw %%mm0, %%mm4 \n" /* accumulate with unsigned saturation */
89 "pmaddwd %4, %%mm4 \n" /* 4 x uint16 -> 2 x uint32 partial sums (x pw_1) */
90 "pshufw $14, %%mm4, %%mm0 \n" /* bring high dword down ($14 = 0b00001110) */
91 "paddd %%mm0, %%mm4 \n" /* final scalar sum in low dword of mm4 */
93 :"=r"(sum), "+r"(i_mvc)
94 :"r"(mvc), "m"(M64( mvc )), "m"(pw_1)
95 :"mm0", "mm2", "mm3", "mm4", "cc"
/* Per-component CABAC mvd context derivation: for each byte lane, add the
 * left and top |mvd| values with unsigned saturation, clamp to 33, then map
 * to a small context value by counting how many of the thresholds {2, 32}
 * the sum exceeds (0, 1 or 2), built branchlessly from pcmpgtb masks.
 *
 * NOTE(review): this excerpt is missing lines (function braces, the movd
 * loads of mvdleft/mvdtop into mm0/mm1, the output operand, the movd of the
 * result and the return). Comments cover visible code only. */
100 #define x264_cabac_mvd_sum x264_cabac_mvd_sum_mmx2
101 static ALWAYS_INLINE uint16_t x264_cabac_mvd_sum_mmx2(uint8_t *mvdleft, uint8_t *mvdtop)
/* Byte-lane threshold constants used by the compares below. */
103 static const uint64_t pb_2 = 0x0202020202020202ULL;
104 static const uint64_t pb_32 = 0x2020202020202020ULL;
105 static const uint64_t pb_33 = 0x2121212121212121ULL;
110 "paddusb %%mm1, %%mm0 \n" /* sum = left + top, saturating per byte */
111 "pminub %5, %%mm0 \n" /* clamp sum to 33 */
112 "pxor %%mm2, %%mm2 \n" /* mm2 = 0 (result accumulator) */
113 "movq %%mm0, %%mm1 \n" /* keep a copy for the second compare */
114 "pcmpgtb %3, %%mm0 \n" /* mask: sum > 2 (all-ones per lane) */
115 "pcmpgtb %4, %%mm1 \n" /* mask: sum > 32 */
116 "psubb %%mm0, %%mm2 \n" /* result += (sum > 2)  : subtracting -1 adds 1 */
117 "psubb %%mm1, %%mm2 \n" /* result += (sum > 32) */
120 :"m"(M16( mvdleft )),"m"(M16( mvdtop )),
121 "m"(pb_2),"m"(pb_32),"m"(pb_33)
/* Clip each candidate motion vector in mvc[0..i_mvc-1] to the fullpel range
 * given by mv_limit (converted to subpel via the <<2 below), storing the
 * survivors contiguously in dst. Candidates equal to pmv or to the zero mv
 * are dropped (per the inline comments, the compacted count is maintained
 * via the pmovmskb/shr/sub arithmetic). Returns the number of vectors kept.
 *
 * NOTE(review): this excerpt is missing lines (function braces, the asm(
 * opener, the loads of pmv into mm3 and pd_32 into mm4, the numeric labels
 * 1:/2:/3: referenced by the visible jz/jg, the loop increment/branch, and
 * the final count extraction / return). Comments cover visible code only;
 * the two-at-a-time main loop vs. single final iteration split is inferred
 * from the "last iteration" comments already present. */
127 #define x264_predictor_clip x264_predictor_clip_mmx2
128 static ALWAYS_INLINE int x264_predictor_clip_mmx2( int16_t (*dst)[2], int16_t (*mvc)[2], int i_mvc, int16_t mv_limit[2][2], uint32_t pmv )
/* Shift count (32) used by psrlq to discard a skipped first mv in a pair. */
130 static const uint32_t pd_32 = 0x20;
/* tmp aliases mv_limit for the limit load; i counts vectors written out. */
131 intptr_t tmp = (intptr_t)mv_limit, mvc_max = i_mvc, i = 0;
134 "movq (%2), %%mm5 \n" /* mm5 = {min_x,min_y,max_x,max_y} limits */
136 "psllw $2, %%mm5 \n" // Convert to subpel
137 "pshufw $0xEE, %%mm5, %%mm6 \n" /* mm6 = broadcast of the max pair */
139 "jz 2f \n" // if( i_mvc == 1 ) {do the last iteration}
140 "punpckldq %%mm3, %%mm3 \n" /* duplicate pmv into both dwords */
141 "punpckldq %%mm5, %%mm5 \n" /* duplicate min pair likewise */
143 "lea (%0,%3,4), %3 \n" /* %3 = end pointer for the loop */
/* Main loop: process two mvs (one movq) per iteration. */
145 "movq (%0), %%mm0 \n"
147 "movq %%mm3, %%mm1 \n"
148 "pxor %%mm2, %%mm2 \n"
149 "pcmpeqd %%mm0, %%mm1 \n" // mv == pmv
150 "pcmpeqd %%mm0, %%mm2 \n" // mv == 0
151 "por %%mm1, %%mm2 \n" // (mv == pmv || mv == 0) * -1
152 "pmovmskb %%mm2, %k2 \n" // (mv == pmv || mv == 0) * 0xf
153 "pmaxsw %%mm5, %%mm0 \n" /* clamp to lower limit */
154 "pminsw %%mm6, %%mm0 \n" /* clamp to upper limit */
155 "pand %%mm4, %%mm2 \n" // (mv0 == pmv || mv0 == 0) * 32
156 "psrlq %%mm2, %%mm0 \n" // drop mv0 if it's skipped
157 "movq %%mm0, (%5,%4,4) \n" /* store (possibly compacted) pair to dst */
161 "shr $4, %k2 \n" // (4-val)>>1
162 "sub %2, %4 \n" // +1 for each valid motion vector
165 "jg 3f \n" // if( i == i_mvc - 1 ) {do the last iteration}
167 /* Do the last iteration */
169 "movd (%0), %%mm0 \n" /* single remaining mv */
170 "pxor %%mm2, %%mm2 \n"
171 "pcmpeqd %%mm0, %%mm3 \n"
172 "pcmpeqd %%mm0, %%mm2 \n"
173 "por %%mm3, %%mm2 \n"
174 "pmovmskb %%mm2, %k2 \n"
175 "pmaxsw %%mm5, %%mm0 \n"
176 "pminsw %%mm6, %%mm0 \n"
177 "movd %%mm0, (%5,%4,4) \n"
180 "sub %2, %4 \n" // output += !(mv == pmv || mv == 0)
182 :"+r"(mvc), "=m"(M64( dst )), "+r"(tmp), "+r"(mvc_max), "+r"(i)
183 :"r"(dst), "g"(pmv), "m"(pd_32), "m"(M64( mvc ))
184 :"mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "cc"
189 /* Same as the above, except we do (mv + 2) >> 2 on the input. */
/* NOTE(review): this excerpt is missing lines (function braces, asm( opener,
 * loads of pmv/pd_32/pw_2 into mm3/mm4/mm7, the loop labels and branches,
 * the psraw $2 shift implied by the rounding comment above, and everything
 * after the final clobber list — the definition runs past the visible end
 * of this chunk). Only the visible code is annotated; the structure mirrors
 * x264_predictor_clip_mmx2 above with an extra round step per load. */
190 #define x264_predictor_roundclip x264_predictor_roundclip_mmx2
191 static ALWAYS_INLINE int x264_predictor_roundclip_mmx2( int16_t (*dst)[2], int16_t (*mvc)[2], int i_mvc, int16_t mv_limit[2][2], uint32_t pmv )
/* Rounding bias of 2 per int16 lane, added before the >>2 conversion. */
193 static const uint64_t pw_2 = 0x0002000200020002ULL;
/* Shift count (32) used by psrlq to discard a skipped first mv in a pair. */
194 static const uint32_t pd_32 = 0x20;
195 intptr_t tmp = (intptr_t)mv_limit, mvc_max = i_mvc, i = 0;
198 "movq (%2), %%mm5 \n" /* mm5 = mv_limit pairs */
201 "pshufw $0xEE, %%mm5, %%mm6 \n" /* mm6 = broadcast of the max pair */
204 "punpckldq %%mm3, %%mm3 \n" /* duplicate pmv into both dwords */
205 "punpckldq %%mm5, %%mm5 \n" /* duplicate min pair likewise */
207 "lea (%0,%3,4), %3 \n" /* %3 = end pointer for the loop */
/* Main loop: two mvs per iteration, rounded then clipped. */
209 "movq (%0), %%mm0 \n"
211 "paddw %%mm7, %%mm0 \n" /* + pw_2 rounding bias (mm7 preset; not shown) */
213 "movq %%mm3, %%mm1 \n"
214 "pxor %%mm2, %%mm2 \n"
215 "pcmpeqd %%mm0, %%mm1 \n" /* mv == pmv */
216 "pcmpeqd %%mm0, %%mm2 \n" /* mv == 0 */
217 "por %%mm1, %%mm2 \n"
218 "pmovmskb %%mm2, %k2 \n"
219 "pmaxsw %%mm5, %%mm0 \n" /* clamp to lower limit */
220 "pminsw %%mm6, %%mm0 \n" /* clamp to upper limit */
221 "pand %%mm4, %%mm2 \n"
222 "psrlq %%mm2, %%mm0 \n" /* drop first mv of the pair if skipped */
223 "movq %%mm0, (%5,%4,4) \n"
233 /* Do the last iteration */
235 "movd (%0), %%mm0 \n"
236 "paddw %%mm7, %%mm0 \n" /* rounding bias for the final mv */
238 "pxor %%mm2, %%mm2 \n"
239 "pcmpeqd %%mm0, %%mm3 \n"
240 "pcmpeqd %%mm0, %%mm2 \n"
241 "por %%mm3, %%mm2 \n"
242 "pmovmskb %%mm2, %k2 \n"
243 "pmaxsw %%mm5, %%mm0 \n"
244 "pminsw %%mm6, %%mm0 \n"
245 "movd %%mm0, (%5,%4,4) \n"
250 :"+r"(mvc), "=m"(M64( dst )), "+r"(tmp), "+r"(mvc_max), "+r"(i)
251 :"r"(dst), "m"(pw_2), "g"(pmv), "m"(pd_32), "m"(M64( mvc ))
252 :"mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", "cc"