/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#ifndef VPX_PORTS_X86_H_
#define VPX_PORTS_X86_H_

#include <stdlib.h>

#include "vpx_config.h"
#include "vpx/vpx_integer.h"
typedef enum {
  VPX_CPU_UNKNOWN = -1,
  VPX_CPU_AMD,
  VPX_CPU_AMD_OLD,
  VPX_CPU_CENTAUR,
  VPX_CPU_CYRIX,
  VPX_CPU_INTEL,
  VPX_CPU_NEXGEN,
  VPX_CPU_NSC,
  VPX_CPU_RISE,
  VPX_CPU_SIS,
  VPX_CPU_TRANSMETA,
  VPX_CPU_TRANSMETA_OLD,
  VPX_CPU_UMC,
  VPX_CPU_VIA,

  VPX_CPU_LAST
} vpx_cpu_t;
#if defined(__GNUC__) && __GNUC__ || defined(__ANDROID__)
#if ARCH_X86_64
#define cpuid(func, func2, ax, bx, cx, dx)\
  __asm__ __volatile__ (\
                        "cpuid           \n\t" \
                        : "=a" (ax), "=b" (bx), "=c" (cx), "=d" (dx) \
                        : "a" (func), "c" (func2));
#else
#define cpuid(func, func2, ax, bx, cx, dx)\
  __asm__ __volatile__ (\
                        "mov %%ebx, %%edi   \n\t" \
                        "cpuid              \n\t" \
                        "xchg %%edi, %%ebx  \n\t" \
                        : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
                        : "a" (func), "c" (func2));
#endif
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__*/
#if ARCH_X86_64
#define cpuid(func, func2, ax, bx, cx, dx)\
  asm volatile (\
                "xchg %rsi, %rbx \n\t" \
                "cpuid           \n\t" \
                "movl %ebx, %edi \n\t" \
                "xchg %rsi, %rbx \n\t" \
                : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
                : "a" (func), "c" (func2));
#else
#define cpuid(func, func2, ax, bx, cx, dx)\
  asm volatile (\
                "pushl %ebx       \n\t" \
                "cpuid            \n\t" \
                "movl %ebx, %edi  \n\t" \
                "popl %ebx        \n\t" \
                : "=a" (ax), "=D" (bx), "=c" (cx), "=d" (dx) \
                : "a" (func), "c" (func2));
#endif
#else /* end __SUNPRO__ */
#if ARCH_X86_64
#if defined(_MSC_VER) && _MSC_VER > 1500
void __cpuidex(int CPUInfo[4], int info_type, int ecxvalue);
#pragma intrinsic(__cpuidex)
#define cpuid(func, func2, a, b, c, d) do {\
    int regs[4];\
    __cpuidex(regs, func, func2); \
    a = regs[0];  b = regs[1];  c = regs[2];  d = regs[3];\
  } while(0)
#else
void __cpuid(int CPUInfo[4], int info_type);
#pragma intrinsic(__cpuid)
#define cpuid(func, func2, a, b, c, d) do {\
    int regs[4];\
    __cpuid(regs, func); \
    a = regs[0];  b = regs[1];  c = regs[2];  d = regs[3];\
  } while (0)
#endif
#else
#define cpuid(func, func2, a, b, c, d)\
  __asm mov eax, func\
  __asm mov ecx, func2\
  __asm cpuid\
  __asm mov a, eax\
  __asm mov b, ebx\
  __asm mov c, ecx\
  __asm mov d, edx
#endif
#endif /* end others */
// NaCl has no support for xgetbv or the raw opcode.
#if !defined(__native_client__) && (defined(__i386__) || defined(__x86_64__))
static INLINE uint64_t xgetbv(void) {
  const uint32_t ecx = 0;
  uint32_t eax, edx;
  // Use the raw opcode for xgetbv for compatibility with older toolchains.
  __asm__ volatile (
    ".byte 0x0f, 0x01, 0xd0\n"
    : "=a"(eax), "=d"(edx) : "c" (ecx));
  return ((uint64_t)edx << 32) | eax;
}
#elif (defined(_M_X64) || defined(_M_IX86)) && \
      defined(_MSC_FULL_VER) && _MSC_FULL_VER >= 160040219  // >= VS2010 SP1
#include <immintrin.h>
#define xgetbv() _xgetbv(0)
#elif defined(_MSC_VER) && defined(_M_IX86)
static INLINE uint64_t xgetbv(void) {
  uint32_t eax_, edx_;
  __asm {
    xor ecx, ecx  // ecx = 0
    // Use the raw opcode for xgetbv for compatibility with older toolchains.
    __asm _emit 0x0f __asm _emit 0x01 __asm _emit 0xd0
    mov eax_, eax
    mov edx_, edx
  }
  return ((uint64_t)edx_ << 32) | eax_;
}
#else
#define xgetbv() 0U  // no AVX for older x64 or unrecognized toolchains.
#endif
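
// Note (added sketch, not from the original header): xgetbv(0) reads XCR0.
// Bit 1 indicates the OS saves SSE (XMM) state and bit 2 indicates it saves
// AVX (YMM) state, so a caller checks both before enabling AVX code paths:
//
//   if ((xgetbv() & 0x6) == 0x6) {
//     /* OS preserves YMM registers across context switches; AVX is usable. */
//   }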
#if defined(_MSC_VER) && _MSC_VER >= 1700
#include <windows.h>
#if WINAPI_FAMILY_PARTITION(WINAPI_FAMILY_APP)
#define getenv(x) NULL  // Windows Store apps do not expose the environment.
#endif
#endif
#define HAS_MMX     0x01
#define HAS_SSE     0x02
#define HAS_SSE2    0x04
#define HAS_SSE3    0x08
#define HAS_SSSE3   0x10
#define HAS_SSE4_1  0x20
#define HAS_AVX     0x40
#define HAS_AVX2    0x80
#ifndef BIT
#define BIT(n) (1<<n)
#endif
static INLINE int
x86_simd_caps(void) {
  unsigned int flags = 0;
  unsigned int mask = ~0;
  unsigned int max_cpuid_val, reg_eax, reg_ebx, reg_ecx, reg_edx;
  char *env;
  /* See if the CPU capabilities are being overridden by the environment */
  env = getenv("VPX_SIMD_CAPS");

  if (env && *env)
    return (int)strtol(env, NULL, 0);

  env = getenv("VPX_SIMD_CAPS_MASK");

  if (env && *env)
    mask = (unsigned int)strtoul(env, NULL, 0);
  /* Ensure that the CPUID instruction supports extended features */
  cpuid(0, 0, max_cpuid_val, reg_ebx, reg_ecx, reg_edx);

  if (max_cpuid_val < 1)
    return 0;
  /* Get the standard feature flags */
  cpuid(1, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);

  if (reg_edx & BIT(23)) flags |= HAS_MMX;

  if (reg_edx & BIT(25)) flags |= HAS_SSE; /* aka xmm */

  if (reg_edx & BIT(26)) flags |= HAS_SSE2; /* aka wmt */

  if (reg_ecx & BIT(0)) flags |= HAS_SSE3;

  if (reg_ecx & BIT(9)) flags |= HAS_SSSE3;

  if (reg_ecx & BIT(19)) flags |= HAS_SSE4_1;
  // bits 27 (OSXSAVE) & 28 (256-bit AVX)
  if ((reg_ecx & (BIT(27) | BIT(28))) == (BIT(27) | BIT(28))) {
    if ((xgetbv() & 0x6) == 0x6) {  // OS saves both XMM and YMM state
      flags |= HAS_AVX;
      if (max_cpuid_val >= 7) {
        /* Get the leaf 7 feature flags. Needed to check for AVX2 support */
        cpuid(7, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
        if (reg_ebx & BIT(5)) flags |= HAS_AVX2;
      }
    }
  }

  return flags & mask;
}
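
// Usage sketch (illustrative, not part of the original header): callers
// typically query x86_simd_caps() once and branch on the HAS_* bits.
// some_avx2_function/some_sse2_function are hypothetical names.
//
//   const int caps = x86_simd_caps();
//   if (caps & HAS_AVX2) {
//     some_avx2_function();
//   } else if (caps & HAS_SSE2) {
//     some_sse2_function();
//   }
//
// For testing, setting VPX_SIMD_CAPS=0x07 forces the reported set to exactly
// MMX|SSE|SSE2, while VPX_SIMD_CAPS_MASK clears bits from what the CPU
// actually advertises.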
#if ARCH_X86_64 && defined(_MSC_VER)
unsigned __int64 __rdtsc(void);
#pragma intrinsic(__rdtsc)
#endif
// Note: the 32-bit CPU cycle counter is lightweight and adequate for most
// function performance measurements; for long-running functions (CPU time
// greater than a couple of seconds), the 64-bit counter should be used.
// 32-bit CPU cycle counter
static INLINE unsigned int
x86_readtsc(void) {
#if defined(__GNUC__) && __GNUC__
  unsigned int tsc;
  __asm__ __volatile__("rdtsc\n\t":"=a"(tsc):);
  return tsc;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  unsigned int tsc;
  asm volatile("rdtsc\n\t":"=a"(tsc):);
  return tsc;
#else
#if ARCH_X86_64
  return (unsigned int)__rdtsc();
#else
  __asm rdtsc;
#endif
#endif
}
// 64-bit CPU cycle counter
static INLINE uint64_t
x86_readtsc64(void) {
#if defined(__GNUC__) && __GNUC__
  uint32_t hi, lo;
  __asm__ __volatile__("rdtsc" : "=a"(lo), "=d"(hi));
  return ((uint64_t)hi << 32) | lo;
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
  uint32_t hi, lo;
  asm volatile("rdtsc\n\t" : "=a"(lo), "=d"(hi));
  return ((uint64_t)hi << 32) | lo;
#else
#if ARCH_X86_64
  return (uint64_t)__rdtsc();
#else
  __asm rdtsc;
#endif
#endif
}
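
// Usage sketch (illustrative, not part of the original header): the counters
// above bracket the code being measured; function_under_test is a placeholder.
//
//   const uint64_t start = x86_readtsc64();
//   function_under_test();
//   const uint64_t elapsed_cycles = x86_readtsc64() - start;
//
// x86_readtsc() may be substituted when the measured region is short enough
// that the low 32 bits of the timestamp counter cannot wrap.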
#if defined(__GNUC__) && __GNUC__
#define x86_pause_hint()\
  __asm__ __volatile__ ("pause \n\t")
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#define x86_pause_hint()\
  asm volatile ("pause \n\t")
#else
#if ARCH_X86_64
#define x86_pause_hint()\
  _mm_pause();
#else
#define x86_pause_hint()\
  __asm pause
#endif
#endif
#if defined(__GNUC__) && __GNUC__
static void
x87_set_control_word(unsigned short mode) {
  __asm__ __volatile__("fldcw %0" : : "m"(*&mode));
}
static unsigned short
x87_get_control_word(void) {
  unsigned short mode;
  __asm__ __volatile__("fstcw %0\n\t":"=m"(*&mode):);
  return mode;
}
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
static void
x87_set_control_word(unsigned short mode) {
  asm volatile("fldcw %0" : : "m"(*&mode));
}
static unsigned short
x87_get_control_word(void) {
  unsigned short mode;
  asm volatile("fstcw %0\n\t":"=m"(*&mode):);
  return mode;
}
#elif ARCH_X86_64
/* No fldcw intrinsics on Windows x64, punt to external asm */
extern void           vpx_winx64_fldcw(unsigned short mode);
extern unsigned short vpx_winx64_fstcw(void);
#define x87_set_control_word vpx_winx64_fldcw
#define x87_get_control_word vpx_winx64_fstcw
#else
static void
x87_set_control_word(unsigned short mode) {
  __asm { fldcw mode }
}
static unsigned short
x87_get_control_word(void) {
  unsigned short mode;
  __asm { fstcw mode }
  return mode;
}
#endif
static INLINE unsigned int
x87_set_double_precision(void) {
  unsigned int mode = x87_get_control_word();
  x87_set_control_word((mode&~0x300) | 0x200);
  return mode;
}
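
// Usage sketch (illustrative, not part of the original header): the previous
// control word is returned so the caller can restore it after the
// double-precision section.
//
//   const unsigned int saved_mode = x87_set_double_precision();
//   /* ... x87 code that requires 53-bit (double) precision ... */
//   x87_set_control_word((unsigned short)saved_mode);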
extern void vpx_reset_mmx_state(void);

#endif  // VPX_PORTS_X86_H_