} vpx_cpu_t;
#if defined(__GNUC__) && __GNUC__ || defined(__ANDROID__)
-#if ARCH_X86_64
+#if VPX_ARCH_X86_64
#define cpuid(func, func2, ax, bx, cx, dx) \
__asm__ __volatile__("cpuid \n\t" \
: "=a"(ax), "=b"(bx), "=c"(cx), "=d"(dx) \
#endif
#elif defined(__SUNPRO_C) || \
defined(__SUNPRO_CC) /* end __GNUC__ or __ANDROID__ */
-#if ARCH_X86_64
+#if VPX_ARCH_X86_64
#define cpuid(func, func2, ax, bx, cx, dx) \
asm volatile( \
"xchg %rsi, %rbx \n\t" \
: "a"(func), "c"(func2));
#endif
#else /* end __SUNPRO__ */
-#if ARCH_X86_64
+#if VPX_ARCH_X86_64
#if defined(_MSC_VER) && _MSC_VER > 1500
#define cpuid(func, func2, a, b, c, d) \
do { \
#define HAS_AVX2 0x080
#define HAS_AVX512 0x100
#ifndef BIT
-#define BIT(n) (1u << n)
+#define BIT(n) (1u << (n))
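+// Parenthesizing the argument keeps lower-precedence operators intact; e.g.,
+// BIT(f ? 27 : 28) expands to (1u << (f ? 27 : 28)) rather than
+// ((1u << f) ? 27 : 28).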
#endif
static INLINE int x86_simd_caps(void) {
unsigned int flags = 0;
- unsigned int mask = ~0;
+ unsigned int mask = ~0u;
unsigned int max_cpuid_val, reg_eax, reg_ebx, reg_ecx, reg_edx;
char *env;
(void)reg_ebx;
// bits 27 (OSXSAVE) & 28 (256-bit AVX)
if ((reg_ecx & (BIT(27) | BIT(28))) == (BIT(27) | BIT(28))) {
+ // Check for OS-support of YMM state. Necessary for AVX and AVX2.
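+ // 0x6 = XCR0 bit 1 (SSE/XMM state) | bit 2 (AVX/YMM state).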
if ((xgetbv() & 0x6) == 0x6) {
flags |= HAS_AVX;
// bits 16 (AVX-512F) & 17 (AVX-512DQ) & 28 (AVX-512CD) &
// 30 (AVX-512BW) & 31 (AVX-512VL)
if ((reg_ebx & (BIT(16) | BIT(17) | BIT(28) | BIT(30) | BIT(31))) ==
- (BIT(16) | BIT(17) | BIT(28) | BIT(30) | BIT(31)))
- flags |= HAS_AVX512;
+ (BIT(16) | BIT(17) | BIT(28) | BIT(30) | BIT(31))) {
+ // Check for OS-support of ZMM and YMM state. Necessary for AVX-512.
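+ // 0xe6 = XCR0 bits 1 (SSE), 2 (AVX) and 5-7 (opmask/ZMM state).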
+ if ((xgetbv() & 0xe6) == 0xe6) flags |= HAS_AVX512;
+ }
}
}
}
+ (void)reg_eax; // Avoid compiler warning on unused-but-set variable.
+
return flags & mask;
}
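+
+// Example (illustrative sketch, not part of this change): callers can gate
+// SIMD code paths on the returned flag bits, e.g.
+//
+//   if (x86_simd_caps() & HAS_AVX2) {
+//     // dispatch to an AVX2 code path
+//   }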
-// Note:
-// 32-bit CPU cycle counter is light-weighted for most function performance
-// measurement. For large function (CPU time > a couple of seconds), 64-bit
-// counter should be used.
-// 32-bit CPU cycle counter
+// Fine-Grain Measurement Functions
+//
+// If you are timing a small region of code, access the timestamp counter
+// (TSC) via:
+//
+// unsigned int start = x86_tsc_start();
+// ...
+// unsigned int end = x86_tsc_end();
+// unsigned int diff = end - start;
+//
+// The start/end functions introduce a few more instructions than using
+// x86_readtsc directly, but prevent the CPU's out-of-order execution from
+// affecting the measurement (which would otherwise let earlier or later
+// instructions be evaluated inside the timed interval). See the white paper,
+// "How to Benchmark Code Execution Times on Intel® IA-32 and IA-64
+// Instruction Set Architectures" by Gabriele Paoloni for more information.
+//
+// If you are timing a large function (CPU time > a couple of seconds), use
+// x86_readtsc64 to read the timestamp counter in a 64-bit integer. The
+// out-of-order leakage that can occur is minimal compared to total runtime.
static INLINE unsigned int x86_readtsc(void) {
#if defined(__GNUC__) && __GNUC__
unsigned int tsc;
asm volatile("rdtsc\n\t" : "=a"(tsc) :);
return tsc;
#else
-#if ARCH_X86_64
+#if VPX_ARCH_X86_64
return (unsigned int)__rdtsc();
#else
__asm rdtsc;
asm volatile("rdtsc\n\t" : "=a"(lo), "=d"(hi));
return ((uint64_t)hi << 32) | lo;
#else
-#if ARCH_X86_64
+#if VPX_ARCH_X86_64
return (uint64_t)__rdtsc();
#else
__asm rdtsc;
#endif
}
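+
+// Example (illustrative sketch, not part of this change; encode_frame() is a
+// hypothetical long-running callee): timing with the 64-bit counter.
+//
+//   uint64_t start = x86_readtsc64();
+//   encode_frame();
+//   uint64_t cycles = x86_readtsc64() - start;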
+// 32-bit CPU cycle counter with a partial fence against out-of-order execution.
+static INLINE unsigned int x86_readtscp(void) {
+#if defined(__GNUC__) && __GNUC__
+ unsigned int tscp;
+ __asm__ __volatile__("rdtscp\n\t" : "=a"(tscp) :);
+ return tscp;
+#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
+ unsigned int tscp;
+ asm volatile("rdtscp\n\t" : "=a"(tscp) :);
+ return tscp;
+#elif defined(_MSC_VER)
+ unsigned int ui;
+ return (unsigned int)__rdtscp(&ui);
+#else
+#if VPX_ARCH_X86_64
+ unsigned int ui;
+ return (unsigned int)__rdtscp(&ui); // __rdtscp takes an IA32_TSC_AUX out-pointer.
+#else
+ __asm rdtscp;
+#endif
+#endif
+}
+
+static INLINE unsigned int x86_tsc_start(void) {
+ unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx;
+ cpuid(0, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
+ // Avoid compiler warnings on unused-but-set variables.
+ (void)reg_eax;
+ (void)reg_ebx;
+ (void)reg_ecx;
+ (void)reg_edx;
+ return x86_readtsc();
+}
+
+static INLINE unsigned int x86_tsc_end(void) {
+ uint32_t v = x86_readtscp();
+ unsigned int reg_eax, reg_ebx, reg_ecx, reg_edx;
+ cpuid(0, 0, reg_eax, reg_ebx, reg_ecx, reg_edx);
+ // Avoid compiler warnings on unused-but-set variables.
+ (void)reg_eax;
+ (void)reg_ebx;
+ (void)reg_ecx;
+ (void)reg_edx;
+ return v;
+}
+
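+// Example measurement loop (illustrative sketch, not part of this change;
+// work() is a hypothetical function under test). Taking the minimum over many
+// runs discards noise from interrupts and cache misses:
+//
+//   unsigned int best = ~0u;
+//   int i;
+//   for (i = 0; i < 1000; ++i) {
+//     unsigned int start = x86_tsc_start();
+//     work();
+//     unsigned int cycles = x86_tsc_end() - start;
+//     if (cycles < best) best = cycles;
+//   }
+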
#if defined(__GNUC__) && __GNUC__
#define x86_pause_hint() __asm__ __volatile__("pause \n\t")
#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
#define x86_pause_hint() asm volatile("pause \n\t")
#else
-#if ARCH_X86_64
+#if VPX_ARCH_X86_64
#define x86_pause_hint() _mm_pause();
#else
#define x86_pause_hint() __asm pause
asm volatile("fstcw %0\n\t" : "=m"(*&mode) :);
return mode;
}
-#elif ARCH_X86_64
+#elif VPX_ARCH_X86_64
/* No fldcw intrinsics on Windows x64, punt to external asm */
extern void vpx_winx64_fldcw(unsigned short mode);
extern unsigned short vpx_winx64_fstcw(void);