BUILTIN(__builtin_ia32_fxsave, "vv*", "")
BUILTIN(__builtin_ia32_fxsave64, "vv*", "")
+// XSAVE
+BUILTIN(__builtin_ia32_xsave, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsave64, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xrstor, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xrstor64, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsaveopt, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsaveopt64, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xrstors, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xrstors64, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsavec, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsavec64, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsaves, "vv*ULLi", "")
+BUILTIN(__builtin_ia32_xsaves64, "vv*ULLi", "")
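+// Prototype note: per the Builtins.def encoding, "vv*ULLi" decodes to
+//   void __builtin_ia32_xsave(void *save_area, unsigned long long mask);
+// i.e. 'v' = void return, 'v*' = void pointer, 'ULLi' = unsigned long long.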
+
// ADX
TARGET_BUILTIN(__builtin_ia32_addcarryx_u32, "UcUcUiUiUi*", "", "adx")
TARGET_BUILTIN(__builtin_ia32_addcarryx_u64, "UcUcULLiULLiULLi*", "", "adx")
Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
}
+ case X86::BI__builtin_ia32_xsave:
+ case X86::BI__builtin_ia32_xsave64:
+ case X86::BI__builtin_ia32_xrstor:
+ case X86::BI__builtin_ia32_xrstor64:
+ case X86::BI__builtin_ia32_xsaveopt:
+ case X86::BI__builtin_ia32_xsaveopt64:
+ case X86::BI__builtin_ia32_xrstors:
+ case X86::BI__builtin_ia32_xrstors64:
+ case X86::BI__builtin_ia32_xsavec:
+ case X86::BI__builtin_ia32_xsavec64:
+ case X86::BI__builtin_ia32_xsaves:
+ case X86::BI__builtin_ia32_xsaves64: {
+ Intrinsic::ID ID;
+#define INTRINSIC_X86_XSAVE_ID(NAME) \
+ case X86::BI__builtin_ia32_##NAME: \
+ ID = Intrinsic::x86_##NAME; \
+ break
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ INTRINSIC_X86_XSAVE_ID(xsave);
+ INTRINSIC_X86_XSAVE_ID(xsave64);
+ INTRINSIC_X86_XSAVE_ID(xrstor);
+ INTRINSIC_X86_XSAVE_ID(xrstor64);
+ INTRINSIC_X86_XSAVE_ID(xsaveopt);
+ INTRINSIC_X86_XSAVE_ID(xsaveopt64);
+ INTRINSIC_X86_XSAVE_ID(xrstors);
+ INTRINSIC_X86_XSAVE_ID(xrstors64);
+ INTRINSIC_X86_XSAVE_ID(xsavec);
+ INTRINSIC_X86_XSAVE_ID(xsavec64);
+ INTRINSIC_X86_XSAVE_ID(xsaves);
+ INTRINSIC_X86_XSAVE_ID(xsaves64);
+ }
+#undef INTRINSIC_X86_XSAVE_ID
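+    // The 64-bit mask is passed to the llvm.x86.xsave* intrinsics as two
+    // i32 operands (high word first), mirroring the EDX:EAX operand
+    // encoding of the underlying instructions; in plain C terms:
+    //   unsigned int hi = (unsigned int)(mask >> 32);  /* -> EDX */
+    //   unsigned int lo = (unsigned int)mask;          /* -> EAX */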
+ Value *Mhi = Builder.CreateTrunc(
+ Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, 32)), Int32Ty);
+ Value *Mlo = Builder.CreateTrunc(Ops[1], Int32Ty);
+ Ops[1] = Mhi;
+ Ops.push_back(Mlo);
+ return Builder.CreateCall(CGM.getIntrinsic(ID), Ops);
+ }
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
x86intrin.h
xmmintrin.h
xopintrin.h
+  xsavecintrin.h
+  xsaveintrin.h
+  xsaveoptintrin.h
+  xsavesintrin.h
xtestintrin.h
)
static __inline__
#define _XCR_XFEATURE_ENABLED_MASK 0
unsigned __int64 __cdecl _xgetbv(unsigned int);
-void __cdecl _xrstor(void const *, unsigned __int64);
-void __cdecl _xsave(void *, unsigned __int64);
-void __cdecl _xsaveopt(void *, unsigned __int64);
void __cdecl _xsetbv(unsigned int, unsigned __int64);
/* These additional intrinsics are turned on in x64/amd64/x86_64 mode. */
(unsigned __int128)_Multiplier * (unsigned __int128)_Multiplicand;
return _FullProduct >> 64;
}
-void __cdecl _xrstor64(void const *, unsigned __int64);
-void __cdecl _xsave64(void *, unsigned __int64);
-void __cdecl _xsaveopt64(void *, unsigned __int64);
#endif /* __x86_64__ */
#include <fxsrintrin.h>
+#include <xsaveintrin.h>
+#include <xsaveoptintrin.h>
+#include <xsavecintrin.h>
+#include <xsavesintrin.h>
+
/* Some intrinsics inside adxintrin.h are available only on processors with ADX,
* whereas others are also available at all times. */
#include <adxintrin.h>
--- /dev/null
+/*===---- xsavecintrin.h - XSAVEC intrinsic -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavecintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVECINTRIN_H
+#define __XSAVECINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsavec")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsavec(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsavec64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsavec64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
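At run time, XSAVEC availability is reported by CPUID.(EAX=0DH,ECX=1):EAX bit 1.
A minimal probe sketch, assuming <cpuid.h> provides __get_cpuid_count (present
in GCC 7+ and recent Clang):

#include <cpuid.h>

static int has_xsavec(void) {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid_count(0x0D, 1, &eax, &ebx, &ecx, &edx))
    return 0;
  return (eax >> 1) & 1;  /* CPUID.(EAX=0DH,ECX=1):EAX[1] = XSAVEC */
}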
--- /dev/null
+/*===---- xsaveintrin.h - XSAVE intrinsic ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEINTRIN_H
+#define __XSAVEINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsave")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsave(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor(void *__p, unsigned long long __m) {
+  __builtin_ia32_xrstor(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsave64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsave64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstor64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xrstor64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
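A minimal usage sketch for the wrappers above: per the Intel SDM, the save
area must be 64-byte aligned and large enough for the enabled features (the
4096-byte size here is an assumption); mask bits 0-1 select the x87 and SSE
components, and the file compiles with -mxsave:

#include <immintrin.h>

static char __attribute__((aligned(64))) save_area[4096];

void checkpoint_fp_state(void) {
  _xsave(save_area, 0x3ULL);   /* RFBM bits 0-1: x87 + SSE */
}

void restore_fp_state(void) {
  _xrstor(save_area, 0x3ULL);
}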
--- /dev/null
+/*===---- xsaveoptintrin.h - XSAVEOPT intrinsic ---------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsaveoptintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVEOPTINTRIN_H
+#define __XSAVEOPTINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaveopt")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsaveopt(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaveopt64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsaveopt64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
--- /dev/null
+/*===---- xsavesintrin.h - XSAVES intrinsic -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <xsavesintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+#ifndef __XSAVESINTRIN_H
+#define __XSAVESINTRIN_H
+
+/* Define the default attributes for the functions in this file. */
+#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("xsaves")))
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsaves(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors(void *__p, unsigned long long __m) {
+  __builtin_ia32_xrstors(__p, __m);
+}
+
+#ifdef __x86_64__
+static __inline__ void __DEFAULT_FN_ATTRS
+_xsaves64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xsaves64(__p, __m);
+}
+
+static __inline__ void __DEFAULT_FN_ATTRS
+_xrstors64(void *__p, unsigned long long __m) {
+  __builtin_ia32_xrstors64(__p, __m);
+}
+#endif
+
+#undef __DEFAULT_FN_ATTRS
+
+#endif
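XSAVES and XRSTORS are supervisor instructions (CPL 0 only, per the Intel
SDM), so user-mode calls fault with #GP and the wrappers above matter mainly
for kernel code. A kernel-side sketch, with task_xstate as a hypothetical,
suitably sized buffer:

static char __attribute__((aligned(64))) task_xstate[8192]; /* size assumed */

void save_supervisor_state(void) {
  /* The requested-feature bitmap is masked by XCR0 | IA32_XSS in hardware. */
  _xsaves(task_xstate, ~0ULL);
}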
signed int tmp_i;
unsigned int tmp_Ui;
signed long long tmp_LLi;
-// unsigned long long tmp_ULLi;
+ unsigned long long tmp_ULLi;
float tmp_f;
double tmp_d;
(void)__builtin_ia32_fxsave64(tmp_vp);
(void)__builtin_ia32_fxrstor(tmp_vp);
(void)__builtin_ia32_fxrstor64(tmp_vp);
+
+ (void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);
+ (void)__builtin_ia32_xsave64(tmp_vp, tmp_ULLi);
+ (void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);
+ (void)__builtin_ia32_xrstor64(tmp_vp, tmp_ULLi);
+ (void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);
+ (void)__builtin_ia32_xsaveopt64(tmp_vp, tmp_ULLi);
+ (void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);
+ (void)__builtin_ia32_xrstors64(tmp_vp, tmp_ULLi);
+ (void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);
+ (void)__builtin_ia32_xsavec64(tmp_vp, tmp_ULLi);
+ (void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);
+ (void)__builtin_ia32_xsaves64(tmp_vp, tmp_ULLi);
+
tmp_V4f = __builtin_ia32_cvtpi2ps(tmp_V4f, tmp_V2i);
tmp_V2i = __builtin_ia32_cvtps2pi(tmp_V4f);
tmp_i = __builtin_ia32_cvtss2si(tmp_V4f);
--- /dev/null
+// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=i686-unknown-unknown -target-feature +xsave -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVE
+// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=i686-unknown-unknown -target-feature +xsave -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVE
+
+// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsaveopt -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEOPT
+// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsaveopt -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEOPT
+
+// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsavec -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEC
+// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsavec -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEC
+
+// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsaves -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVES
+// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=i686-unknown-unknown -target-feature +xsave,+xsaves -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVES
+
+void test() {
+  unsigned long long tmp_ULLi;
+  void* tmp_vp;
+
+#ifdef TEST_XSAVE
+// XSAVE: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVE: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVE: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVE: call void @llvm.x86.xsave(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);
+
+// XSAVE: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVE: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVE: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVE: call void @llvm.x86.xrstor(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
+  (void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);
+#endif
+
+#ifdef TEST_XSAVEOPT
+// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEOPT: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEOPT: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEOPT: call void @llvm.x86.xsaveopt(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);
+#endif
+
+#ifdef TEST_XSAVEC
+// XSAVEC: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEC: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEC: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEC: call void @llvm.x86.xsavec(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);
+#endif
+
+#ifdef TEST_XSAVES
+// XSAVES: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVES: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVES: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVES: call void @llvm.x86.xsaves(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);
+
+// XSAVES: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 4
+// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVES: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVES: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVES: call void @llvm.x86.xrstors(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
+  (void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);
+#endif
+}
--- /dev/null
+// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVE
+// RUN: %clang_cc1 %s -DTEST_XSAVE -O0 -triple=x86_64-unknown-unknown -target-feature +xsave -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVE
+
+// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=x86_64-unknown-unknown -target-feature +xsave,+xsaveopt -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEOPT
+// RUN: %clang_cc1 %s -DTEST_XSAVEOPT -O0 -triple=x86_64-unknown-unknown -target-feature +xsave,+xsaveopt -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEOPT
+
+// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=x86_64-unknown-unknown -target-feature +xsave,+xsavec -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEC
+// RUN: %clang_cc1 %s -DTEST_XSAVEC -O0 -triple=x86_64-unknown-unknown -target-feature +xsave,+xsavec -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVEC
+
+// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=x86_64-unknown-unknown -target-feature +xsave,+xsaves -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVES
+// RUN: %clang_cc1 %s -DTEST_XSAVES -O0 -triple=x86_64-unknown-unknown -target-feature +xsave,+xsaves -fno-signed-char -emit-llvm -o - -Werror | FileCheck %s --check-prefix=XSAVES
+
+void test() {
+  unsigned long long tmp_ULLi;
+  void* tmp_vp;
+
+#ifdef TEST_XSAVE
+// XSAVE: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVE: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVE: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVE: call void @llvm.x86.xsave(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsave(tmp_vp, tmp_ULLi);
+
+// XSAVE: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVE: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVE: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVE: call void @llvm.x86.xsave64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
+  (void)__builtin_ia32_xsave64(tmp_vp, tmp_ULLi);
+
+// XSAVE: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVE: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVE: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVE: call void @llvm.x86.xrstor(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
+  (void)__builtin_ia32_xrstor(tmp_vp, tmp_ULLi);
+
+// XSAVE: [[tmp_vp_4:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVE: [[tmp_ULLi_4:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVE: [[high64_4:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_4]], 32
+// XSAVE: [[high32_4:%[0-9a-zA-Z]+]] = trunc i64 [[high64_4]] to i32
+// XSAVE: [[low32_4:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
+// XSAVE: call void @llvm.x86.xrstor64(i8* [[tmp_vp_4]], i32 [[high32_4]], i32 [[low32_4]])
+  (void)__builtin_ia32_xrstor64(tmp_vp, tmp_ULLi);
+#endif
+
+#ifdef TEST_XSAVEOPT
+// XSAVEOPT: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEOPT: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEOPT: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEOPT: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEOPT: call void @llvm.x86.xsaveopt(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsaveopt(tmp_vp, tmp_ULLi);
+
+// XSAVEOPT: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEOPT: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEOPT: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVEOPT: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVEOPT: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVEOPT: call void @llvm.x86.xsaveopt64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
+  (void)__builtin_ia32_xsaveopt64(tmp_vp, tmp_ULLi);
+#endif
+
+#ifdef TEST_XSAVEC
+// XSAVEC: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEC: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVEC: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVEC: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVEC: call void @llvm.x86.xsavec(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsavec(tmp_vp, tmp_ULLi);
+
+// XSAVEC: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVEC: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVEC: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVEC: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVEC: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVEC: call void @llvm.x86.xsavec64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
+  (void)__builtin_ia32_xsavec64(tmp_vp, tmp_ULLi);
+#endif
+
+#ifdef TEST_XSAVES
+// XSAVES: [[tmp_vp_1:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_1:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_1:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_1]], 32
+// XSAVES: [[high32_1:%[0-9a-zA-Z]+]] = trunc i64 [[high64_1]] to i32
+// XSAVES: [[low32_1:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_1]] to i32
+// XSAVES: call void @llvm.x86.xsaves(i8* [[tmp_vp_1]], i32 [[high32_1]], i32 [[low32_1]])
+  (void)__builtin_ia32_xsaves(tmp_vp, tmp_ULLi);
+
+// XSAVES: [[tmp_vp_2:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_2:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_2:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_2]], 32
+// XSAVES: [[high32_2:%[0-9a-zA-Z]+]] = trunc i64 [[high64_2]] to i32
+// XSAVES: [[low32_2:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_2]] to i32
+// XSAVES: call void @llvm.x86.xsaves64(i8* [[tmp_vp_2]], i32 [[high32_2]], i32 [[low32_2]])
+  (void)__builtin_ia32_xsaves64(tmp_vp, tmp_ULLi);
+
+// XSAVES: [[tmp_vp_3:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_3:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_3:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_3]], 32
+// XSAVES: [[high32_3:%[0-9a-zA-Z]+]] = trunc i64 [[high64_3]] to i32
+// XSAVES: [[low32_3:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_3]] to i32
+// XSAVES: call void @llvm.x86.xrstors(i8* [[tmp_vp_3]], i32 [[high32_3]], i32 [[low32_3]])
+  (void)__builtin_ia32_xrstors(tmp_vp, tmp_ULLi);
+
+// XSAVES: [[tmp_vp_4:%[0-9a-zA-Z]+]] = load i8*, i8** %tmp_vp, align 8
+// XSAVES: [[tmp_ULLi_4:%[0-9a-zA-Z]+]] = load i64, i64* %tmp_ULLi, align 8
+// XSAVES: [[high64_4:%[0-9a-zA-Z]+]] = lshr i64 [[tmp_ULLi_4]], 32
+// XSAVES: [[high32_4:%[0-9a-zA-Z]+]] = trunc i64 [[high64_4]] to i32
+// XSAVES: [[low32_4:%[0-9a-zA-Z]+]] = trunc i64 [[tmp_ULLi_4]] to i32
+// XSAVES: call void @llvm.x86.xrstors64(i8* [[tmp_vp_4]], i32 [[high32_4]], i32 [[low32_4]])
+  (void)__builtin_ia32_xrstors64(tmp_vp, tmp_ULLi);
+#endif
+}