Make __shiftleft128 / __shiftright128 real compiler built-ins.
author    Nico Weber <nicolasweber@gmx.de>
          Fri, 17 Aug 2018 17:19:06 +0000 (17:19 +0000)
committer Nico Weber <nicolasweber@gmx.de>
          Fri, 17 Aug 2018 17:19:06 +0000 (17:19 +0000)
r337619 added __shiftleft128 / __shiftright128 as inline functions in intrin.h.
Microsoft's STL plans to use these functions, and it includes intrin0.h, which
has only declarations of built-ins, so that the standard library headers don't
pull in the huge intrin.h header. That requires these functions to be real
built-ins.

https://reviews.llvm.org/D50907

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@340048 91177308-0d34-0410-b5e6-96231b3b80d8
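
For illustration (not part of this change), here is a minimal sketch of the
intrin0.h-style pattern the message describes: a translation unit that only
declares the two intrinsics and never includes intrin.h. It assumes an x86_64
MS-compatible build (e.g. clang-cl or -fms-extensions); the helper functions
and shift count are arbitrary. With this patch, clang recognizes the declared
names as built-ins and lowers the calls itself.

/* Declarations only, mirroring what an intrin0.h-style header provides. */
unsigned __int64 __shiftleft128(unsigned __int64 __l, unsigned __int64 __h,
                                unsigned char __d);
unsigned __int64 __shiftright128(unsigned __int64 __l, unsigned __int64 __h,
                                 unsigned char __d);

unsigned __int64 high_bits_after_left_shift(unsigned __int64 l,
                                            unsigned __int64 h) {
  /* Treat h:l as one 128-bit value, shift it left by 3, and
     return the resulting high 64 bits. */
  return __shiftleft128(l, h, 3);
}

unsigned __int64 low_bits_after_right_shift(unsigned __int64 l,
                                            unsigned __int64 h) {
  /* Same 128-bit value shifted right by 3; returns the low 64 bits. */
  return __shiftright128(l, h, 3);
}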

include/clang/Basic/BuiltinsX86_64.def
lib/CodeGen/CGBuiltin.cpp
lib/Headers/intrin.h
test/CodeGen/ms-x86-intrinsics.c
test/Headers/ms-intrin.cpp

diff --git a/include/clang/Basic/BuiltinsX86_64.def b/include/clang/Basic/BuiltinsX86_64.def
index cc400c0697f9119937049a34373d1817d5522dab..a9aaadca8489a2bff6d85f92b9ddea19ba93b1b4 100644
--- a/include/clang/Basic/BuiltinsX86_64.def
+++ b/include/clang/Basic/BuiltinsX86_64.def
@@ -31,6 +31,8 @@ TARGET_HEADER_BUILTIN(_mul128, "LLiLLiLLiLLi*",      "nch",   "intrin.h", ALL_MS
 TARGET_HEADER_BUILTIN(_umul128, "ULLiULLiULLiULLi*", "nch",   "intrin.h", ALL_MS_LANGUAGES, "")
 
 TARGET_HEADER_BUILTIN(__faststorefence, "v", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__shiftleft128, "ULLiULLiULLiUc", "nch", "intrin.h", ALL_MS_LANGUAGES, "")
+TARGET_HEADER_BUILTIN(__shiftright128, "ULLiULLiULLiUc", "nch", "intrin.h", ALL_MS_LANGUAGES, "")
 
 TARGET_HEADER_BUILTIN(_InterlockedAnd64,         "LLiLLiD*LLi", "nh", "intrin.h", ALL_MS_LANGUAGES, "")
 TARGET_HEADER_BUILTIN(_InterlockedDecrement64,   "LLiLLiD*",    "nh", "intrin.h", ALL_MS_LANGUAGES, "")
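
For readers unfamiliar with the Builtins .def encoding, the signature string
spells out the C prototype: ULLi is unsigned long long and Uc is unsigned
char, so the two new entries decode to the shape below (a decoding sketch;
the "nch" and "intrin.h" fields roughly mean nothrow, const, and that the
built-in must be declared via the named header before use).

/* "ULLiULLiULLiUc" decoded: return type first, then the three parameters. */
unsigned long long __shiftleft128(unsigned long long /* low   */,
                                  unsigned long long /* high  */,
                                  unsigned char      /* shift */);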
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 22a9c2f2758ac9ddc5ac8266bb20085da61a4840..5b8a272d58941dff8197f6e3b270a22f49ec5590 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -10440,6 +10440,27 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
                                llvm::SyncScope::System);
   }
+  case X86::BI__shiftleft128:
+  case X86::BI__shiftright128: {
+    // FIXME: Once fshl/fshr no longer add an unneeded and and cmov, do this:
+    // llvm::Function *F = CGM.getIntrinsic(
+    //   BuiltinID == X86::BI__shiftleft128 ? Intrinsic::fshl : Intrinsic::fshr,
+    //   Int64Ty);
+    // Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
+    // return Builder.CreateCall(F, Ops);
+    llvm::Type *Int128Ty = Builder.getInt128Ty();
+    Value *Val = Builder.CreateOr(
+        Builder.CreateShl(Builder.CreateZExt(Ops[1], Int128Ty), 64),
+        Builder.CreateZExt(Ops[0], Int128Ty));
+    Value *Amt = Builder.CreateAnd(Builder.CreateZExt(Ops[2], Int128Ty),
+                                   llvm::ConstantInt::get(Int128Ty, 0x3f));
+    Value *Res;
+    if (BuiltinID == X86::BI__shiftleft128)
+      Res = Builder.CreateLShr(Builder.CreateShl(Val, Amt), 64);
+    else
+      Res = Builder.CreateLShr(Val, Amt);
+    return Builder.CreateTrunc(Res, Int64Ty);
+  }
   case X86::BI_ReadWriteBarrier:
   case X86::BI_ReadBarrier:
   case X86::BI_WriteBarrier: {
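
To make that sequence concrete: the new case zero-extends both halves to
i128, combines them, masks the shift amount to 6 bits, shifts, and truncates
back to i64. Below is a standalone trace of those steps on arbitrary sample
values (illustrative only; it uses unsigned __int128 in place of the i128 IR
type, so it assumes a 64-bit GCC/Clang host).

#include <assert.h>

int main(void) {
  unsigned long long l = 0xFF00000000000000ULL; /* Ops[0]: low half  */
  unsigned long long h = 0x0000000000000001ULL; /* Ops[1]: high half */
  unsigned char d = 8;                          /* Ops[2]: shift     */

  /* Val = (zext h << 64) | zext l;  Amt = zext d & 0x3f */
  unsigned __int128 val = ((unsigned __int128)h << 64) | l;
  unsigned __int128 amt = d & 0x3f;

  /* __shiftleft128 path: shift left, keep the high 64 bits. */
  assert((unsigned long long)((val << amt) >> 64) == 0x00000000000001FFULL);

  /* __shiftright128 path: shift right, keep the low 64 bits. */
  assert((unsigned long long)(val >> amt) == 0x01FF000000000000ULL);
  return 0;
}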
diff --git a/lib/Headers/intrin.h b/lib/Headers/intrin.h
index 91914214e29940f35f0dc4c32585ad4dabe62372..edb947eef659236b5dc49e1236bdb502a6182447 100644
--- a/lib/Headers/intrin.h
+++ b/lib/Headers/intrin.h
@@ -863,20 +863,6 @@ __nop(void) {
   __asm__ volatile ("nop");
 }
 #endif
-#if defined(__x86_64__)
-static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-__shiftleft128(unsigned __int64 __l, unsigned __int64 __h, unsigned char __d) {
-  unsigned __int128 __val = ((unsigned __int128)__h << 64) | __l;
-  unsigned __int128 __res = __val << (__d & 63);
-  return (unsigned __int64)(__res >> 64);
-}
-static __inline__ unsigned __int64 __DEFAULT_FN_ATTRS
-__shiftright128(unsigned __int64 __l, unsigned __int64 __h, unsigned char __d) {
-  unsigned __int128 __val = ((unsigned __int128)__h << 64) | __l;
-  unsigned __int128 __res = __val >> (__d & 63);
-  return (unsigned __int64)__res;
-}
-#endif
 
 /*----------------------------------------------------------------------------*\
 |* Privileged intrinsics
diff --git a/test/CodeGen/ms-x86-intrinsics.c b/test/CodeGen/ms-x86-intrinsics.c
index 450a134131beebc743dd89b438f261206b064a94..bd8f5fce237a1bdb7846b2e50b505ec11a3d784f 100644
--- a/test/CodeGen/ms-x86-intrinsics.c
+++ b/test/CodeGen/ms-x86-intrinsics.c
@@ -130,4 +130,34 @@ unsigned __int64 test_umul128(unsigned __int64 Multiplier,
 // CHECK-X64: = mul nuw i128 %
 // CHECK-X64: store i64 %
 // CHECK-X64: ret i64 %
-#endif
+
+unsigned __int64 test__shiftleft128(unsigned __int64 l, unsigned __int64 h,
+                                    unsigned char d) {
+  return __shiftleft128(l, h, d);
+}
+// CHECK-X64-LABEL: define dso_local i64 @test__shiftleft128(i64 %l, i64 %h, i8 %d)
+// CHECK-X64: = zext i64 %h to i128
+// CHECK-X64: = shl nuw i128 %0, 64
+// CHECK-X64: = zext i64 %l to i128
+// CHECK-X64: = or i128 %1, %2
+// CHECK-X64: = and i8 %d, 63
+// CHECK-X64: = shl i128 %
+// CHECK-X64: = lshr i128 %
+// CHECK-X64: = trunc i128 %
+// CHECK-X64: ret i64 %
+
+unsigned __int64 test__shiftright128(unsigned __int64 l, unsigned __int64 h,
+                                     unsigned char d) {
+  return __shiftright128(l, h, d);
+}
+// CHECK-X64-LABEL: define dso_local i64 @test__shiftright128(i64 %l, i64 %h, i8 %d)
+// CHECK-X64: = zext i64 %h to i128
+// CHECK-X64: = shl nuw i128 %
+// CHECK-X64: = zext i64 %l to i128
+// CHECK-X64: = or i128 %
+// CHECK-X64: = and i8 %d, 63
+// CHECK-X64: = lshr i128 %
+// CHECK-X64: = trunc i128 %
+// CHECK-X64: ret i64 %
+
+#endif // defined(__x86_64__)
diff --git a/test/Headers/ms-intrin.cpp b/test/Headers/ms-intrin.cpp
index d8a4d382eb3763714cea2320ff325c487b59e49e..b0fef9cc06a79db1bf7e4fd375d3d48891c58cd1 100644
--- a/test/Headers/ms-intrin.cpp
+++ b/test/Headers/ms-intrin.cpp
@@ -42,8 +42,6 @@ void f() {
   __stosw(0, 0, 0);
 
 #ifdef _M_X64
-  __shiftleft128(1, 2, 3);
-  __shiftright128(1, 2, 3);
   __movsq(0, 0, 0);
   __stosq(0, 0, 0);
 #endif