From: Piotr Padlewski
Date: Thu, 12 Jul 2018 23:55:20 +0000 (+0000)
Subject: Simplify recursive launder.invariant.group and strip
X-Git-Url: https://granicus.if.org/sourcecode?a=commitdiff_plain;h=674f0a117445cc5c1c8fd0ed51ff2377197ede1f;p=llvm

Simplify recursive launder.invariant.group and strip

Summary:
This patch is crucial for proving equality of laundered/stripped
pointers, e.g.:

  bool foo(A *a) {
    return a == std::launder(a);
  }

Clang with -fstrict-vtable-pointers will emit something like:

  define dso_local zeroext i1 @_Z3fooP1A(%struct.A* %a) {
  entry:
    %c = bitcast %struct.A* %a to i8*
    %call = tail call i8* @llvm.launder.invariant.group.p0i8(i8* %c)
    %0 = bitcast %struct.A* %a to i8*
    %1 = tail call i8* @llvm.strip.invariant.group.p0i8(i8* %0)
    %2 = tail call i8* @llvm.strip.invariant.group.p0i8(i8* %call)
    %cmp = icmp eq i8* %1, %2
    ret i1 %cmp
  }

Because %2 can be replaced with @llvm.strip.invariant.group(%0), and %2
and %1 then produce the same value (strip is readnone), we can replace
the compare with true. A sketch of the folded IR appears after the diff
below.

Reviewers: rsmith, hfinkel, majnemer, amharc, kuhar

Subscribers: llvm-commits, hiraditya

Differential Revision: https://reviews.llvm.org/D47423

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@336963 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index cdf5746bb97..af79851ad36 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1266,6 +1266,40 @@ static Instruction *simplifyMaskedGather(IntrinsicInst &II, InstCombiner &IC) {
   return nullptr;
 }
 
+/// This function transforms launder.invariant.group and strip.invariant.group
+/// like:
+/// launder(launder(%x)) -> launder(%x) (the result is not the argument)
+/// launder(strip(%x)) -> launder(%x)
+/// strip(strip(%x)) -> strip(%x) (the result is not the argument)
+/// strip(launder(%x)) -> strip(%x)
+/// This is legal because it preserves the most recent information about
+/// the presence or absence of invariant.group.
+static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
+                                                    InstCombiner &IC) {
+  auto *Arg = II.getArgOperand(0);
+  auto *StrippedArg = Arg->stripPointerCasts();
+  auto *StrippedInvariantGroupsArg = Arg->stripPointerCastsAndInvariantGroups();
+  if (StrippedArg == StrippedInvariantGroupsArg)
+    return nullptr; // No launders/strips to remove.
+
+  Value *Result = nullptr;
+
+  if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
+    Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
+  else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
+    Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
+  else
+    llvm_unreachable(
+        "simplifyInvariantGroupIntrinsic only handles launder and strip");
+  if (Result->getType()->getPointerAddressSpace() !=
+      II.getType()->getPointerAddressSpace())
+    Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
+  if (Result->getType() != II.getType())
+    Result = IC.Builder.CreateBitCast(Result, II.getType());
+
+  return cast<Instruction>(Result);
+}
+
 static Instruction *simplifyMaskedScatter(IntrinsicInst &II, InstCombiner &IC) {
   // If the mask is all zeros, a scatter does nothing.
   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
@@ -1940,7 +1974,11 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     return simplifyMaskedGather(*II, *this);
   case Intrinsic::masked_scatter:
     return simplifyMaskedScatter(*II, *this);
-
+  case Intrinsic::launder_invariant_group:
+  case Intrinsic::strip_invariant_group:
+    if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
+      return replaceInstUsesWith(*II, SkippedBarrier);
+    break;
   case Intrinsic::powi:
     if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
       // 0 and 1 are handled in instsimplify
diff --git a/test/Analysis/ValueTracking/invariant.group.ll b/test/Analysis/ValueTracking/invariant.group.ll
index 70c32e19172..ca81b9a6aad 100644
--- a/test/Analysis/ValueTracking/invariant.group.ll
+++ b/test/Analysis/ValueTracking/invariant.group.ll
@@ -2,9 +2,8 @@
 
 ; CHECK-LABEL: define void @checkNonnullLaunder()
 define void @checkNonnullLaunder() {
-; CHECK: %p = call i8* @llvm.launder.invariant.group.p0i8(i8* nonnull %0)
-; CHECK: %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* nonnull %p)
-; CHECK: call void @use(i8* nonnull %p2)
+; CHECK: %[[p:.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* nonnull %0)
+; CHECK: call void @use(i8* nonnull %[[p]])
 entry:
   %0 = alloca i8, align 8
 
@@ -17,9 +16,8 @@ entry:
 
 ; CHECK-LABEL: define void @checkNonnullStrip()
 define void @checkNonnullStrip() {
-; CHECK: %p = call i8* @llvm.strip.invariant.group.p0i8(i8* nonnull %0)
-; CHECK: %p2 = call i8* @llvm.strip.invariant.group.p0i8(i8* nonnull %p)
-; CHECK: call void @use(i8* nonnull %p2)
+; CHECK: %[[p:.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* nonnull %0)
+; CHECK: call void @use(i8* nonnull %[[p]])
 entry:
   %0 = alloca i8, align 8
 
diff --git a/test/Transforms/InstCombine/invariant.group.ll b/test/Transforms/InstCombine/invariant.group.ll
index 4d36e706df5..6b79ceb5b2d 100644
--- a/test/Transforms/InstCombine/invariant.group.ll
+++ b/test/Transforms/InstCombine/invariant.group.ll
@@ -1,4 +1,4 @@
-; RUN: opt -instcombine -S < %s | FileCheck %s
+; RUN: opt -instcombine -early-cse -S < %s | FileCheck %s
 
 
 ; CHECK-LABEL: define i8* @simplifyNullLaunder()
@@ -37,10 +37,6 @@ define i8 addrspace(42)* @simplifyUndefLaunder2() {
   ret i8 addrspace(42)* %b2
 }
 
-declare i8* @llvm.launder.invariant.group.p0i8(i8*)
-declare i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)*)
-
-
 ; CHECK-LABEL: define i8* @simplifyNullStrip()
 define i8* @simplifyNullStrip() {
 ; CHECK-NEXT: ret i8* null
@@ -77,7 +73,78 @@ define i8 addrspace(42)* @simplifyUndefStrip2() {
   ret i8 addrspace(42)* %b2
 }
 
+; CHECK-LABEL: define i8* @simplifyLaunderOfLaunder(
+define i8* @simplifyLaunderOfLaunder(i8* %a) {
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
+; CHECK-NOT: llvm.launder.invariant.group
+  %a2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
+  %a3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a2)
+  ret i8* %a3
+}
+
+; CHECK-LABEL: define i8* @simplifyStripOfLaunder(
+define i8* @simplifyStripOfLaunder(i8* %a) {
+; CHECK-NOT: llvm.launder.invariant.group
+; CHECK: call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
+  %a2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
+  %a3 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a2)
+  ret i8* %a3
+}
+
+; CHECK-LABEL: define i1 @simplifyForCompare(
+define i1 @simplifyForCompare(i8* %a) {
+  %a2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
+
+  %a3 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a2)
+  %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
+  %c = icmp eq i8* %a3, %b2
+; CHECK: ret i1 true
+  ret i1 %c
+}
+
+; CHECK-LABEL: define i16* @skipWithDifferentTypes(
+define i16* @skipWithDifferentTypes(i8* %a) {
+  %a2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
+  %c1 = bitcast i8* %a2 to i16*
+
+  ; CHECK: %[[b:.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
+  %a3 = call i16* @llvm.strip.invariant.group.p0i16(i16* %c1)
+  ; CHECK-NEXT: %[[r:.*]] = bitcast i8* %[[b]] to i16*
+  ; CHECK-NEXT: ret i16* %[[r]]
+  ret i16* %a3
+}
+
+; CHECK-LABEL: define i16 addrspace(42)* @skipWithDifferentTypesAddrspace(
+define i16 addrspace(42)* @skipWithDifferentTypesAddrspace(i8 addrspace(42)* %a) {
+  %a2 = call i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)* %a)
+  %c1 = bitcast i8 addrspace(42)* %a2 to i16 addrspace(42)*
+
+  ; CHECK: %[[b:.*]] = call i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)* %a)
+  %a3 = call i16 addrspace(42)* @llvm.strip.invariant.group.p42i16(i16 addrspace(42)* %c1)
+  ; CHECK-NEXT: %[[r:.*]] = bitcast i8 addrspace(42)* %[[b]] to i16 addrspace(42)*
+  ; CHECK-NEXT: ret i16 addrspace(42)* %[[r]]
+  ret i16 addrspace(42)* %a3
+}
+
+; CHECK-LABEL: define i16 addrspace(42)* @skipWithDifferentTypesDifferentAddrspace(
+define i16 addrspace(42)* @skipWithDifferentTypesDifferentAddrspace(i8* %a) {
+  %cast = addrspacecast i8* %a to i8 addrspace(42)*
+  %a2 = call i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)* %cast)
+  %c1 = bitcast i8 addrspace(42)* %a2 to i16 addrspace(42)*
+
+  ; CHECK: %[[b:.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
+  %a3 = call i16 addrspace(42)* @llvm.strip.invariant.group.p42i16(i16 addrspace(42)* %c1)
+  ; CHECK-NEXT: %[[r:.*]] = bitcast i8* %[[b]] to i16*
+  ; CHECK-NEXT: %[[r2:.*]] = addrspacecast i16* %[[r]] to i16 addrspace(42)*
+  ; CHECK-NEXT: ret i16 addrspace(42)* %[[r2]]
+  ret i16 addrspace(42)* %a3
+}
+
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)*)
 declare i8* @llvm.strip.invariant.group.p0i8(i8*)
 declare i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)*)
+declare i16* @llvm.strip.invariant.group.p0i16(i16* %c1)
+declare i16 addrspace(42)* @llvm.strip.invariant.group.p42i16(i16 addrspace(42)* %c1)
 
 attributes #0 = { "null-pointer-is-valid"="true" }
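
For reference, a minimal sketch (illustrative only, not a test added by this patch) of
what the @_Z3fooP1A example from the Summary is expected to fold to under the updated
pipeline (opt -instcombine -early-cse, as in the new RUN line), mirroring the new
@simplifyForCompare test: the compare of the laundered/stripped pointers folds to true.
Whether the now-unused launder/strip calls are also deleted depends on later cleanup,
so only the essential result is shown here:

  ; illustrative sketch: after strip(launder(%a)) is simplified to strip(%a),
  ; the two identical readnone strip calls are CSE'd and icmp eq X, X folds to true
  define dso_local zeroext i1 @_Z3fooP1A(%struct.A* %a) {
  entry:
    ret i1 true
  }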