#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
-#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Passes.h"
return NeedsProtector;
}
+bool StackProtector::HasAddressTaken(const Instruction *AI) {
+  for (const User *U : AI->users()) {
+    if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
+      // The address is taken only when the alloca is the store's *value*
+      // operand; being the pointer operand merely writes into the slot.
+      if (AI == SI->getValueOperand())
+        return true;
+    } else if (const PtrToIntInst *PI = dyn_cast<PtrToIntInst>(U)) {
+      // Converting the address to an integer exposes it.
+      if (AI == PI->getOperand(0))
+        return true;
+    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
+      // Passing the address to a call counts as taking it, but ignore
+      // debug-info intrinsics and lifetime markers, which never lower to
+      // real code. TODO: Use isLoweredToCall().
+      if (!isa<DbgInfoIntrinsic>(CI) && !CI->isLifetimeStartOrEnd())
+        return true;
+    } else if (isa<InvokeInst>(U)) {
+      return true;
+    } else if (const SelectInst *SI = dyn_cast<SelectInst>(U)) {
+      // Selects, PHIs, GEPs, and bitcasts merely propagate the pointer;
+      // recurse to see whether the address escapes through their users.
+      if (HasAddressTaken(SI))
+        return true;
+    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
+      // Remember which PHI nodes we have already visited so that a cycle
+      // of PHIs does not cause infinite recursion.
+      if (VisitedPHIs.insert(PN).second)
+        if (HasAddressTaken(PN))
+          return true;
+    } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
+      if (HasAddressTaken(GEP))
+        return true;
+    } else if (const BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
+      if (HasAddressTaken(BI))
+        return true;
+    }
+  }
+  return false;
+}
+
/// Search for the first call to the llvm.stackprotector intrinsic and return it
/// if present.
static const CallInst *findStackProtectorIntrinsic(Function &F) {
          continue;
        }
-        if (Strong && PointerMayBeCaptured(AI,
-                                           /* ReturnCaptures */ false,
-                                           /* StoreCaptures */ true)) {
+        if (Strong && HasAddressTaken(AI)) {
          ++NumAddrTaken;
          Layout.insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
          ORE.emit([&]() {
+++ /dev/null
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -mtriple=x86_64-pc-linux-gnu -stack-protector < %s | FileCheck %s
-; Bug 42238: Test some situations missed by old, custom capture tracking.
-
-define void @store_captures() #0 {
-; CHECK-LABEL: @store_captures(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
-; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
-; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[J:%.*]] = alloca i32*, align 8
-; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
-; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
-; CHECK-NEXT: store i32* [[A]], i32** [[J]], align 8
-; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
-; CHECK-NEXT: br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
-; CHECK: SP_return:
-; CHECK-NEXT: ret void
-; CHECK: CallStackCheckFailBlk:
-; CHECK-NEXT: call void @__stack_chk_fail()
-; CHECK-NEXT: unreachable
-;
-entry:
- %retval = alloca i32, align 4
- %a = alloca i32, align 4
- %j = alloca i32*, align 8
- store i32 0, i32* %retval
- %load = load i32, i32* %a, align 4
- %add = add nsw i32 %load, 1
- store i32 %add, i32* %a, align 4
- store i32* %a, i32** %j, align 8
- ret void
-}
-
-define i32* @return_captures() #0 {
-; CHECK-LABEL: @return_captures(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[J:%.*]] = alloca i32*, align 8
-; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
-; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
-; CHECK-NEXT: ret i32* [[A]]
-;
-entry:
- %retval = alloca i32, align 4
- %a = alloca i32, align 4
- %j = alloca i32*, align 8
- store i32 0, i32* %retval
- %load = load i32, i32* %a, align 4
- %add = add nsw i32 %load, 1
- store i32 %add, i32* %a, align 4
- ret i32* %a
-}
-
-define void @store_addrspacecast_captures() #0 {
-; CHECK-LABEL: @store_addrspacecast_captures(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
-; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
-; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[J:%.*]] = alloca i32 addrspace(1)*, align 8
-; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
-; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
-; CHECK-NEXT: [[A_ADDRSPACECAST:%.*]] = addrspacecast i32* [[A]] to i32 addrspace(1)*
-; CHECK-NEXT: store i32 addrspace(1)* [[A_ADDRSPACECAST]], i32 addrspace(1)** [[J]], align 8
-; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: [[TMP0:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
-; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP0]]
-; CHECK-NEXT: br i1 [[TMP1]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
-; CHECK: SP_return:
-; CHECK-NEXT: ret void
-; CHECK: CallStackCheckFailBlk:
-; CHECK-NEXT: call void @__stack_chk_fail()
-; CHECK-NEXT: unreachable
-;
-entry:
- %retval = alloca i32, align 4
- %a = alloca i32, align 4
- %j = alloca i32 addrspace(1)*, align 8
- store i32 0, i32* %retval
- %load = load i32, i32* %a, align 4
- %add = add nsw i32 %load, 1
- store i32 %add, i32* %a, align 4
- %a.addrspacecast = addrspacecast i32* %a to i32 addrspace(1)*
- store i32 addrspace(1)* %a.addrspacecast, i32 addrspace(1)** %j, align 8
- ret void
-}
-
-define void @cmpxchg_captures() #0 {
-; CHECK-LABEL: @cmpxchg_captures(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[STACKGUARDSLOT:%.*]] = alloca i8*
-; CHECK-NEXT: [[STACKGUARD:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: call void @llvm.stackprotector(i8* [[STACKGUARD]], i8** [[STACKGUARDSLOT]])
-; CHECK-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
-; CHECK-NEXT: [[J:%.*]] = alloca i32*, align 8
-; CHECK-NEXT: store i32 0, i32* [[RETVAL]]
-; CHECK-NEXT: [[LOAD:%.*]] = load i32, i32* [[A]], align 4
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i32 [[LOAD]], 1
-; CHECK-NEXT: store i32 [[ADD]], i32* [[A]], align 4
-; CHECK-NEXT: [[TMP0:%.*]] = cmpxchg i32** [[J]], i32* [[A]], i32* null seq_cst monotonic
-; CHECK-NEXT: [[STACKGUARD1:%.*]] = load volatile i8*, i8* addrspace(257)* inttoptr (i32 40 to i8* addrspace(257)*)
-; CHECK-NEXT: [[TMP1:%.*]] = load volatile i8*, i8** [[STACKGUARDSLOT]]
-; CHECK-NEXT: [[TMP2:%.*]] = icmp eq i8* [[STACKGUARD1]], [[TMP1]]
-; CHECK-NEXT: br i1 [[TMP2]], label [[SP_RETURN:%.*]], label [[CALLSTACKCHECKFAILBLK:%.*]], !prof !0
-; CHECK: SP_return:
-; CHECK-NEXT: ret void
-; CHECK: CallStackCheckFailBlk:
-; CHECK-NEXT: call void @__stack_chk_fail()
-; CHECK-NEXT: unreachable
-;
-entry:
- %retval = alloca i32, align 4
- %a = alloca i32, align 4
- %j = alloca i32*, align 8
- store i32 0, i32* %retval
- %load = load i32, i32* %a, align 4
- %add = add nsw i32 %load, 1
- store i32 %add, i32* %a, align 4
-
- cmpxchg i32** %j, i32* %a, i32* null seq_cst monotonic
- ret void
-}
-
-attributes #0 = { sspstrong }
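
Note: the deleted test above (Bug 42238) exists precisely because the restored
HasAddressTaken misses some escapes that PointerMayBeCaptured finds. As a
minimal sketch (reduced from the @cmpxchg_captures function above; not part of
the patch itself): the address of %a below escapes through a cmpxchg operand,
which matches none of the branches in the restored if-chain, so with this
change the alloca would no longer be marked SSPLK_AddrOf under sspstrong,
whereas the deleted CHECK lines show the capture-based analysis inserted a
stack protector for it.

; Sketch only: an address escape via cmpxchg that HasAddressTaken does not see.
define void @cmpxchg_escape() sspstrong {
entry:
  %a = alloca i32, align 4
  %j = alloca i32*, align 8
  ; Publishes %a's address into %j, but the user is an AtomicCmpXchgInst,
  ; which the restored if-chain never inspects.
  cmpxchg i32** %j, i32* %a, i32* null seq_cst monotonic
  ret void
}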