bool ContainsNoDuplicateCall;
bool HasReturn;
bool HasIndirectBr;
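+ /// True if the callee contains a call to the llvm.frameescape intrinsic.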
+ bool HasFrameEscape;
/// Number of bytes allocated statically by the callee.
uint64_t AllocatedSize;
IsCallerRecursive(false), IsRecursiveCall(false),
ExposesReturnsTwice(false), HasDynamicAlloca(false),
ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
- AllocatedSize(0), NumInstructions(0), NumVectorInstructions(0),
- FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
- NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
- NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
- NumInstructionsSimplified(0), SROACostSavings(0),
- SROACostSavingsLost(0) {}
+ HasFrameEscape(false), AllocatedSize(0), NumInstructions(0),
+ NumVectorInstructions(0), FiftyPercentVectorBonus(0),
+ TenPercentVectorBonus(0), VectorBonus(0), NumConstantArgs(0),
+ NumConstantOffsetPtrArgs(0), NumAllocaArgs(0), NumConstantPtrCmps(0),
+ NumConstantPtrDiffs(0), NumInstructionsSimplified(0),
+ SROACostSavings(0), SROACostSavingsLost(0) {}
bool analyzeCall(CallSite CS);
case Intrinsic::memmove:
// SROA can usually chew through these intrinsics, but they aren't free.
return false;
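+ // The inliner cannot currently preserve the frame layout that
+ // llvm.framerecover depends on, so record the escape here and let the
+ // uninlinable-pattern check below reject the function.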
+ case Intrinsic::frameescape:
+ HasFrameEscape = true;
+ return false;
}
}
// If visiting this instruction detected an uninlinable pattern, abort.
if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
- HasIndirectBr)
+ HasIndirectBr || HasFrameEscape)
return false;
// If the caller is a recursive function then we don't want to inline
// functions which allocate a lot of stack space because it would increase
// the caller stack usage dramatically.
// Analyze the cost of this block. If we blow through the threshold, this
// returns false, and we can bail out.
if (!analyzeBlock(BB, EphValues)) {
if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
- HasIndirectBr)
+ HasIndirectBr || HasFrameEscape)
return false;
// If the caller is a recursive function then we don't want to inline
// functions which allocate a lot of stack space because it would increase
// the caller stack usage dramatically.
// Disallow calls which expose returns-twice to a function not previously
// attributed as such.
if (!ReturnsTwice && CS.isCall() &&
cast<CallInst>(CS.getInstruction())->canReturnTwice())
return false;
+
+ // Disallow inlining functions that call @llvm.frameescape. Doing this
+ // correctly would require major changes to the inliner.
+ if (CS.getCalledFunction() &&
+ CS.getCalledFunction()->getIntrinsicID() ==
+ llvm::Intrinsic::frameescape)
+ return false;
}
}
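The new regression test below (PR23216) exercises both paths: the cost model must refuse to inline @bar, and even the alwaysinline attribute must not force @bar_alwaysinline in, because @llvm.framerecover computes addresses from the callee's own frame layout, which inlining would destroy.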
--- /dev/null
+; RUN: opt -inline -S < %s | FileCheck %s
+
+; PR23216: We can't inline functions using llvm.frameescape.
+
+declare void @llvm.frameescape(...)
+declare i8* @llvm.frameaddress(i32)
+declare i8* @llvm.framerecover(i8*, i8*, i32)
+
+define internal void @foo(i8* %fp) {
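+  ; Recover @bar's escaped alloca (index 0) from @bar's frame pointer and
+  ; store through it.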
+ %a.i8 = call i8* @llvm.framerecover(i8* bitcast (i32 ()* @bar to i8*), i8* %fp, i32 0)
+ %a = bitcast i8* %a.i8 to i32*
+ store i32 42, i32* %a
+ ret void
+}
+
+define internal i32 @bar() {
+entry:
+ %a = alloca i32
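+  ; Escape %a at index 0 so that @foo can locate it via llvm.framerecover.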
+ call void (...)* @llvm.frameescape(i32* %a)
+ %fp = call i8* @llvm.frameaddress(i32 0)
+ tail call void @foo(i8* %fp)
+ %r = load i32, i32* %a
+ ret i32 %r
+}
+
+; We even bail when someone marks it alwaysinline.
+define internal i32 @bar_alwaysinline() alwaysinline {
+entry:
+ %a = alloca i32
+ call void (...)* @llvm.frameescape(i32* %a)
+ tail call void @foo(i8* null)
+ ret i32 0
+}
+
+define i32 @bazz() {
+entry:
+ %r = tail call i32 @bar()
+ %r1 = tail call i32 @bar_alwaysinline()
+ ret i32 %r
+}
+
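+; Neither call may be inlined away; both must survive in @bazz.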
+; CHECK: define i32 @bazz()
+; CHECK: call i32 @bar()
+; CHECK: call i32 @bar_alwaysinline()