The ``llvm.type.test`` intrinsic tests whether the given pointer is associated
with the given type identifier.
+'``llvm.type.checked.load``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+ declare {i8*, i1} @llvm.type.checked.load(i8* %ptr, i32 %offset, metadata %type) argmemonly nounwind readonly
+
+
+Arguments:
+""""""""""
+
+The first argument is a pointer from which to load a function pointer. The
+second argument is the byte offset from which to load the function pointer. The
+third argument is a metadata object representing a :doc:`type identifier
+<TypeMetadata>`.
+
+Overview:
+"""""""""
+
+The ``llvm.type.checked.load`` intrinsic safely loads a function pointer from a
+virtual table pointer using type metadata. This intrinsic is used to implement
+control flow integrity in conjunction with virtual call optimization. The
+virtual call optimization pass will optimize away ``llvm.type.checked.load``
+intrinsics associated with devirtualized calls, thereby removing the type
+check in cases where it is not needed to enforce the control flow integrity
+constraint.
+
+If the given pointer is associated with a type metadata identifier, this
+function returns true as the second element of its return value. (Note that
+the function may also return true if the given pointer is not associated
+with a type metadata identifier.) If the second element of the return value
+is true, the following rules apply to the first element:
+
+- If the given pointer is associated with the given type metadata identifier,
+ it is the function pointer loaded from the given byte offset from the given
+ pointer.
+
+- If the given pointer is not associated with the given type metadata
+ identifier, it is one of the following (the choice of which is unspecified):
+
+ 1. The function pointer that would have been loaded from an arbitrarily chosen
+ (through an unspecified mechanism) pointer associated with the type
+ metadata.
+
+ 2. If the function has a non-void return type, a pointer to a function that
+ returns an unspecified value without causing side effects.
+
+If the second element of the return value is false, the value of the first
+element is undefined.
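+
+For example, a frontend implementing CFI-protected virtual calls might emit
+IR along these lines (a simplified sketch; the vtable pointer, the offset of
+0 and the ``!"typeid"`` identifier are illustrative):
+
+::
+
+     %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtable, i32 0, metadata !"typeid")
+     %fptr = extractvalue {i8*, i1} %pair, 0
+     %ok = extractvalue {i8*, i1} %pair, 1
+     br i1 %ok, label %call, label %trap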
+
+
'``llvm.donothing``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
/// Given a call to the intrinsic @llvm.type.test, find all devirtualizable
/// call sites based on the call and return them in DevirtCalls.
-void findDevirtualizableCalls(SmallVectorImpl<DevirtCallSite> &DevirtCalls,
- SmallVectorImpl<CallInst *> &Assumes,
- CallInst *CI);
+void findDevirtualizableCallsForTypeTest(
+ SmallVectorImpl<DevirtCallSite> &DevirtCalls,
+ SmallVectorImpl<CallInst *> &Assumes, CallInst *CI);
+
+/// Given a call to the intrinsic @llvm.type.checked.load, find all
+/// devirtualizable call sites based on the call and return them in DevirtCalls.
+void findDevirtualizableCallsForTypeCheckedLoad(
+ SmallVectorImpl<DevirtCallSite> &DevirtCalls,
+ SmallVectorImpl<Instruction *> &LoadedPtrs,
+ SmallVectorImpl<Instruction *> &Preds, bool &HasNonCallUses, CallInst *CI);
}
#endif
def int_type_test : Intrinsic<[llvm_i1_ty], [llvm_ptr_ty, llvm_metadata_ty],
[IntrNoMem]>;
+// Safely loads a function pointer from a virtual table pointer using type
+// metadata.
+def int_type_checked_load : Intrinsic<[llvm_ptr_ty, llvm_i1_ty],
+ [llvm_ptr_ty, llvm_i32_ty, llvm_metadata_ty],
+ [IntrNoMem]>;
+
def int_load_relative: Intrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_anyint_ty],
[IntrReadMem, IntrArgMemOnly]>;
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/TypeMetadataUtils.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
// Search for virtual calls that call FPtr and add them to DevirtCalls.
static void
findCallsAtConstantOffset(SmallVectorImpl<DevirtCallSite> &DevirtCalls,
- Value *FPtr, uint64_t Offset) {
+ bool *HasNonCallUses, Value *FPtr, uint64_t Offset) {
for (const Use &U : FPtr->uses()) {
Value *User = U.getUser();
if (isa<BitCastInst>(User)) {
- findCallsAtConstantOffset(DevirtCalls, User, Offset);
+ findCallsAtConstantOffset(DevirtCalls, HasNonCallUses, User, Offset);
} else if (auto CI = dyn_cast<CallInst>(User)) {
DevirtCalls.push_back({Offset, CI});
} else if (auto II = dyn_cast<InvokeInst>(User)) {
DevirtCalls.push_back({Offset, II});
+ } else if (HasNonCallUses) {
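+    // Any other user is a non-call use of the function pointer; record it so
+    // callers know the pointer may be used in ways we cannot analyze.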
+ *HasNonCallUses = true;
}
}
}
if (isa<BitCastInst>(User)) {
findLoadCallsAtConstantOffset(M, DevirtCalls, User, Offset);
} else if (isa<LoadInst>(User)) {
- findCallsAtConstantOffset(DevirtCalls, User, Offset);
+ findCallsAtConstantOffset(DevirtCalls, nullptr, User, Offset);
} else if (auto GEP = dyn_cast<GetElementPtrInst>(User)) {
// Take into account the GEP offset.
if (VPtr == GEP->getPointerOperand() && GEP->hasAllConstantIndices()) {
}
}
-void llvm::findDevirtualizableCalls(
+void llvm::findDevirtualizableCallsForTypeTest(
SmallVectorImpl<DevirtCallSite> &DevirtCalls,
SmallVectorImpl<CallInst *> &Assumes, CallInst *CI) {
assert(CI->getCalledFunction()->getIntrinsicID() == Intrinsic::type_test);
findLoadCallsAtConstantOffset(M, DevirtCalls,
CI->getArgOperand(0)->stripPointerCasts(), 0);
}
+
+void llvm::findDevirtualizableCallsForTypeCheckedLoad(
+ SmallVectorImpl<DevirtCallSite> &DevirtCalls,
+ SmallVectorImpl<Instruction *> &LoadedPtrs,
+ SmallVectorImpl<Instruction *> &Preds, bool &HasNonCallUses, CallInst *CI) {
+ assert(CI->getCalledFunction()->getIntrinsicID() ==
+ Intrinsic::type_checked_load);
+
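+  // A non-constant offset cannot be mapped to a vtable slot, so conservatively
+  // treat this as a non-call use and give up on devirtualization.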
+ auto *Offset = dyn_cast<ConstantInt>(CI->getArgOperand(1));
+ if (!Offset) {
+ HasNonCallUses = true;
+ return;
+ }
+
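+  // Partition the intrinsic's users: an extractvalue of element 0 yields the
+  // loaded function pointer, element 1 yields the type test result; anything
+  // else counts as a non-call use.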
+ for (Use &U : CI->uses()) {
+ auto CIU = U.getUser();
+ if (auto EVI = dyn_cast<ExtractValueInst>(CIU)) {
+ if (EVI->getNumIndices() == 1 && EVI->getIndices()[0] == 0) {
+ LoadedPtrs.push_back(EVI);
+ continue;
+ }
+ if (EVI->getNumIndices() == 1 && EVI->getIndices()[0] == 1) {
+ Preds.push_back(EVI);
+ continue;
+ }
+ }
+ HasNonCallUses = true;
+ }
+
+ for (Value *LoadedPtr : LoadedPtrs)
+ findCallsAtConstantOffset(DevirtCalls, &HasNonCallUses, LoadedPtr,
+ Offset->getZExtValue());
+}
setHasMetadataHashEntry(false);
}
-
void GlobalObject::setMetadata(unsigned KindID, MDNode *N) {
eraseMetadata(KindID);
if (N)
Value *VTable;
CallSite CS;
+ // If non-null, this field points to the associated unsafe use count stored in
+ // the DevirtModule::NumUnsafeUsesForTypeTest map below. See the description
+ // of that field for details.
+ unsigned *NumUnsafeUses;
+
void replaceAndErase(Value *New) {
CS->replaceAllUsesWith(New);
if (auto II = dyn_cast<InvokeInst>(CS.getInstruction())) {
II->getUnwindDest()->removePredecessor(II->getParent());
}
CS->eraseFromParent();
+ // This use is no longer unsafe.
+ if (NumUnsafeUses)
+ --*NumUnsafeUses;
}
};
MapVector<VTableSlot, std::vector<VirtualCallSite>> CallSlots;
+ // This map keeps track of the number of "unsafe" uses of a loaded function
+ // pointer. The key is the associated llvm.type.test intrinsic call generated
+ // by this pass. An unsafe use is one that calls the loaded function pointer
+ // directly. Every time we eliminate an unsafe use (for example, by
+ // devirtualizing it or by applying virtual constant propagation), we
+ // decrement the value stored in this map. If a value reaches zero, we can
+ // eliminate the type check by RAUWing the associated llvm.type.test call with
+ // true.
+ std::map<CallInst *, unsigned> NumUnsafeUsesForTypeTest;
+
DevirtModule(Module &M)
: M(M), Int8Ty(Type::getInt8Ty(M.getContext())),
Int8PtrTy(Type::getInt8PtrTy(M.getContext())),
Int32Ty(Type::getInt32Ty(M.getContext())) {}
+ void scanTypeTestUsers(Function *TypeTestFunc, Function *AssumeFunc);
+ void scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc);
+
void buildTypeIdentifierMap(
std::vector<VTableBits> &Bits,
DenseMap<Metadata *, std::set<TypeMemberInfo>> &TypeIdMap);
for (auto &&VCallSite : CallSites) {
VCallSite.CS.setCalledFunction(ConstantExpr::getBitCast(
TheFn, VCallSite.CS.getCalledValue()->getType()));
+ // This use is no longer unsafe.
+ if (VCallSite.NumUnsafeUses)
+ --*VCallSite.NumUnsafeUses;
}
return true;
}
NewGV->setSection(B.GV->getSection());
NewGV->setComdat(B.GV->getComdat());
+ // Copy the original vtable's metadata to the anonymous global, adjusting
+ // offsets as required.
+ NewGV->copyMetadata(B.GV, B.Before.Bytes.size());
+
// Build an alias named after the original global, pointing at the second
// element (the original initializer).
auto Alias = GlobalAlias::create(
B.GV->eraseFromParent();
}
-bool DevirtModule::run() {
- Function *TypeTestFunc =
- M.getFunction(Intrinsic::getName(Intrinsic::type_test));
- if (!TypeTestFunc || TypeTestFunc->use_empty())
- return false;
-
- Function *AssumeFunc = M.getFunction(Intrinsic::getName(Intrinsic::assume));
- if (!AssumeFunc || AssumeFunc->use_empty())
- return false;
-
+void DevirtModule::scanTypeTestUsers(Function *TypeTestFunc,
+ Function *AssumeFunc) {
// Find all virtual calls via a virtual table pointer %p under an assumption
// of the form llvm.assume(llvm.type.test(%p, %md)). This indicates that %p
// points to a member of the type identifier %md. Group calls by (type ID,
// Search for virtual calls based on %p and add them to DevirtCalls.
SmallVector<DevirtCallSite, 1> DevirtCalls;
SmallVector<CallInst *, 1> Assumes;
- findDevirtualizableCalls(DevirtCalls, Assumes, CI);
+ findDevirtualizableCallsForTypeTest(DevirtCalls, Assumes, CI);
// If we found any, add them to CallSlots. Only do this if we haven't seen
// the vtable pointer before, as it may have been CSE'd with pointers from
if (SeenPtrs.insert(Ptr).second) {
for (DevirtCallSite Call : DevirtCalls) {
CallSlots[{TypeId, Call.Offset}].push_back(
- {CI->getArgOperand(0), Call.CS});
+ {CI->getArgOperand(0), Call.CS, nullptr});
}
}
}
if (CI->use_empty())
CI->eraseFromParent();
}
+}
+
+void DevirtModule::scanTypeCheckedLoadUsers(Function *TypeCheckedLoadFunc) {
+ Function *TypeTestFunc = Intrinsic::getDeclaration(&M, Intrinsic::type_test);
+
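+  // Walk the intrinsic's uses with an explicit iterator, advancing it before
+  // inspecting each user, because the calls visited here are erased below.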
+ for (auto I = TypeCheckedLoadFunc->use_begin(),
+ E = TypeCheckedLoadFunc->use_end();
+ I != E;) {
+ auto CI = dyn_cast<CallInst>(I->getUser());
+ ++I;
+ if (!CI)
+ continue;
+
+ Value *Ptr = CI->getArgOperand(0);
+ Value *Offset = CI->getArgOperand(1);
+ Value *TypeIdValue = CI->getArgOperand(2);
+ Metadata *TypeId = cast<MetadataAsValue>(TypeIdValue)->getMetadata();
+
+ SmallVector<DevirtCallSite, 1> DevirtCalls;
+ SmallVector<Instruction *, 1> LoadedPtrs;
+ SmallVector<Instruction *, 1> Preds;
+ bool HasNonCallUses = false;
+ findDevirtualizableCallsForTypeCheckedLoad(DevirtCalls, LoadedPtrs, Preds,
+ HasNonCallUses, CI);
+
+ // Start by generating "pessimistic" code that explicitly loads the function
+ // pointer from the vtable and performs the type check. If possible, we will
+ // eliminate the load and the type check later.
+
+ // If possible, only generate the load at the point where it is used.
+ // This helps avoid unnecessary spills.
+ IRBuilder<> LoadB(
+ (LoadedPtrs.size() == 1 && !HasNonCallUses) ? LoadedPtrs[0] : CI);
+ Value *GEP = LoadB.CreateGEP(Int8Ty, Ptr, Offset);
+ Value *GEPPtr = LoadB.CreateBitCast(GEP, PointerType::getUnqual(Int8PtrTy));
+ Value *LoadedValue = LoadB.CreateLoad(Int8PtrTy, GEPPtr);
+
+ for (Instruction *LoadedPtr : LoadedPtrs) {
+ LoadedPtr->replaceAllUsesWith(LoadedValue);
+ LoadedPtr->eraseFromParent();
+ }
+
+ // Likewise for the type test.
+ IRBuilder<> CallB((Preds.size() == 1 && !HasNonCallUses) ? Preds[0] : CI);
+ CallInst *TypeTestCall = CallB.CreateCall(TypeTestFunc, {Ptr, TypeIdValue});
+
+ for (Instruction *Pred : Preds) {
+ Pred->replaceAllUsesWith(TypeTestCall);
+ Pred->eraseFromParent();
+ }
+
+ // We have already erased any extractvalue instructions that refer to the
+ // intrinsic call, but the intrinsic may have other non-extractvalue uses
+ // (although this is unlikely). In that case, explicitly build a pair and
+ // RAUW it.
+ if (!CI->use_empty()) {
+ Value *Pair = UndefValue::get(CI->getType());
+ IRBuilder<> B(CI);
+ Pair = B.CreateInsertValue(Pair, LoadedValue, {0});
+ Pair = B.CreateInsertValue(Pair, TypeTestCall, {1});
+ CI->replaceAllUsesWith(Pair);
+ }
+
+  // The number of unsafe uses is initially the number of devirtualizable
+  // call sites found through the loaded pointer.
+ auto &NumUnsafeUses = NumUnsafeUsesForTypeTest[TypeTestCall];
+ NumUnsafeUses = DevirtCalls.size();
+
+ // If the function pointer has a non-call user, we cannot eliminate the type
+ // check, as one of those users may eventually call the pointer. Increment
+ // the unsafe use count to make sure it cannot reach zero.
+ if (HasNonCallUses)
+ ++NumUnsafeUses;
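+
+  // Queue each devirtualizable call site, keyed by (type identifier, offset),
+  // together with a pointer to the unsafe use counter so the devirtualization
+  // code can decrement it.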
+ for (DevirtCallSite Call : DevirtCalls) {
+ CallSlots[{TypeId, Call.Offset}].push_back(
+ {Ptr, Call.CS, &NumUnsafeUses});
+ }
+
+ CI->eraseFromParent();
+ }
+}
+
+bool DevirtModule::run() {
+ Function *TypeTestFunc =
+ M.getFunction(Intrinsic::getName(Intrinsic::type_test));
+ Function *TypeCheckedLoadFunc =
+ M.getFunction(Intrinsic::getName(Intrinsic::type_checked_load));
+ Function *AssumeFunc = M.getFunction(Intrinsic::getName(Intrinsic::assume));
+
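+  // Bail out early unless there is work to do: either the llvm.type.test /
+  // llvm.assume pattern or at least one llvm.type.checked.load user must be
+  // present in the module.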
+ if ((!TypeTestFunc || TypeTestFunc->use_empty() || !AssumeFunc ||
+ AssumeFunc->use_empty()) &&
+ (!TypeCheckedLoadFunc || TypeCheckedLoadFunc->use_empty()))
+ return false;
+
+ if (TypeTestFunc && AssumeFunc)
+ scanTypeTestUsers(TypeTestFunc, AssumeFunc);
+
+ if (TypeCheckedLoadFunc)
+ scanTypeCheckedLoadUsers(TypeCheckedLoadFunc);
// Rebuild type metadata into a map for easy lookup.
std::vector<VTableBits> Bits;
DidVirtualConstProp |= tryVirtualConstProp(TargetsForSlot, S.second);
}
+ // If we were able to eliminate all unsafe uses for a type checked load,
+ // eliminate the type test by replacing it with true.
+ if (TypeCheckedLoadFunc) {
+ auto True = ConstantInt::getTrue(M.getContext());
+ for (auto &&U : NumUnsafeUsesForTypeTest) {
+ if (U.second == 0) {
+ U.first->replaceAllUsesWith(True);
+ U.first->eraseFromParent();
+ }
+ }
+ }
+
// Rebuild each global we touched as part of virtual constant propagation to
// include the before and after bytes.
if (DidVirtualConstProp)
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
-; CHECK: private constant { [8 x i8], [1 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\01", [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf1 to i8*)], [0 x i8] zeroinitializer }
-; CHECK: private constant { [8 x i8], [1 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\02", [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf2 to i8*)], [0 x i8] zeroinitializer }
-; CHECK: private constant { [8 x i8], [1 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\01", [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf4 to i8*)], [0 x i8] zeroinitializer }
-; CHECK: private constant { [8 x i8], [1 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\02", [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf8 to i8*)], [0 x i8] zeroinitializer }
+; CHECK: private constant { [8 x i8], [1 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\01", [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf1 to i8*)], [0 x i8] zeroinitializer }, !type [[T8:![0-9]+]]
+; CHECK: private constant { [8 x i8], [1 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\02", [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf2 to i8*)], [0 x i8] zeroinitializer }, !type [[T8]]
+; CHECK: private constant { [8 x i8], [1 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\01", [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf4 to i8*)], [0 x i8] zeroinitializer }, !type [[T8]]
+; CHECK: private constant { [8 x i8], [1 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\00\00\00\00\02", [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf8 to i8*)], [0 x i8] zeroinitializer }, !type [[T8]]
@vt1 = constant [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf1 to i8*)], !type !0
@vt2 = constant [1 x i8*] [i8* bitcast (i1 (i8*, i32)* @vf2 to i8*)], !type !0
declare i1 @llvm.type.test(i8*, metadata)
declare void @llvm.assume(i1)
+; CHECK: [[T8]] = !{i32 8, !"typeid"}
!0 = !{i32 0, !"typeid"}
--- /dev/null
+; RUN: opt -S -wholeprogramdevirt %s | FileCheck %s
+
+target datalayout = "e-p:64:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+@vt1 = constant [1 x i8*] [i8* bitcast (void (i8*)* @vf to i8*)], !type !0
+@vt2 = constant [1 x i8*] [i8* bitcast (void (i8*)* @vf to i8*)], !type !0
+
+define void @vf(i8* %this) {
+ ret void
+}
+
+; CHECK: define void @call
+define void @call(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [1 x i8*]**
+ %vtable = load [1 x i8*]*, [1 x i8*]** %vtableptr
+ %vtablei8 = bitcast [1 x i8*]* %vtable to i8*
+ %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 0, metadata !"typeid")
+ %fptr = extractvalue {i8*, i1} %pair, 0
+ %p = extractvalue {i8*, i1} %pair, 1
+ ; CHECK: br i1 true,
+ br i1 %p, label %cont, label %trap
+
+cont:
+ %fptr_casted = bitcast i8* %fptr to void (i8*)*
+ ; CHECK: call void @vf(
+ call void %fptr_casted(i8* %obj)
+ ret void
+
+trap:
+ call void @llvm.trap()
+ unreachable
+}
+
+declare {i8*, i1} @llvm.type.checked.load(i8*, i32, metadata)
+declare void @llvm.trap()
+
+!0 = !{i32 0, !"typeid"}
--- /dev/null
+; RUN: opt -S -wholeprogramdevirt %s | FileCheck %s
+
+; Test that we correctly expand the llvm.type.checked.load intrinsic in cases
+; where we cannot devirtualize.
+
+target datalayout = "e-p:64:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+@vt1 = constant [1 x i8*] [i8* bitcast (void (i8*)* @vf1 to i8*)], !type !0
+@vt2 = constant [1 x i8*] [i8* bitcast (void (i8*)* @vf2 to i8*)], !type !0
+
+define void @vf1(i8* %this) {
+ ret void
+}
+
+define void @vf2(i8* %this) {
+ ret void
+}
+
+; CHECK: define void @call
+define void @call(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [1 x i8*]**
+ %vtable = load [1 x i8*]*, [1 x i8*]** %vtableptr
+ %vtablei8 = bitcast [1 x i8*]* %vtable to i8*
+ %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 0, metadata !"typeid")
+ %p = extractvalue {i8*, i1} %pair, 1
+ ; CHECK: [[TT:%[^ ]*]] = call i1 @llvm.type.test(i8* [[VT:%[^,]*]], metadata !"typeid")
+ ; CHECK: br i1 [[TT]],
+ br i1 %p, label %cont, label %trap
+
+cont:
+ ; CHECK: [[GEP:%[^ ]*]] = getelementptr i8, i8* [[VT]], i32 0
+ ; CHECK: [[BC:%[^ ]*]] = bitcast i8* [[GEP]] to i8**
+ ; CHECK: [[LOAD:%[^ ]*]] = load i8*, i8** [[BC]]
+ ; CHECK: [[FPC:%[^ ]*]] = bitcast i8* [[LOAD]] to void (i8*)*
+ ; CHECK: call void [[FPC]]
+ %fptr = extractvalue {i8*, i1} %pair, 0
+ %fptr_casted = bitcast i8* %fptr to void (i8*)*
+ call void %fptr_casted(i8* %obj)
+ ret void
+
+trap:
+ call void @llvm.trap()
+ unreachable
+}
+
+; CHECK: define { i8*, i1 } @ret
+define {i8*, i1} @ret(i8* %vtablei8) {
+ ; CHECK: [[GEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2:%[^,]*]], i32 1
+ ; CHECK: [[BC2:%[^ ]*]] = bitcast i8* [[GEP2]] to i8**
+ ; CHECK: [[LOAD2:%[^ ]*]] = load i8*, i8** [[BC2]]
+ ; CHECK: [[TT2:%[^ ]*]] = call i1 @llvm.type.test(i8* [[VT2]], metadata !"typeid")
+ ; CHECK: [[I1:%[^ ]*]] = insertvalue { i8*, i1 } undef, i8* [[LOAD2]], 0
+  ; CHECK: [[I2:%[^ ]*]] = insertvalue { i8*, i1 } [[I1]], i1 [[TT2]], 1
+ %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 1, metadata !"typeid")
+ ; CHECK: ret { i8*, i1 } [[I2]]
+ ret {i8*, i1} %pair
+}
+
+declare {i8*, i1} @llvm.type.checked.load(i8*, i32, metadata)
+declare void @llvm.trap()
+
+!0 = !{i32 0, !"typeid"}
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
-; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\01\01\00\00\00", [3 x i8*] [i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i32 (i8*)* @vf1i32 to i8*)], [0 x i8] zeroinitializer }, section "vt1sec"
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\01\01\00\00\00", [3 x i8*] [i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i32 (i8*)* @vf1i32 to i8*)], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
@vt1 = constant [3 x i8*] [
i8* bitcast (i1 (i8*)* @vf0i1 to i8*),
i8* bitcast (i1 (i8*)* @vf1i1 to i8*),
i8* bitcast (i32 (i8*)* @vf1i32 to i8*)
], section "vt1sec", !type !0
-; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\02\02\00\00\00", [3 x i8*] [i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i32 (i8*)* @vf2i32 to i8*)], [0 x i8] zeroinitializer }{{$}}
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\02\02\00\00\00", [3 x i8*] [i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i32 (i8*)* @vf2i32 to i8*)], [0 x i8] zeroinitializer }, !type [[T8]]
@vt2 = constant [3 x i8*] [
i8* bitcast (i1 (i8*)* @vf1i1 to i8*),
i8* bitcast (i1 (i8*)* @vf0i1 to i8*),
i8* bitcast (i32 (i8*)* @vf2i32 to i8*)
], !type !0
-; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [8 x i8], [3 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\01\03\00\00\00", [3 x i8*] [i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i32 (i8*)* @vf3i32 to i8*)], [0 x i8] zeroinitializer }{{$}}
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [8 x i8], [3 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\01\03\00\00\00", [3 x i8*] [i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i32 (i8*)* @vf3i32 to i8*)], [0 x i8] zeroinitializer }, !type [[T8]]
@vt3 = constant [3 x i8*] [
i8* bitcast (i1 (i8*)* @vf0i1 to i8*),
i8* bitcast (i1 (i8*)* @vf1i1 to i8*),
i8* bitcast (i32 (i8*)* @vf3i32 to i8*)
], !type !0
-; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [8 x i8], [3 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\02\04\00\00\00", [3 x i8*] [i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i32 (i8*)* @vf4i32 to i8*)], [0 x i8] zeroinitializer }{{$}}
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [8 x i8], [3 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\02\04\00\00\00", [3 x i8*] [i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i32 (i8*)* @vf4i32 to i8*)], [0 x i8] zeroinitializer }, !type [[T8]]
@vt4 = constant [3 x i8*] [
i8* bitcast (i1 (i8*)* @vf1i1 to i8*),
i8* bitcast (i1 (i8*)* @vf0i1 to i8*),
i8* bitcast (i32 (i8*)* @vf4i32 to i8*)
], !type !0
+; CHECK: @vt5 = {{.*}}, !type [[T0:![0-9]+]]
@vt5 = constant [3 x i8*] [
i8* bitcast (void ()* @__cxa_pure_virtual to i8*),
i8* bitcast (void ()* @__cxa_pure_virtual to i8*),
declare void @llvm.assume(i1)
declare void @__cxa_pure_virtual()
+; CHECK: [[T8]] = !{i32 8, !"typeid"}
+; CHECK: [[T0]] = !{i32 0, !"typeid"}
+
!0 = !{i32 0, !"typeid"}
--- /dev/null
+; RUN: opt -S -wholeprogramdevirt %s | FileCheck %s
+
+target datalayout = "e-p:64:64"
+target triple = "x86_64-unknown-linux-gnu"
+
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [8 x i8], [3 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\01\01\00\00\00", [3 x i8*] [i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i32 (i8*)* @vf1i32 to i8*)], [0 x i8] zeroinitializer }, section "vt1sec", !type [[T8:![0-9]+]]
+@vt1 = constant [3 x i8*] [
+i8* bitcast (i1 (i8*)* @vf0i1 to i8*),
+i8* bitcast (i1 (i8*)* @vf1i1 to i8*),
+i8* bitcast (i32 (i8*)* @vf1i32 to i8*)
+], section "vt1sec", !type !0
+
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [8 x i8], [3 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\02\02\00\00\00", [3 x i8*] [i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i32 (i8*)* @vf2i32 to i8*)], [0 x i8] zeroinitializer }, !type [[T8]]
+@vt2 = constant [3 x i8*] [
+i8* bitcast (i1 (i8*)* @vf1i1 to i8*),
+i8* bitcast (i1 (i8*)* @vf0i1 to i8*),
+i8* bitcast (i32 (i8*)* @vf2i32 to i8*)
+], !type !0
+
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [8 x i8], [3 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\01\03\00\00\00", [3 x i8*] [i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i32 (i8*)* @vf3i32 to i8*)], [0 x i8] zeroinitializer }, !type [[T8]]
+@vt3 = constant [3 x i8*] [
+i8* bitcast (i1 (i8*)* @vf0i1 to i8*),
+i8* bitcast (i1 (i8*)* @vf1i1 to i8*),
+i8* bitcast (i32 (i8*)* @vf3i32 to i8*)
+], !type !0
+
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [8 x i8], [3 x i8*], [0 x i8] } { [8 x i8] c"\00\00\00\02\04\00\00\00", [3 x i8*] [i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i32 (i8*)* @vf4i32 to i8*)], [0 x i8] zeroinitializer }, !type [[T8]]
+@vt4 = constant [3 x i8*] [
+i8* bitcast (i1 (i8*)* @vf1i1 to i8*),
+i8* bitcast (i1 (i8*)* @vf0i1 to i8*),
+i8* bitcast (i32 (i8*)* @vf4i32 to i8*)
+], !type !0
+
+; CHECK: @vt5 = {{.*}}, !type [[T0:![0-9]+]]
+@vt5 = constant [3 x i8*] [
+i8* bitcast (void ()* @__cxa_pure_virtual to i8*),
+i8* bitcast (void ()* @__cxa_pure_virtual to i8*),
+i8* bitcast (void ()* @__cxa_pure_virtual to i8*)
+], !type !0
+
+; CHECK: @vt1 = alias [3 x i8*], getelementptr inbounds ({ [8 x i8], [3 x i8*], [0 x i8] }, { [8 x i8], [3 x i8*], [0 x i8] }* [[VT1DATA]], i32 0, i32 1)
+; CHECK: @vt2 = alias [3 x i8*], getelementptr inbounds ({ [8 x i8], [3 x i8*], [0 x i8] }, { [8 x i8], [3 x i8*], [0 x i8] }* [[VT2DATA]], i32 0, i32 1)
+; CHECK: @vt3 = alias [3 x i8*], getelementptr inbounds ({ [8 x i8], [3 x i8*], [0 x i8] }, { [8 x i8], [3 x i8*], [0 x i8] }* [[VT3DATA]], i32 0, i32 1)
+; CHECK: @vt4 = alias [3 x i8*], getelementptr inbounds ({ [8 x i8], [3 x i8*], [0 x i8] }, { [8 x i8], [3 x i8*], [0 x i8] }* [[VT4DATA]], i32 0, i32 1)
+
+define i1 @vf0i1(i8* %this) readnone {
+ ret i1 0
+}
+
+define i1 @vf1i1(i8* %this) readnone {
+ ret i1 1
+}
+
+define i32 @vf1i32(i8* %this) readnone {
+ ret i32 1
+}
+
+define i32 @vf2i32(i8* %this) readnone {
+ ret i32 2
+}
+
+define i32 @vf3i32(i8* %this) readnone {
+ ret i32 3
+}
+
+define i32 @vf4i32(i8* %this) readnone {
+ ret i32 4
+}
+
+; CHECK: define i1 @call1(
+define i1 @call1(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [3 x i8*]**
+ %vtable = load [3 x i8*]*, [3 x i8*]** %vtableptr
+ ; CHECK: [[VT1:%[^ ]*]] = bitcast [3 x i8*]* {{.*}} to i8*
+ %vtablei8 = bitcast [3 x i8*]* %vtable to i8*
+ %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 0, metadata !"typeid")
+ %fptr = extractvalue {i8*, i1} %pair, 0
+ %fptr_casted = bitcast i8* %fptr to i1 (i8*)*
+ ; CHECK: [[VTGEP1:%[^ ]*]] = getelementptr i8, i8* [[VT1]], i64 -5
+ ; CHECK: [[VTLOAD1:%[^ ]*]] = load i8, i8* [[VTGEP1]]
+ ; CHECK: [[VTAND1:%[^ ]*]] = and i8 [[VTLOAD1]], 2
+ ; CHECK: [[VTCMP1:%[^ ]*]] = icmp ne i8 [[VTAND1]], 0
+ %result = call i1 %fptr_casted(i8* %obj)
+ ; CHECK: [[AND1:%[^ ]*]] = and i1 [[VTCMP1]], true
+ %p = extractvalue {i8*, i1} %pair, 1
+ %and = and i1 %result, %p
+ ; CHECK: ret i1 [[AND1]]
+ ret i1 %and
+}
+
+; CHECK: define i1 @call2(
+define i1 @call2(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [3 x i8*]**
+ %vtable = load [3 x i8*]*, [3 x i8*]** %vtableptr
+ ; CHECK: [[VT2:%[^ ]*]] = bitcast [3 x i8*]* {{.*}} to i8*
+ %vtablei8 = bitcast [3 x i8*]* %vtable to i8*
+ %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 8, metadata !"typeid")
+ %fptr = extractvalue {i8*, i1} %pair, 0
+ %fptr_casted = bitcast i8* %fptr to i1 (i8*)*
+ ; CHECK: [[VTGEP2:%[^ ]*]] = getelementptr i8, i8* [[VT2]], i64 -5
+ ; CHECK: [[VTLOAD2:%[^ ]*]] = load i8, i8* [[VTGEP2]]
+ ; CHECK: [[VTAND2:%[^ ]*]] = and i8 [[VTLOAD2]], 1
+ ; CHECK: [[VTCMP2:%[^ ]*]] = icmp ne i8 [[VTAND2]], 0
+ %result = call i1 %fptr_casted(i8* %obj)
+ ; CHECK: [[AND2:%[^ ]*]] = and i1 [[VTCMP2]], true
+ %p = extractvalue {i8*, i1} %pair, 1
+ %and = and i1 %result, %p
+ ; CHECK: ret i1 [[AND2]]
+ ret i1 %and
+}
+
+; CHECK: define i32 @call3(
+define i32 @call3(i8* %obj) {
+ %vtableptr = bitcast i8* %obj to [3 x i8*]**
+ %vtable = load [3 x i8*]*, [3 x i8*]** %vtableptr
+ ; CHECK: [[VT3:%[^ ]*]] = bitcast [3 x i8*]* {{.*}} to i8*
+ %vtablei8 = bitcast [3 x i8*]* %vtable to i8*
+ %pair = call {i8*, i1} @llvm.type.checked.load(i8* %vtablei8, i32 16, metadata !"typeid")
+ %fptr = extractvalue {i8*, i1} %pair, 0
+ %fptr_casted = bitcast i8* %fptr to i32 (i8*)*
+ ; CHECK: [[VTGEP3:%[^ ]*]] = getelementptr i8, i8* [[VT3]], i64 -4
+ ; CHECK: [[VTBC3:%[^ ]*]] = bitcast i8* [[VTGEP3]] to i32*
+ ; CHECK: [[VTLOAD3:%[^ ]*]] = load i32, i32* [[VTBC3]]
+ %result = call i32 %fptr_casted(i8* %obj)
+ ; CHECK: ret i32 [[VTLOAD3]]
+ ret i32 %result
+}
+
+declare {i8*, i1} @llvm.type.checked.load(i8*, i32, metadata)
+declare void @llvm.assume(i1)
+declare void @__cxa_pure_virtual()
+
+; CHECK: [[T8]] = !{i32 8, !"typeid"}
+; CHECK: [[T0]] = !{i32 0, !"typeid"}
+
+!0 = !{i32 0, !"typeid"}
target datalayout = "e-p:64:64"
target triple = "x86_64-unknown-linux-gnu"
-; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [0 x i8], [4 x i8*], [8 x i8] } { [0 x i8] zeroinitializer, [4 x i8*] [i8* null, i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i32 (i8*)* @vf1i32 to i8*)], [8 x i8] c"\01\00\00\00\01\00\00\00" }
+; CHECK: [[VT1DATA:@[^ ]*]] = private constant { [0 x i8], [4 x i8*], [8 x i8] } { [0 x i8] zeroinitializer, [4 x i8*] [i8* null, i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i32 (i8*)* @vf1i32 to i8*)], [8 x i8] c"\01\00\00\00\01\00\00\00" }, !type [[T8:![0-9]+]]
@vt1 = constant [4 x i8*] [
i8* null,
i8* bitcast (i1 (i8*)* @vf0i1 to i8*),
i8* bitcast (i32 (i8*)* @vf1i32 to i8*)
], !type !1
-; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [0 x i8], [3 x i8*], [8 x i8] } { [0 x i8] zeroinitializer, [3 x i8*] [i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i32 (i8*)* @vf2i32 to i8*)], [8 x i8] c"\02\00\00\00\02\00\00\00" }
+; CHECK: [[VT2DATA:@[^ ]*]] = private constant { [0 x i8], [3 x i8*], [8 x i8] } { [0 x i8] zeroinitializer, [3 x i8*] [i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i32 (i8*)* @vf2i32 to i8*)], [8 x i8] c"\02\00\00\00\02\00\00\00" }, !type [[T0:![0-9]+]]
@vt2 = constant [3 x i8*] [
i8* bitcast (i1 (i8*)* @vf1i1 to i8*),
i8* bitcast (i1 (i8*)* @vf0i1 to i8*),
i8* bitcast (i32 (i8*)* @vf2i32 to i8*)
], !type !0
-; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [0 x i8], [4 x i8*], [8 x i8] } { [0 x i8] zeroinitializer, [4 x i8*] [i8* null, i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i32 (i8*)* @vf3i32 to i8*)], [8 x i8] c"\03\00\00\00\01\00\00\00" }
+; CHECK: [[VT3DATA:@[^ ]*]] = private constant { [0 x i8], [4 x i8*], [8 x i8] } { [0 x i8] zeroinitializer, [4 x i8*] [i8* null, i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i32 (i8*)* @vf3i32 to i8*)], [8 x i8] c"\03\00\00\00\01\00\00\00" }, !type [[T8]]
@vt3 = constant [4 x i8*] [
i8* null,
i8* bitcast (i1 (i8*)* @vf0i1 to i8*),
i8* bitcast (i32 (i8*)* @vf3i32 to i8*)
], !type !1
-; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [0 x i8], [3 x i8*], [8 x i8] } { [0 x i8] zeroinitializer, [3 x i8*] [i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i32 (i8*)* @vf4i32 to i8*)], [8 x i8] c"\04\00\00\00\02\00\00\00" }
+; CHECK: [[VT4DATA:@[^ ]*]] = private constant { [0 x i8], [3 x i8*], [8 x i8] } { [0 x i8] zeroinitializer, [3 x i8*] [i8* bitcast (i1 (i8*)* @vf1i1 to i8*), i8* bitcast (i1 (i8*)* @vf0i1 to i8*), i8* bitcast (i32 (i8*)* @vf4i32 to i8*)], [8 x i8] c"\04\00\00\00\02\00\00\00" }, !type [[T0]]
@vt4 = constant [3 x i8*] [
i8* bitcast (i1 (i8*)* @vf1i1 to i8*),
i8* bitcast (i1 (i8*)* @vf0i1 to i8*),
declare i1 @llvm.type.test(i8*, metadata)
declare void @llvm.assume(i1)
+; CHECK: [[T8]] = !{i32 8, !"typeid"}
+; CHECK: [[T0]] = !{i32 0, !"typeid"}
+
!0 = !{i32 0, !"typeid"}
!1 = !{i32 8, !"typeid"}