protected:
bool UseARMMethodPtrABI;
bool UseARMGuardVarABI;
+ bool Use32BitVTableOffsetABI;
  ItaniumMangleContext &getMangleContext() {
    return cast<ItaniumMangleContext>(CodeGen::CGCXXABI::getMangleContext());
  }

  ItaniumCXXABI(CodeGen::CodeGenModule &CGM,
                bool UseARMMethodPtrABI = false,
                bool UseARMGuardVarABI = false) :
CGCXXABI(CGM), UseARMMethodPtrABI(UseARMMethodPtrABI),
- UseARMGuardVarABI(UseARMGuardVarABI) { }
+ UseARMGuardVarABI(UseARMGuardVarABI),
+ Use32BitVTableOffsetABI(false) { }
bool classifyReturnType(CGFunctionInfo &FI) const override;
class iOS64CXXABI : public ARMCXXABI {
public:
- iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {}
+ iOS64CXXABI(CodeGen::CodeGenModule &CGM) : ARMCXXABI(CGM) {
+ Use32BitVTableOffsetABI = true;
+ }
// ARM64 libraries are prepared for non-unique RTTI.
bool shouldRTTIBeUnique() const override { return false; }
llvm::Value *VTable = CGF.GetVTablePtr(Address(This, VTablePtrAlign), VTableTy, RD);
// Apply the offset.
+ // On ARM64, to reserve extra space in virtual member function pointers,
+ // we only pay attention to the low 32 bits of the offset.
llvm::Value *VTableOffset = FnAsInt;
if (!UseARMMethodPtrABI)
VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
+ if (Use32BitVTableOffsetABI) {
+ VTableOffset = Builder.CreateTrunc(VTableOffset, CGF.Int32Ty);
+ VTableOffset = Builder.CreateZExt(VTableOffset, CGM.PtrDiffTy);
+ }
VTable = Builder.CreateGEP(VTable, VTableOffset);
// Load the virtual function to call.
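
A minimal standalone sketch of what the CreateTrunc/CreateZExt pair above computes, using plain integers rather than IR (the constant below is made up for illustration): only the low 32 bits of the encoded offset survive to the GEP, leaving the high half of the member pointer's function field reserved.

    #include <cassert>
    #include <cstdint>

    int main() {
      // High half reserved; low half holds a vtable offset of 16 bytes.
      uint64_t FnAsInt = 0xAAAAAAAA00000010;
      // trunc to i32, then zext back to i64, as the patch emits.
      uint64_t VTableOffset = static_cast<uint32_t>(FnAsInt);
      assert(VTableOffset == 0x10);
      return 0;
    }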
// CHECK-GLOBALS-DAG: @_ZTIN5test21EIiEE = weak_odr constant { {{.*}}, i8* inttoptr (i64 add (i64 ptrtoint ([14 x i8]* @_ZTSN5test21EIiEE to i64), i64 -9223372036854775808) to i8*) }
}
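
For reference, the addend in the CHECK-GLOBALS-DAG line above is INT64_MIN, i.e. a value with only bit 63 set; that is the high bit folded into the type-name pointer, which the non-unique-RTTI scheme uses to mark type names that must be compared by string. A quick standalone check of the constant (not part of the test):

    #include <cassert>
    #include <cstdint>

    int main() {
      // -9223372036854775808 == INT64_MIN: only bit 63 set.
      uint64_t HighBit = uint64_t{1} << 63;
      assert(static_cast<uint64_t>(INT64_MIN) == HighBit);
      return 0;
    }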
+
+// ARM64 reserves the top half of the vtable offset in virtual
+// member function pointers.
+namespace test3 {
+ struct A {
+ virtual void foo();
+ virtual void bar();
+ };
+
+ // The offset half of the pointer is still initialized to zero.
+ // CHECK-GLOBALS-DAG: @_ZN5test34mptrE = global { i64, i64 } { i64 0, i64 1 }
+ void (A::*mptr)() = &A::foo;
+
+ // CHECK-LABEL: define void @_ZN5test34testEv()
+ // CHECK: [[TEMP:%.*]] = alloca [[A:.*]], align 8
+ // CHECK: [[MEMPTR:%.*]] = load { i64, i64 }, { i64, i64 }* @_ZN5test34mptrE, align 8
+ // CHECK: [[ADJUST_AND_IS_VIRTUAL:%.*]] = extractvalue { i64, i64 } [[MEMPTR]], 1
+ // CHECK: [[ADJUST:%.*]] = ashr i64 [[ADJUST_AND_IS_VIRTUAL]], 1
+ // CHECK: [[T0:%.*]] = bitcast [[A]]* [[TEMP]] to i8*
+ // CHECK: [[T1:%.*]] = getelementptr inbounds i8, i8* [[T0]], i64 [[ADJUST]]
+ // CHECK: [[ADJUSTED:%.*]] = bitcast i8* [[T1]] to [[A]]*
+ // CHECK: [[MEMBER:%.*]] = extractvalue { i64, i64 } [[MEMPTR]], 0
+ // CHECK: [[T0:%.*]] = and i64 [[ADJUST_AND_IS_VIRTUAL]], 1
+ // CHECK: [[IS_VIRTUAL:%.*]] = icmp ne i64 [[T0]], 0
+ // CHECK: br i1 [[IS_VIRTUAL]],
+ // CHECK: [[T0:%.*]] = bitcast [[A]]* [[ADJUSTED]] to i8**
+ // CHECK: [[VPTR:%.*]] = load i8*, i8** [[T0]], align 8
+ // CHECK: [[TRUNC:%.*]] = trunc i64 [[MEMBER]] to i32
+ // CHECK: [[ZEXT:%.*]] = zext i32 [[TRUNC]] to i64
+ // CHECK: [[T0:%.*]] = getelementptr i8, i8* [[VPTR]], i64 [[ZEXT]]
+ // CHECK: [[T1:%.*]] = bitcast i8* [[T0]] to void ([[A]]*)**
+ // CHECK: load void ([[A]]*)*, void ([[A]]*)** [[T1]],
+ void test() {
+ (A().*mptr)();
+ }
+}
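
A standalone sketch of the decode sequence those CHECK lines match (MemberFnPtr and decode are hypothetical names, not part of the test): the second i64 carries the this-adjustment in its upper bits with the virtual flag in bit 0, and, under the 32-bit vtable-offset ABI, only the low 32 bits of the first i64 index into the vtable.

    #include <cstdint>

    struct MemberFnPtr { int64_t Ptr; int64_t Adj; };

    // Mirrors the IR in test3: ashr for the this-adjustment, and-with-1
    // for the virtual bit, trunc+zext for the 32-bit vtable offset.
    void decode(MemberFnPtr MP, bool &IsVirtual, int64_t &ThisAdjust,
                uint64_t &VTableOffset) {
      ThisAdjust = MP.Adj >> 1;
      IsVirtual = (MP.Adj & 1) != 0;
      VTableOffset = static_cast<uint32_t>(MP.Ptr);
    }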