// Compute the virtual offset.
llvm::Value *VirtualOffset = 0;
if (VBase) {
- VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
+ VirtualOffset =
+ CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
}
// Apply both offsets.
PushDestructorCleanup(D, Addr);
}
-llvm::Value *
-CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
- const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl) {
- llvm::Value *VTablePtr = GetVTablePtr(This, Int8PtrTy);
- CharUnits VBaseOffsetOffset =
- CGM.getVTableContext().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
-
- llvm::Value *VBaseOffsetPtr =
- Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
- "vbase.offset.ptr");
- llvm::Type *PtrDiffTy =
- ConvertType(getContext().getPointerDiffType());
-
- VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
- PtrDiffTy->getPointerTo());
-
- llvm::Value *VBaseOffset = Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
-
- return VBaseOffset;
-}
-
void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
if (CodeGenVTables::needsVTTParameter(CurGD) && NearestVBase) {
// We need to use the virtual base offset offset because the virtual base
// might have a different offset in the most derived class.
- VirtualOffset = GetVirtualBaseClassOffset(LoadCXXThis(), VTableClass,
- NearestVBase);
+ VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
+ LoadCXXThis(),
+ VTableClass,
+ NearestVBase);
NonVirtualOffset = OffsetFromNearestVBase;
} else {
// We can just use the base offset in the complete class.
llvm::Value *ptr,
QualType type);
+ llvm::Value *GetVirtualBaseClassOffset(CodeGenFunction &CGF,
+ llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl);
+
void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
CXXCtorType Type,
CanQualType &ResTy,
GetNullMemberPointerFields(const MemberPointerType *MPT,
llvm::SmallVectorImpl<llvm::Constant *> &fields);
+ /// \brief Finds the offset from the base of RD to the vbptr it uses, even if
+ /// it is reusing a vbptr from a non-virtual base. RD must have morally
+ /// virtual bases.
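+  /// Illustrative sketch (hierarchy invented here for illustration, not taken
+  /// from the code): given
+  ///   struct A { virtual void f(); };
+  ///   struct B : virtual A { };
+  ///   struct C : B { };
+  /// C allocates no vbptr of its own and reuses the one inside its
+  /// non-virtual base B, so the result is B's offset within C plus B's own
+  /// vbptr offset.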
+ CharUnits GetVBPtrOffsetFromBases(const CXXRecordDecl *RD);
+
+ /// \brief Shared code for virtual base adjustment. Returns the offset from
+ /// the vbptr to the virtual base. Optionally returns the address of the
+ /// vbptr itself.
+ llvm::Value *GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
+ llvm::Value *Base,
+                                       llvm::Value *VBTableOffset,
+                                       llvm::Value *VBPtrOffset,
+ llvm::Value **VBPtr = 0);
+
+ /// \brief Performs a full virtual base adjustment. Used to dereference
+ /// pointers to members of virtual bases.
llvm::Value *AdjustVirtualBase(CodeGenFunction &CGF, const CXXRecordDecl *RD,
llvm::Value *Base,
llvm::Value *VirtualBaseAdjustmentOffset,
return ptr;
}
+CharUnits MicrosoftCXXABI::GetVBPtrOffsetFromBases(const CXXRecordDecl *RD) {
+ assert(RD->getNumVBases());
+ CharUnits Total = CharUnits::Zero();
+ while (RD) {
+ const ASTRecordLayout &RDLayout = getContext().getASTRecordLayout(RD);
+ CharUnits VBPtrOffset = RDLayout.getVBPtrOffset();
+ // -1 is the sentinel for no vbptr.
+ if (VBPtrOffset != CharUnits::fromQuantity(-1)) {
+ Total += VBPtrOffset;
+ break;
+ }
+
+ // RD is reusing the vbptr of a non-virtual base. Find it and continue.
+ const CXXRecordDecl *FirstNVBaseWithVBases = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
+ if (!I->isVirtual() && Base->getNumVBases() > 0) {
+ FirstNVBaseWithVBases = Base;
+ break;
+ }
+ }
+ assert(FirstNVBaseWithVBases);
+ Total += RDLayout.getBaseClassOffset(FirstNVBaseWithVBases);
+ RD = FirstNVBaseWithVBases;
+ }
+ return Total;
+}
+
+llvm::Value *
+MicrosoftCXXABI::GetVirtualBaseClassOffset(CodeGenFunction &CGF,
+ llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) {
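+  // Find the vbptr that ClassDecl uses; it may live inside a non-virtual
+  // base, so walk the bases to get its byte offset from 'This'.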
+ int64_t VBPtrChars = GetVBPtrOffsetFromBases(ClassDecl).getQuantity();
+ llvm::Value *VBPtrOffset = llvm::ConstantInt::get(CGM.PtrDiffTy, VBPtrChars);
+
+ // The vbtable is an array of i32 offsets. The first entry is a self entry,
+ // and the rest are offsets from the vbptr to virtual bases. The bases are
+ // ordered the same way our vbases are ordered: as they appear in a
+ // left-to-right depth-first search of the hierarchy.
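+  //
+  // Sketch of the layout this loop assumes (types invented for illustration):
+  //   struct V1 { }; struct V2 { };
+  //   struct D : virtual V1, virtual V2 { };
+  // D's vbtable is { <self entry>, <offset to V1>, <offset to V2> }, so the
+  // entry for V2 is found at index 2.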
+ unsigned VBTableIndex = 1; // Start with one to skip the self entry.
+ for (CXXRecordDecl::base_class_const_iterator I = ClassDecl->vbases_begin(),
+ E = ClassDecl->vbases_end(); I != E; ++I) {
+ if (I->getType()->getAsCXXRecordDecl() == BaseClassDecl)
+ break;
+ VBTableIndex++;
+ }
+ assert(VBTableIndex != 1 + ClassDecl->getNumVBases() &&
+ "BaseClassDecl must be a vbase of ClassDecl");
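+  // Each vbtable entry is an int; scale the index to a byte offset.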
+ CharUnits IntSize = getContext().getTypeSizeInChars(getContext().IntTy);
+ CharUnits VBTableChars = IntSize * VBTableIndex;
+ llvm::Value *VBTableOffset =
+ llvm::ConstantInt::get(CGM.IntTy, VBTableChars.getQuantity());
+
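+  // Load the entry and add it to the vbptr offset: the result is the full
+  // offset from 'This' to the virtual base.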
+ llvm::Value *VBPtrToNewBase =
+ GetVBaseOffsetFromVBPtr(CGF, This, VBTableOffset, VBPtrOffset);
+ VBPtrToNewBase =
+ CGF.Builder.CreateSExtOrBitCast(VBPtrToNewBase, CGM.PtrDiffTy);
+ return CGF.Builder.CreateNSWAdd(VBPtrOffset, VBPtrToNewBase);
+}
+
bool MicrosoftCXXABI::needThisReturn(GlobalDecl GD) {
const CXXMethodDecl* MD = cast<CXXMethodDecl>(GD.getDecl());
return isa<CXXConstructorDecl>(MD);
return I == E;
}
+llvm::Value *
+MicrosoftCXXABI::GetVBaseOffsetFromVBPtr(CodeGenFunction &CGF,
+ llvm::Value *This,
+ llvm::Value *VBTableOffset,
+ llvm::Value *VBPtrOffset,
+ llvm::Value **VBPtrOut) {
+ CGBuilderTy &Builder = CGF.Builder;
+ // Load the vbtable pointer from the vbptr in the instance.
+ This = Builder.CreateBitCast(This, CGM.Int8PtrTy);
+ llvm::Value *VBPtr =
+ Builder.CreateInBoundsGEP(This, VBPtrOffset, "vbptr");
+ if (VBPtrOut) *VBPtrOut = VBPtr;
+ VBPtr = Builder.CreateBitCast(VBPtr, CGM.Int8PtrTy->getPointerTo(0));
+ llvm::Value *VBTable = Builder.CreateLoad(VBPtr, "vbtable");
+
+ // Load an i32 offset from the vb-table.
+ llvm::Value *VBaseOffs = Builder.CreateInBoundsGEP(VBTable, VBTableOffset);
+ VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
+ return Builder.CreateLoad(VBaseOffs, "vbase_offs");
+}
+
// Returns an adjusted base cast to i8*, since we do more address arithmetic on
// it.
llvm::Value *
MicrosoftCXXABI::AdjustVirtualBase(CodeGenFunction &CGF,
const CXXRecordDecl *RD, llvm::Value *Base,
- llvm::Value *VirtualBaseAdjustmentOffset,
+ llvm::Value *VBTableOffset,
llvm::Value *VBPtrOffset) {
CGBuilderTy &Builder = CGF.Builder;
Base = Builder.CreateBitCast(Base, CGM.Int8PtrTy);
VBaseAdjustBB = CGF.createBasicBlock("memptr.vadjust");
SkipAdjustBB = CGF.createBasicBlock("memptr.skip_vadjust");
llvm::Value *IsVirtual =
- Builder.CreateICmpNE(VirtualBaseAdjustmentOffset, getZeroInt(),
+ Builder.CreateICmpNE(VBTableOffset, getZeroInt(),
"memptr.is_vbase");
Builder.CreateCondBr(IsVirtual, VBaseAdjustBB, SkipAdjustBB);
CGF.EmitBlock(VBaseAdjustBB);
// If we weren't given a dynamic vbptr offset, RD should be complete and we'll
// know the vbptr offset.
if (!VBPtrOffset) {
- CharUnits offs = getContext().getASTRecordLayout(RD).getVBPtrOffset();
+ CharUnits offs = CharUnits::Zero();
+ if (RD->getNumVBases()) {
+ offs = GetVBPtrOffsetFromBases(RD);
+ }
VBPtrOffset = llvm::ConstantInt::get(CGM.IntTy, offs.getQuantity());
}
- // Load the vbtable pointer from the vbtable offset in the instance.
- llvm::Value *VBPtr =
- Builder.CreateInBoundsGEP(Base, VBPtrOffset, "memptr.vbptr");
- llvm::Value *VBTable =
- Builder.CreateBitCast(VBPtr, CGM.Int8PtrTy->getPointerTo(0));
- VBTable = Builder.CreateLoad(VBTable, "memptr.vbtable");
- // Load an i32 offset from the vb-table.
+ llvm::Value *VBPtr = 0;
llvm::Value *VBaseOffs =
- Builder.CreateInBoundsGEP(VBTable, VirtualBaseAdjustmentOffset);
- VBaseOffs = Builder.CreateBitCast(VBaseOffs, CGM.Int32Ty->getPointerTo(0));
- VBaseOffs = Builder.CreateLoad(VBaseOffs, "memptr.vbase_offs");
- // Add it to VBPtr. GEP will sign extend the i32 value for us.
+ GetVBaseOffsetFromVBPtr(CGF, Base, VBTableOffset, VBPtrOffset, &VBPtr);
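+  // Add the loaded offset to the vbptr; the i8* GEP sign extends the i32
+  // offset for us.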
llvm::Value *AdjustedBase = Builder.CreateInBoundsGEP(VBPtr, VBaseOffs);
// Merge control flow with the case where we didn't have to adjust.
// RUN: %clang_cc1 -emit-llvm %s -o - -triple i686-pc-linux-gnu | FileCheck %s
+// RUN: %clang_cc1 -cxx-abi microsoft -emit-llvm %s -o - -triple i686-pc-win32 | FileCheck -check-prefix MSVC %s
struct A { int a; virtual int aa(); };
struct B { int b; virtual int bb(); };
// CHECK: load i32* [[CASTVBASEOFFSETPTRA]]
// CHECK: }
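+// The MSVC checks below verify that 'a' adjusts through the vbtable: load the
+// vbtable from the vbptr at offset 0 in the object, load entry 1 (byte offset
+// 4) to get the offset from the vbptr to A, and add it to the vbptr offset (0).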
+// MSVC: @"\01?a@@YAPAUA@@XZ"() [[NUW:#[0-9]+]] {
+// MSVC: %[[vbptr_off:.*]] = getelementptr inbounds i8* {{.*}}, i32 0
+// MSVC: %[[vbptr:.*]] = bitcast i8* %[[vbptr_off]] to i8**
+// MSVC: %[[vbtable:.*]] = load i8** %[[vbptr]]
+// MSVC: %[[entry:.*]] = getelementptr inbounds i8* {{.*}}, i32 4
+// MSVC: %[[entry_i32:.*]] = bitcast i8* %[[entry]] to i32*
+// MSVC: %[[offset:.*]] = load i32* %[[entry_i32]]
+// MSVC: add nsw i32 0, %[[offset]]
+// MSVC: }
+
B* b() { return x; }
// CHECK: @_Z1bv() [[NUW]]
// CHECK: [[VBASEOFFSETPTRA:%[a-zA-Z0-9\.]+]] = getelementptr i8* {{.*}}, i64 -20
// CHECK: load i32* [[CASTVBASEOFFSETPTRA]]
// CHECK: }
+// Same as 'a' except we use a different vbtable offset.
+// MSVC: @"\01?b@@YAPAUB@@XZ"() [[NUW:#[0-9]+]] {
+// MSVC: %[[vbptr_off:.*]] = getelementptr inbounds i8* {{.*}}, i32 0
+// MSVC: %[[vbptr:.*]] = bitcast i8* %[[vbptr_off]] to i8**
+// MSVC: %[[vbtable:.*]] = load i8** %[[vbptr]]
+// MSVC: %[[entry:.*]] = getelementptr inbounds i8* {{.*}}, i32 8
+// MSVC: %[[entry_i32:.*]] = bitcast i8* %[[entry]] to i32*
+// MSVC: %[[offset:.*]] = load i32* %[[entry_i32]]
+// MSVC: add nsw i32 0, %[[offset]]
+// MSVC: }
+
BB* c() { return x; }
// CHECK: @_Z1cv() [[NUW]]
// CHECK: [[VBASEOFFSETPTRC:%[a-zA-Z0-9\.]+]] = getelementptr i8* {{.*}}, i64 -24
// CHECK: add i32 [[VBASEOFFSETC]], 8
// CHECK: }
+// Same as 'a' except we use a different vbtable offset.
+// MSVC: @"\01?c@@YAPAUBB@@XZ"() [[NUW:#[0-9]+]] {
+// MSVC: %[[vbptr_off:.*]] = getelementptr inbounds i8* {{.*}}, i32 0
+// MSVC: %[[vbptr:.*]] = bitcast i8* %[[vbptr_off]] to i8**
+// MSVC: %[[vbtable:.*]] = load i8** %[[vbptr]]
+// MSVC: %[[entry:.*]] = getelementptr inbounds i8* {{.*}}, i32 16
+// MSVC: %[[entry_i32:.*]] = bitcast i8* %[[entry]] to i32*
+// MSVC: %[[offset:.*]] = load i32* %[[entry_i32]]
+// MSVC: add nsw i32 0, %[[offset]]
+// MSVC: }
+
+// Put the vbptr at a non-zero offset inside a non-virtual base.
+struct E { int e; };
+struct F : E, D { int f; };
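+// Under the MS ABI, F is expected to lay out E (a single int) first, so the
+// vbptr it reuses from D lands at offset 4 rather than 0.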
+
+F* y;
+
+BB* d() { return y; }
+
+// Same as 'c' except the vbptr offset is 4, changing the initial GEP and the
+// final add.
+// MSVC: @"\01?d@@YAPAUBB@@XZ"() [[NUW:#[0-9]+]] {
+// MSVC: %[[vbptr_off:.*]] = getelementptr inbounds i8* {{.*}}, i32 4
+// MSVC: %[[vbptr:.*]] = bitcast i8* %[[vbptr_off]] to i8**
+// MSVC: %[[vbtable:.*]] = load i8** %[[vbptr]]
+// MSVC: %[[entry:.*]] = getelementptr inbounds i8* {{.*}}, i32 16
+// MSVC: %[[entry_i32:.*]] = bitcast i8* %[[entry]] to i32*
+// MSVC: %[[offset:.*]] = load i32* %[[entry_i32]]
+// MSVC: add nsw i32 4, %[[offset]]
+// MSVC: }
+
// CHECK: attributes [[NUW]] = { nounwind{{.*}} }