From a6a7170e6e2977ca5ce60f68f217a8ec31a1bd7d Mon Sep 17 00:00:00 2001
From: Justin Bogner
Date: Wed, 20 May 2015 15:53:59 +0000
Subject: [PATCH] CodeGen: Remove some trailing whitespace. NFC

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@237802 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/CGClass.cpp | 192 ++++++++++++++++++++--------------------
 1 file changed, 96 insertions(+), 96 deletions(-)

diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 6528377a45..d9c336c6c4 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -29,31 +29,31 @@
 using namespace clang;
 using namespace CodeGen;
 
-static CharUnits 
-ComputeNonVirtualBaseClassOffset(ASTContext &Context, 
+static CharUnits
+ComputeNonVirtualBaseClassOffset(ASTContext &Context,
                                  const CXXRecordDecl *DerivedClass,
                                  CastExpr::path_const_iterator Start,
                                  CastExpr::path_const_iterator End) {
   CharUnits Offset = CharUnits::Zero();
-  
+
   const CXXRecordDecl *RD = DerivedClass;
-  
+
   for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
     const CXXBaseSpecifier *Base = *I;
     assert(!Base->isVirtual() && "Should not see virtual bases here!");
 
     // Get the layout.
     const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-    
-    const CXXRecordDecl *BaseDecl = 
+
+    const CXXRecordDecl *BaseDecl =
       cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
-    
+
     // Add the offset.
     Offset += Layout.getBaseClassOffset(BaseDecl);
-    
+
     RD = BaseDecl;
   }
-  
+
   return Offset;
 }
 
@@ -63,15 +63,15 @@ CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
                                    CastExpr::path_const_iterator PathEnd) {
   assert(PathBegin != PathEnd && "Base path should not be empty!");
 
-  CharUnits Offset = 
+  CharUnits Offset =
     ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
                                      PathBegin, PathEnd);
   if (Offset.isZero())
     return nullptr;
 
-  llvm::Type *PtrDiffTy = 
+  llvm::Type *PtrDiffTy =
     Types.ConvertType(getContext().getPointerDiffType());
-  
+
   return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
 }
 
@@ -128,7 +128,7 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
   } else {
     baseOffset = virtualOffset;
   }
-  
+
   // Apply the base offset.
   ptr = CGF.Builder.CreateBitCast(ptr, CGF.Int8PtrTy);
   ptr = CGF.Builder.CreateInBoundsGEP(ptr, baseOffset, "add.ptr");
@@ -150,7 +150,7 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
   // *start* with a step down to the correct virtual base subobject,
   // and hence will not require any further steps.
   if ((*Start)->isVirtual()) {
-    VBase = 
+    VBase =
       cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
     ++Start;
   }
@@ -158,7 +158,7 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
   // Compute the static offset of the ultimate destination within its
   // allocating subobject (the virtual base, if there is one, or else
   // the "complete" object that we see).
-  CharUnits NonVirtualOffset = 
+  CharUnits NonVirtualOffset =
     ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
                                      Start, PathEnd);
 
@@ -173,7 +173,7 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
   }
 
   // Get the base pointer type.
-  llvm::Type *BasePtrTy = 
+  llvm::Type *BasePtrTy =
     ConvertType((PathEnd[-1])->getType())->getPointerTo();
 
   QualType DerivedTy = getContext().getRecordType(Derived);
@@ -198,7 +198,7 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
     origBB = Builder.GetInsertBlock();
     llvm::BasicBlock *notNullBB = createBasicBlock("cast.notnull");
     endBB = createBasicBlock("cast.end");
-    
+
     llvm::Value *isNull = Builder.CreateIsNull(Value);
     Builder.CreateCondBr(isNull, endBB, notNullBB);
     EmitBlock(notNullBB);
@@ -217,10 +217,10 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
   }
 
   // Apply both offsets.
-  Value = ApplyNonVirtualAndVirtualOffset(*this, Value, 
+  Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
                                           NonVirtualOffset,
                                           VirtualOffset);
-  
+
   // Cast to the destination type.
   Value = Builder.CreateBitCast(Value, BasePtrTy);
 
@@ -229,13 +229,13 @@ llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
     llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
     Builder.CreateBr(endBB);
     EmitBlock(endBB);
-    
+
     llvm::PHINode *PHI = Builder.CreatePHI(BasePtrTy, 2, "cast.result");
     PHI->addIncoming(Value, notNullBB);
     PHI->addIncoming(llvm::Constant::getNullValue(BasePtrTy), origBB);
     Value = PHI;
   }
-  
+
   return Value;
 }
 
@@ -253,7 +253,7 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
   llvm::Value *NonVirtualOffset =
     CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
-  
+
   if (!NonVirtualOffset) {
     // No offset, we can just cast back.
     return Builder.CreateBitCast(Value, DerivedPtrTy);
   }
@@ -267,12 +267,12 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
     CastNull = createBasicBlock("cast.null");
     CastNotNull = createBasicBlock("cast.notnull");
     CastEnd = createBasicBlock("cast.end");
-    
+
     llvm::Value *IsNull = Builder.CreateIsNull(Value);
     Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
     EmitBlock(CastNotNull);
   }
-  
+
   // Apply the offset.
   Value = Builder.CreateBitCast(Value, Int8PtrTy);
   Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
@@ -286,14 +286,14 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
     EmitBlock(CastNull);
     Builder.CreateBr(CastEnd);
     EmitBlock(CastEnd);
-    
+
     llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
     PHI->addIncoming(Value, CastNotNull);
-    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), 
+    PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
                      CastNull);
     Value = PHI;
   }
-  
+
   return Value;
 }
 
@@ -304,7 +304,7 @@ llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
     // This constructor/destructor does not need a VTT parameter.
     return nullptr;
   }
-  
+
   const CXXRecordDecl *RD = cast<CXXMethodDecl>(CurCodeDecl)->getParent();
   const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
 
@@ -324,15 +324,15 @@ llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
     SubVTTIndex = 0;
   } else {
     const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
-    CharUnits BaseOffset = ForVirtualBase ? 
-      Layout.getVBaseClassOffset(Base) : 
+    CharUnits BaseOffset = ForVirtualBase ?
+      Layout.getVBaseClassOffset(Base) :
       Layout.getBaseClassOffset(Base);
-    SubVTTIndex = 
+    SubVTTIndex =
       CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
     assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
   }
-  
+
   if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
     // A VTT parameter was passed to the constructor, use it.
     VTT = LoadCXXVTT();
@@ -359,7 +359,7 @@ namespace {
         cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
 
       const CXXDestructorDecl *D = BaseClass->getDestructor();
-      llvm::Value *Addr = 
+      llvm::Value *Addr =
         CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
                                                   DerivedClass, BaseClass,
                                                   BaseIsVirtual);
@@ -392,7 +392,7 @@ static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
   return Checker.UsesThis;
 }
 
-static void EmitBaseInitializer(CodeGenFunction &CGF, 
+static void EmitBaseInitializer(CodeGenFunction &CGF,
                                 const CXXRecordDecl *ClassDecl,
                                 CXXCtorInitializer *BaseInit,
                                 CXXCtorType CtorType) {
@@ -400,7 +400,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
          "Must have base initializer!");
 
   llvm::Value *ThisPtr = CGF.LoadCXXThis();
-  
+
   const Type *BaseType = BaseInit->getBaseClass();
   CXXRecordDecl *BaseClassDecl =
     cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
@@ -419,7 +419,7 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
 
   // We can pretend to be a complete class because it only matters for
   // virtual bases, and we only do virtual bases for complete ctors.
-  llvm::Value *V = 
+  llvm::Value *V =
     CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
                                               BaseClassDecl,
                                               isBaseVirtual);
@@ -431,8 +431,8 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
                                         AggValueSlot::IsNotAliased);
 
   CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
-  
-  if (CGF.CGM.getLangOpts().Exceptions && 
+
+  if (CGF.CGM.getLangOpts().Exceptions &&
       !BaseClassDecl->hasTrivialDestructor())
     CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
                                           isBaseVirtual);
@@ -491,17 +491,17 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
   llvm::Value *IndexVar
     = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
   assert(IndexVar && "Array index variable not loaded");
-  
+
   // Initialize this index variable to zero.
   llvm::Value* Zero
     = llvm::Constant::getNullValue(
        CGF.ConvertType(CGF.getContext().getSizeType()));
   CGF.Builder.CreateStore(Zero, IndexVar);
-  
+
   // Start the loop with a block that tests the condition.
   llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
   llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
-  
+
   CGF.EmitBlock(CondBlock);
 
   llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
@@ -513,7 +513,7 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
       llvm::ConstantInt::get(Counter->getType(), NumElements);
   llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
                                                   "isless");
-  
+
   // If the condition is true, execute the body.
   CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);
 
@@ -566,7 +566,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
   assert(MemberInit->isAnyMemberInitializer() &&
          "Must have member initializer!");
   assert(MemberInit->getInit() && "Must have initializer!");
-  
+
   // non-static data member initializers.
   FieldDecl *Field = MemberInit->getAnyMember();
   QualType FieldType = Field->getType();
@@ -605,7 +605,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
         = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
       LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
       LValue Src = CGF.EmitLValueForFieldInitialization(ThisRHSLV, Field);
-      
+
       // Copy the aggregate.
       CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
                             LHS.isVolatileQualified());
@@ -639,28 +639,28 @@ void CodeGenFunction::EmitInitializerForField(
   llvm::Value *ArrayIndexVar = nullptr;
   if (ArrayIndexes.size()) {
     llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
-    
+
     // The LHS is a pointer to the first object we'll be constructing, as
     // a flat array.
     QualType BaseElementTy = getContext().getBaseElementType(FieldType);
     llvm::Type *BasePtr = ConvertType(BaseElementTy);
     BasePtr = llvm::PointerType::getUnqual(BasePtr);
-    llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), 
+    llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
                                                      BasePtr);
     LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
-    
+
     // Create an array index that will be used to walk over all of the
     // objects we're constructing.
     ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
     llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
     Builder.CreateStore(Zero, ArrayIndexVar);
-    
-    
+
+
     // Emit the block variables for the array indices, if any.
     for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
       EmitAutoVarDecl(*ArrayIndexes[I]);
   }
-  
+
   EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
                            ArrayIndexes, 0);
 }
@@ -867,7 +867,7 @@ namespace {
   public:
     FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
                     const VarDecl *SrcRec)
-      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec), 
+      : CGF(CGF), ClassDecl(ClassDecl), SrcRec(SrcRec),
         RecLayout(CGF.getContext().getASTRecordLayout(ClassDecl)),
         FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0),
         LastFieldOffset(0), LastAddedFieldIndex(0) {}
@@ -893,7 +893,7 @@ namespace {
       unsigned LastFieldSize =
         LastField->isBitField() ?
         LastField->getBitWidthValue(CGF.getContext()) :
-        CGF.getContext().getTypeSize(LastField->getType()); 
+        CGF.getContext().getTypeSize(LastField->getType());
       uint64_t MemcpySizeBits =
         LastFieldOffset + LastFieldSize - FirstByteOffset +
         CGF.getContext().getCharWidth() - 1;
@@ -1204,7 +1204,7 @@ namespace {
       if (F) {
         addMemcpyableField(F);
         AggregatedStmts.push_back(S);
-      } else {  
+      } else {
         emitAggregatedStmts();
         CGF.EmitStmt(S);
       }
@@ -1289,7 +1289,7 @@ static bool FieldHasTrivialDestructorBody(ASTContext &Context,
                                           const FieldDecl *Field);
 
 static bool
-HasTrivialDestructorBody(ASTContext &Context, 
+HasTrivialDestructorBody(ASTContext &Context,
                          const CXXRecordDecl *BaseClassDecl,
                          const CXXRecordDecl *MostDerivedClassDecl) {
@@ -1324,7 +1324,7 @@ HasTrivialDestructorBody(ASTContext &Context,
         cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
       if (!HasTrivialDestructorBody(Context, VirtualBase,
                                     MostDerivedClassDecl))
-        return false;      
+        return false;
     }
   }
 
@@ -1340,7 +1340,7 @@ FieldHasTrivialDestructorBody(ASTContext &Context,
   const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
   if (!RT)
     return true;
-  
+
   CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
   return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
 }
@@ -1389,11 +1389,11 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
 
   // Enter the epilogue cleanups.
   RunCleanupsScope DtorEpilogue(*this);
-  
+
   // If this is the complete variant, just invoke the base variant;
   // the epilogue will destruct the virtual bases. But we can't do
   // this optimization if the body is a function-try-block, because
-  // we'd introduce *two* handler blocks. In the Microsoft ABI, we 
+  // we'd introduce *two* handler blocks. In the Microsoft ABI, we
   // always delegate because we might not have a definition in this TU.
   switch (DtorType) {
   case Dtor_Comdat:
@@ -1414,7 +1414,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
       break;
     }
     // Fallthrough: act like we're in the base variant.
-    
+
   case Dtor_Base:
     assert(Body);
 
@@ -1461,7 +1461,7 @@ void CodeGenFunction::emitImplicitAssignmentOperatorBody(FunctionArgList &Args)
   AssignmentMemcpyizer AM(*this, AssignOp, Args);
 
   for (auto *I : RootCS->body())
-    AM.emitAssignment(I);  
+    AM.emitAssignment(I);
   AM.finish();
 }
 
@@ -1522,7 +1522,7 @@ namespace {
       LValue ThisLV = CGF.MakeAddrLValue(thisValue, RecordTy);
       LValue LV = CGF.EmitLValueForField(ThisLV, field);
       assert(LV.isSimple());
-      
+
       CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
                       flags.isForNormalCleanup() && useEHCleanupForArray);
     }
@@ -1540,7 +1540,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
   // The deleting-destructor phase just needs to call the appropriate
   // operator delete that Sema picked up.
   if (DtorType == Dtor_Deleting) {
-    assert(DD->getOperatorDelete() && 
+    assert(DD->getOperatorDelete() &&
            "operator delete missing - EnterDtorCleanups");
     if (CXXStructorImplicitParamValue) {
       // If there is an implicit param to the deleting dtor, it's a boolean
@@ -1567,7 +1567,7 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
     for (const auto &Base : ClassDecl->vbases()) {
       CXXRecordDecl *BaseClassDecl
         = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
-      
+
       // Ignore trivial destructors.
       if (BaseClassDecl->hasTrivialDestructor())
         continue;
@@ -1581,15 +1581,15 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
   }
 
   assert(DtorType == Dtor_Base);
-  
+
   // Destroy non-virtual bases.
   for (const auto &Base : ClassDecl->bases()) {
     // Ignore virtual bases.
     if (Base.isVirtual())
      continue;
-    
+
     CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
-    
+
     // Ignore trivial destructors.
     if (BaseClassDecl->hasTrivialDestructor())
       continue;
@@ -1670,7 +1670,7 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
     zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
     EmitBlock(loopBB);
   }
-  
+
   // Find the end of the array.
   llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
                                                     "arrayctor.end");
@@ -1690,15 +1690,15 @@ void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
   // Zero initialize the storage, if requested.
   if (zeroInitialize)
     EmitNullInitialization(cur, type);
-  
-  // C++ [class.temporary]p4: 
+
+  // C++ [class.temporary]p4:
   // There are two contexts in which temporaries are destroyed at a different
   // point than the end of the full-expression. The first context is when a
-  // default constructor is called to initialize an element of an array. 
-  // If the constructor has one or more default arguments, the destruction of 
-  // every temporary created in a default argument expression is sequenced 
+  // default constructor is called to initialize an element of an array.
+  // If the constructor has one or more default arguments, the destruction of
+  // every temporary created in a default argument expression is sequenced
   // before the construction of the next array element, if any.
-  
+
   {
     RunCleanupsScope Scope(*this);
@@ -1809,14 +1809,14 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
   llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete);
   assert(D->isInstance() &&
          "Trying to emit a member call expr on a static method!");
-  
+
   const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
-  
+
   CallArgList Args;
-  
+
   // Push the this ptr.
   Args.add(RValue::get(This), D->getThisType(getContext()));
-  
+
   // Push the src ptr.
   QualType QT = *(FPT->param_type_begin());
   llvm::Type *t = CGM.getTypes().ConvertType(QT);
@@ -1959,7 +1959,7 @@ void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
 }
 
 void
-CodeGenFunction::InitializeVTablePointer(BaseSubobject Base, 
+CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
                                          const CXXRecordDecl *NearestVBase,
                                          CharUnits OffsetFromNearestVBase,
                                          const CXXRecordDecl *VTableClass) {
@@ -1982,7 +1982,7 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
   // Compute where to store the address point.
   llvm::Value *VirtualOffset = nullptr;
   CharUnits NonVirtualOffset = CharUnits::Zero();
-  
+
   if (NeedsVirtualOffset) {
     // We need to use the virtual base offset offset because the virtual base
     // might have a different offset in the most derived class.
@@ -1995,12 +1995,12 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
     // We can just use the base offset in the complete class.
     NonVirtualOffset = Base.getBaseOffset();
   }
-  
+
   // Apply the offsets.
   llvm::Value *VTableField = LoadCXXThis();
-  
+
   if (!NonVirtualOffset.isZero() || VirtualOffset)
-    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField, 
+    VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
                                                   NonVirtualOffset,
                                                   VirtualOffset);
 
@@ -2017,7 +2017,7 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
 }
 
 void
-CodeGenFunction::InitializeVTablePointers(BaseSubobject Base, 
+CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
                                           const CXXRecordDecl *NearestVBase,
                                           CharUnits OffsetFromNearestVBase,
                                           bool BaseIsNonVirtualPrimaryBase,
@@ -2030,7 +2030,7 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
     InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
                             VTableClass);
   }
-  
+
   const CXXRecordDecl *RD = Base.getBase();
 
   // Traverse bases.
@@ -2051,7 +2051,7 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
       if (!VBases.insert(BaseDecl).second)
         continue;
 
-      const ASTRecordLayout &Layout = 
+      const ASTRecordLayout &Layout =
        getContext().getASTRecordLayout(VTableClass);
 
       BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
@@ -2061,15 +2061,15 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
       const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
 
      BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
-      BaseOffsetFromNearestVBase = 
+      BaseOffsetFromNearestVBase =
        OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
      BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
     }
-    
-    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset), 
+
+    InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
                              I.isVirtual() ? BaseDecl : NearestVBase,
                              BaseOffsetFromNearestVBase,
-                             BaseDeclIsNonVirtualPrimaryBase, 
+                             BaseDeclIsNonVirtualPrimaryBase,
                              VTableClass, VBases);
   }
 }
@@ -2081,7 +2081,7 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
 
   // Initialize the vtable pointers for this class and all of its bases.
   VisitedVirtualBasesSetTy VBases;
-  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()), 
+  InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
                            /*NearestVBase=*/nullptr,
                            /*OffsetFromNearestVBase=*/CharUnits::Zero(),
                            /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);
@@ -2284,7 +2284,7 @@ CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
     // This is a record decl. We know the type and can devirtualize it.
     return VD->getType()->isRecordType();
   }
-  
+
   return false;
 }
 
@@ -2298,11 +2298,11 @@ CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
   // We can always devirtualize calls on temporary object expressions.
   if (isa<CXXConstructExpr>(Base))
     return true;
-  
+
   // And calls on bound temporaries.
   if (isa<CXXBindTemporaryExpr>(Base))
     return true;
-  
+
   // Check if this is a call expr that returns a record type.
   if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
     return CE->getCallReturnType(getContext())->isRecordType();
@@ -2334,7 +2334,7 @@ void CodeGenFunction::EmitForwardingCallToLambda(
   // We don't need to separately arrange the call arguments because
   // the call can't be variadic anyway --- it's impossible to forward
   // variadic arguments.
-  
+
   // Now emit our call.
   RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, callArgs,
                        callOperator);
@@ -2362,7 +2362,7 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
   for (auto param : BD->params())
     EmitDelegateCallArg(CallArgs, param, param->getLocStart());
 
-  assert(!Lambda->isGenericLambda() && 
+  assert(!Lambda->isGenericLambda() &&
          "generic lambda interconversion to block not implemented");
   EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
 }
@@ -2400,7 +2400,7 @@ void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
     const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
    FunctionTemplateDecl *CallOpTemplate = CallOp->getDescribedFunctionTemplate();
     void *InsertPos = nullptr;
-    FunctionDecl *CorrespondingCallOpSpecialization = 
+    FunctionDecl *CorrespondingCallOpSpecialization =
       CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
     assert(CorrespondingCallOpSpecialization);
     CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
-- 
2.40.0