From 77b89b87c3b9220fea1bc80f6d6598d2003cc8a8 Mon Sep 17 00:00:00 2001
From: Chris Lattner
Date: Sun, 27 Jun 2010 07:15:29 +0000
Subject: [PATCH] finally get around to doing a significant cleanup to irgen:
 have CGF create and make accessible standard int32,int64 and intptr types.
 This fixes a ton of 80 column violations introduced by LLVMContextification
 and cleans up stuff a lot.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@106977 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/CGBlocks.cpp        | 20 +++----
 lib/CodeGen/CGBlocks.h          |  3 +-
 lib/CodeGen/CGBuiltin.cpp       | 97 +++++++++++++--------------------
 lib/CodeGen/CGCall.cpp          |  6 +-
 lib/CodeGen/CGDecl.cpp          | 13 ++---
 lib/CodeGen/CGDeclCXX.cpp       |  1 -
 lib/CodeGen/CGException.cpp     | 11 ++--
 lib/CodeGen/CGExpr.cpp          | 35 ++++--------
 lib/CodeGen/CGExprAgg.cpp       |  9 +--
 lib/CodeGen/CGExprScalar.cpp    | 97 ++++++++++++++------------------
 lib/CodeGen/CGObjCMac.cpp       | 22 ++++----
 lib/CodeGen/CGStmt.cpp          |  5 +-
 lib/CodeGen/CodeGenFunction.cpp | 40 ++++++--------
 lib/CodeGen/CodeGenFunction.h   |  3 +-
 lib/CodeGen/CodeGenTypes.cpp    |  4 +-
 lib/CodeGen/TargetInfo.cpp      | 31 +++++------
 16 files changed, 164 insertions(+), 233 deletions(-)

diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 6edfe4c102..d5fc993c79 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -427,8 +427,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
         llvm::Value *BlockLiteral = LoadBlockStruct();
 
         Loc = Builder.CreateGEP(BlockLiteral,
-                                llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
-                                                       offset.getQuantity()),
+                                llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
                                 "block.literal");
         Ty = llvm::PointerType::get(Ty, 0);
         Loc = Builder.CreateBitCast(Loc, Ty);
@@ -646,8 +645,7 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
   llvm::Value *BlockLiteral = LoadBlockStruct();
 
   llvm::Value *V = Builder.CreateGEP(BlockLiteral,
-                                     llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
-                                                            offset.getQuantity()),
+                                     llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
                                      "block.literal");
   if (IsByRef) {
     const llvm::Type *PtrStructTy
@@ -1028,8 +1026,7 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
       llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
       Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
 
-      llvm::Value *N = llvm::ConstantInt::get(
-          llvm::Type::getInt32Ty(T->getContext()), flag);
+      llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
       llvm::Value *F = getBlockObjectAssign();
       Builder.CreateCall3(F, Dstv, Srcv, N);
     }
@@ -1181,8 +1178,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
 
   flag |= BLOCK_BYREF_CALLER;
 
-  llvm::Value *N = llvm::ConstantInt::get(
-      llvm::Type::getInt32Ty(T->getContext()), flag);
+  llvm::Value *N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
   llvm::Value *F = getBlockObjectAssign();
   Builder.CreateCall3(F, DstObj, SrcObj, N);
@@ -1284,7 +1280,7 @@ llvm::Value *BlockFunction::getBlockObjectDispose() {
     std::vector<const llvm::Type*> ArgTys;
     const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
     ArgTys.push_back(PtrToInt8Ty);
-    ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+    ArgTys.push_back(CGF.Int32Ty);
     FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
     CGM.BlockObjectDispose = CGM.CreateRuntimeFunction(FTy,
                                                        "_Block_object_dispose");
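The whole patch is one mechanical substitution: emitters used to rebuild common integer types from the LLVMContext at every use site, and now read them from members that CodeGenFunction initializes once. A minimal sketch of that caching pattern, written against current LLVM headers rather than the 2010-era API the hunks use (the surrounding struct and helper are illustrative, not code from the patch):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"

    // Cache frequently used integer types once, as the patch makes
    // CodeGenFunction do, instead of re-deriving them at each use site.
    struct TypeCacheSketch {
      llvm::IntegerType *IntPtrTy, *Int32Ty, *Int64Ty;

      TypeCacheSketch(llvm::LLVMContext &C, unsigned PointerWidthInBits)
          : IntPtrTy(llvm::IntegerType::get(C, PointerWidthInBits)),
            Int32Ty(llvm::Type::getInt32Ty(C)),
            Int64Ty(llvm::Type::getInt64Ty(C)) {}

      // Before the patch a call site spelled this as
      //   llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Flag)
      // which rarely fit in 80 columns once nested inside a CreateCall.
      llvm::ConstantInt *getInt32(int Flag) const {
        return llvm::ConstantInt::get(Int32Ty, Flag);
      }
    };
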
@@ -1299,7 +1295,7 @@ llvm::Value *BlockFunction::getBlockObjectAssign() {
     const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
     ArgTys.push_back(PtrToInt8Ty);
     ArgTys.push_back(PtrToInt8Ty);
-    ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
+    ArgTys.push_back(CGF.Int32Ty);
     FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
     CGM.BlockObjectAssign = CGM.CreateRuntimeFunction(FTy,
                                                       "_Block_object_assign");
@@ -1311,7 +1307,7 @@ void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
   llvm::Value *F = getBlockObjectDispose();
   llvm::Value *N;
   V = Builder.CreateBitCast(V, PtrToInt8Ty);
-  N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag);
+  N = llvm::ConstantInt::get(CGF.Int32Ty, flag);
   Builder.CreateCall2(F, V, N);
 }
@@ -1319,7 +1315,7 @@ ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
 
 BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
                              CGBuilderTy &B)
-  : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) {
+  : CGM(cgm), VMContext(cgm.getLLVMContext()), CGF(cgf), Builder(B) {
   PtrToInt8Ty = llvm::PointerType::getUnqual(
             llvm::Type::getInt8Ty(VMContext));
 
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index d0466a86d8..9e3750e3e6 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -121,13 +121,14 @@ public:
 
 class BlockFunction : public BlockBase {
   CodeGenModule &CGM;
-  CodeGenFunction &CGF;
   ASTContext &getContext() const;
 
 protected:
   llvm::LLVMContext &VMContext;
 
 public:
+  CodeGenFunction &CGF;
+
   const llvm::PointerType *PtrToInt8Ty;
   struct HelperInfo {
     int index;
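The constructor hunk just above is not cosmetic: C++ initializes non-static data members in declaration order, not in init-list order, so once CGF moves after VMContext in CGBlocks.h the init list has to be reordered too or compilers emit -Wreorder warnings. A hypothetical reduction of the same situation:

    // C++ runs member initializers in declaration order, regardless of how
    // the constructor's init list is written.
    struct Reordered {
      int A;   // always initialized first
      int B;   // always initialized second

      // Writing ": B(...), A(...)" would still initialize A first and draw a
      // -Wreorder warning; keeping the list in declaration order is honest.
      Reordered(int a) : A(a), B(A + 1) {}  // B(A + 1) is safe: A is set already
    };
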
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 8b19bc0ea0..fff4bacab6 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -85,11 +85,6 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
   return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Args[1]));
 }
 
-static llvm::ConstantInt *getInt32(llvm::LLVMContext &Context, int32_t Value) {
-  return llvm::ConstantInt::get(llvm::Type::getInt32Ty(Context), Value);
-}
-
-
 /// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
 /// which must be a scalar floating point type.
 static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
@@ -284,9 +279,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
     // FIXME: Technically these constants should of type 'int', yes?
     RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
-      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+      llvm::ConstantInt::get(Int32Ty, 0);
     Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
-      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 3);
+      llvm::ConstantInt::get(Int32Ty, 3);
     Value *F = CGM.getIntrinsic(Intrinsic::prefetch, 0, 0);
     return RValue::get(Builder.CreateCall3(F, Address, RW, Locality));
   }
@@ -468,7 +463,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                         Address,
                         llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 0),
                         SizeVal,
-                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+                        llvm::ConstantInt::get(Int32Ty, 1),
                         llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
     return RValue::get(Address);
   }
@@ -480,7 +475,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     Builder.CreateCall5(CGM.getMemCpyFn(Address->getType(), SrcAddr->getType(),
                                         SizeVal->getType()),
                         Address, SrcAddr, SizeVal,
-                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+                        llvm::ConstantInt::get(Int32Ty, 1),
                         llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
     return RValue::get(Address);
   }
@@ -502,7 +497,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     Builder.CreateCall5(CGM.getMemMoveFn(Address->getType(), SrcAddr->getType(),
                                          SizeVal->getType()),
                         Address, SrcAddr, SizeVal,
-                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+                        llvm::ConstantInt::get(Int32Ty, 1),
                         llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
     return RValue::get(Address);
   }
@@ -515,7 +510,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                         Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                             llvm::Type::getInt8Ty(VMContext)),
                         SizeVal,
-                        llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1),
+                        llvm::ConstantInt::get(Int32Ty, 1),
                         llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), 0));
     return RValue::get(Address);
   }
@@ -531,21 +526,18 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
     int32_t Offset = 0;
 
     Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa, 0, 0);
-    return RValue::get(Builder.CreateCall(F, getInt32(VMContext, Offset)));
+    return RValue::get(Builder.CreateCall(F,
+                                      llvm::ConstantInt::get(Int32Ty, Offset)));
   }
   case Builtin::BI__builtin_return_address: {
     Value *Depth = EmitScalarExpr(E->getArg(0));
-    Depth = Builder.CreateIntCast(Depth,
-                                  llvm::Type::getInt32Ty(VMContext),
-                                  false, "tmp");
+    Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
     Value *F = CGM.getIntrinsic(Intrinsic::returnaddress, 0, 0);
     return RValue::get(Builder.CreateCall(F, Depth));
   }
   case Builtin::BI__builtin_frame_address: {
     Value *Depth = EmitScalarExpr(E->getArg(0));
-    Depth = Builder.CreateIntCast(Depth,
-                                  llvm::Type::getInt32Ty(VMContext),
-                                  false, "tmp");
+    Depth = Builder.CreateIntCast(Depth, Int32Ty, false, "tmp");
     Value *F = CGM.getIntrinsic(Intrinsic::frameaddress, 0, 0);
     return RValue::get(Builder.CreateCall(F, Depth));
   }
@@ -618,7 +610,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
       return RValue::get(Result);
 
     // Otherwise, ask the codegen data what to do.
-    const llvm::IntegerType *Int64Ty = llvm::IntegerType::get(C, 64);
     if (getTargetHooks().extendPointerWithSExt())
       return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
     else
@@ -631,15 +622,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
 
     // Store the frame pointer to the setjmp buffer.
     Value *FrameAddr =
       Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
-                         ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
+                         ConstantInt::get(Int32Ty, 0));
     Builder.CreateStore(FrameAddr, Buf);
 
     // Store the stack pointer to the setjmp buffer.
     Value *StackAddr =
       Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
     Value *StackSaveSlot =
-      Builder.CreateGEP(Buf, ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
-                                              2));
+      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
     Builder.CreateStore(StackAddr, StackSaveSlot);
 
     // Call LLVM's EH setjmp, which is lightweight.
@@ -1140,11 +1130,9 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
   case ARM::BI__builtin_neon_vextq_v: {
     ConstantInt *C = dyn_cast<ConstantInt>(Ops[2]);
     int CV = C->getSExtValue();
-    const llvm::Type *I32Ty = llvm::Type::getInt32Ty(VMContext);
-
     SmallVector<Constant*, 16> Indices;
     for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
-      Indices.push_back(ConstantInt::get(I32Ty, i+CV));
+      Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
 
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
@@ -1188,7 +1176,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
     Ty = llvm::PointerType::getUnqual(VTy->getElementType());
     Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
     Ops[0] = Builder.CreateLoad(Ops[0]);
-    llvm::Constant *CI = ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
     Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
     return EmitNeonSplat(Ops[0], CI);
   }
@@ -1271,7 +1259,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
     Args.push_back(Ops[1]);
     Args.append(STy->getNumElements(), UndefValue::get(Ty));
 
-    llvm::Constant *CI = ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+    llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
     Args.push_back(CI);
 
     Ops[1] = Builder.CreateCall(F, Args.begin(), Args.end(), "vld_dup");
@@ -1594,14 +1582,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
-    Ty = llvm::Type::getInt32Ty(VMContext);
     Value *SV;
 
     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<Constant*, 16> Indices;
       for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
-        Indices.push_back(ConstantInt::get(Ty, i+vi));
-        Indices.push_back(ConstantInt::get(Ty, i+e+vi));
+        Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
+        Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
       }
       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
       SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
@@ -1615,13 +1602,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
-    Ty = llvm::Type::getInt32Ty(VMContext);
     Value *SV;
 
     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<Constant*, 16> Indices;
       for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
-        Indices.push_back(ConstantInt::get(Ty, 2*i+vi));
+        Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
 
       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
       SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
@@ -1635,14 +1621,13 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
     Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
     Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
-    Ty = llvm::Type::getInt32Ty(VMContext);
     Value *SV;
 
     for (unsigned vi = 0; vi != 2; ++vi) {
       SmallVector<Constant*, 16> Indices;
       for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
-        Indices.push_back(ConstantInt::get(Ty, (i >> 1)));
-        Indices.push_back(ConstantInt::get(Ty, (i >> 1)+e));
+        Indices.push_back(ConstantInt::get(Int32Ty, (i >> 1)));
+        Indices.push_back(ConstantInt::get(Int32Ty, (i >> 1)+e));
       }
       Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
       SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
@@ -1672,9 +1657,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   case X86::BI__builtin_ia32_psrldi128:
   case X86::BI__builtin_ia32_psrlqi128:
   case X86::BI__builtin_ia32_psrlwi128: {
-    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
-    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 2);
-    llvm::Value *Zero = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
+    const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 2);
+    llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
     Ops[1] = Builder.CreateInsertElement(llvm::UndefValue::get(Ty),
                                          Ops[1], Zero, "insert");
     Ops[1] = Builder.CreateBitCast(Ops[1], Ops[0]->getType(), "bitcast");
@@ -1727,8 +1712,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   case X86::BI__builtin_ia32_psrldi:
   case X86::BI__builtin_ia32_psrlqi:
   case X86::BI__builtin_ia32_psrlwi: {
-    Ops[1] = Builder.CreateZExt(Ops[1], llvm::Type::getInt64Ty(VMContext), "zext");
-    const llvm::Type *Ty = llvm::VectorType::get(llvm::Type::getInt64Ty(VMContext), 1);
+    Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty, "zext");
+    const llvm::Type *Ty = llvm::VectorType::get(Int64Ty, 1);
     Ops[1] = Builder.CreateBitCast(Ops[1], Ty, "bitcast");
     const char *name = 0;
     Intrinsic::ID ID = Intrinsic::not_intrinsic;
@@ -1781,16 +1766,16 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   }
   case X86::BI__builtin_ia32_ldmxcsr: {
     const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
-    Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
-    Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
+    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+    Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
     Builder.CreateStore(Ops[0], Tmp);
     return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
                               Builder.CreateBitCast(Tmp, PtrTy));
   }
   case X86::BI__builtin_ia32_stmxcsr: {
     const llvm::Type *PtrTy = llvm::Type::getInt8PtrTy(VMContext);
-    Value *One = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1);
-    Value *Tmp = Builder.CreateAlloca(llvm::Type::getInt32Ty(VMContext), One, "tmp");
+    Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+    Value *Tmp = Builder.CreateAlloca(Int32Ty, One, "tmp");
     One = Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
                              Builder.CreateBitCast(Tmp, PtrTy));
     return Builder.CreateLoad(Tmp, "stmxcsr");
@@ -1805,16 +1790,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
   }
   case X86::BI__builtin_ia32_storehps:
   case X86::BI__builtin_ia32_storelps: {
-    const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
-    llvm::Type *PtrTy = llvm::PointerType::getUnqual(EltTy);
-    llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
+    llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
+    llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
 
     // cast val v2i64
     Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
 
     // extract (0, 1)
     unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
-    llvm::Value *Idx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), Index);
+    llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
     Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
 
     // cast pointer to i64 & store
@@ -1827,11 +1811,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     // If palignr is shifting the pair of input vectors less than 9 bytes,
     // emit a shuffle instruction.
     if (shiftVal <= 8) {
-      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
-
       llvm::SmallVector<llvm::Constant*, 8> Indices;
       for (unsigned i = 0; i != 8; ++i)
-        Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
 
       Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
       return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
@@ -1841,8 +1823,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     // than 16 bytes, emit a logical right shift of the destination.
     if (shiftVal < 16) {
       // MMX has these as 1 x i64 vectors for some odd optimization reasons.
-      const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
-      const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 1);
+      const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
 
       Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
       Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
@@ -1861,11 +1842,9 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     // If palignr is shifting the pair of input vectors less than 17 bytes,
     // emit a shuffle instruction.
     if (shiftVal <= 16) {
-      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
-
       llvm::SmallVector<llvm::Constant*, 16> Indices;
       for (unsigned i = 0; i != 16; ++i)
-        Indices.push_back(llvm::ConstantInt::get(IntTy, shiftVal + i));
+        Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
 
       Value* SV = llvm::ConstantVector::get(Indices.begin(), Indices.size());
       return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
@@ -1874,12 +1853,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
     // If palignr is shifting the pair of input vectors more than 16 but less
     // than 32 bytes, emit a logical right shift of the destination.
     if (shiftVal < 32) {
-      const llvm::Type *EltTy = llvm::Type::getInt64Ty(VMContext);
-      const llvm::Type *VecTy = llvm::VectorType::get(EltTy, 2);
-      const llvm::Type *IntTy = llvm::Type::getInt32Ty(VMContext);
+      const llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
 
       Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
-      Ops[1] = llvm::ConstantInt::get(IntTy, (shiftVal-16) * 8);
+      Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
 
       // create i32 constant
       llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
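Every palignr and NEON permute case above follows the same recipe: compute the element rotation as a list of i32 lane indices, then emit one shufflevector. A sketch of that recipe against current IRBuilder, which takes the mask as ArrayRef<int> (the 2010 code instead built a ConstantVector of i32 ConstantInts and passed it as a value):

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/IRBuilder.h"

    // Select NumElts lanes starting at ShiftVal from the concatenated
    // Hi:Lo pair, exactly what the palignr cases compute above.
    llvm::Value *emitPalignrSketch(llvm::IRBuilder<> &B, llvm::Value *Lo,
                                   llvm::Value *Hi, unsigned ShiftVal,
                                   unsigned NumElts) {
      llvm::SmallVector<int, 16> Indices;
      for (unsigned i = 0; i != NumElts; ++i)
        Indices.push_back(ShiftVal + i);   // lane taken across the pair
      return B.CreateShuffleVector(Hi, Lo, Indices, "palignr");
    }
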
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 6151fa2ed4..ec865e58c6 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -393,14 +393,12 @@ static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
       return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
 
     // Convert the pointer to an integer so we can play with its width.
-    const llvm::Type *IntPtrTy = llvm::IntegerType::get(Ty->getContext(),
-                                                        CGF.LLVMPointerWidth);
-    Val = CGF.Builder.CreatePtrToInt(Val, IntPtrTy, "coerce.val.pi");
+    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
   }
 
   const llvm::Type *DestIntTy = Ty;
   if (isa<llvm::PointerType>(DestIntTy))
-    DestIntTy = llvm::IntegerType::get(Ty->getContext(), CGF.LLVMPointerWidth);
+    DestIntTy = CGF.IntPtrTy;
 
   if (Val->getType() != DestIntTy)
     Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 0190cac078..7498a07073 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -327,10 +327,10 @@ const llvm::Type *CodeGenFunction::BuildByRefType(const ValueDecl *D) {
   Types.push_back(llvm::PointerType::getUnqual(ByRefTypeHolder));
 
   // int32_t __flags;
-  Types.push_back(llvm::Type::getInt32Ty(VMContext));
+  Types.push_back(Int32Ty);
 
   // int32_t __size;
-  Types.push_back(llvm::Type::getInt32Ty(VMContext));
+  Types.push_back(Int32Ty);
 
   bool HasCopyAndDispose = BlockRequiresCopying(Ty);
   if (HasCopyAndDispose) {
@@ -568,18 +568,18 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
     int isa = 0;
     if (flag&BLOCK_FIELD_IS_WEAK)
       isa = 1;
-    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), isa);
+    V = llvm::ConstantInt::get(Int32Ty, isa);
     V = Builder.CreateIntToPtr(V, PtrToInt8Ty, "isa");
     Builder.CreateStore(V, isa_field);
 
     Builder.CreateStore(DeclPtr, forwarding_field);
 
-    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags);
+    V = llvm::ConstantInt::get(Int32Ty, flags);
     Builder.CreateStore(V, flags_field);
 
     const llvm::Type *V1;
     V1 = cast<llvm::PointerType>(DeclPtr->getType())->getElementType();
-    V = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+    V = llvm::ConstantInt::get(Int32Ty,
                                CGM.GetTargetTypeStoreSize(V1).getQuantity());
     Builder.CreateStore(V, size_field);
 
@@ -613,8 +613,7 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
     assert(Init != 0 && "Wasn't a simple constant init?");
 
     llvm::Value *AlignVal =
-      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
-                             Align.getQuantity());
+      llvm::ConstantInt::get(Int32Ty, Align.getQuantity());
     const llvm::Type *IntPtr =
       llvm::IntegerType::get(VMContext, LLVMPointerWidth);
     llvm::Value *SizeVal =
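For reference, the two i32 fields that BuildByRefType now spells as Int32Ty sit inside the standard Blocks-ABI byref structure that EmitLocalBlockVarDecl fills in (isa, forwarding, flags, size). A plain-struct rendering of that layout, for illustration only (field names mirror the isa_field/forwarding_field/flags_field/size_field accesses in the hunks above):

    #include <cstdint>

    // __block variable storage as BuildByRefType lays it out; the helper
    // pointers are appended only when BlockRequiresCopying() is true.
    struct ByRefSketch {
      void *isa;
      ByRefSketch *forwarding;  // points at the variable's current home
      int32_t flags;            // e.g. BLOCK_FIELD_IS_WEAK, stored as i32
      int32_t size;             // byte size of this structure, stored as i32
      // optional copy/dispose helpers follow, then the variable itself
    };
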
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 7cbadd315e..7fdb895e8e 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -329,7 +329,6 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
   CGM.getMangleContext().mangleGuardVariable(&D, GuardVName);
 
   // Create the guard variable.
-  const llvm::Type *Int64Ty = llvm::Type::getInt64Ty(VMContext);
   llvm::GlobalValue *GuardVariable =
     new llvm::GlobalVariable(CGM.getModule(), Int64Ty,
                              false, GV->getLinkage(),
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 90edd19a33..83d91edb10 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -384,7 +384,7 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
 
   SelectorArgs.push_back(Exc);
   SelectorArgs.push_back(Personality);
-  SelectorArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+  SelectorArgs.push_back(llvm::ConstantInt::get(Int32Ty,
                                                 Proto->getNumExceptions()+1));
 
   for (unsigned i = 0; i < Proto->getNumExceptions(); ++i) {
@@ -406,8 +406,7 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
   Builder.CreateStore(Exc, RethrowPtr);
   Builder.CreateCondBr(Builder.CreateICmpSLT(Selector,
-                                             llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
-                                                                    0)),
+                                             llvm::ConstantInt::get(Int32Ty, 0)),
                        Match, Unwind);
 
   EmitBlock(Match);
@@ -594,8 +593,7 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S,
     // We are required to emit this call to satisfy LLVM, even
     // though we don't use the result.
     llvm::Value *Args[] = {
-      Exc, Personality,
-      llvm::ConstantInt::getNullValue(llvm::Type::getInt32Ty(VMContext))
+      Exc, Personality, llvm::ConstantInt::getNullValue(Int32Ty)
     };
     Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
     Builder.CreateStore(Exc, RethrowPtr);
@@ -738,8 +736,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
   // We are required to emit this call to satisfy LLVM, even
   // though we don't use the result.
   llvm::Value *Args[] = {
-    Exc, Personality,
-    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1)
+    Exc, Personality, llvm::ConstantInt::get(Int32Ty, 1)
  };
  Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
  llvm::CallInst *TerminateCall =
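All three CGException hunks shorten the same idiom: before LLVM grew the landingpad instruction, exception clause data was communicated through the llvm.eh.selector intrinsic, which had to be called with the exception pointer, the personality, and at least one i32 clause argument even on paths that ignore the result. A sketch of the call shape, parameterized over values that the surrounding code builds (the intrinsic itself no longer exists in current LLVM, so this only illustrates the 2010 pattern):

    #include "llvm/IR/IRBuilder.h"

    // Mandatory-but-unused eh.selector call, as in ExitCXXTryStmt and
    // getTerminateHandler above; EHSelectorFn stands for llvm.eh.selector.
    static void emitUnusedSelectorSketch(llvm::IRBuilder<> &Builder,
                                         llvm::Function *EHSelectorFn,
                                         llvm::Value *Exc,
                                         llvm::Value *Personality,
                                         llvm::IntegerType *Int32Ty) {
      llvm::Value *Args[] = {
          Exc,                                      // from llvm.eh.exception
          Personality,                              // personality function
          llvm::ConstantInt::getNullValue(Int32Ty)  // placeholder clause
      };
      Builder.CreateCall(EHSelectorFn, Args);  // required call; result unused
    }
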
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index c41a99796d..46db4e98e7 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -362,24 +362,23 @@ void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
   if (!CatchUndefined)
     return;
 
-  const llvm::Type *Size_tTy
-    = llvm::IntegerType::get(VMContext, LLVMPointerWidth);
-
   Address = Builder.CreateBitCast(Address, PtrToInt8Ty);
-  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &Size_tTy, 1);
-  const llvm::IntegerType *Int1Ty = llvm::IntegerType::get(VMContext, 1);
+
+  const llvm::Type *IntPtrT = IntPtrTy;
+  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, &IntPtrT, 1);
+  const llvm::IntegerType *Int1Ty = llvm::Type::getInt1Ty(VMContext);
 
   // In time, people may want to control this and use a 1 here.
   llvm::Value *Arg = llvm::ConstantInt::get(Int1Ty, 0);
   llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
   llvm::BasicBlock *Cont = createBasicBlock();
   llvm::BasicBlock *Check = createBasicBlock();
-  llvm::Value *NegativeOne = llvm::ConstantInt::get(Size_tTy, -1ULL);
+  llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
   Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
 
   EmitBlock(Check);
   Builder.CreateCondBr(Builder.CreateICmpUGE(C,
-                                             llvm::ConstantInt::get(Size_tTy, Size)),
+                                             llvm::ConstantInt::get(IntPtrTy, Size)),
                        Cont, getTrapBB());
   EmitBlock(Cont);
 }
@@ -732,8 +731,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
   const VectorType *ExprVT = ExprType->getAs<VectorType>();
   if (!ExprVT) {
     unsigned InIdx = getAccessedFieldNo(0, Elts);
-    llvm::Value *Elt = llvm::ConstantInt::get(
-        llvm::Type::getInt32Ty(VMContext), InIdx);
+    llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
     return RValue::get(Builder.CreateExtractElement(Vec, Elt, "tmp"));
   }
 
@@ -743,8 +741,7 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV,
   llvm::SmallVector<llvm::Constant*, 4> Mask;
   for (unsigned i = 0; i != NumResultElts; ++i) {
     unsigned InIdx = getAccessedFieldNo(i, Elts);
-    Mask.push_back(llvm::ConstantInt::get(
-        llvm::Type::getInt32Ty(VMContext), InIdx));
+    Mask.push_back(llvm::ConstantInt::get(Int32Ty, InIdx));
   }
 
   llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
@@ -960,8 +957,7 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
       llvm::SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
       for (unsigned i = 0; i != NumSrcElts; ++i) {
         unsigned InIdx = getAccessedFieldNo(i, Elts);
-        Mask[InIdx] = llvm::ConstantInt::get(
-            llvm::Type::getInt32Ty(VMContext), i);
+        Mask[InIdx] = llvm::ConstantInt::get(Int32Ty, i);
       }
 
       llvm::Value *MaskV = llvm::ConstantVector::get(&Mask[0], Mask.size());
@@ -974,7 +970,6 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
       // FIXME: since we're shuffling with undef, can we just use the indices
       //        into that?  This could be simpler.
       llvm::SmallVector<llvm::Constant*, 4> ExtMask;
-      const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
       unsigned i;
       for (i = 0; i != NumSrcElts; ++i)
         ExtMask.push_back(llvm::ConstantInt::get(Int32Ty, i));
@@ -1005,7 +1000,6 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
   } else {
     // If the Src is a scalar (not a vector) it must be updating one element.
     unsigned InIdx = getAccessedFieldNo(0, Elts);
-    const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
     llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
     Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt, "tmp");
   }
@@ -1345,16 +1339,14 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
     // Emit the vector as an lvalue to get its address.
     LValue LHS = EmitLValue(E->getBase());
     assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
-    Idx = Builder.CreateIntCast(Idx,
-                                llvm::Type::getInt32Ty(VMContext), IdxSigned, "vidx");
+    Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
     return LValue::MakeVectorElt(LHS.getAddress(), Idx,
       E->getBase()->getType().getCVRQualifiers());
   }
 
   // Extend or truncate the index type to 32 or 64-bits.
   if (!Idx->getType()->isIntegerTy(LLVMPointerWidth))
-    Idx = Builder.CreateIntCast(Idx,
-                                llvm::IntegerType::get(VMContext, LLVMPointerWidth),
+    Idx = Builder.CreateIntCast(Idx, IntPtrTy,
                                 IdxSigned, "idxprom");
 
   // FIXME: As llvm implements the object size checking, this can come out.
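The EmitCheck hunk at the top of this file change is the -fcatch-undefined-behavior guard: llvm.objectsize is asked for the pointed-to object's size (the i1 false argument requests the "maximum" variant, as the "use a 1 here" comment notes); a result of -1 means unknown, and anything smaller than the access branches to the shared trap block. The control flow it emits, rendered as plain C++ with the compiler builtin standing in for the intrinsic (whose argument list has grown in later LLVM releases):

    #include <cstddef>

    // Shape of the guard EmitCheck wraps around a checked memory access.
    inline void checkSketch(const void *Address, std::size_t AccessSize) {
      std::size_t N = __builtin_object_size(Address, 0); // llvm.objectsize stand-in
      if (N != static_cast<std::size_t>(-1) && // -1: size unknown, let it pass
          N < AccessSize)                      // provably out of bounds
        __builtin_trap();                      // shared getTrapBB() block
    }
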
@@ -1419,7 +1411,6 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
     assert(Array->getType()->isArrayType() &&
            "Array to pointer decay must have array source type!");
     llvm::Value *ArrayPtr = EmitLValue(Array).getAddress();
-    const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
     llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
     llvm::Value *Args[] = { Zero, Idx };
 
@@ -1451,17 +1442,15 @@ llvm::Constant *GenerateConstantVector(llvm::LLVMContext &VMContext,
                                        llvm::SmallVector<unsigned, 4> &Elts) {
   llvm::SmallVector<llvm::Constant*, 4> CElts;
 
+  const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
   for (unsigned i = 0, e = Elts.size(); i != e; ++i)
-    CElts.push_back(llvm::ConstantInt::get(
-        llvm::Type::getInt32Ty(VMContext), Elts[i]));
+    CElts.push_back(llvm::ConstantInt::get(Int32Ty, Elts[i]));
 
   return llvm::ConstantVector::get(&CElts[0], CElts.size());
 }
 
 LValue CodeGenFunction::
 EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
-  const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
-
   // Emit the base vector as an l-value.
   LValue Base;
 
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 04e0044aaf..9d044fff77 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -818,8 +818,6 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
   std::pair<uint64_t, unsigned> TypeInfo = getContext().getTypeInfo(Ty);
 
   // FIXME: Handle variable sized types.
-  const llvm::Type *IntPtr =
-    llvm::IntegerType::get(VMContext, LLVMPointerWidth);
 
   // FIXME: If we have a volatile struct, the optimizer can remove what might
   // appear to be `extra' memory ops:
@@ -835,7 +833,6 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
   // either the source or the destination is volatile.
   const llvm::Type *I1Ty = llvm::Type::getInt1Ty(VMContext);
   const llvm::Type *I8Ty = llvm::Type::getInt8Ty(VMContext);
-  const llvm::Type *I32Ty = llvm::Type::getInt32Ty(VMContext);
 
   const llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
   const llvm::Type *DBP = llvm::PointerType::get(I8Ty, DPT->getAddressSpace());
@@ -872,10 +869,10 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
   }
 
   Builder.CreateCall5(CGM.getMemCpyFn(DestPtr->getType(), SrcPtr->getType(),
-                                      IntPtr),
+                                      IntPtrTy),
                       DestPtr, SrcPtr,
                       // TypeInfo.first describes size in bits.
-                      llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
-                      llvm::ConstantInt::get(I32Ty, TypeInfo.second/8),
+                      llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
+                      llvm::ConstantInt::get(Int32Ty, TypeInfo.second/8),
                       llvm::ConstantInt::get(I1Ty, isVolatile));
 }
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 208e1a6c89..d0d89c66a4 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -449,8 +449,6 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
   if (DstType->isVoidType()) return 0;
 
-  llvm::LLVMContext &VMContext = CGF.getLLVMContext();
-
   // Handle conversions to bool first, they are special: comparisons against 0.
   if (DstType->isBooleanType())
     return EmitConversionToBool(Src, SrcType);
@@ -472,8 +470,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
     assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
     // First, convert to the correct width so that we control the kind of
     // extension.
-    const llvm::Type *MiddleTy =
-      llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+    const llvm::Type *MiddleTy = CGF.IntPtrTy;
     bool InputSigned = SrcType->isSignedIntegerType();
     llvm::Value* IntResult =
       Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -495,16 +492,14 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
 
     // Insert the element in element zero of an undef vector
     llvm::Value *UnV = llvm::UndefValue::get(DstTy);
-    llvm::Value *Idx =
-      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+    llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
     UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
 
     // Splat the element across to all elements
    llvm::SmallVector<llvm::Constant*, 16> Args;
     unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
     for (unsigned i = 0; i < NumElements; i++)
-      Args.push_back(llvm::ConstantInt::get(
-          llvm::Type::getInt32Ty(VMContext), 0));
+      Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
 
     llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
     llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
@@ -595,11 +590,10 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
   // Vector Mask Case
   if (E->getNumSubExprs() == 2 ||
       (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) {
-    Value* LHS = CGF.EmitScalarExpr(E->getExpr(0));
-    Value* RHS = CGF.EmitScalarExpr(E->getExpr(1));
-    Value* Mask;
+    Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
+    Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
+    Value *Mask;
 
-    const llvm::Type *I32Ty = llvm::Type::getInt32Ty(CGF.getLLVMContext());
     const llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
     unsigned LHSElts = LTy->getNumElements();
 
@@ -609,8 +603,8 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
       // Shuffle LHS & RHS into one input vector.
       llvm::SmallVector<llvm::Constant*, 32> concat;
       for (unsigned i = 0; i != LHSElts; ++i) {
-        concat.push_back(llvm::ConstantInt::get(I32Ty, 2*i));
-        concat.push_back(llvm::ConstantInt::get(I32Ty, 2*i+1));
+        concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i));
+        concat.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 2*i+1));
       }
 
       Value* CV = llvm::ConstantVector::get(concat.begin(), concat.size());
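The scalar-to-vector conversion above uses the classic splat idiom: insert the scalar into lane 0 of an undef vector, then shufflevector with an all-zero mask so every lane reads lane 0. Current IRBuilder wraps the same sequence as CreateVectorSplat; a sketch spelling it out the way the 2010 code does, but with today's ArrayRef-of-int mask API:

    #include "llvm/ADT/SmallVector.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/IRBuilder.h"

    // Broadcast Elt into every lane of a NumElements-wide vector.
    llvm::Value *splatSketch(llvm::IRBuilder<> &B, llvm::Value *Elt,
                             unsigned NumElements) {
      llvm::Type *VecTy =
          llvm::FixedVectorType::get(Elt->getType(), NumElements);
      llvm::Value *V = llvm::UndefValue::get(VecTy);
      V = B.CreateInsertElement(V, Elt, B.getInt32(0), "tmp");
      llvm::SmallVector<int, 16> Zeros(NumElements, 0); // all lanes <- lane 0
      return B.CreateShuffleVector(V, V, Zeros, "splat");
    }
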
@@ -652,16 +646,17 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
                                                   MTy->getNumElements());
     Value* NewV = llvm::UndefValue::get(RTy);
     for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
-      Value *Indx = llvm::ConstantInt::get(I32Ty, i);
+      Value *Indx = llvm::ConstantInt::get(CGF.Int32Ty, i);
       Indx = Builder.CreateExtractElement(Mask, Indx, "shuf_idx");
-      Indx = Builder.CreateZExt(Indx, I32Ty, "idx_zext");
+      Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext");
 
       // Handle vec3 special since the index will be off by one for the RHS.
       if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) {
         Value *cmpIndx, *newIndx;
-        cmpIndx = Builder.CreateICmpUGT(Indx, llvm::ConstantInt::get(I32Ty, 3),
+        cmpIndx = Builder.CreateICmpUGT(Indx,
+                                        llvm::ConstantInt::get(CGF.Int32Ty, 3),
                                         "cmp_shuf_idx");
-        newIndx = Builder.CreateSub(Indx, llvm::ConstantInt::get(I32Ty, 1),
+        newIndx = Builder.CreateSub(Indx, llvm::ConstantInt::get(CGF.Int32Ty,1),
                                     "shuf_idx_adj");
         Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx");
       }
@@ -720,10 +715,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
   Value *Base = Visit(E->getBase());
   Value *Idx  = Visit(E->getIdx());
   bool IdxSigned = E->getIdx()->getType()->isSignedIntegerType();
-  Idx = Builder.CreateIntCast(Idx,
-                              llvm::Type::getInt32Ty(CGF.getLLVMContext()),
-                              IdxSigned,
-                              "vecidxcast");
+  Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vecidxcast");
   return Builder.CreateExtractElement(Base, Idx, "vecext");
 }
 
@@ -752,7 +744,6 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
     return Visit(E->getInit(0));
 
   unsigned ResElts = VType->getNumElements();
-  const llvm::Type *I32Ty = llvm::Type::getInt32Ty(CGF.getLLVMContext());
 
   // Loop over initializers collecting the Value for each, and remembering
   // whether the source was swizzle (ExtVectorElementExpr). This will allow
@@ -783,7 +774,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
           // insert into undef -> shuffle (src, undef)
           Args.push_back(C);
           for (unsigned j = 1; j != ResElts; ++j)
-            Args.push_back(llvm::UndefValue::get(I32Ty));
+            Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
 
           LHS = EI->getVectorOperand();
           RHS = V;
@@ -792,11 +783,11 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
           // insert into undefshuffle && size match -> shuffle (v, src)
           llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
           for (unsigned j = 0; j != CurIdx; ++j)
-            Args.push_back(getMaskElt(SVV, j, 0, I32Ty));
-          Args.push_back(llvm::ConstantInt::get(I32Ty,
-                                                ResElts + C->getZExtValue()));
+            Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
+          Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
+                                                ResElts + C->getZExtValue()));
           for (unsigned j = CurIdx + 1; j != ResElts; ++j)
-            Args.push_back(llvm::UndefValue::get(I32Ty));
+            Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
 
           LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
           RHS = EI->getVectorOperand();
@@ -810,7 +801,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
         }
       }
     }
-    Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx);
+    Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
     V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
     VIsUndefShuffle = false;
     ++CurIdx;
@@ -834,15 +825,15 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
           // this shuffle directly into it.
           if (VIsUndefShuffle) {
             Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
-                                      I32Ty));
+                                      CGF.Int32Ty));
           } else {
-            Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+            Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
           }
         }
       }
       for (unsigned j = 0, je = InitElts; j != je; ++j)
-        Args.push_back(getMaskElt(SVI, j, Offset, I32Ty));
+        Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
       for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
-        Args.push_back(llvm::UndefValue::get(I32Ty));
+        Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
 
       if (VIsUndefShuffle)
         V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
@@ -855,20 +846,20 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
     // to the vector initializer into V.
     if (Args.empty()) {
       for (unsigned j = 0; j != InitElts; ++j)
-        Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+        Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
       for (unsigned j = InitElts; j != ResElts; ++j)
-        Args.push_back(llvm::UndefValue::get(I32Ty));
+        Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
       llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], ResElts);
       Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
                                          Mask, "vext");
 
       Args.clear();
       for (unsigned j = 0; j != CurIdx; ++j)
-        Args.push_back(llvm::ConstantInt::get(I32Ty, j));
+        Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j));
       for (unsigned j = 0; j != InitElts; ++j)
-        Args.push_back(llvm::ConstantInt::get(I32Ty, j+Offset));
+        Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, j+Offset));
       for (unsigned j = CurIdx + InitElts; j != ResElts; ++j)
-        Args.push_back(llvm::UndefValue::get(I32Ty));
+        Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
     }
 
     // If V is undef, make sure it ends up on the RHS of the shuffle to aid
@@ -887,7 +878,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
 
   // Emit remaining default initializers
   for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
-    Value *Idx = llvm::ConstantInt::get(I32Ty, CurIdx);
+    Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, CurIdx);
     llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
     V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
   }
@@ -1030,8 +1021,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
 
     // First, convert to the correct width so that we control the kind of
     // extension.
-    const llvm::Type *MiddleTy =
-      llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+    const llvm::Type *MiddleTy = CGF.IntPtrTy;
     bool InputSigned = E->getType()->isSignedIntegerType();
     llvm::Value* IntResult =
       Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -1052,16 +1042,14 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
 
     // Insert the element in element zero of an undef vector
     llvm::Value *UnV = llvm::UndefValue::get(DstTy);
-    llvm::Value *Idx =
-      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+    llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
     UnV = Builder.CreateInsertElement(UnV, Elt, Idx, "tmp");
 
     // Splat the element across to all elements
     llvm::SmallVector<llvm::Constant*, 16> Args;
     unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
     for (unsigned i = 0; i < NumElements; i++)
-      Args.push_back(llvm::ConstantInt::get(
-          llvm::Type::getInt32Ty(VMContext), 0));
+      Args.push_back(llvm::ConstantInt::get(CGF.Int32Ty, 0));
 
     llvm::Constant *Mask = llvm::ConstantVector::get(&Args[0], NumElements);
     llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
@@ -1144,8 +1132,7 @@ EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
   llvm::Value *NextVal;
   if (const llvm::PointerType *PT =
       dyn_cast<llvm::PointerType>(InVal->getType())) {
-    llvm::Constant *Inc =
-      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal);
+    llvm::Constant *Inc = llvm::ConstantInt::get(CGF.Int32Ty, AmountVal);
     if (!isa<llvm::FunctionType>(PT->getElementType())) {
       QualType PTEE = ValTy->getPointeeType();
       if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) {
@@ -1484,20 +1471,20 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
   //      long long *__overflow_handler)(long long a, long long b, char op,
   //                                     char width)
   std::vector<const llvm::Type*> handerArgTypes;
-  handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
-  handerArgTypes.push_back(llvm::Type::getInt64Ty(VMContext));
+  handerArgTypes.push_back(CGF.Int64Ty);
+  handerArgTypes.push_back(CGF.Int64Ty);
   handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
   handerArgTypes.push_back(llvm::Type::getInt8Ty(VMContext));
-  llvm::FunctionType *handlerTy = llvm::FunctionType::get(
-      llvm::Type::getInt64Ty(VMContext), handerArgTypes, false);
+  llvm::FunctionType *handlerTy =
+      llvm::FunctionType::get(CGF.Int64Ty, handerArgTypes, false);
   llvm::Value *handlerFunction =
     CGF.CGM.getModule().getOrInsertGlobal("__overflow_handler",
         llvm::PointerType::getUnqual(handlerTy));
   handlerFunction = Builder.CreateLoad(handlerFunction);
 
   llvm::Value *handlerResult = Builder.CreateCall4(handlerFunction,
-      Builder.CreateSExt(Ops.LHS, llvm::Type::getInt64Ty(VMContext)),
-      Builder.CreateSExt(Ops.RHS, llvm::Type::getInt64Ty(VMContext)),
+      Builder.CreateSExt(Ops.LHS, CGF.Int64Ty),
+      Builder.CreateSExt(Ops.RHS, CGF.Int64Ty),
       llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), OpID),
       llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext),
         cast<llvm::IntegerType>(opTy)->getBitWidth()));
@@ -1568,8 +1555,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
   if (Width < CGF.LLVMPointerWidth) {
     // Zero or sign extend the pointer value based on whether the index is
     // signed or not.
-    const llvm::Type *IdxType =
-      llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+    const llvm::Type *IdxType = CGF.IntPtrTy;
     if (IdxExp->getType()->isSignedIntegerType())
       Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
     else
@@ -1642,8 +1628,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
     if (Width < CGF.LLVMPointerWidth) {
       // Zero or sign extend the pointer value based on whether the index is
       // signed or not.
-      const llvm::Type *IdxType =
-        llvm::IntegerType::get(VMContext, CGF.LLVMPointerWidth);
+      const llvm::Type *IdxType = CGF.IntPtrTy;
       if (BinOp->getRHS()->getType()->isSignedIntegerType())
         Idx = Builder.CreateSExt(Idx, IdxType, "idx.ext");
       else
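The overflow hunk above also documents a small runtime ABI: on checked-arithmetic overflow, codegen loads a global function pointer named __overflow_handler and calls it with both operands sign-extended to 64 bits, an opcode byte, and the operand bit width, using whatever it returns as the result. Reconstructed from the FunctionType built in that hunk, as a plain C++ declaration (the typedef name is mine, not clang's):

    #include <cstdint>

    extern "C" {
    // i64 (i64, i64, i8, i8), matching handlerTy in the hunk above.
    typedef int64_t (*overflow_handler_t)(int64_t a, int64_t b,
                                          char op, char width);
    // Reached through a load of this global, then an indirect call.
    extern overflow_handler_t __overflow_handler;
    }
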
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index f73cfa4af6..0f88858f2e 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -3024,12 +3024,14 @@ void CGObjCCommonMac::EmitImageInfo() {
   // We never allow @synthesize of a superclass property.
   flags |= eImageInfo_CorrectedSynthesize;
 
+  const llvm::Type *Int32Ty = llvm::Type::getInt32Ty(VMContext);
+
   // Emitted as int[2];
   llvm::Constant *values[2] = {
-    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), version),
-    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), flags)
+    llvm::ConstantInt::get(Int32Ty, version),
+    llvm::ConstantInt::get(Int32Ty, flags)
   };
-  llvm::ArrayType *AT = llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext), 2);
+  llvm::ArrayType *AT = llvm::ArrayType::get(Int32Ty, 2);
 
   const char *Section;
   if (ObjCABI == 1)
@@ -3985,8 +3987,9 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
                          llvm::Type::getInt8PtrTy(VMContext), 4);
 
   ExceptionDataTy =
-    llvm::StructType::get(VMContext, llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
-                                                          SetJmpBufferSize),
+    llvm::StructType::get(VMContext,
+                          llvm::ArrayType::get(llvm::Type::getInt32Ty(VMContext),
+                                               SetJmpBufferSize),
                           StackPtrTy, NULL);
   CGM.getModule().addTypeName("struct._objc_exception_data",
                               ExceptionDataTy);
@@ -5743,8 +5746,7 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
     // though we don't use the result.
     CGF.Builder.CreateCall3(llvm_eh_selector, Exc,
                             ObjCTypes.getEHPersonalityPtr(),
-                            llvm::ConstantInt::get(
-                                llvm::Type::getInt32Ty(VMContext), 0),
+                            llvm::ConstantInt::get(CGF.Int32Ty, 0),
                             "unused_eh_selector");
     CGF.Builder.CreateStore(Exc, RethrowPtr);
     CGF.EmitBranchThroughCleanup(FinallyRethrow);
@@ -5774,8 +5776,7 @@ CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
     // though we don't use the result.
     CGF.Builder.CreateCall3(llvm_eh_selector, Exc,
                             ObjCTypes.getEHPersonalityPtr(),
-                            llvm::ConstantInt::get(
-                                llvm::Type::getInt32Ty(VMContext), 0),
+                            llvm::ConstantInt::get(CGF.Int32Ty, 0),
                             "unused_eh_selector");
     CGF.Builder.CreateStore(Exc, RethrowPtr);
     CGF.EmitBranchThroughCleanup(FinallyRethrow);
@@ -5900,7 +5901,8 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
                                      llvm::GlobalValue::ExternalLinkage,
                                      0, VTableName);
 
-  llvm::Value *VTableIdx = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2);
+  llvm::Value *VTableIdx =
+    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 2);
 
   std::vector<llvm::Constant*> Values(3);
   Values[0] = llvm::ConstantExpr::getGetElementPtr(VTableGV, &VTableIdx, 1);
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 22c154ad1e..aa54717e61 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -1066,8 +1066,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
           getContext().getTypeSize(InputTy)) {
         // Use ptrtoint as appropriate so that we can do our extension.
         if (isa<llvm::PointerType>(Arg->getType()))
-          Arg = Builder.CreatePtrToInt(Arg,
-                                       llvm::IntegerType::get(VMContext, LLVMPointerWidth));
+          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
         const llvm::Type *OutputTy = ConvertType(OutputType);
         if (isa<llvm::IntegerType>(OutputTy))
           Arg = Builder.CreateZExt(Arg, OutputTy);
@@ -1132,7 +1131,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
   // call.
   unsigned LocID = S.getAsmString()->getLocStart().getRawEncoding();
   llvm::Value *LocIDC =
-    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), LocID);
+    llvm::ConstantInt::get(Int32Ty, LocID);
   Result->setMetadata("srcloc", llvm::MDNode::get(VMContext, &LocIDC, 1));
 
   // Extract all of the register value results from the asm.
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 696c8f5165..319144e89d 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -34,8 +34,14 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
     SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
     CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
     ConditionalBranchLevel(0), TerminateHandler(0), TrapBB(0) {
-  LLVMIntTy = ConvertType(getContext().IntTy);
+
+  // Get some frequently used types.
   LLVMPointerWidth = Target.getPointerWidth(0);
+  llvm::LLVMContext &LLVMContext = CGM.getLLVMContext();
+  IntPtrTy = llvm::IntegerType::get(LLVMContext, LLVMPointerWidth);
+  Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
+  Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+
   Exceptions = getContext().getLangOptions().Exceptions;
   CatchUndefined = getContext().getLangOptions().CatchUndefined;
   CGM.getMangleContext().startNewFunction();
@@ -195,7 +201,7 @@ void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
   llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
   llvm::CallInst *CallSite = Builder.CreateCall(
     CGM.getIntrinsic(llvm::Intrinsic::returnaddress, 0, 0),
-    llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0),
+    llvm::ConstantInt::get(Int32Ty, 0),
     "callsite");
 
   Builder.CreateCall2(F,
@@ -230,10 +236,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
   // Create a marker to make it easy to insert allocas into the entryblock
   // later.  Don't create this with the builder, because we don't want it
   // folded.
-  llvm::Value *Undef = llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext));
-  AllocaInsertPt = new llvm::BitCastInst(Undef,
-                                         llvm::Type::getInt32Ty(VMContext), "",
-                                         EntryBB);
+  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
+  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
   if (Builder.isNamePreserving())
     AllocaInsertPt->setName("allocapt");
 
@@ -558,15 +562,11 @@ CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
     return;
 
   // FIXME: Handle variable sized types.
-  const llvm::Type *IntPtr = llvm::IntegerType::get(VMContext,
-                                                    LLVMPointerWidth);
-
-  Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtr), DestPtr,
+  Builder.CreateCall5(CGM.getMemSetFn(BP, IntPtrTy), DestPtr,
                       llvm::Constant::getNullValue(llvm::Type::getInt8Ty(VMContext)),
                       // TypeInfo.first describes size in bits.
-                      llvm::ConstantInt::get(IntPtr, TypeInfo.first/8),
-                      llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
-                                             TypeInfo.second/8),
+                      llvm::ConstantInt::get(IntPtrTy, TypeInfo.first/8),
+                      llvm::ConstantInt::get(Int32Ty, TypeInfo.second/8),
                       llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
                                              0));
 }
@@ -719,9 +719,7 @@ CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() {
 
   Builder.SetInsertPoint(SwitchBlock);
 
-  llvm::Value *DestCodePtr
-    = CreateTempAlloca(llvm::Type::getInt32Ty(VMContext),
-                       "cleanup.dst");
+  llvm::Value *DestCodePtr = CreateTempAlloca(Int32Ty, "cleanup.dst");
   llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
 
   // Create a switch instruction to determine where to jump next.
@@ -734,7 +732,7 @@ CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() {
 
     // If we had a current basic block, we also need to emit an instruction
    // to initialize the cleanup destination.
-    Builder.CreateStore(llvm::Constant::getNullValue(llvm::Type::getInt32Ty(VMContext)),
+    Builder.CreateStore(llvm::Constant::getNullValue(Int32Ty),
                         DestCodePtr);
   } else
     Builder.ClearInsertionPoint();
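The StartFunction hunk above preserves a small trick worth calling out: the alloca insertion point is a throwaway "bitcast i32 undef" created with `new BitCastInst` rather than through the IRBuilder, because the builder would constant-fold it away and leave nothing stable to insert temporaries before. A sketch of the same marker, assuming current LLVM headers:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/Instructions.h"

    // Dummy no-op instruction used purely as a stable insertion point for
    // later CreateTempAlloca calls; deliberately built without IRBuilder.
    static llvm::Instruction *makeAllocaMarkerSketch(llvm::IntegerType *Int32Ty,
                                                     llvm::BasicBlock *EntryBB) {
      llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
      return new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
    }
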
@@ -751,14 +749,13 @@ CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() {
 
       // Check if we already have a destination for this block.
       if (Dest == SI->getDefaultDest())
-        ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0);
+        ID = llvm::ConstantInt::get(Int32Ty, 0);
       else {
         ID = SI->findCaseDest(Dest);
         if (!ID) {
           // No code found, get a new unique one by using the number of
           // switch successors.
-          ID = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
-                                      SI->getNumSuccessors());
+          ID = llvm::ConstantInt::get(Int32Ty, SI->getNumSuccessors());
           SI->addCase(ID, Dest);
         }
       }
@@ -775,8 +772,7 @@ CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() {
 
       // Create a unique case ID.
       llvm::ConstantInt *ID
-        = llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
-                                 SI->getNumSuccessors());
+        = llvm::ConstantInt::get(Int32Ty, SI->getNumSuccessors());
 
       // Store the jump destination before the branch instruction.
       new llvm::StoreInst(ID, DestCodePtr, BI);
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 04f5e959c5..9466b1c26f 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -103,7 +103,8 @@ public:
   /// we prefer to insert allocas.
   llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
 
-  const llvm::Type *LLVMIntTy;
+  // intptr_t, i32, i64
+  const llvm::IntegerType *IntPtrTy, *Int32Ty, *Int64Ty;
   uint32_t LLVMPointerWidth;
   bool Exceptions;
 
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index c94dd89d5c..f04b2ee3e9 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -201,7 +201,7 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
     case BuiltinType::ObjCSel:
       // LLVM void type can only be used as the result of a function call.  Just
       // map to the same as char.
-      return llvm::IntegerType::get(getLLVMContext(), 8);
+      return llvm::Type::getInt8Ty(getLLVMContext());
 
     case BuiltinType::Bool:
       // Note that we always return bool as i1 for use as a scalar type.
@@ -233,7 +233,7 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
 
     case BuiltinType::NullPtr: {
       // Model std::nullptr_t as i8*
-      const llvm::Type *Ty = llvm::IntegerType::get(getLLVMContext(), 8);
+      const llvm::Type *Ty = llvm::Type::getInt8Ty(getLLVMContext());
       return llvm::PointerType::getUnqual(Ty);
     }
 
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index 667220063b..e95b3b6e90 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -599,8 +599,7 @@ llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
   uint64_t Offset =
     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
   llvm::Value *NextAddr =
-    Builder.CreateGEP(Addr, llvm::ConstantInt::get(
-                          llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                       "ap.next");
   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
 
@@ -1380,12 +1379,11 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
     // overflow_arg_area = (overflow_arg_area + 15) & ~15;
     llvm::Value *Offset =
-      llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()), 15);
+      llvm::ConstantInt::get(CGF.Int32Ty, 15);
     overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
     llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
-                                                    llvm::Type::getInt64Ty(CGF.getLLVMContext()));
-    llvm::Value *Mask = llvm::ConstantInt::get(
-        llvm::Type::getInt64Ty(CGF.getLLVMContext()), ~15LL);
+                                                    CGF.Int64Ty);
+    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~15LL);
     overflow_arg_area =
       CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                  overflow_arg_area->getType(),
@@ -1405,8 +1403,7 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
   uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
   llvm::Value *Offset =
-    llvm::ConstantInt::get(llvm::Type::getInt32Ty(CGF.getLLVMContext()),
-                           (SizeInBytes + 7)  & ~7);
+    llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7)  & ~7);
   overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                             "overflow_arg_area.next");
   CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
@@ -1418,7 +1415,6 @@ static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
   llvm::LLVMContext &VMContext = CGF.getLLVMContext();
-  const llvm::Type *i32Ty = llvm::Type::getInt32Ty(VMContext);
   const llvm::Type *DoubleTy = llvm::Type::getDoubleTy(VMContext);
 
   // Assume that va_list type is correct; should be pointer to LLVM type:
@@ -1458,7 +1454,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
     gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
     InRegs =
       CGF.Builder.CreateICmpULE(gp_offset,
-                                llvm::ConstantInt::get(i32Ty,
+                                llvm::ConstantInt::get(CGF.Int32Ty,
                                                        48 - neededInt * 8),
                                 "fits_in_gp");
   }
@@ -1468,7 +1464,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
     fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
     llvm::Value *FitsInFP =
       CGF.Builder.CreateICmpULE(fp_offset,
-                                llvm::ConstantInt::get(i32Ty,
+                                llvm::ConstantInt::get(CGF.Int32Ty,
                                                        176 - neededSSE * 16),
                                 "fits_in_fp");
     InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
@@ -1537,7 +1533,7 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
     llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
     llvm::Value *RegAddrHi =
       CGF.Builder.CreateGEP(RegAddrLo,
-                            llvm::ConstantInt::get(i32Ty, 16));
+                            llvm::ConstantInt::get(CGF.Int32Ty, 16));
     const llvm::Type *DblPtrTy =
       llvm::PointerType::getUnqual(DoubleTy);
     const llvm::StructType *ST = llvm::StructType::get(VMContext, DoubleTy,
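The 48 and 176 in the fits_in_gp/fits_in_fp hunks are the System V x86-64 va_list layout: the register save area holds six 8-byte GP slots (byte offsets 0 through 48) followed by eight 16-byte FP slots (offsets 48 through 176), and gp_offset/fp_offset index into it. The in-register test those hunks emit, rendered as plain C++:

    #include <cstdint>

    // Does the next va_arg still fit in the x86-64 register save area?
    inline bool fitsInRegistersSketch(uint32_t gp_offset, uint32_t fp_offset,
                                      unsigned neededInt, unsigned neededSSE) {
      bool fits_in_gp = !neededInt || gp_offset <= 48 - neededInt * 8;
      bool fits_in_fp = !neededSSE || fp_offset <= 176 - neededSSE * 16;
      return fits_in_gp && fits_in_fp;   // otherwise spill to overflow area
    }
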
@@ -1558,12 +1554,12 @@ llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
   // l->gp_offset = l->gp_offset + num_gp * 8
   // l->fp_offset = l->fp_offset + num_fp * 16.
   if (neededInt) {
-    llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededInt * 8);
+    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                             gp_offset_p);
   }
   if (neededSSE) {
-    llvm::Value *Offset = llvm::ConstantInt::get(i32Ty, neededSSE * 16);
+    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
     CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                             fp_offset_p);
   }
@@ -1636,7 +1632,7 @@ ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
 }
 
 llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                       CodeGenFunction &CGF) const {
+                                     CodeGenFunction &CGF) const {
   const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
   const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
 
@@ -1995,7 +1991,7 @@ ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
 }
 
 llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
-                                      CodeGenFunction &CGF) const {
+                                   CodeGenFunction &CGF) const {
   // FIXME: Need to handle alignment
   const llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
   const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
@@ -2011,8 +2007,7 @@ llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
   uint64_t Offset =
     llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
   llvm::Value *NextAddr =
-    Builder.CreateGEP(Addr, llvm::ConstantInt::get(
-                          llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                       "ap.next");
   Builder.CreateStore(NextAddr, VAListAddrAsBPP);
 
-- 
2.40.0