1 //===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This is the internal per-function state used for llvm translation.
12 //===----------------------------------------------------------------------===//
14 #ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
15 #define CLANG_CODEGEN_CODEGENFUNCTION_H
17 #include "CGBuilder.h"
18 #include "CGDebugInfo.h"
19 #include "CGLoopInfo.h"
21 #include "CodeGenModule.h"
22 #include "CodeGenPGO.h"
23 #include "EHScopeStack.h"
24 #include "clang/AST/CharUnits.h"
25 #include "clang/AST/ExprCXX.h"
26 #include "clang/AST/ExprObjC.h"
27 #include "clang/AST/Type.h"
28 #include "clang/Basic/ABI.h"
29 #include "clang/Basic/CapturedStmt.h"
30 #include "clang/Basic/TargetInfo.h"
31 #include "clang/Frontend/CodeGenOptions.h"
32 #include "llvm/ADT/ArrayRef.h"
33 #include "llvm/ADT/DenseMap.h"
34 #include "llvm/ADT/SmallVector.h"
35 #include "llvm/IR/ValueHandle.h"
36 #include "llvm/Support/Debug.h"
52 class CXXDestructorDecl;
53 class CXXForRangeStmt;
57 class EnumConstantDecl;
59 class FunctionProtoType;
61 class ObjCContainerDecl;
62 class ObjCInterfaceDecl;
65 class ObjCImplementationDecl;
66 class ObjCPropertyImplDecl;
68 class TargetCodeGenInfo;
70 class ObjCForCollectionStmt;
72 class ObjCAtThrowStmt;
73 class ObjCAtSynchronizedStmt;
74 class ObjCAutoreleasePoolStmt;
83 class BlockFieldFlags;
85 /// The kind of evaluation to perform on values of a particular
86 /// type. Basically, is the code in CGExprScalar, CGExprComplex, or
89 /// TODO: should vectors maybe be split out into their own thing?
90 enum TypeEvaluationKind {
96 class SuppressDebugLocation {
97 llvm::DebugLoc CurLoc;
98 llvm::IRBuilderBase &Builder;
100 SuppressDebugLocation(llvm::IRBuilderBase &Builder)
101 : CurLoc(Builder.getCurrentDebugLocation()), Builder(Builder) {
102 Builder.SetCurrentDebugLocation(llvm::DebugLoc());
104 ~SuppressDebugLocation() {
105 Builder.SetCurrentDebugLocation(CurLoc);
109 /// CodeGenFunction - This class organizes the per-function state that is used
110 /// while generating LLVM code.
111 class CodeGenFunction : public CodeGenTypeCache {
112 CodeGenFunction(const CodeGenFunction &) LLVM_DELETED_FUNCTION;
113 void operator=(const CodeGenFunction &) LLVM_DELETED_FUNCTION;
115 friend class CGCXXABI;
117 /// A jump destination is an abstract label, branching to which may
118 /// require a jump out through normal cleanups.
120 JumpDest() : Block(nullptr), ScopeDepth(), Index(0) {}
121 JumpDest(llvm::BasicBlock *Block,
122 EHScopeStack::stable_iterator Depth,
124 : Block(Block), ScopeDepth(Depth), Index(Index) {}
126 bool isValid() const { return Block != nullptr; }
127 llvm::BasicBlock *getBlock() const { return Block; }
128 EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
129 unsigned getDestIndex() const { return Index; }
131 // This should be used cautiously.
132 void setScopeDepth(EHScopeStack::stable_iterator depth) {
137 llvm::BasicBlock *Block;
138 EHScopeStack::stable_iterator ScopeDepth;
142 CodeGenModule &CGM; // Per-module state.
143 const TargetInfo &Target;
145 typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
146 LoopInfoStack LoopStack;
149 /// \brief CGBuilder insert helper. This function is called after an
150 /// instruction is created using Builder.
151 void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
152 llvm::BasicBlock *BB,
153 llvm::BasicBlock::iterator InsertPt) const;
155 /// CurFuncDecl - Holds the Decl for the current outermost
156 /// non-closure context.
157 const Decl *CurFuncDecl;
158 /// CurCodeDecl - This is the inner-most code context, which includes blocks.
159 const Decl *CurCodeDecl;
160 const CGFunctionInfo *CurFnInfo;
162 llvm::Function *CurFn;
164 /// CurGD - The GlobalDecl for the current function being compiled.
167 /// PrologueCleanupDepth - The cleanup depth enclosing all the
168 /// cleanups associated with the parameters.
169 EHScopeStack::stable_iterator PrologueCleanupDepth;
171 /// ReturnBlock - Unified return block.
172 JumpDest ReturnBlock;
174 /// ReturnValue - The temporary alloca to hold the return value. This is null
175 /// iff the function has no return value.
176 llvm::Value *ReturnValue;
178 /// AllocaInsertPoint - This is an instruction in the entry block before which
179 /// we prefer to insert allocas.
180 llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
182 /// \brief API for captured statement code generation.
183 class CGCapturedStmtInfo {
185 explicit CGCapturedStmtInfo(const CapturedStmt &S,
186 CapturedRegionKind K = CR_Default)
187 : Kind(K), ThisValue(nullptr), CXXThisFieldDecl(nullptr) {
189 RecordDecl::field_iterator Field =
190 S.getCapturedRecordDecl()->field_begin();
191 for (CapturedStmt::const_capture_iterator I = S.capture_begin(),
193 I != E; ++I, ++Field) {
194 if (I->capturesThis())
195 CXXThisFieldDecl = *Field;
197 CaptureFields[I->getCapturedVar()] = *Field;
201 virtual ~CGCapturedStmtInfo();
203 CapturedRegionKind getKind() const { return Kind; }
205 void setContextValue(llvm::Value *V) { ThisValue = V; }
206 /// \brief Retrieve the value of the context parameter.
207 llvm::Value *getContextValue() const { return ThisValue; }
209 /// \brief Lookup the captured field decl for a variable.
210 const FieldDecl *lookup(const VarDecl *VD) const {
211 return CaptureFields.lookup(VD);
214 bool isCXXThisExprCaptured() const { return CXXThisFieldDecl != nullptr; }
215 FieldDecl *getThisFieldDecl() const { return CXXThisFieldDecl; }
217 /// \brief Emit the captured statement body.
218 virtual void EmitBody(CodeGenFunction &CGF, Stmt *S) {
219 RegionCounter Cnt = CGF.getPGORegionCounter(S);
220 Cnt.beginRegion(CGF.Builder);
224 /// \brief Get the name of the capture helper.
225 virtual StringRef getHelperName() const { return "__captured_stmt"; }
228 /// \brief The kind of captured statement being generated.
229 CapturedRegionKind Kind;
231 /// \brief Keep the map between VarDecl and FieldDecl.
232 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields;
234 /// \brief The base address of the captured record, passed in as the first
235 /// argument of the parallel region function.
236 llvm::Value *ThisValue;
238 /// \brief Captured 'this' type.
239 FieldDecl *CXXThisFieldDecl;
241 CGCapturedStmtInfo *CapturedStmtInfo;
243 /// BoundsChecking - Emit run-time bounds checks. Higher values mean
244 /// potentially higher performance penalties.
245 unsigned char BoundsChecking;
247 /// \brief Sanitizer options to use for this function.
248 const SanitizerOptions *SanOpts;
250 /// \brief True if CodeGen currently emits code implementing sanitizer checks.
251 bool IsSanitizerScope;
253 /// \brief RAII object to set/unset CodeGenFunction::IsSanitizerScope.
254 class SanitizerScope {
255 CodeGenFunction *CGF;
257 SanitizerScope(CodeGenFunction *CGF);
261 /// In C++, whether we are code generating a thunk. This controls whether we
262 /// should emit cleanups.
265 /// In ARC, whether we should autorelease the return value.
266 bool AutoreleaseResult;
268 const CodeGen::CGBlockInfo *BlockInfo;
269 llvm::Value *BlockPointer;
271 llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
272 FieldDecl *LambdaThisCaptureField;
274 /// \brief A mapping from NRVO variables to the flags used to indicate
275 /// when the NRVO has been applied to this variable.
276 llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
278 EHScopeStack EHStack;
279 llvm::SmallVector<char, 256> LifetimeExtendedCleanupStack;
281 /// Header for data within LifetimeExtendedCleanupStack.
282 struct LifetimeExtendedCleanupHeader {
283 /// The size of the following cleanup object.
285 /// The kind of cleanup to push: a value from the CleanupKind enumeration.
288 size_t getSize() const { return Size; }
289 CleanupKind getKind() const { return static_cast<CleanupKind>(Kind); }
292 /// i32s containing the indexes of the cleanup destinations.
293 llvm::AllocaInst *NormalCleanupDest;
295 unsigned NextCleanupDestIndex;
297 /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
298 CGBlockInfo *FirstBlockInfo;
300 /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
301 llvm::BasicBlock *EHResumeBlock;
303 /// The exception slot. All landing pads write the current exception pointer
304 /// into this alloca.
305 llvm::Value *ExceptionSlot;
307 /// The selector slot. Under the MandatoryCleanup model, all landing pads
308 /// write the current selector value into this alloca.
309 llvm::AllocaInst *EHSelectorSlot;
311 /// Emits a landing pad for the current EH stack.
312 llvm::BasicBlock *EmitLandingPad();
314 llvm::BasicBlock *getInvokeDestImpl();
317 typename DominatingValue<T>::saved_type saveValueInCond(T value) {
318 return DominatingValue<T>::save(*this, value);
322 /// ObjCEHValueStack - Stack of Objective-C exception values, used for
324 SmallVector<llvm::Value*, 8> ObjCEHValueStack;
326 /// A class controlling the emission of a finally block.
328 /// Where the catchall's edge through the cleanup should go.
329 JumpDest RethrowDest;
331 /// A function to call to enter the catch.
332 llvm::Constant *BeginCatchFn;
334 /// An i1 variable indicating whether or not the @finally is
335 /// running for an exception.
336 llvm::AllocaInst *ForEHVar;
338 /// An i8* variable into which the exception pointer to rethrow
340 llvm::AllocaInst *SavedExnVar;
343 void enter(CodeGenFunction &CGF, const Stmt *Finally,
344 llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
345 llvm::Constant *rethrowFn);
346 void exit(CodeGenFunction &CGF);
349 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
350 /// current full-expression. Safe against the possibility that
351 /// we're currently inside a conditionally-evaluated expression.
352 template <class T, class A0>
353 void pushFullExprCleanup(CleanupKind kind, A0 a0) {
354 // If we're not in a conditional branch, or if none of the
355 // arguments requires saving, then use the unconditional cleanup.
356 if (!isInConditionalBranch())
357 return EHStack.pushCleanup<T>(kind, a0);
359 typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
361 typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
362 EHStack.pushCleanup<CleanupType>(kind, a0_saved);
363 initFullExprCleanup();
366 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
367 /// current full-expression. Safe against the possibility that
368 /// we're currently inside a conditionally-evaluated expression.
369 template <class T, class A0, class A1>
370 void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
371 // If we're not in a conditional branch, or if none of the
372 // arguments requires saving, then use the unconditional cleanup.
373 if (!isInConditionalBranch())
374 return EHStack.pushCleanup<T>(kind, a0, a1);
376 typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
377 typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
379 typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
380 EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
381 initFullExprCleanup();
384 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
385 /// current full-expression. Safe against the possibility that
386 /// we're currently inside a conditionally-evaluated expression.
387 template <class T, class A0, class A1, class A2>
388 void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) {
389 // If we're not in a conditional branch, or if none of the
390 // arguments requires saving, then use the unconditional cleanup.
391 if (!isInConditionalBranch()) {
392 return EHStack.pushCleanup<T>(kind, a0, a1, a2);
395 typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
396 typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
397 typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
399 typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType;
400 EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved);
401 initFullExprCleanup();
404 /// pushFullExprCleanup - Push a cleanup to be run at the end of the
405 /// current full-expression. Safe against the possibility that
406 /// we're currently inside a conditionally-evaluated expression.
407 template <class T, class A0, class A1, class A2, class A3>
408 void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) {
409 // If we're not in a conditional branch, or if none of the
410 // arguments requires saving, then use the unconditional cleanup.
411 if (!isInConditionalBranch()) {
412 return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3);
415 typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
416 typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
417 typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
418 typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3);
420 typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType;
421 EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved,
423 initFullExprCleanup();
426 /// \brief Queue a cleanup to be pushed after finishing the current
428 template <class T, class A0, class A1, class A2, class A3>
429 void pushCleanupAfterFullExpr(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
430 assert(!isInConditionalBranch() && "can't defer conditional cleanup");
432 LifetimeExtendedCleanupHeader Header = { sizeof(T), Kind };
434 size_t OldSize = LifetimeExtendedCleanupStack.size();
435 LifetimeExtendedCleanupStack.resize(
436 LifetimeExtendedCleanupStack.size() + sizeof(Header) + Header.Size);
438 char *Buffer = &LifetimeExtendedCleanupStack[OldSize];
439 new (Buffer) LifetimeExtendedCleanupHeader(Header);
440 new (Buffer + sizeof(Header)) T(a0, a1, a2, a3);
443 /// Set up the last cleanup that was pushed as a conditional
444 /// full-expression cleanup.
445 void initFullExprCleanup();
447 /// PushDestructorCleanup - Push a cleanup to call the
448 /// complete-object destructor of an object of the given type at the
449 /// given address. Does nothing if T is not a C++ class type with a
450 /// non-trivial destructor.
451 void PushDestructorCleanup(QualType T, llvm::Value *Addr);
453 /// PushDestructorCleanup - Push a cleanup to call the
454 /// complete-object variant of the given destructor on the object at
455 /// the given address.
456 void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
459 /// PopCleanupBlock - Will pop the cleanup entry on the stack and
460 /// process all branch fixups.
461 void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
463 /// DeactivateCleanupBlock - Deactivates the given cleanup block.
464 /// The block cannot be reactivated. Pops it if it's the top of the
467 /// \param DominatingIP - An instruction which is known to
468 /// dominate the current IP (if set) and which lies along
469 /// all paths of execution between the current IP and the
470 /// point at which the cleanup comes into scope.
471 void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
472 llvm::Instruction *DominatingIP);
474 /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
475 /// Cannot be used to resurrect a deactivated cleanup.
477 /// \param DominatingIP - An instruction which is known to
478 /// dominate the current IP (if set) and which lies along
479 /// all paths of execution between the current IP and the
480 /// point at which the cleanup comes into scope.
481 void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
482 llvm::Instruction *DominatingIP);
484 /// \brief Enters a new scope for capturing cleanups, all of which
485 /// will be executed once the scope is exited.
486 class RunCleanupsScope {
487 EHScopeStack::stable_iterator CleanupStackDepth;
488 size_t LifetimeExtendedCleanupStackSize;
489 bool OldDidCallStackSave;
494 RunCleanupsScope(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;
495 void operator=(const RunCleanupsScope &) LLVM_DELETED_FUNCTION;
498 CodeGenFunction& CGF;
501 /// \brief Enter a new cleanup scope.
502 explicit RunCleanupsScope(CodeGenFunction &CGF)
503 : PerformCleanup(true), CGF(CGF)
505 CleanupStackDepth = CGF.EHStack.stable_begin();
506 LifetimeExtendedCleanupStackSize =
507 CGF.LifetimeExtendedCleanupStack.size();
508 OldDidCallStackSave = CGF.DidCallStackSave;
509 CGF.DidCallStackSave = false;
512 /// \brief Exit this cleanup scope, emitting any accumulated
514 ~RunCleanupsScope() {
515 if (PerformCleanup) {
516 CGF.DidCallStackSave = OldDidCallStackSave;
517 CGF.PopCleanupBlocks(CleanupStackDepth,
518 LifetimeExtendedCleanupStackSize);
522 /// \brief Determine whether this scope requires any cleanups.
523 bool requiresCleanups() const {
524 return CGF.EHStack.stable_begin() != CleanupStackDepth;
527 /// \brief Force the emission of cleanups now, instead of waiting
528 /// until this object is destroyed.
529 void ForceCleanup() {
530 assert(PerformCleanup && "Already forced cleanup");
531 CGF.DidCallStackSave = OldDidCallStackSave;
532 CGF.PopCleanupBlocks(CleanupStackDepth,
533 LifetimeExtendedCleanupStackSize);
534 PerformCleanup = false;
538 class LexicalScope: protected RunCleanupsScope {
540 SmallVector<const LabelDecl*, 4> Labels;
541 LexicalScope *ParentScope;
543 LexicalScope(const LexicalScope &) LLVM_DELETED_FUNCTION;
544 void operator=(const LexicalScope &) LLVM_DELETED_FUNCTION;
547 /// \brief Enter a new cleanup scope.
548 explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
549 : RunCleanupsScope(CGF), Range(Range), ParentScope(CGF.CurLexicalScope) {
550 CGF.CurLexicalScope = this;
551 if (CGDebugInfo *DI = CGF.getDebugInfo())
552 DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
555 void addLabel(const LabelDecl *label) {
556 assert(PerformCleanup && "adding label to dead scope?");
557 Labels.push_back(label);
560 /// \brief Exit this cleanup scope, emitting any accumulated
563 if (CGDebugInfo *DI = CGF.getDebugInfo())
564 DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
566 // If we should perform a cleanup, force them now. Note that
567 // this ends the cleanup scope before rescoping any labels.
568 if (PerformCleanup) ForceCleanup();
571 /// \brief Force the emission of cleanups now, instead of waiting
572 /// until this object is destroyed.
573 void ForceCleanup() {
574 CGF.CurLexicalScope = ParentScope;
575 RunCleanupsScope::ForceCleanup();
581 void rescopeLabels();
585 /// \brief Takes the old cleanup stack size and emits the cleanup blocks
586 /// that have been added.
587 void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
589 /// \brief Takes the old cleanup stack size and emits the cleanup blocks
590 /// that have been added, then adds all lifetime-extended cleanups from
591 /// the given position to the stack.
592 void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize,
593 size_t OldLifetimeExtendedStackSize);
595 void ResolveBranchFixups(llvm::BasicBlock *Target);
597 /// The given basic block lies in the current EH scope, but may be a
598 /// target of a potentially scope-crossing jump; get a stable handle
599 /// to which we can perform this jump later.
600 JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
601 return JumpDest(Target,
602 EHStack.getInnermostNormalCleanup(),
603 NextCleanupDestIndex++);
606 /// The given basic block lies in the current EH scope, but may be a
607 /// target of a potentially scope-crossing jump; get a stable handle
608 /// to which we can perform this jump later.
609 JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
610 return getJumpDestInCurrentScope(createBasicBlock(Name));
613 /// EmitBranchThroughCleanup - Emit a branch from the current insert
614 /// block through the normal cleanup handling code (if any) and then
616 void EmitBranchThroughCleanup(JumpDest Dest);
618 /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
619 /// specified destination obviously has no cleanups to run. 'false' is always
620 /// a conservatively correct answer for this method.
621 bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
623 /// popCatchScope - Pops the catch scope at the top of the EHScope
624 /// stack, emitting any required code (other than the catch handlers
626 void popCatchScope();
628 llvm::BasicBlock *getEHResumeBlock(bool isCleanup);
629 llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
631 /// An object to manage conditionally-evaluated expressions.
632 class ConditionalEvaluation {
633 llvm::BasicBlock *StartBB;
636 ConditionalEvaluation(CodeGenFunction &CGF)
637 : StartBB(CGF.Builder.GetInsertBlock()) {}
639 void begin(CodeGenFunction &CGF) {
640 assert(CGF.OutermostConditional != this);
641 if (!CGF.OutermostConditional)
642 CGF.OutermostConditional = this;
645 void end(CodeGenFunction &CGF) {
646 assert(CGF.OutermostConditional != nullptr);
647 if (CGF.OutermostConditional == this)
648 CGF.OutermostConditional = nullptr;
651 /// Returns a block which will be executed prior to each
652 /// evaluation of the conditional code.
653 llvm::BasicBlock *getStartingBlock() const {
658 /// isInConditionalBranch - Return true if we're currently emitting
659 /// one branch or the other of a conditional expression.
660 bool isInConditionalBranch() const { return OutermostConditional != nullptr; }
662 void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
663 assert(isInConditionalBranch());
664 llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
665 new llvm::StoreInst(value, addr, &block->back());
668 /// An RAII object to record that we're evaluating a statement
670 class StmtExprEvaluation {
671 CodeGenFunction &CGF;
673 /// We have to save the outermost conditional: cleanups in a
674 /// statement expression aren't conditional just because the
676 ConditionalEvaluation *SavedOutermostConditional;
679 StmtExprEvaluation(CodeGenFunction &CGF)
680 : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
681 CGF.OutermostConditional = nullptr;
684 ~StmtExprEvaluation() {
685 CGF.OutermostConditional = SavedOutermostConditional;
686 CGF.EnsureInsertPoint();
690 /// An object which temporarily prevents a value from being
691 /// destroyed by aggressive peephole optimizations that assume that
692 /// all uses of a value have been realized in the IR.
693 class PeepholeProtection {
694 llvm::Instruction *Inst;
695 friend class CodeGenFunction;
698 PeepholeProtection() : Inst(nullptr) {}
701 /// A non-RAII class containing all the information about a bound
702 /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
703 /// this which makes individual mappings very simple; using this
704 /// class directly is useful when you have a variable number of
705 /// opaque values or don't want the RAII functionality for some
707 class OpaqueValueMappingData {
708 const OpaqueValueExpr *OpaqueValue;
710 CodeGenFunction::PeepholeProtection Protection;
712 OpaqueValueMappingData(const OpaqueValueExpr *ov,
714 : OpaqueValue(ov), BoundLValue(boundLValue) {}
716 OpaqueValueMappingData() : OpaqueValue(nullptr) {}
718 static bool shouldBindAsLValue(const Expr *expr) {
719 // gl-values should be bound as l-values for obvious reasons.
720 // Records should be bound as l-values because IR generation
721 // always keeps them in memory. Expressions of function type
722 // act exactly like l-values but are formally required to be
724 return expr->isGLValue() ||
725 expr->getType()->isFunctionType() ||
726 hasAggregateEvaluationKind(expr->getType());
729 static OpaqueValueMappingData bind(CodeGenFunction &CGF,
730 const OpaqueValueExpr *ov,
732 if (shouldBindAsLValue(ov))
733 return bind(CGF, ov, CGF.EmitLValue(e));
734 return bind(CGF, ov, CGF.EmitAnyExpr(e));
737 static OpaqueValueMappingData bind(CodeGenFunction &CGF,
738 const OpaqueValueExpr *ov,
740 assert(shouldBindAsLValue(ov));
741 CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
742 return OpaqueValueMappingData(ov, true);
745 static OpaqueValueMappingData bind(CodeGenFunction &CGF,
746 const OpaqueValueExpr *ov,
748 assert(!shouldBindAsLValue(ov));
749 CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
751 OpaqueValueMappingData data(ov, false);
753 // Work around an extremely aggressive peephole optimization in
754 // EmitScalarConversion which assumes that all other uses of a
756 data.Protection = CGF.protectFromPeepholes(rv);
761 bool isValid() const { return OpaqueValue != nullptr; }
762 void clear() { OpaqueValue = nullptr; }
764 void unbind(CodeGenFunction &CGF) {
765 assert(OpaqueValue && "no data to unbind!");
768 CGF.OpaqueLValues.erase(OpaqueValue);
770 CGF.OpaqueRValues.erase(OpaqueValue);
771 CGF.unprotectFromPeepholes(Protection);
776 /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
777 class OpaqueValueMapping {
778 CodeGenFunction &CGF;
779 OpaqueValueMappingData Data;
782 static bool shouldBindAsLValue(const Expr *expr) {
783 return OpaqueValueMappingData::shouldBindAsLValue(expr);
786 /// Build the opaque value mapping for the given conditional
787 /// operator if it's the GNU ?: extension. This is a common
788 /// enough pattern that the convenience operator is really
791 OpaqueValueMapping(CodeGenFunction &CGF,
792 const AbstractConditionalOperator *op) : CGF(CGF) {
793 if (isa<ConditionalOperator>(op))
797 const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
798 Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
802 OpaqueValueMapping(CodeGenFunction &CGF,
803 const OpaqueValueExpr *opaqueValue,
805 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
808 OpaqueValueMapping(CodeGenFunction &CGF,
809 const OpaqueValueExpr *opaqueValue,
811 : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
819 ~OpaqueValueMapping() {
820 if (Data.isValid()) Data.unbind(CGF);
824 /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
825 /// number that holds the value.
826 unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
828 /// BuildBlockByrefAddress - Computes address location of the
829 /// variable which is declared as __block.
830 llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
833 CGDebugInfo *DebugInfo;
834 bool DisableDebugInfo;
836 /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
837 /// calling llvm.stacksave for multiple VLAs in the same scope.
838 bool DidCallStackSave;
840 /// IndirectBranch - The first time an indirect goto is seen we create a block
841 /// with an indirect branch. Every time we see the address of a label taken,
842 /// we add the label to the indirect goto. Every subsequent indirect goto is
843 /// codegen'd as a jump to the IndirectBranch's basic block.
844 llvm::IndirectBrInst *IndirectBranch;
846 /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
848 typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
849 DeclMapTy LocalDeclMap;
851 /// LabelMap - This keeps track of the LLVM basic block for each C label.
852 llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
854 // BreakContinueStack - This keeps track of where break and continue
855 // statements should jump to.
856 struct BreakContinue {
857 BreakContinue(JumpDest Break, JumpDest Continue)
858 : BreakBlock(Break), ContinueBlock(Continue) {}
861 JumpDest ContinueBlock;
863 SmallVector<BreakContinue, 8> BreakContinueStack;
868 /// Get a counter for instrumentation of the region associated with the given
870 RegionCounter getPGORegionCounter(const Stmt *S) {
871 return RegionCounter(PGO, S);
875 /// SwitchInsn - This is nearest current switch instruction. It is null if
876 /// current context is not in a switch.
877 llvm::SwitchInst *SwitchInsn;
878 /// The branch weights of SwitchInsn when doing instrumentation based PGO.
879 SmallVector<uint64_t, 16> *SwitchWeights;
881 /// CaseRangeBlock - This block holds if condition check for last case
882 /// statement range in current switch instruction.
883 llvm::BasicBlock *CaseRangeBlock;
885 /// OpaqueLValues - Keeps track of the current set of opaque value
887 llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
888 llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
890 // VLASizeMap - This keeps track of the associated size for each VLA type.
891 // We track this by the size expression rather than the type itself because
892 // in certain situations, like a const qualifier applied to an VLA typedef,
893 // multiple VLA types can share the same size expression.
894 // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
895 // enter/leave scopes.
896 llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
898 /// A block containing a single 'unreachable' instruction. Created
899 /// lazily by getUnreachableBlock().
900 llvm::BasicBlock *UnreachableBlock;
902 /// Count of the number of return expressions in the function.
903 unsigned NumReturnExprs;
905 /// Count the number of simple (constant) return expressions in the function.
906 unsigned NumSimpleReturnExprs;
908 /// The last regular (non-return) debug location (breakpoint) in the function.
909 SourceLocation LastStopPoint;
912 /// A scope within which we are constructing the fields of an object which
913 /// might use a CXXDefaultInitExpr. This stashes away a 'this' value to use
914 /// if we need to evaluate a CXXDefaultInitExpr within the evaluation.
915 class FieldConstructionScope {
917 FieldConstructionScope(CodeGenFunction &CGF, llvm::Value *This)
918 : CGF(CGF), OldCXXDefaultInitExprThis(CGF.CXXDefaultInitExprThis) {
919 CGF.CXXDefaultInitExprThis = This;
921 ~FieldConstructionScope() {
922 CGF.CXXDefaultInitExprThis = OldCXXDefaultInitExprThis;
926 CodeGenFunction &CGF;
927 llvm::Value *OldCXXDefaultInitExprThis;
930 /// The scope of a CXXDefaultInitExpr. Within this scope, the value of 'this'
931 /// is overridden to be the object under construction.
932 class CXXDefaultInitExprScope {
934 CXXDefaultInitExprScope(CodeGenFunction &CGF)
935 : CGF(CGF), OldCXXThisValue(CGF.CXXThisValue) {
936 CGF.CXXThisValue = CGF.CXXDefaultInitExprThis;
938 ~CXXDefaultInitExprScope() {
939 CGF.CXXThisValue = OldCXXThisValue;
943 CodeGenFunction &CGF;
944 llvm::Value *OldCXXThisValue;
948 /// CXXThisDecl - When generating code for a C++ member function,
949 /// this will hold the implicit 'this' declaration.
950 ImplicitParamDecl *CXXABIThisDecl;
951 llvm::Value *CXXABIThisValue;
952 llvm::Value *CXXThisValue;
954 /// The value of 'this' to use when evaluating CXXDefaultInitExprs within
956 llvm::Value *CXXDefaultInitExprThis;
958 /// CXXStructorImplicitParamDecl - When generating code for a constructor or
959 /// destructor, this will hold the implicit argument (e.g. VTT).
960 ImplicitParamDecl *CXXStructorImplicitParamDecl;
961 llvm::Value *CXXStructorImplicitParamValue;
963 /// OutermostConditional - Points to the outermost active
964 /// conditional control. This is used so that we know if a
965 /// temporary should be destroyed conditionally.
966 ConditionalEvaluation *OutermostConditional;
968 /// The current lexical scope.
969 LexicalScope *CurLexicalScope;
971 /// The current source location that should be used for exception
973 SourceLocation CurEHLocation;
975 /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
976 /// type as well as the field number that contains the actual data.
977 llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
978 unsigned> > ByRefValueInfo;
/// Lazily-created landing pad that just calls terminate; see
/// getTerminateLandingPad().
980 llvm::BasicBlock *TerminateLandingPad;
/// Lazily-created catch handler (not a landing pad) that just calls
/// terminate; see getTerminateHandler().
981 llvm::BasicBlock *TerminateHandler;
/// NOTE(review): presumably a lazily-created block holding a trap for
/// runtime-check failures — confirm at use sites.
982 llvm::BasicBlock *TrapBB;
984 /// Add a kernel metadata node to the named metadata node 'opencl.kernels'.
985 /// In the kernel metadata node, reference the kernel function and metadata
986 /// nodes for its optional attribute qualifiers (OpenCL 1.1 6.7.2):
987 /// - A node for the vec_type_hint(<type>) qualifier contains string
988 /// "vec_type_hint", an undefined value of the <type> data type,
989 /// and a Boolean that is true if the <type> is integer and signed.
990 /// - A node for the work_group_size_hint(X,Y,Z) qualifier contains string
991 /// "work_group_size_hint", and three 32-bit integers X, Y and Z.
992 /// - A node for the reqd_work_group_size(X,Y,Z) qualifier contains string
993 /// "reqd_work_group_size", and three 32-bit integers X, Y and Z.
994 void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
998 CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
1001 CodeGenTypes &getTypes() const { return CGM.getTypes(); }
1002 ASTContext &getContext() const { return CGM.getContext(); }
1003 CGDebugInfo *getDebugInfo() {
1004 if (DisableDebugInfo)
1008 void disableDebugInfo() { DisableDebugInfo = true; }
1009 void enableDebugInfo() { DisableDebugInfo = false; }
1011 bool shouldUseFusedARCCalls() {
1012 return CGM.getCodeGenOpts().OptimizationLevel == 0;
1015 const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
1017 /// Returns a pointer to the function's exception object and selector slot,
1018 /// which is assigned in every landing pad.
1019 llvm::Value *getExceptionSlot();
1020 llvm::Value *getEHSelectorSlot();
1022 /// Returns the contents of the function's exception object and selector
1024 llvm::Value *getExceptionFromSlot();
1025 llvm::Value *getSelectorFromSlot();
1027 llvm::Value *getNormalCleanupDestSlot();
1029 llvm::BasicBlock *getUnreachableBlock() {
1030 if (!UnreachableBlock) {
1031 UnreachableBlock = createBasicBlock("unreachable");
1032 new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
1034 return UnreachableBlock;
1037 llvm::BasicBlock *getInvokeDest() {
1038 if (!EHStack.requiresLandingPad()) return nullptr;
1039 return getInvokeDestImpl();
1042 const TargetInfo &getTarget() const { return Target; }
1043 llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
1045 //===--------------------------------------------------------------------===//
1047 //===--------------------------------------------------------------------===//
1049 typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
1051 void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
1052 llvm::Value *arrayEndPointer,
1053 QualType elementType,
1054 Destroyer *destroyer);
1055 void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
1056 llvm::Value *arrayEnd,
1057 QualType elementType,
1058 Destroyer *destroyer);
1060 void pushDestroy(QualType::DestructionKind dtorKind,
1061 llvm::Value *addr, QualType type);
1062 void pushEHDestroy(QualType::DestructionKind dtorKind,
1063 llvm::Value *addr, QualType type);
1064 void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
1065 Destroyer *destroyer, bool useEHCleanupForArray);
1066 void pushLifetimeExtendedDestroy(CleanupKind kind, llvm::Value *addr,
1067 QualType type, Destroyer *destroyer,
1068 bool useEHCleanupForArray);
1069 void pushStackRestore(CleanupKind kind, llvm::Value *SPMem);
1070 void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
1071 bool useEHCleanupForArray);
1072 llvm::Function *generateDestroyHelper(llvm::Constant *addr, QualType type,
1073 Destroyer *destroyer,
1074 bool useEHCleanupForArray,
1076 void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
1077 QualType type, Destroyer *destroyer,
1078 bool checkZeroLength, bool useEHCleanup);
1080 Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
1082 /// Determines whether an EH cleanup is required to destroy a type
1083 /// with the given destruction kind.
1084 bool needsEHCleanup(QualType::DestructionKind kind) {
1086 case QualType::DK_none:
1088 case QualType::DK_cxx_destructor:
1089 case QualType::DK_objc_weak_lifetime:
1090 return getLangOpts().Exceptions;
1091 case QualType::DK_objc_strong_lifetime:
1092 return getLangOpts().Exceptions &&
1093 CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
1095 llvm_unreachable("bad destruction kind");
1098 CleanupKind getCleanupKind(QualType::DestructionKind kind) {
1099 return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
1102 //===--------------------------------------------------------------------===//
1104 //===--------------------------------------------------------------------===//
1106 void GenerateObjCMethod(const ObjCMethodDecl *OMD);
1108 void StartObjCMethod(const ObjCMethodDecl *MD,
1109 const ObjCContainerDecl *CD,
1110 SourceLocation StartLoc);
1112 /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
1113 void GenerateObjCGetter(ObjCImplementationDecl *IMP,
1114 const ObjCPropertyImplDecl *PID);
/// generateObjCGetterBody - Helper for GenerateObjCGetter: emits the body of
/// a synthesized property getter. AtomicHelperFn is presumably an atomic
/// copy-helper function when one is required — confirm at call sites.
1115 void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
1116 const ObjCPropertyImplDecl *propImpl,
1117 const ObjCMethodDecl *GetterMethodDecl,
1118 llvm::Constant *AtomicHelperFn);
1120 void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
1121 ObjCMethodDecl *MD, bool ctor);
1123 /// GenerateObjCSetter - Synthesize an Objective-C property setter function
1124 /// for the given property.
1125 void GenerateObjCSetter(ObjCImplementationDecl *IMP,
1126 const ObjCPropertyImplDecl *PID);
1127 void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
1128 const ObjCPropertyImplDecl *propImpl,
1129 llvm::Constant *AtomicHelperFn);
1130 bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
1131 bool IvarTypeWithAggrGCObjects(QualType Ty);
1133 //===--------------------------------------------------------------------===//
1135 //===--------------------------------------------------------------------===//
1137 llvm::Value *EmitBlockLiteral(const BlockExpr *);
1138 llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
1139 static void destroyBlockInfos(CGBlockInfo *info);
1140 llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
1141 const CGBlockInfo &Info,
1143 llvm::Constant *BlockVarLayout);
1145 llvm::Function *GenerateBlockFunction(GlobalDecl GD,
1146 const CGBlockInfo &Info,
1147 const DeclMapTy &ldm,
1148 bool IsLambdaConversionToBlock);
1150 llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
1151 llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
1152 llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
1153 const ObjCPropertyImplDecl *PID);
1154 llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
1155 const ObjCPropertyImplDecl *PID);
1156 llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
1158 void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
1160 class AutoVarEmission;
1162 void emitByrefStructureInit(const AutoVarEmission &emission);
1163 void enterByrefCleanup(const AutoVarEmission &emission);
1165 llvm::Value *LoadBlockStruct() {
1166 assert(BlockPointer && "no block pointer set!");
1167 return BlockPointer;
1170 void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
1171 void AllocateBlockDecl(const DeclRefExpr *E);
1172 llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
1173 llvm::Type *BuildByRefType(const VarDecl *var);
1175 void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1176 const CGFunctionInfo &FnInfo);
1177 /// \brief Emit code for the start of a function.
1178 /// \param Loc The location to be associated with the function.
1179 /// \param StartLoc The location of the function body.
1180 void StartFunction(GlobalDecl GD,
1183 const CGFunctionInfo &FnInfo,
1184 const FunctionArgList &Args,
1185 SourceLocation Loc = SourceLocation(),
1186 SourceLocation StartLoc = SourceLocation());
1188 void EmitConstructorBody(FunctionArgList &Args);
1189 void EmitDestructorBody(FunctionArgList &Args);
1190 void emitImplicitAssignmentOperatorBody(FunctionArgList &Args);
1191 void EmitFunctionBody(FunctionArgList &Args, const Stmt *Body);
1192 void EmitBlockWithFallThrough(llvm::BasicBlock *BB, RegionCounter &Cnt);
1194 void EmitForwardingCallToLambda(const CXXMethodDecl *LambdaCallOperator,
1195 CallArgList &CallArgs);
1196 void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
1197 void EmitLambdaBlockInvokeBody();
1198 void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
1199 void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
1201 /// EmitReturnBlock - Emit the unified return block, trying to avoid its
1202 /// emission when possible.
1203 void EmitReturnBlock();
1205 /// FinishFunction - Complete IR generation of the current function. It is
1206 /// legal to call this function even if there is no current insertion point.
1207 void FinishFunction(SourceLocation EndLoc=SourceLocation());
1209 void StartThunk(llvm::Function *Fn, GlobalDecl GD, const CGFunctionInfo &FnInfo);
1211 void EmitCallAndReturnForThunk(llvm::Value *Callee, const ThunkInfo *Thunk);
1213 /// Emit a musttail call for a thunk with a potentially adjusted this pointer.
1214 void EmitMustTailThunk(const CXXMethodDecl *MD, llvm::Value *AdjustedThisPtr,
1215 llvm::Value *Callee);
1217 /// GenerateThunk - Generate a thunk for the given method.
1218 void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1219 GlobalDecl GD, const ThunkInfo &Thunk);
1221 void GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
1222 GlobalDecl GD, const ThunkInfo &Thunk);
1224 void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
1225 FunctionArgList &Args);
1227 void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
1228 ArrayRef<VarDecl *> ArrayIndexes);
1230 /// InitializeVTablePointer - Initialize the vtable pointer of the given
1233 void InitializeVTablePointer(BaseSubobject Base,
1234 const CXXRecordDecl *NearestVBase,
1235 CharUnits OffsetFromNearestVBase,
1236 const CXXRecordDecl *VTableClass);
1238 typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
1239 void InitializeVTablePointers(BaseSubobject Base,
1240 const CXXRecordDecl *NearestVBase,
1241 CharUnits OffsetFromNearestVBase,
1242 bool BaseIsNonVirtualPrimaryBase,
1243 const CXXRecordDecl *VTableClass,
1244 VisitedVirtualBasesSetTy& VBases);
1246 void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
1248 /// GetVTablePtr - Return the Value of the vtable pointer member pointed
1250 llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
1253 /// CanDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
1254 /// expr can be devirtualized.
1255 bool CanDevirtualizeMemberFunctionCall(const Expr *Base,
1256 const CXXMethodDecl *MD);
1258 /// EnterDtorCleanups - Enter the cleanups necessary to complete the
1259 /// given phase of destruction for a destructor. The end result
1260 /// should call destructors on members and base classes in reverse
1261 /// order of their construction.
1262 void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
1264 /// ShouldInstrumentFunction - Return true if the current function should be
1265 /// instrumented with __cyg_profile_func_* calls
1266 bool ShouldInstrumentFunction();
1268 /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
1269 /// instrumentation function with the current function and the call site, if
1270 /// function instrumentation is enabled.
1271 void EmitFunctionInstrumentation(const char *Fn);
1273 /// EmitMCountInstrumentation - Emit call to .mcount.
1274 void EmitMCountInstrumentation();
1276 /// EmitFunctionProlog - Emit the target specific LLVM code to load the
1277 /// arguments for the given function. This is also responsible for naming the
1278 /// LLVM function arguments.
1279 void EmitFunctionProlog(const CGFunctionInfo &FI,
1281 const FunctionArgList &Args);
1283 /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
1284 /// given temporary.
1285 void EmitFunctionEpilog(const CGFunctionInfo &FI, bool EmitRetDbgLoc,
1286 SourceLocation EndLoc);
1288 /// EmitStartEHSpec - Emit the start of the exception spec.
1289 void EmitStartEHSpec(const Decl *D);
1291 /// EmitEndEHSpec - Emit the end of the exception spec.
1292 void EmitEndEHSpec(const Decl *D);
1294 /// getTerminateLandingPad - Return a landing pad that just calls terminate.
1295 llvm::BasicBlock *getTerminateLandingPad();
1297 /// getTerminateHandler - Return a handler (not a landing pad, just
1298 /// a catch handler) that just calls terminate. This is used when
1299 /// a terminate scope encloses a try.
1300 llvm::BasicBlock *getTerminateHandler();
1302 llvm::Type *ConvertTypeForMem(QualType T);
1303 llvm::Type *ConvertType(QualType T);
1304 llvm::Type *ConvertType(const TypeDecl *T) {
1305 return ConvertType(getContext().getTypeDeclType(T));
1308 /// LoadObjCSelf - Load the value of self. This function is only valid while
1309 /// generating code for an Objective-C method.
1310 llvm::Value *LoadObjCSelf();
1312 /// TypeOfSelfObject - Return type of object that this self represents.
1313 QualType TypeOfSelfObject();
1315 /// getEvaluationKind - Return the TypeEvaluationKind (scalar, complex, or
1316 /// aggregate) used when emitting values of the given AST type.
1317 static TypeEvaluationKind getEvaluationKind(QualType T);
1319 static bool hasScalarEvaluationKind(QualType T) {
1320 return getEvaluationKind(T) == TEK_Scalar;
1323 static bool hasAggregateEvaluationKind(QualType T) {
1324 return getEvaluationKind(T) == TEK_Aggregate;
1327 /// createBasicBlock - Create an LLVM basic block.
1328 llvm::BasicBlock *createBasicBlock(const Twine &name = "",
1329 llvm::Function *parent = nullptr,
1330 llvm::BasicBlock *before = nullptr) {
1332 return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
1334 return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
1338 /// getBasicBlockForLabel - Return the LLVM basicblock that the specified
1340 JumpDest getJumpDestForLabel(const LabelDecl *S);
1342 /// SimplifyForwardingBlocks - If the given basic block is only a branch to
1343 /// another basic block, simplify it. This assumes that no other code could
1344 /// potentially reference the basic block.
1345 void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
1347 /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
1348 /// adding a fall-through branch from the current insert block if
1349 /// necessary. It is legal to call this function even if there is no current
1350 /// insertion point.
1352 /// IsFinished - If true, indicates that the caller has finished emitting
1353 /// branches to the given block and does not expect to emit code into it. This
1354 /// means the block can be ignored if it is unreachable.
1355 void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
1357 /// EmitBlockAfterUses - Emit the given block somewhere hopefully
1358 /// near its uses, and leave the insertion point in it.
1359 void EmitBlockAfterUses(llvm::BasicBlock *BB);
1361 /// EmitBranch - Emit a branch to the specified basic block from the current
1362 /// insert block, taking care to avoid creation of branches from dummy
1363 /// blocks. It is legal to call this function even if there is no current
1364 /// insertion point.
1366 /// This function clears the current insertion point. The caller should follow
1367 /// calls to this function with calls to Emit*Block prior to generation new
1369 void EmitBranch(llvm::BasicBlock *Block);
1371 /// HaveInsertPoint - True if an insertion point is defined. If not, this
1372 /// indicates that the current code being emitted is unreachable.
1373 bool HaveInsertPoint() const {
1374 return Builder.GetInsertBlock() != nullptr;
1377 /// EnsureInsertPoint - Ensure that an insertion point is defined so that
1378 /// emitted IR has a place to go. Note that by definition, if this function
1379 /// creates a block then that block is unreachable; callers may do better to
1380 /// detect when no insertion point is defined and simply skip IR generation.
1381 void EnsureInsertPoint() {
1382 if (!HaveInsertPoint())
1383 EmitBlock(createBasicBlock());
1386 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1387 /// specified stmt yet.
1388 void ErrorUnsupported(const Stmt *S, const char *Type);
1390 //===--------------------------------------------------------------------===//
1392 //===--------------------------------------------------------------------===//
1394 LValue MakeAddrLValue(llvm::Value *V, QualType T,
1395 CharUnits Alignment = CharUnits()) {
1396 return LValue::MakeAddr(V, T, Alignment, getContext(),
1397 CGM.getTBAAInfo(T));
1400 LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
1401 CharUnits Alignment;
1402 if (!T->isIncompleteType()) {
1403 Alignment = getContext().getTypeAlignInChars(T);
1404 unsigned MaxAlign = getContext().getLangOpts().MaxTypeAlign;
1405 if (MaxAlign && Alignment.getQuantity() > MaxAlign &&
1406 !getContext().isAlignmentRequired(T))
1407 Alignment = CharUnits::fromQuantity(MaxAlign);
1409 return LValue::MakeAddr(V, T, Alignment, getContext(),
1410 CGM.getTBAAInfo(T));
1413 /// CreateTempAlloca - This creates a alloca and inserts it into the entry
1414 /// block. The caller is responsible for setting an appropriate alignment on
1416 llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
1417 const Twine &Name = "tmp");
1419 /// InitTempAlloca - Provide an initial value for the given alloca.
1420 void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
1422 /// CreateIRTemp - Create a temporary IR object of the given type, with
1423 /// appropriate alignment. This routine should only be used when an temporary
1424 /// value needs to be stored into an alloca (for example, to avoid explicit
1425 /// PHI construction), but the type is the IR type, not the type appropriate
1426 /// for storing in memory.
1427 llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
1429 /// CreateMemTemp - Create a temporary memory object of the given type, with
1430 /// appropriate alignment.
1431 llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
1433 /// CreateAggTemp - Create a temporary memory object for the given
1435 AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
1436 CharUnits Alignment = getContext().getTypeAlignInChars(T);
1437 return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
1439 AggValueSlot::IsNotDestructed,
1440 AggValueSlot::DoesNotNeedGCBarriers,
1441 AggValueSlot::IsNotAliased);
1444 /// CreateInAllocaTmp - Create a temporary memory object for the given
1446 AggValueSlot CreateInAllocaTmp(QualType T, const Twine &Name = "inalloca");
1448 /// Emit a cast to void* in the appropriate address space.
1449 llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
1451 /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
1452 /// expression and compare the result against zero, returning an Int1Ty value.
1453 llvm::Value *EvaluateExprAsBool(const Expr *E);
1455 /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
1456 void EmitIgnoredExpr(const Expr *E);
1458 /// EmitAnyExpr - Emit code to compute the specified expression which can have
1459 /// any type. The result is returned as an RValue struct. If this is an
1460 /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
1461 /// the result should be returned.
1463 /// \param ignoreResult True if the resulting value isn't used.
1464 RValue EmitAnyExpr(const Expr *E,
1465 AggValueSlot aggSlot = AggValueSlot::ignored(),
1466 bool ignoreResult = false);
1468 // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
1469 // or the value of the expression, depending on how va_list is defined.
1470 llvm::Value *EmitVAListRef(const Expr *E);
1472 /// EmitAnyExprToTemp - Similarly to EmitAnyExpr(), however, the result will
1473 /// always be accessible even if no aggregate location is provided.
1474 RValue EmitAnyExprToTemp(const Expr *E);
1476 /// EmitAnyExprToMem - Emits the code necessary to evaluate an
1477 /// arbitrary expression into the given memory location.
1478 void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
1479 Qualifiers Quals, bool IsInitializer);
1481 /// EmitExprAsInit - Emits the code necessary to initialize a
1482 /// location in memory with the given initializer.
1483 void EmitExprAsInit(const Expr *init, const ValueDecl *D,
1484 LValue lvalue, bool capturedByInit);
1486 /// hasVolatileMember - returns true if aggregate type has a volatile
1488 bool hasVolatileMember(QualType T) {
1489 if (const RecordType *RT = T->getAs<RecordType>()) {
1490 const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
1491 return RD->hasVolatileMember();
1495 /// EmitAggregateCopy - Emit an aggregate assignment.
1497 /// The difference to EmitAggregateCopy is that tail padding is not copied.
1498 /// This is required for correctness when assigning non-POD structures in C++.
1499 void EmitAggregateAssign(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1501 bool IsVolatile = hasVolatileMember(EltTy);
1502 EmitAggregateCopy(DestPtr, SrcPtr, EltTy, IsVolatile, CharUnits::Zero(),
1506 /// EmitAggregateCopy - Emit an aggregate copy.
1508 /// \param isVolatile - True iff either the source or the destination is
1510 /// \param isAssignment - If false, allow padding to be copied. This often
1511 /// yields more efficient code.
1512 void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
1513 QualType EltTy, bool isVolatile=false,
1514 CharUnits Alignment = CharUnits::Zero(),
1515 bool isAssignment = false);
1517 /// StartBlock - Start new block named N. If insert block is a dummy block
1519 void StartBlock(const char *N);
1521 /// GetAddrOfLocalVar - Return the address of a local variable.
1522 llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
1523 llvm::Value *Res = LocalDeclMap[VD];
1524 assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
1528 /// getOpaqueLValueMapping - Given an opaque value expression (which
1529 /// must be mapped to an l-value), return its mapping.
1530 const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
1531 assert(OpaqueValueMapping::shouldBindAsLValue(e));
1533 llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
1534 it = OpaqueLValues.find(e);
1535 assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
1539 /// getOpaqueRValueMapping - Given an opaque value expression (which
1540 /// must be mapped to an r-value), return its mapping.
1541 const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
1542 assert(!OpaqueValueMapping::shouldBindAsLValue(e));
1544 llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
1545 it = OpaqueRValues.find(e);
1546 assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
1550 /// getAccessedFieldNo - Given an encoded value and a result number, return
1551 /// the input field number being accessed.
1552 static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
1554 llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
1555 llvm::BasicBlock *GetIndirectGotoBlock();
1557 /// EmitNullInitialization - Generate code to set a value of the given type to
1558 /// null, If the type contains data member pointers, they will be initialized
1559 /// to -1 in accordance with the Itanium C++ ABI.
1560 void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
1562 // EmitVAArg - Generate code to get an argument from the passed in pointer
1563 // and update it accordingly. The return value is a pointer to the argument.
1564 // FIXME: We should be able to get rid of this method and use the va_arg
1565 // instruction in LLVM instead once it works well enough.
1566 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
1568 /// emitArrayLength - Compute the length of an array, even if it's a
1569 /// VLA, and drill down to the base element type.
1570 llvm::Value *emitArrayLength(const ArrayType *arrayType,
1572 llvm::Value *&addr);
1574 /// EmitVLASize - Capture all the sizes for the VLA expressions in
1575 /// the given variably-modified type and store them in the VLASizeMap.
1577 /// This function can be called with a null (unreachable) insert point.
1578 void EmitVariablyModifiedType(QualType Ty);
1580 /// getVLASize - Returns an LLVM value that corresponds to the size,
1581 /// in non-variably-sized elements, of a variable length array type,
1582 /// plus that largest non-variably-sized element type. Assumes that
1583 /// the type has already been emitted with EmitVariablyModifiedType.
1584 std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
1585 std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
1587 /// LoadCXXThis - Load the value of 'this'. This function is only valid while
1588 /// generating code for an C++ member function.
1589 llvm::Value *LoadCXXThis() {
1590 assert(CXXThisValue && "no 'this' value for this function");
1591 return CXXThisValue;
1594 /// LoadCXXVTT - Load the VTT parameter to base constructors/destructors have
1596 // FIXME: Every place that calls LoadCXXVTT is something
1597 // that needs to be abstracted properly.
1598 llvm::Value *LoadCXXVTT() {
1599 assert(CXXStructorImplicitParamValue && "no VTT value for this function");
1600 return CXXStructorImplicitParamValue;
1603 /// LoadCXXStructorImplicitParam - Load the implicit parameter
1604 /// for a constructor/destructor.
1605 llvm::Value *LoadCXXStructorImplicitParam() {
1606 assert(CXXStructorImplicitParamValue &&
1607 "no implicit argument value for this function");
1608 return CXXStructorImplicitParamValue;
1611 /// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
1612 /// complete class to the given direct base.
1614 GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
1615 const CXXRecordDecl *Derived,
1616 const CXXRecordDecl *Base,
1617 bool BaseIsVirtual);
1619 /// GetAddressOfBaseClass - This function will add the necessary delta to the
1620 /// load of 'this' and returns address of the base class.
1621 llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
1622 const CXXRecordDecl *Derived,
1623 CastExpr::path_const_iterator PathBegin,
1624 CastExpr::path_const_iterator PathEnd,
1625 bool NullCheckValue);
1627 llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
1628 const CXXRecordDecl *Derived,
1629 CastExpr::path_const_iterator PathBegin,
1630 CastExpr::path_const_iterator PathEnd,
1631 bool NullCheckValue);
1633 /// GetVTTParameter - Return the VTT parameter that should be passed to a
1634 /// base constructor/destructor with virtual bases.
1635 /// FIXME: VTTs are Itanium ABI-specific, so the definition should move
1636 /// to ItaniumCXXABI.cpp together with all the references to VTT.
1637 llvm::Value *GetVTTParameter(GlobalDecl GD, bool ForVirtualBase,
1640 void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
1641 CXXCtorType CtorType,
1642 const FunctionArgList &Args,
1643 SourceLocation Loc);
1644 // It's important not to confuse this and the previous function. Delegating
1645 // constructors are the C++11 feature. The constructor delegate optimization
1646 // is used to reduce duplication in the base and complete constructors where
1647 // they are substantially the same.
1648 void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
1649 const FunctionArgList &Args);
1650 void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
1651 bool ForVirtualBase, bool Delegating,
1653 CallExpr::const_arg_iterator ArgBeg,
1654 CallExpr::const_arg_iterator ArgEnd);
1656 void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
1657 llvm::Value *This, llvm::Value *Src,
1658 CallExpr::const_arg_iterator ArgBeg,
1659 CallExpr::const_arg_iterator ArgEnd);
1661 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1662 const ConstantArrayType *ArrayTy,
1663 llvm::Value *ArrayPtr,
1664 CallExpr::const_arg_iterator ArgBeg,
1665 CallExpr::const_arg_iterator ArgEnd,
1666 bool ZeroInitialization = false);
1668 void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
1669 llvm::Value *NumElements,
1670 llvm::Value *ArrayPtr,
1671 CallExpr::const_arg_iterator ArgBeg,
1672 CallExpr::const_arg_iterator ArgEnd,
1673 bool ZeroInitialization = false);
1675 static Destroyer destroyCXXObject;
1677 void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
1678 bool ForVirtualBase, bool Delegating,
1681 void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
1682 llvm::Value *NewPtr, llvm::Value *NumElements,
1683 llvm::Value *AllocSizeWithoutCookie);
1685 void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
1688 llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
1689 void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
1691 void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
1694 RValue EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
1695 const Expr *Arg, bool IsDelete);
1697 llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
1698 llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
1699 llvm::Value* EmitCXXUuidofExpr(const CXXUuidofExpr *E);
1701 /// \brief Situations in which we might emit a check for the suitability of a
1702 /// pointer or glvalue.
1703 enum TypeCheckKind {
1704 /// Checking the operand of a load. Must be suitably sized and aligned.
1706 /// Checking the destination of a store. Must be suitably sized and aligned.
1708 /// Checking the bound value in a reference binding. Must be suitably sized
1709 /// and aligned, but is not required to refer to an object (until the
1710 /// reference is used), per core issue 453.
1711 TCK_ReferenceBinding,
1712 /// Checking the object expression in a non-static data member access. Must
1713 /// be an object within its lifetime.
1715 /// Checking the 'this' pointer for a call to a non-static member function.
1716 /// Must be an object within its lifetime.
1718 /// Checking the 'this' pointer for a constructor call.
1719 TCK_ConstructorCall,
1720 /// Checking the operand of a static_cast to a derived pointer type. Must be
1721 /// null or an object within its lifetime.
1722 TCK_DowncastPointer,
1723 /// Checking the operand of a static_cast to a derived reference type. Must
1724 /// be an object within its lifetime.
1725 TCK_DowncastReference
1728 /// \brief Whether any type-checking sanitizers are enabled. If \c false,
1729 /// calls to EmitTypeCheck can be skipped.
1730 bool sanitizePerformTypeCheck() const;
1732 /// \brief Emit a check that \p V is the address of storage of the
1733 /// appropriate size and alignment for an object of type \p Type.
1734 void EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc, llvm::Value *V,
1735 QualType Type, CharUnits Alignment = CharUnits::Zero());
1737 /// \brief Emit a check that \p Base points into an array object, which
1738 /// we can access at index \p Index. \p Accessed should be \c false if
1739 /// this expression is used as an lvalue, for instance in "&Arr[Idx]".
1740 void EmitBoundsCheck(const Expr *E, const Expr *Base, llvm::Value *Index,
1741 QualType IndexType, bool Accessed);
1743 llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
1744 bool isInc, bool isPre);
1745 ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
1746 bool isInc, bool isPre);
1747 //===--------------------------------------------------------------------===//
1748 // Declaration Emission
1749 //===--------------------------------------------------------------------===//
1751 /// EmitDecl - Emit a declaration.
1753 /// This function can be called with a null (unreachable) insert point.
1754 void EmitDecl(const Decl &D);
1756 /// EmitVarDecl - Emit a local variable declaration.
1758 /// This function can be called with a null (unreachable) insert point.
1759 void EmitVarDecl(const VarDecl &D);
/// Emit initialization of a scalar lvalue from the given initializer
/// expression (first overload) or an already-emitted value (second).
1761 void EmitScalarInit(const Expr *init, const ValueDecl *D,
1762 LValue lvalue, bool capturedByInit);
1763 void EmitScalarInit(llvm::Value *init, LValue lvalue);
/// Signature for special-case variable-initialization callbacks.
1765 typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
1766 llvm::Value *Address);
1768 /// EmitAutoVarDecl - Emit an auto variable declaration.
1770 /// This function can be called with a null (unreachable) insert point.
1771 void EmitAutoVarDecl(const VarDecl &D);
/// Bookkeeping for the emission of a single automatic variable: produced by
/// EmitAutoVarAlloca and then consumed by EmitAutoVarInit and
/// EmitAutoVarCleanups.
1773 class AutoVarEmission {
1774 friend class CodeGenFunction;
/// The variable being emitted; null in the invalid sentinel.
1776 const VarDecl *Variable;
1778 /// The alignment of the variable.
1779 CharUnits Alignment;
1781 /// The address of the alloca. Null if the variable was emitted
1782 /// as a global constant.
1783 llvm::Value *Address;
/// NOTE(review): presumably the flag guarding named-return-value
/// optimization for this variable -- confirm against EmitAutoVarAlloca.
1785 llvm::Value *NRVOFlag;
1787 /// True if the variable is a __block variable.
1790 /// True if the variable is of aggregate type and has a constant
/// initializer.
1792 bool IsConstantAggregate;
1794 /// Non-null if we should use lifetime annotations.
1795 llvm::Value *SizeForLifetimeMarkers;
/// Construct the invalid sentinel emission (see invalid() below).
1798 AutoVarEmission(Invalid) : Variable(nullptr) {}
1800 AutoVarEmission(const VarDecl &variable)
1801 : Variable(&variable), Address(nullptr), NRVOFlag(nullptr),
1802 IsByRef(false), IsConstantAggregate(false),
1803 SizeForLifetimeMarkers(nullptr) {}
/// True if the variable was emitted as a global constant rather than an
/// alloca; Address is null exactly in that case.
1805 bool wasEmittedAsGlobal() const { return Address == nullptr; }
1808 static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
/// Lifetime markers are used iff a size was recorded for them.
1810 bool useLifetimeMarkers() const {
1811 return SizeForLifetimeMarkers != nullptr;
1813 llvm::Value *getSizeForLifetimeMarkers() const {
1814 assert(useLifetimeMarkers());
1815 return SizeForLifetimeMarkers;
1818 /// Returns the raw, allocated address, which is not necessarily
1819 /// the address of the object itself.
1820 llvm::Value *getAllocatedAddress() const {
1824 /// Returns the address of the object within this declaration.
1825 /// Note that this does not chase the forwarding pointer for
/// __block variables.
1827 llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
1828 if (!IsByRef) return Address;
// __block variables store the object in a field of the byref struct;
// compute that field's address with a struct GEP.
1830 return CGF.Builder.CreateStructGEP(Address,
1831 CGF.getByRefValueLLVMField(Variable),
1832 Variable->getNameAsString());
/// Allocate storage for an automatic variable and return the bookkeeping
/// record; initialization and cleanups are driven separately below.
1835 AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
1836 void EmitAutoVarInit(const AutoVarEmission &emission);
1837 void EmitAutoVarCleanups(const AutoVarEmission &emission);
1838 void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
1839 QualType::DestructionKind dtorKind);
/// Emit a variable with static storage duration, using \p Linkage for the
/// underlying global.
1841 void EmitStaticVarDecl(const VarDecl &D,
1842 llvm::GlobalValue::LinkageTypes Linkage);
1844 /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
1845 void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, bool ArgIsPointer,
1848 /// protectFromPeepholes - Protect a value that we're intending to
1849 /// store to the side, but which will probably be used later, from
1850 /// aggressive peepholing optimizations that might delete it.
1852 /// Pass the result to unprotectFromPeepholes to declare that
1853 /// protection is no longer required.
1855 /// There's no particular reason why this shouldn't apply to
1856 /// l-values, it's just that no existing peepholes work on pointers.
1857 PeepholeProtection protectFromPeepholes(RValue rvalue);
1858 void unprotectFromPeepholes(PeepholeProtection protection);
1860 //===--------------------------------------------------------------------===//
1861 // Statement Emission
1862 //===--------------------------------------------------------------------===//
1864 /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
1865 void EmitStopPoint(const Stmt *S);
1867 /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
1868 /// this function even if there is no current insertion point.
1870 /// This function may clear the current insertion point; callers should use
1871 /// EnsureInsertPoint if they wish to subsequently generate code without first
1872 /// calling EmitBlock, EmitBranch, or EmitStmt.
1873 void EmitStmt(const Stmt *S);
1875 /// EmitSimpleStmt - Try to emit a "simple" statement which does not
1876 /// necessarily require an insertion point or debug information; typically
1877 /// because the statement amounts to a jump or a container of other
1880 /// \return True if the statement was handled.
1881 bool EmitSimpleStmt(const Stmt *S);
1883 llvm::Value *EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
1884 AggValueSlot AVS = AggValueSlot::ignored());
1885 llvm::Value *EmitCompoundStmtWithoutScope(const CompoundStmt &S,
1886 bool GetLast = false,
1888 AggValueSlot::ignored());
1890 /// EmitLabel - Emit the block for the given label. It is legal to call this
1891 /// function even if there is no current insertion point.
1892 void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
1894 void EmitLabelStmt(const LabelStmt &S);
1895 void EmitAttributedStmt(const AttributedStmt &S);
1896 void EmitGotoStmt(const GotoStmt &S);
1897 void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
1898 void EmitIfStmt(const IfStmt &S);
/// Attach metadata derived from \p Attrs to the conditional branch
/// \p CondBr -- presumably loop-hint attributes; confirm in CGStmt.cpp.
1900 void EmitCondBrHints(llvm::LLVMContext &Context, llvm::BranchInst *CondBr,
1901 const ArrayRef<const Attr *> &Attrs);
1902 void EmitWhileStmt(const WhileStmt &S,
1903 const ArrayRef<const Attr *> &Attrs = None);
1904 void EmitDoStmt(const DoStmt &S, const ArrayRef<const Attr *> &Attrs = None);
1905 void EmitForStmt(const ForStmt &S,
1906 const ArrayRef<const Attr *> &Attrs = None);
1907 void EmitReturnStmt(const ReturnStmt &S);
1908 void EmitDeclStmt(const DeclStmt &S);
1909 void EmitBreakStmt(const BreakStmt &S);
1910 void EmitContinueStmt(const ContinueStmt &S);
1911 void EmitSwitchStmt(const SwitchStmt &S);
1912 void EmitDefaultStmt(const DefaultStmt &S);
1913 void EmitCaseStmt(const CaseStmt &S);
1914 void EmitCaseStmtRange(const CaseStmt &S);
1915 void EmitAsmStmt(const AsmStmt &S);
// Objective-C statement emission.
1917 void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
1918 void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
1919 void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
1920 void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
1921 void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
// C++ exception-handling and SEH statement emission.
1923 void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1924 void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
1926 void EmitCXXTryStmt(const CXXTryStmt &S);
1927 void EmitSEHTryStmt(const SEHTryStmt &S);
1928 void EmitSEHLeaveStmt(const SEHLeaveStmt &S);
1929 void EmitCXXForRangeStmt(const CXXForRangeStmt &S,
1930 const ArrayRef<const Attr *> &Attrs = None);
// Captured-statement (outlined region) emission.
1932 llvm::Function *EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K);
1933 llvm::Function *GenerateCapturedStmtFunction(const CapturedStmt &S);
1934 llvm::Value *GenerateCapturedStmtArgument(const CapturedStmt &S);
// OpenMP directive emission.
1936 void EmitOMPParallelDirective(const OMPParallelDirective &S);
1937 void EmitOMPSimdDirective(const OMPSimdDirective &S);
1938 void EmitOMPForDirective(const OMPForDirective &S);
1939 void EmitOMPSectionsDirective(const OMPSectionsDirective &S);
1940 void EmitOMPSectionDirective(const OMPSectionDirective &S);
1941 void EmitOMPSingleDirective(const OMPSingleDirective &S);
1942 void EmitOMPMasterDirective(const OMPMasterDirective &S);
1943 void EmitOMPCriticalDirective(const OMPCriticalDirective &S);
1944 void EmitOMPParallelForDirective(const OMPParallelForDirective &S);
1945 void EmitOMPParallelSectionsDirective(const OMPParallelSectionsDirective &S);
1946 void EmitOMPTaskDirective(const OMPTaskDirective &S);
1947 void EmitOMPTaskyieldDirective(const OMPTaskyieldDirective &S);
1948 void EmitOMPBarrierDirective(const OMPBarrierDirective &S);
1949 void EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S);
1950 void EmitOMPFlushDirective(const OMPFlushDirective &S);
1951 void EmitOMPOrderedDirective(const OMPOrderedDirective &S);
1952 void EmitOMPAtomicDirective(const OMPAtomicDirective &S);
1954 //===--------------------------------------------------------------------===//
1955 // LValue Expression Emission
1956 //===--------------------------------------------------------------------===//
1958 /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
1959 RValue GetUndefRValue(QualType Ty);
1961 /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
1962 /// and issue an ErrorUnsupported style diagnostic (using the
1964 RValue EmitUnsupportedRValue(const Expr *E,
1967 /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
1968 /// an ErrorUnsupported style diagnostic (using the provided Name).
1969 LValue EmitUnsupportedLValue(const Expr *E,
1972 /// EmitLValue - Emit code to compute a designator that specifies the location
1973 /// of the expression.
1975 /// This can return one of two things: a simple address or a bitfield
1976 /// reference. In either case, the LLVM Value* in the LValue structure is
1977 /// guaranteed to be an LLVM pointer type.
1979 /// If this returns a bitfield reference, nothing about the pointee type of
1980 /// the LLVM value is known: For example, it may not be a pointer to an
1983 /// If this returns a normal address, and if the lvalue's C type is fixed
1984 /// size, this method guarantees that the returned pointer type will point to
1985 /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
1986 /// variable length type, this is not possible.
1988 LValue EmitLValue(const Expr *E);
1990 /// \brief Same as EmitLValue but additionally we generate checking code to
1991 /// guard against undefined behavior. This is only suitable when we know
1992 /// that the address will be used to access the object.
1993 LValue EmitCheckedLValue(const Expr *E, TypeCheckKind TCK);
/// Load an rvalue of the given type from a temporary at \p addr.
1995 RValue convertTempToRValue(llvm::Value *addr, QualType type,
1996 SourceLocation Loc);
// _Atomic initialization, loads, and stores.
1998 void EmitAtomicInit(Expr *E, LValue lvalue);
2000 RValue EmitAtomicLoad(LValue lvalue, SourceLocation loc,
2001 AggValueSlot slot = AggValueSlot::ignored());
2003 void EmitAtomicStore(RValue rvalue, LValue lvalue, bool isInit);
2005 /// EmitToMemory - Change a scalar value from its value
2006 /// representation to its in-memory representation.
2007 llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
2009 /// EmitFromMemory - Change a scalar value from its memory
2010 /// representation to its value representation.
2011 llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
2013 /// EmitLoadOfScalar - Load a scalar value from an address, taking
2014 /// care to appropriately convert from the memory representation to
2015 /// the LLVM value representation.
2016 llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
2017 unsigned Alignment, QualType Ty,
2019 llvm::MDNode *TBAAInfo = nullptr,
2020 QualType TBAABaseTy = QualType(),
2021 uint64_t TBAAOffset = 0);
2023 /// EmitLoadOfScalar - Load a scalar value from an address, taking
2024 /// care to appropriately convert from the memory representation to
2025 /// the LLVM value representation. The l-value must be a simple
/// l-value.
2027 llvm::Value *EmitLoadOfScalar(LValue lvalue, SourceLocation Loc);
2029 /// EmitStoreOfScalar - Store a scalar value to an address, taking
2030 /// care to appropriately convert from the memory representation to
2031 /// the LLVM value representation.
2032 void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
2033 bool Volatile, unsigned Alignment, QualType Ty,
2034 llvm::MDNode *TBAAInfo = nullptr, bool isInit = false,
2035 QualType TBAABaseTy = QualType(),
2036 uint64_t TBAAOffset = 0);
2038 /// EmitStoreOfScalar - Store a scalar value to an address, taking
2039 /// care to appropriately convert from the memory representation to
2040 /// the LLVM value representation. The l-value must be a simple
2041 /// l-value. The isInit flag indicates whether this is an initialization.
2042 /// If so, atomic qualifiers are ignored and the store is always non-atomic.
2043 void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
2045 /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
2046 /// this method emits the address of the lvalue, then loads the result as an
2047 /// rvalue, returning the rvalue.
2048 RValue EmitLoadOfLValue(LValue V, SourceLocation Loc);
2049 RValue EmitLoadOfExtVectorElementLValue(LValue V);
2050 RValue EmitLoadOfBitfieldLValue(LValue LV);
2051 RValue EmitLoadOfGlobalRegLValue(LValue LV);
2053 /// EmitStoreThroughLValue - Store the specified rvalue into the specified
2054 /// lvalue, where both are guaranteed to have the same type, and that type
/// is 'Ty'.
2056 void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
2057 void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
2058 void EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst);
2060 /// EmitStoreThroughBitfieldLValue - Store Src into Dst with same constraints
2061 /// as EmitStoreThroughLValue.
2063 /// \param Result [out] - If non-null, this will be set to a Value* for the
2064 /// bit-field contents after the store, appropriate for use as the result of
2065 /// an assignment to the bit-field.
2066 void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
2067 llvm::Value **Result=nullptr);
2069 /// Emit an l-value for an assignment (simple or compound) of complex type.
2070 LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
2071 LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
// FIXME: "Compooound" is a long-standing misspelling of "Compound"; renaming
// would break the out-of-line definition and callers, so it is kept as-is.
2072 LValue EmitScalarCompooundAssignWithComplex(const CompoundAssignOperator *E,
2073 llvm::Value *&Result);
2075 // Note: only available for agg return types
2076 LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
2077 LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
2078 // Note: only available for agg return types
2079 LValue EmitCallExprLValue(const CallExpr *E);
2080 // Note: only available for agg return types
2081 LValue EmitVAArgExprLValue(const VAArgExpr *E);
// Per-expression-kind l-value emitters dispatched to by EmitLValue.
2082 LValue EmitDeclRefLValue(const DeclRefExpr *E);
2083 LValue EmitReadRegister(const VarDecl *VD);
2084 LValue EmitStringLiteralLValue(const StringLiteral *E);
2085 LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
2086 LValue EmitPredefinedLValue(const PredefinedExpr *E);
2087 LValue EmitUnaryOpLValue(const UnaryOperator *E);
2088 LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
2089 bool Accessed = false);
2090 LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
2091 LValue EmitMemberExpr(const MemberExpr *E);
2092 LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
2093 LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
2094 LValue EmitInitListLValue(const InitListExpr *E);
2095 LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
2096 LValue EmitCastLValue(const CastExpr *E);
2097 LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
2098 LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
/// Load the rvalue of field \p FD of the object denoted by \p LV.
2100 RValue EmitRValueForField(LValue LV, const FieldDecl *FD, SourceLocation Loc);
/// Result of an attempt to emit a DeclRefExpr as a constant: either a
/// constant value, a constant reference (address), or invalid (null).
2102 class ConstantEmission {
/// The constant plus one bit recording whether it is a reference.
2103 llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
2104 ConstantEmission(llvm::Constant *C, bool isReference)
2105 : ValueAndIsReference(C, isReference) {}
/// Default construction yields the invalid (null) emission.
2107 ConstantEmission() {}
/// Wrap a constant that is the address of the referenced object.
2108 static ConstantEmission forReference(llvm::Constant *C) {
2109 return ConstantEmission(C, true);
/// Wrap a constant that is the value itself.
2111 static ConstantEmission forValue(llvm::Constant *C) {
2112 return ConstantEmission(C, false);
/// True if any constant (value or reference) is held.
2115 LLVM_EXPLICIT operator bool() const {
2116 return ValueAndIsReference.getOpaqueValue() != nullptr;
2119 bool isReference() const { return ValueAndIsReference.getInt(); }
/// Build an l-value for the referenced object; only valid if isReference().
2120 LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
2121 assert(isReference());
2122 return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
2123 refExpr->getType());
/// The constant value; only valid if !isReference().
2126 llvm::Constant *getValue() const {
2127 assert(!isReference());
2128 return ValueAndIsReference.getPointer();
/// Try to fold \p refExpr to a constant; the result may be invalid.
2132 ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
2134 RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
2135 AggValueSlot slot = AggValueSlot::ignored());
2136 LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
/// Compute the byte offset of \p Ivar within \p Interface.
2138 llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
2139 const ObjCIvarDecl *Ivar);
2140 LValue EmitLValueForField(LValue Base, const FieldDecl* Field);
2141 LValue EmitLValueForLambdaField(const FieldDecl *Field);
2143 /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
2144 /// if the Field is a reference, this will return the address of the reference
2145 /// and not the address of the value stored in the reference.
2146 LValue EmitLValueForFieldInitialization(LValue Base,
2147 const FieldDecl* Field);
2149 LValue EmitLValueForIvar(QualType ObjectTy,
2150 llvm::Value* Base, const ObjCIvarDecl *Ivar,
2151 unsigned CVRQualifiers);
// L-value emitters for C++ expression forms.
2153 LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
2154 LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
2155 LValue EmitLambdaLValue(const LambdaExpr *E);
2156 LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
2157 LValue EmitCXXUuidofLValue(const CXXUuidofExpr *E);
// L-value emitters for Objective-C expression forms.
2159 LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
2160 LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
2161 LValue EmitStmtExprLValue(const StmtExpr *E);
2162 LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
2163 LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
/// Record a debug-info value for the declaration referenced by \p E.
2164 void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
2166 //===--------------------------------------------------------------------===//
2167 // Scalar Expression Emission
2168 //===--------------------------------------------------------------------===//
2170 /// EmitCall - Generate a call of the given function, expecting the given
2171 /// result type, and using the given argument list which specifies both the
2172 /// LLVM arguments and the types they were derived from.
2174 /// \param TargetDecl - If given, the decl of the function in a direct call;
2175 /// used to set attributes on the call (noreturn, etc.).
2176 RValue EmitCall(const CGFunctionInfo &FnInfo,
2177 llvm::Value *Callee,
2178 ReturnValueSlot ReturnValue,
2179 const CallArgList &Args,
2180 const Decl *TargetDecl = nullptr,
2181 llvm::Instruction **callOrInvoke = nullptr);
/// Overload that derives the CGFunctionInfo from the function type and
/// emits the arguments in [ArgBeg, ArgEnd).
2183 RValue EmitCall(QualType FnType, llvm::Value *Callee,
2184 SourceLocation CallLoc,
2185 ReturnValueSlot ReturnValue,
2186 CallExpr::const_arg_iterator ArgBeg,
2187 CallExpr::const_arg_iterator ArgEnd,
2188 const Decl *TargetDecl = nullptr);
2189 RValue EmitCallExpr(const CallExpr *E,
2190 ReturnValueSlot ReturnValue = ReturnValueSlot());
// Runtime-function call helpers; the Nounwind variants mark the call site
// as unable to unwind.
2192 llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
2193 const Twine &name = "");
2194 llvm::CallInst *EmitRuntimeCall(llvm::Value *callee,
2195 ArrayRef<llvm::Value*> args,
2196 const Twine &name = "");
2197 llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
2198 const Twine &name = "");
2199 llvm::CallInst *EmitNounwindRuntimeCall(llvm::Value *callee,
2200 ArrayRef<llvm::Value*> args,
2201 const Twine &name = "");
// Emit either a call or an invoke (returning a CallSite covering both),
// presumably depending on whether an EH landing pad is required -- see the
// definitions in CGCall.cpp/CGException.cpp.
2203 llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2204 ArrayRef<llvm::Value *> Args,
2205 const Twine &Name = "");
2206 llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
2207 const Twine &Name = "");
2208 llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
2209 ArrayRef<llvm::Value*> args,
2210 const Twine &name = "");
2211 llvm::CallSite EmitRuntimeCallOrInvoke(llvm::Value *callee,
2212 const Twine &name = "");
2213 void EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
2214 ArrayRef<llvm::Value*> args);
// Apple kext ABI: indirect virtual calls through the vtable.
2216 llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
2217 NestedNameSpecifier *Qual,
2220 llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
2222 const CXXRecordDecl *RD);
// C++ member-call emission.
2224 RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
2225 SourceLocation CallLoc,
2226 llvm::Value *Callee,
2227 ReturnValueSlot ReturnValue,
2229 llvm::Value *ImplicitParam,
2230 QualType ImplicitParamTy,
2231 CallExpr::const_arg_iterator ArgBeg,
2232 CallExpr::const_arg_iterator ArgEnd);
2233 RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
2234 ReturnValueSlot ReturnValue);
2235 RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
2236 ReturnValueSlot ReturnValue);
2238 llvm::Value *EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
2239 const CXXMethodDecl *MD,
2241 RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
2242 const CXXMethodDecl *MD,
2243 ReturnValueSlot ReturnValue);
2245 RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
2246 ReturnValueSlot ReturnValue);
2249 RValue EmitBuiltinExpr(const FunctionDecl *FD,
2250 unsigned BuiltinID, const CallExpr *E);
2252 RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
2254 /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
2255 /// is unhandled by the current target.
2256 llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
// Per-target builtin emitters and NEON helpers.
2258 llvm::Value *EmitAArch64CompareBuiltinExpr(llvm::Value *Op, llvm::Type *Ty,
2259 const llvm::CmpInst::Predicate Fp,
2260 const llvm::CmpInst::Predicate Ip,
2261 const llvm::Twine &Name = "");
2262 llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2264 llvm::Value *EmitCommonNeonBuiltinExpr(unsigned BuiltinID,
2265 unsigned LLVMIntrinsic,
2266 unsigned AltLLVMIntrinsic,
2267 const char *NameHint,
2270 SmallVectorImpl<llvm::Value *> &Ops,
2271 llvm::Value *Align = nullptr);
2272 llvm::Function *LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
2273 unsigned Modifier, llvm::Type *ArgTy,
2275 llvm::Value *EmitNeonCall(llvm::Function *F,
2276 SmallVectorImpl<llvm::Value*> &O,
2278 unsigned shift = 0, bool rightshift = false);
2279 llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
2280 llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
2281 bool negateForRightShift);
2282 llvm::Value *EmitNeonRShiftImm(llvm::Value *Vec, llvm::Value *Amt,
2283 llvm::Type *Ty, bool usgn, const char *name);
2284 // Helper functions for EmitAArch64BuiltinExpr.
2285 llvm::Value *vectorWrapScalar8(llvm::Value *Op);
2286 llvm::Value *vectorWrapScalar16(llvm::Value *Op);
2287 llvm::Value *emitVectorWrappedScalar8Intrinsic(
2288 unsigned Int, SmallVectorImpl<llvm::Value *> &Ops, const char *Name);
2289 llvm::Value *emitVectorWrappedScalar16Intrinsic(
2290 unsigned Int, SmallVectorImpl<llvm::Value *> &Ops, const char *Name);
2291 llvm::Value *EmitAArch64BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2292 llvm::Value *EmitNeon64Call(llvm::Function *F,
2293 llvm::SmallVectorImpl<llvm::Value *> &O,
2296 llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
2297 llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2298 llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
2299 llvm::Value *EmitR600BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
// Objective-C expression and literal emission.
2301 llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
2302 llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
2303 llvm::Value *EmitObjCBoxedExpr(const ObjCBoxedExpr *E);
2304 llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
2305 llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
2306 llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
2307 const ObjCMethodDecl *MethodWithObjects,
2308 const ObjCMethodDecl *AllocMethod);
2309 llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
2310 RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
2311 ReturnValueSlot Return = ReturnValueSlot());
2313 /// Retrieves the default cleanup kind for an ARC cleanup.
2314 /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
2315 CleanupKind getARCCleanupKind() {
2316 return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
2317 ? NormalAndEHCleanup : NormalCleanup;
// ARC __weak reference manipulation.
2321 void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
2322 void EmitARCDestroyWeak(llvm::Value *addr);
2323 llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
2324 llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
2325 llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
2327 void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
2328 void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
// ARC retain/release/autorelease operations.
2329 llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
2330 llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
2331 llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
2332 bool resultIgnored);
2333 llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
2334 bool resultIgnored);
2335 llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
2336 llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
2337 llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
2338 void EmitARCDestroyStrong(llvm::Value *addr, ARCPreciseLifetime_t precise);
2339 void EmitARCRelease(llvm::Value *value, ARCPreciseLifetime_t precise);
2340 llvm::Value *EmitARCAutorelease(llvm::Value *value);
2341 llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
2342 llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
2343 llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
// Assignment forms that also produce the stored value.
2345 std::pair<LValue,llvm::Value*>
2346 EmitARCStoreAutoreleasing(const BinaryOperator *e);
2347 std::pair<LValue,llvm::Value*>
2348 EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
2350 llvm::Value *EmitObjCThrowOperand(const Expr *expr);
2352 llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
2353 llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
2354 llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
2356 llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
2357 llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
2358 llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
2360 void EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values);
// Destroyer callbacks used for ARC-managed object cleanups.
2362 static Destroyer destroyARCStrongImprecise;
2363 static Destroyer destroyARCStrongPrecise;
2364 static Destroyer destroyARCWeak;
// Autorelease-pool push/pop (ARC and MRR flavors).
2366 void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
2367 llvm::Value *EmitObjCAutoreleasePoolPush();
2368 llvm::Value *EmitObjCMRRAutoreleasePoolPush();
2369 void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
2370 void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
2372 /// \brief Emits a reference binding to the passed in expression.
2373 RValue EmitReferenceBindingToExpr(const Expr *E);
2375 //===--------------------------------------------------------------------===//
2376 // Expression Emission
2377 //===--------------------------------------------------------------------===//
2379 // Expressions are broken into three classes: scalar, complex, aggregate.
2381 /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
2382 /// scalar type, returning the result.
2383 llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
2385 /// EmitScalarConversion - Emit a conversion from the specified type to the
2386 /// specified destination type, both of which are LLVM scalar types.
2387 llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
2390 /// EmitComplexToScalarConversion - Emit a conversion from the specified
2391 /// complex type to the specified destination type, where the destination type
2392 /// is an LLVM scalar type.
2393 llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
2397 /// EmitAggExpr - Emit the computation of the specified expression
2398 /// of aggregate type. The result is computed into the given slot,
2399 /// which may be null to indicate that the value is not needed.
2400 void EmitAggExpr(const Expr *E, AggValueSlot AS);
2402 /// EmitAggExprToLValue - Emit the computation of the specified expression of
2403 /// aggregate type into a temporary LValue.
2404 LValue EmitAggExprToLValue(const Expr *E);
2406 /// EmitGCMemmoveCollectable - Emit special API for structs with object
2408 void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
2411 /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
2412 /// make sure it survives garbage collection until this point.
2413 void EmitExtendGCLifetime(llvm::Value *object);
2415 /// EmitComplexExpr - Emit the computation of the specified expression of
2416 /// complex type, returning the result.
2417 ComplexPairTy EmitComplexExpr(const Expr *E,
2418 bool IgnoreReal = false,
2419 bool IgnoreImag = false);
2421 /// EmitComplexExprIntoLValue - Emit the given expression of complex
2422 /// type and place its result into the specified l-value.
2423 void EmitComplexExprIntoLValue(const Expr *E, LValue dest, bool isInit);
2425 /// EmitStoreOfComplex - Store a complex number into the specified l-value.
2426 void EmitStoreOfComplex(ComplexPairTy V, LValue dest, bool isInit);
2428 /// EmitLoadOfComplex - Load a complex number from the specified l-value.
2429 ComplexPairTy EmitLoadOfComplex(LValue src, SourceLocation loc);
2431 /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
2432 /// a static local variable.
2433 llvm::Constant *CreateStaticVarDecl(const VarDecl &D,
2434 llvm::GlobalValue::LinkageTypes Linkage);
2436 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
2437 /// global variable that has already been created for it. If the initializer
2438 /// has a different type than GV does, this may free GV and return a different
2439 /// one. Otherwise it just returns GV.
2440 llvm::GlobalVariable *
2441 AddInitializerToStaticVarDecl(const VarDecl &D,
2442 llvm::GlobalVariable *GV);
2445 /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
2446 /// variable with global storage.
2447 void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
2450 /// Call atexit() with a function that passes the given argument to
2451 /// the given function.
2452 void registerGlobalDtorWithAtExit(const VarDecl &D, llvm::Constant *fn,
2453 llvm::Constant *addr);
2455 /// Emit code in this function to perform a guarded variable
2456 /// initialization. Guarded initializations are used when it's not
2457 /// possible to prove that an initialization will be done exactly
2458 /// once, e.g. with a static local variable or a static data member
2459 /// of a class template.
2460 void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
2463 /// GenerateCXXGlobalInitFunc - Generates code for initializing global
2465 void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
2466 ArrayRef<llvm::Constant *> Decls,
2467 llvm::GlobalVariable *Guard = nullptr);
2469 /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
2471 void GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
2472 const std::vector<std::pair<llvm::WeakVH,
2473 llvm::Constant*> > &DtorsAndObjects);
2475 void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
2477 llvm::GlobalVariable *Addr,
2480 void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
2482 void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
/// Enter the cleanups scope for a full-expression; fast path when there
/// are no cleanup objects, otherwise delegates to the non-trivial form.
2485 void enterFullExpression(const ExprWithCleanups *E) {
2486 if (E->getNumObjects() == 0) return;
2487 enterNonTrivialFullExpression(E);
2489 void enterNonTrivialFullExpression(const ExprWithCleanups *E);
2491 void EmitCXXThrowExpr(const CXXThrowExpr *E, bool KeepInsertionPoint = true);
2493 void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
2495 RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = nullptr);
2497 //===--------------------------------------------------------------------===//
2498 // Annotations Emission
2499 //===--------------------------------------------------------------------===//
2501 /// Emit an annotation call (intrinsic or builtin).
2502 llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
2503 llvm::Value *AnnotatedVal,
2504 StringRef AnnotationStr,
2505 SourceLocation Location);
2507 /// Emit local annotations for the local variable V, declared by D.
2508 void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
2510 /// Emit field annotations for the given field & value. Returns the
2511 /// annotation result.
2512 llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
2514 //===--------------------------------------------------------------------===//
2516 //===--------------------------------------------------------------------===//
2518 /// ContainsLabel - Return true if the statement contains a label in it. If
2519 /// this statement is not executed normally, it not containing a label means
2520 /// that we can just remove the code.
2521 static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
2523 /// containsBreak - Return true if the statement contains a break out of it.
2524 /// If the statement (recursively) contains a switch or loop with a break
2525 /// inside of it, this is fine.
2526 static bool containsBreak(const Stmt *S);
2528 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2529 /// to a constant, or if it does but contains a label, return false. If it
2530 /// constant folds return true and set the boolean result in Result.
2531 bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);
2533 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
2534 /// to a constant, or if it does but contains a label, return false. If it
2535 /// constant folds return true and set the folded value.
2536 bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &Result);
2538 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
2539 /// if statement) to the specified blocks. Based on the condition, this might
2540 /// try to simplify the codegen of the conditional based on the branch.
2541 /// TrueCount should be the number of times we expect the condition to
2542 /// evaluate to true based on PGO data.
2543 void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
2544 llvm::BasicBlock *FalseBlock, uint64_t TrueCount);
2546 /// \brief Emit a description of a type in a format suitable for passing to
2547 /// a runtime sanitizer handler.
2548 llvm::Constant *EmitCheckTypeDescriptor(QualType T);
2550 /// \brief Convert a value into a format suitable for passing to a runtime
2551 /// sanitizer handler.
2552 llvm::Value *EmitCheckValue(llvm::Value *V);
2554 /// \brief Emit a description of a source location in a format suitable for
2555 /// passing to a runtime sanitizer handler.
2556 llvm::Constant *EmitCheckSourceLocation(SourceLocation Loc);
2558 /// \brief Specify under what conditions this check can be recovered
2559 enum CheckRecoverableKind {
2560 /// Always terminate program execution if this check fails
2562 /// Check supports recovering, allows user to specify which
2564 /// Runtime conditionally aborts, always need to support recovery.
2565 CRK_AlwaysRecoverable
2568 /// \brief Create a basic block that will call a handler function in a
2569 /// sanitizer runtime with the provided arguments, and create a conditional
/// branch to it.
2571 void EmitCheck(llvm::Value *Checked, StringRef CheckName,
2572 ArrayRef<llvm::Constant *> StaticArgs,
2573 ArrayRef<llvm::Value *> DynamicArgs,
2574 CheckRecoverableKind Recoverable);
2576 /// \brief Create a basic block that will call the trap intrinsic, and emit a
2577 /// conditional branch to it, for the -ftrapv checks.
2578 void EmitTrapCheck(llvm::Value *Checked);
2580 /// EmitCallArg - Emit a single call argument.
2581 void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
2583 /// EmitDelegateCallArg - We are performing a delegate call; that
2584 /// is, the current function is delegating to another one. Produce
2585 /// a r-value suitable for passing the given parameter.
2586 void EmitDelegateCallArg(CallArgList &args, const VarDecl *param,
2587 SourceLocation loc);
2589 /// SetFPAccuracy - Set the minimum required accuracy of the given floating
2590 /// point operation, expressed as the maximum relative error in ulp.
2591 void SetFPAccuracy(llvm::Value *Val, float Accuracy);
/// Return range metadata describing the values a load of type Ty may
/// produce. NOTE(review): presumably null when no useful range applies --
/// confirm against the definition.
2594 llvm::MDNode *getRangeForLoadFromType(QualType Ty);
/// Emit a return of the given r-value RV, whose declared type is Ty.
2595 void EmitReturnOfRValue(RValue RV, QualType Ty);
/// Record that placeholder instruction Old must eventually be replaced by
/// New; the pair is queued in DeferredReplacements below.
2597 void deferPlaceholderReplacement(llvm::Instruction *Old, llvm::Value *New);
/// Pending (placeholder, replacement) pairs registered via
/// deferPlaceholderReplacement.
2599 llvm::SmallVector<std::pair<llvm::Instruction *, llvm::Value *>, 4>
2600 DeferredReplacements;
2602 /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
2603 /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
2605 /// \param AI - The first function argument of the expansion.
2606 /// \return The argument following the last expanded function
2608 llvm::Function::arg_iterator
2609 ExpandTypeFromArgs(QualType Ty, LValue Dst,
2610 llvm::Function::arg_iterator AI);
2612 /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
2613 /// Ty, into individual arguments on the provided vector \arg Args. See
2614 /// ABIArgInfo::Expand.
2615 void ExpandTypeToArgs(QualType Ty, RValue Src,
2616 SmallVectorImpl<llvm::Value *> &Args,
2617 llvm::FunctionType *IRFuncTy);
/// Emit the value of an inline-asm input operand described by Info,
/// appending to ConstraintStr as required.
2619 llvm::Value* EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2620 const Expr *InputExpr, std::string &ConstraintStr);
/// As EmitAsmInput, but for an input already materialized as an l-value of
/// the given type.
2622 llvm::Value* EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
2623 LValue InputValue, QualType InputType,
2624 std::string &ConstraintStr,
2625 SourceLocation Loc);
2628 /// EmitCallArgs - Emit call arguments for a function.
/// CallArgTypeInfo may be null; in that case the arguments are emitted with
/// their own expression types and extra arguments are permitted (see the
/// else-branch below).
2629 template <typename T>
2630 void EmitCallArgs(CallArgList &Args, const T *CallArgTypeInfo,
2631 CallExpr::const_arg_iterator ArgBeg,
2632 CallExpr::const_arg_iterator ArgEnd,
2633 bool ForceColumnInfo = false) {
2634 if (CallArgTypeInfo) {
2635 EmitCallArgs(Args, CallArgTypeInfo->isVariadic(),
2636 CallArgTypeInfo->param_type_begin(),
2637 CallArgTypeInfo->param_type_end(), ArgBeg, ArgEnd,
2640 // T::param_type_iterator might not have a default ctor.
2641 const QualType *NoIter = nullptr;
2642 EmitCallArgs(Args, /*AllowExtraArguments=*/true, NoIter, NoIter, ArgBeg,
2643 ArgEnd, ForceColumnInfo);
/// Emit call arguments, checking each actual argument against the declared
/// parameter type taken from [ArgTypeBeg, ArgTypeEnd); any trailing
/// arguments (variadic / extra) are emitted with their own types.
2647 template<typename ArgTypeIterator>
2648 void EmitCallArgs(CallArgList& Args,
2649 bool AllowExtraArguments,
2650 ArgTypeIterator ArgTypeBeg,
2651 ArgTypeIterator ArgTypeEnd,
2652 CallExpr::const_arg_iterator ArgBeg,
2653 CallExpr::const_arg_iterator ArgEnd,
2654 bool ForceColumnInfo = false) {
2655 SmallVector<QualType, 16> ArgTypes;
2656 CallExpr::const_arg_iterator Arg = ArgBeg;
2658 // First, use the argument types that the type info knows about
2659 for (ArgTypeIterator I = ArgTypeBeg, E = ArgTypeEnd; I != E; ++I, ++Arg) {
2660 assert(Arg != ArgEnd && "Running over edge of argument list!");
2662 QualType ArgType = *I;
2663 QualType ActualArgType = Arg->getType();
// Special case pointer-to-VLA parameters: the actual argument's array
// type may lack a size expression, in which case we fall back to the
// declared parameter type so the type-equality assert below still holds.
2664 if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
2665 QualType ActualBaseType =
2666 ActualArgType->getAs<PointerType>()->getPointeeType();
2667 QualType ArgBaseType =
2668 ArgType->getAs<PointerType>()->getPointeeType();
2669 if (ArgBaseType->isVariableArrayType()) {
2670 if (const VariableArrayType *VAT =
2671 getContext().getAsVariableArrayType(ActualBaseType)) {
2672 if (!VAT->getSizeExpr())
2673 ActualArgType = ArgType;
// Sanity-check that the declared parameter type (modulo references)
// matches the actual argument's canonical type.
2677 assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
2679 getContext().getCanonicalType(ActualArgType).getTypePtr() &&
2680 "type mismatch in call argument!");
2682 ArgTypes.push_back(*I);
2685 // Either we've emitted all the call args, or we have a call to variadic
2686 // function or some other call that allows extra arguments.
2687 assert((Arg == ArgEnd || AllowExtraArguments) &&
2688 "Extra arguments in non-variadic function!");
2690 // If we still have any arguments, emit them using the type of the argument.
2691 for (; Arg != ArgEnd; ++Arg)
2692 ArgTypes.push_back(Arg->getType());
2694 EmitCallArgs(Args, ArgTypes, ArgBeg, ArgEnd, ForceColumnInfo);
/// Emit the arguments in [ArgBeg, ArgEnd) using the pre-computed declared
/// types in ArgTypes (one per argument).
2697 void EmitCallArgs(CallArgList &Args, ArrayRef<QualType> ArgTypes,
2698 CallExpr::const_arg_iterator ArgBeg,
2699 CallExpr::const_arg_iterator ArgEnd,
2700 bool ForceColumnInfo = false);
/// Convenience accessor for the target-specific code-generation hooks owned
/// by the CodeGenModule.
2703 const TargetCodeGenInfo &getTargetHooks() const {
2704 return CGM.getTargetCodeGenInfo();
/// Emit metadata for the declarations in this function. NOTE(review): see
/// the definition for exactly what is attached.
2707 void EmitDeclMetadata();
/// Build the helper functions needed for a __block (byref) variable of the
/// given layout type. NOTE(review): semantics inferred from the name --
/// confirm against the definition in CGBlocks.cpp.
2709 CodeGenModule::ByrefHelpers *
2710 buildByrefHelpers(llvm::StructType &byrefType,
2711 const AutoVarEmission &emission);
/// Attach ObjC ARC exception-related metadata to Inst. NOTE(review):
/// confirm the exact metadata kind against the definition.
2713 void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
2715 /// EmitPointerWithAlignment - Given an expression with a pointer type, emit
2716 /// the value and compute our best estimate of the alignment of the pointee.
2717 std::pair<llvm::Value*, unsigned> EmitPointerWithAlignment(const Expr *Addr);
2720 /// Helper class with most of the code for saving a value for a
2721 /// conditional expression cleanup.
2722 struct DominatingLLVMValue {
// The pointer is either the value itself or an alloca it was spilled to;
// the bool records which (true = spilled, see save/restore below).
2723 typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
2725 /// Answer whether the given value needs extra work to be saved.
2726 static bool needsSaving(llvm::Value *value) {
2727 // If it's not an instruction, we don't need to save.
2728 if (!isa<llvm::Instruction>(value)) return false;
2730 // If it's an instruction in the entry block, we don't need to save.
2731 llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
2732 return (block != &block->getParent()->getEntryBlock());
2735 /// Try to save the given value.
2736 static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
2737 if (!needsSaving(value)) return saved_type(value, false);
2739 // Otherwise we need an alloca.
2740 llvm::Value *alloca =
2741 CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
2742 CGF.Builder.CreateStore(value, alloca);
2744 return saved_type(alloca, true);
/// Restore a previously saved value, reloading from the spill alloca when
/// the saved bit indicates one was used.
2747 static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
2748 if (!value.getInt()) return value.getPointer();
2749 return CGF.Builder.CreateLoad(value.getPointer());
2753 /// A partial specialization of DominatingValue for llvm::Values that
2754 /// might be llvm::Instructions.
2755 template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
// Narrow the generic llvm::Value* produced by the base class back to T*.
2757 static type restore(CodeGenFunction &CGF, saved_type value) {
2758 return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
2762 /// A specialization of DominatingValue for RValue.
2763 template <> struct DominatingValue<RValue> {
2764 typedef RValue type;
/// How the r-value was captured. NOTE(review): the 'Literal' kinds appear
/// to hold the value directly and the 'Address' kinds a spill location --
/// confirm against the implementations in CGExprCXX.cpp.
2766 enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
2767 AggregateAddress, ComplexAddress };
2771 saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
// Out-of-line members of saved_type; see the note below for where the
// implementations live.
2774 static bool needsSaving(RValue value);
2775 static saved_type save(CodeGenFunction &CGF, RValue value);
2776 RValue restore(CodeGenFunction &CGF);
2778 // implementations in CGExprCXX.cpp
// DominatingValue interface: forward straight to the nested saved_type.
2781 static bool needsSaving(type value) {
2782 return saved_type::needsSaving(value);
2784 static saved_type save(CodeGenFunction &CGF, type value) {
2785 return saved_type::save(CGF, value);
2787 static type restore(CodeGenFunction &CGF, saved_type value) {
2788 return value.restore(CGF);
2792 } // end namespace CodeGen
2793 } // end namespace clang