} else {
if (BDRE->getCopyConstructorExpr()) {
E = BDRE->getCopyConstructorExpr();
- // Code to destruct copy-constructed descriptor element for
- // copied-in class object.
- // TODO: Refactor this into common code with mostly similar
- // CodeGenFunction::EmitLocalBlockVarDecl
- QualType DtorTy = E->getType();
- if (const RecordType *RT = DtorTy->getAs<RecordType>())
- if (CXXRecordDecl *ClassDecl =
- dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- if (!ClassDecl->hasTrivialDestructor()) {
- const CXXDestructorDecl *D = ClassDecl->getDestructor();
- assert(D && "BuildBlockLiteralTmp - destructor is nul");
- {
- // Normal destruction.
- DelayedCleanupBlock Scope(*this);
- EmitCXXDestructorCall(D, Dtor_Complete,
- /*ForVirtualBase=*/false, Addr);
- // Make sure to jump to the exit block.
- EmitBranch(Scope.getCleanupExitBlock());
- }
- if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
- EmitCXXDestructorCall(D, Dtor_Complete,
- /*ForVirtualBase=*/false, Addr);
- }
- }
- }
+ PushDestructorCleanup(E->getType(), Addr);
}
else {
E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
llvm::Value *BlockObjectAssign;
llvm::Value *BlockObjectDispose;
- const llvm::Type *PtrToInt8Ty;
+ const llvm::PointerType *PtrToInt8Ty;
std::map<uint64_t, llvm::Constant *> AssignCache;
std::map<uint64_t, llvm::Constant *> DestroyCache;
return EmitAnyExprToTemp(E);
}
+/// Emits a call or invoke instruction to the given function, depending
+/// on the current state of the EH stack.
+///
+/// \param Callee the function (or function pointer) to call
+/// \param ArgBegin,ArgEnd the range of arguments to pass
+/// \param Name the name to give the result value
+/// \return the emitted CallInst or InvokeInst, wrapped as a CallSite
+llvm::CallSite
+CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
+ llvm::Value * const *ArgBegin,
+ llvm::Value * const *ArgEnd,
+ const llvm::Twine &Name) {
+ llvm::BasicBlock *InvokeDest = getInvokeDest();
+ // No landing pad in scope: a plain call is sufficient.
+ if (!InvokeDest)
+ return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name);
+
+ // Otherwise emit an invoke that unwinds to the current landing pad,
+ // and continue normal emission in the continuation block.
+ llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
+ llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
+ ArgBegin, ArgEnd, Name);
+ EmitBlock(ContBB);
+ return Invoke;
+}
+
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::Value *Callee,
ReturnValueSlot ReturnValue,
}
- llvm::BasicBlock *InvokeDest = getInvokeDest();
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
AttributeList.end());
+ llvm::BasicBlock *InvokeDest = 0;
+ if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
+ InvokeDest = getInvokeDest();
+
llvm::CallSite CS;
- if (!InvokeDest || (Attrs.getFnAttributes() & llvm::Attribute::NoUnwind)) {
+ if (!InvokeDest) {
CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
if (CGF.Exceptions && !BaseClassDecl->hasTrivialDestructor()) {
// FIXME: Is this OK for C++0x delegating constructors?
- CodeGenFunction::EHCleanupBlock Cleanup(CGF);
+ CodeGenFunction::CleanupBlock Cleanup(CGF, CodeGenFunction::EHCleanup);
CXXDestructorDecl *DD = BaseClassDecl->getDestructor();
CGF.EmitCXXDestructorCall(DD, Dtor_Base, isBaseVirtual, V);
QualType T,
unsigned Index) {
if (Index == MemberInit->getNumArrayIndices()) {
- CodeGenFunction::CleanupScope Cleanups(CGF);
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
llvm::Value *Dest = LHS.getAddress();
if (ArrayIndexVar) {
llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
{
- CodeGenFunction::CleanupScope Cleanups(CGF);
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
// Inside the loop body recurse to emit the inner loop or, eventually, the
// constructor call.
CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
if (!RD->hasTrivialDestructor()) {
// FIXME: Is this OK for C++0x delegating constructors?
- CodeGenFunction::EHCleanupBlock Cleanup(CGF);
+ CodeGenFunction::CleanupBlock Cleanup(CGF, CodeGenFunction::EHCleanup);
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue LHS = CGF.EmitLValueForField(ThisPtr, Field, 0);
if (IsTryBody)
TryInfo = EnterCXXTryStmt(*cast<CXXTryStmt>(Body));
- unsigned CleanupStackSize = CleanupEntries.size();
+ EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
// Emit the constructor prologue, i.e. the base and member
// initializers.
// initializers, which includes (along the exceptional path) the
// destructors for those members and bases that were fully
// constructed.
- EmitCleanupBlocks(CleanupStackSize);
+ PopCleanupBlocks(CleanupDepth);
if (IsTryBody)
ExitCXXTryStmt(*cast<CXXTryStmt>(Body), TryInfo);
B != E; ++B) {
CXXBaseOrMemberInitializer *Member = (*B);
- assert(LiveTemporaries.empty() &&
- "Should not have any live temporaries at initializer start!");
-
if (Member->isBaseInitializer())
EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
else
InitializeVTablePointers(ClassDecl);
- for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I) {
- assert(LiveTemporaries.empty() &&
- "Should not have any live temporaries at initializer start!");
-
+ for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I)
EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args);
- }
}
/// EmitDestructorBody - Emits the body of the current destructor.
if (isTryBody)
TryInfo = EnterCXXTryStmt(*cast<CXXTryStmt>(Body));
- llvm::BasicBlock *DtorEpilogue = createBasicBlock("dtor.epilogue");
- PushCleanupBlock(DtorEpilogue);
+ // Emit the destructor epilogue now. If this is a complete
+ // destructor with a function-try-block, perform the base epilogue
+ // as well.
+ //
+ // FIXME: This isn't really right, because an exception in the
+ // non-EH epilogue should jump to the appropriate place in the
+ // EH epilogue.
+ {
+ CleanupBlock Cleanup(*this, NormalCleanup);
+
+ if (isTryBody && DtorType == Dtor_Complete)
+ EmitDtorEpilogue(Dtor, Dtor_Base);
+ EmitDtorEpilogue(Dtor, DtorType);
+
+ if (Exceptions) {
+ Cleanup.beginEHCleanup();
+
+ if (isTryBody && DtorType == Dtor_Complete)
+ EmitDtorEpilogue(Dtor, Dtor_Base);
+ EmitDtorEpilogue(Dtor, DtorType);
+ }
+ }
bool SkipBody = false; // should get jump-threaded
// nothing to do besides what's in the epilogue
}
- // Jump to the cleanup block.
- CleanupBlockInfo Info = PopCleanupBlock();
- assert(Info.CleanupBlock == DtorEpilogue && "Block mismatch!");
- EmitBlock(DtorEpilogue);
-
- // Emit the destructor epilogue now. If this is a complete
- // destructor with a function-try-block, perform the base epilogue
- // as well.
- if (isTryBody && DtorType == Dtor_Complete)
- EmitDtorEpilogue(Dtor, Dtor_Base);
- EmitDtorEpilogue(Dtor, DtorType);
-
- // Link up the cleanup information.
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
+ // We're done with the epilogue cleanup.
+ PopCleanupBlock();
// Exit the try if applicable.
if (isTryBody)
// Keep track of the current number of live temporaries.
{
- CXXTemporariesCleanupScope Scope(*this);
+ RunCleanupsScope Scope(*this);
EmitCXXConstructorCall(D, Ctor_Complete, /*ForVirtualBase=*/false, Address,
ArgBeg, ArgEnd);
EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
}
+/// PushDestructorCleanup - Push a cleanup that runs the complete
+/// destructor for an object of the given type at the given address.
+/// Does nothing if T is not a class type or has a trivial destructor.
+void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
+ CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
+ if (!ClassDecl) return;
+ if (ClassDecl->hasTrivialDestructor()) return;
+
+ const CXXDestructorDecl *D = ClassDecl->getDestructor();
+
+ // Destroy the object along the normal exit path.
+ CleanupBlock Scope(*this, NormalCleanup);
+
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Addr);
+
+ // If exceptions are enabled, also destroy it along the EH path.
+ if (Exceptions) {
+ Scope.beginEHCleanup();
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false, Addr);
+ }
+}
+
llvm::Value *
CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
const CXXRecordDecl *ClassDecl,
/// EmitLocalBlockVarDecl - Emit code and set up an entry in LocalDeclMap for a
/// variable declaration with auto, register, or no storage class specifier.
/// These turn into simple stack objects, or GlobalValues depending on target.
-void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
+void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D,
+ SpecialInitFn *SpecialInit) {
QualType Ty = D.getType();
bool isByRef = D.hasAttr<BlocksAttr>();
bool needsDispose = false;
{
// Push a cleanup block and restore the stack there.
- DelayedCleanupBlock scope(*this);
+ CleanupBlock scope(*this, NormalCleanup);
V = Builder.CreateLoad(Stack, "tmp");
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
}
}
- if (Init) {
+ if (SpecialInit) {
+ SpecialInit(*this, D, DeclPtr);
+ } else if (Init) {
llvm::Value *Loc = DeclPtr;
if (isByRef)
Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
EmitAggExpr(Init, Loc, isVolatile);
}
}
-
+
// Handle CXX destruction of variables.
QualType DtorTy(Ty);
while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
if (const ConstantArrayType *Array =
getContext().getAsConstantArrayType(Ty)) {
- {
- DelayedCleanupBlock Scope(*this);
- QualType BaseElementTy = getContext().getBaseElementType(Array);
- const llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- llvm::Value *BaseAddrPtr =
- Builder.CreateBitCast(Loc, BasePtr);
- EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
-
- // Make sure to jump to the exit block.
- EmitBranch(Scope.getCleanupExitBlock());
- }
+ CleanupBlock Scope(*this, NormalCleanup);
+
+ QualType BaseElementTy = getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr =
+ Builder.CreateBitCast(Loc, BasePtr);
+ EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
+
if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
+ Scope.beginEHCleanup();
+
QualType BaseElementTy = getContext().getBaseElementType(Array);
const llvm::Type *BasePtr = ConvertType(BaseElementTy);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
EmitCXXAggrDestructorCall(D, Array, BaseAddrPtr);
}
} else {
- {
- // Normal destruction.
- DelayedCleanupBlock Scope(*this);
-
- if (NRVO) {
- // If we exited via NRVO, we skip the destructor call.
- llvm::BasicBlock *NoNRVO = createBasicBlock("nrvo.unused");
- Builder.CreateCondBr(Builder.CreateLoad(NRVOFlag, "nrvo.val"),
- Scope.getCleanupExitBlock(),
- NoNRVO);
- EmitBlock(NoNRVO);
- }
-
- // We don't call the destructor along the normal edge if we're
- // applying the NRVO.
- EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
- Loc);
-
- // Make sure to jump to the exit block.
- EmitBranch(Scope.getCleanupExitBlock());
+ // Normal destruction.
+ CleanupBlock Scope(*this, NormalCleanup);
+
+ llvm::BasicBlock *SkipDtor = 0;
+ if (NRVO) {
+ // If we exited via NRVO, we skip the destructor call.
+ llvm::BasicBlock *NoNRVO = createBasicBlock("nrvo.unused");
+ SkipDtor = createBasicBlock("nrvo.skipdtor");
+ Builder.CreateCondBr(Builder.CreateLoad(NRVOFlag, "nrvo.val"),
+ SkipDtor,
+ NoNRVO);
+ EmitBlock(NoNRVO);
}
+ // We don't call the destructor along the normal edge if we're
+ // applying the NRVO.
+ EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
+ Loc);
+
+ if (NRVO) EmitBlock(SkipDtor);
+
+ // Along the exceptions path we always execute the dtor.
if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
+ Scope.beginEHCleanup();
EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
Loc);
}
//
// To fix this we insert a bitcast here.
QualType ArgTy = Info.arg_begin()->type;
- {
- DelayedCleanupBlock scope(*this);
- CallArgList Args;
- Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
- ConvertType(ArgTy))),
- getContext().getPointerType(D.getType())));
- EmitCall(Info, F, ReturnValueSlot(), Args);
- }
+ CleanupBlock CleanupScope(*this, NormalCleanup);
+
+ // Normal cleanup.
+ CallArgList Args;
+ Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
+ ConvertType(ArgTy))),
+ getContext().getPointerType(D.getType())));
+ EmitCall(Info, F, ReturnValueSlot(), Args);
+
+ // EH cleanup.
if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
+ CleanupScope.beginEHCleanup();
CallArgList Args;
Args.push_back(std::make_pair(RValue::get(Builder.CreateBitCast(DeclPtr,
}
if (needsDispose && CGM.getLangOptions().getGCMode() != LangOptions::GCOnly) {
- {
- DelayedCleanupBlock scope(*this);
- llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
- V = Builder.CreateLoad(V);
- BuildBlockRelease(V);
- }
+ CleanupBlock CleanupScope(*this, NormalCleanup);
+
+ llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
+ V = Builder.CreateLoad(V);
+ BuildBlockRelease(V);
+
// FIXME: Turn this on and audit the codegen
if (0 && Exceptions) {
- EHCleanupBlock Cleanup(*this);
+ CleanupScope.beginEHCleanup();
+
llvm::Value *V = Builder.CreateStructGEP(DeclPtr, 1, "forwarding");
V = Builder.CreateLoad(V);
BuildBlockRelease(V);
EmitBlock(InitCheckBlock);
// Variables used when coping with thread-safe statics and exceptions.
- llvm::BasicBlock *SavedLandingPad = 0;
- llvm::BasicBlock *LandingPad = 0;
if (ThreadsafeStatics) {
// Call __cxa_guard_acquire.
V = Builder.CreateCall(getGuardAcquireFn(*this), GuardVariable);
Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
InitBlock, EndBlock);
+ // Call __cxa_guard_abort along the exceptional edge.
if (Exceptions) {
- SavedLandingPad = getInvokeDest();
- LandingPad = createBasicBlock("guard.lpad");
- setInvokeDest(LandingPad);
+ CleanupBlock Cleanup(*this, EHCleanup);
+ Builder.CreateCall(getGuardAbortFn(*this), GuardVariable);
}
EmitBlock(InitBlock);
EmitDeclInit(*this, D, GV);
if (ThreadsafeStatics) {
- // Call __cxa_guard_release.
+ // Call __cxa_guard_release. This cannot throw.
Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);
} else {
llvm::Value *One =
if (!D.getType()->isReferenceType())
EmitDeclDestroy(*this, D, GV);
- if (ThreadsafeStatics && Exceptions) {
- // If an exception is thrown during initialization, call __cxa_guard_abort
- // along the exceptional edge.
- EmitBranch(EndBlock);
-
- // Construct the landing pad.
- EmitBlock(LandingPad);
-
- // Personality function and LLVM intrinsics.
- llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
- (VMContext),
- true),
- "__gxx_personality_v0");
- Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty);
- llvm::Value *llvm_eh_exception =
- CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
-
- // Exception object
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
-
- // Call the selector function.
- const llvm::PointerType *PtrToInt8Ty
- = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
- llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
- llvm::Value* SelectorArgs[3] = { Exc, Personality, Null };
- Builder.CreateCall(llvm_eh_selector, SelectorArgs, SelectorArgs + 3,
- "selector");
- Builder.CreateStore(Exc, RethrowPtr);
-
- // Call __cxa_guard_abort along the exceptional edge.
- Builder.CreateCall(getGuardAbortFn(*this), GuardVariable);
-
- setInvokeDest(SavedLandingPad);
-
- // Rethrow the current exception.
- if (getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getUnwindResumeOrRethrowFn(), Cont,
- getInvokeDest(),
- Builder.CreateLoad(RethrowPtr));
- EmitBlock(Cont);
- } else
- Builder.CreateCall(getUnwindResumeOrRethrowFn(),
- Builder.CreateLoad(RethrowPtr));
-
- Builder.CreateUnreachable();
- }
-
EmitBlock(EndBlock);
}
#include "clang/AST/StmtCXX.h"
#include "llvm/Intrinsics.h"
+#include "llvm/Support/CallSite.h"
#include "CodeGenFunction.h"
+#include "CGException.h"
+
using namespace clang;
using namespace CodeGen;
+/// Push an entry of the given size onto this protected-scope stack.
+/// The stack grows downward inside the buffer: StartOfData points at
+/// the most recently pushed entry.  Returns uninitialized storage for
+/// the caller to placement-new a scope object into.
+char *EHScopeStack::allocate(size_t Size) {
+ if (!StartOfBuffer) {
+ // First use: allocate a buffer of at least 1K, doubling until the
+ // requested entry fits, and start with the stack empty (data
+ // pointer at the very end).
+ unsigned Capacity = 1024;
+ while (Capacity < Size) Capacity *= 2;
+ StartOfBuffer = new char[Capacity];
+ StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
+ } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
+ // Not enough headroom: double the capacity until the new entry
+ // fits, then copy the live entries to the end of the new buffer.
+ unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
+ unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);
+
+ unsigned NewCapacity = CurrentCapacity;
+ do {
+ NewCapacity *= 2;
+ } while (NewCapacity < UsedCapacity + Size);
+
+ char *NewStartOfBuffer = new char[NewCapacity];
+ char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
+ char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
+ memcpy(NewStartOfData, StartOfData, UsedCapacity);
+ delete [] StartOfBuffer;
+ StartOfBuffer = NewStartOfBuffer;
+ EndOfBuffer = NewEndOfBuffer;
+ StartOfData = NewStartOfData;
+ }
+
+ // Bump the data pointer down to carve out the new entry.
+ assert(StartOfBuffer + Size <= StartOfData);
+ StartOfData -= Size;
+ return StartOfData;
+}
+
+/// Returns the innermost EH cleanup scope at or enclosing the given
+/// position, or stable_end() if there is none.
+EHScopeStack::stable_iterator
+EHScopeStack::getEnclosingEHCleanup(iterator it) const {
+ assert(it != end());
+ do {
+ if (isa<EHCleanupScope>(*it)) {
+ // A cleanup scope either is itself an EH cleanup, or it carries a
+ // cached link to the enclosing one, so we can stop searching here.
+ if (cast<EHCleanupScope>(*it).isEHCleanup())
+ return stabilize(it);
+ return cast<EHCleanupScope>(*it).getEnclosingEHCleanup();
+ }
+ ++it;
+ } while (it != end());
+ return stable_end();
+}
+
+
+/// Push a cleanup scope with the given normal and EH entry/exit
+/// blocks.  Either half may be absent (null entry block); the
+/// innermost-cleanup links are only updated for halves that are
+/// present.
+void EHScopeStack::pushCleanup(llvm::BasicBlock *NormalEntry,
+ llvm::BasicBlock *NormalExit,
+ llvm::BasicBlock *EHEntry,
+ llvm::BasicBlock *EHExit) {
+ char *Buffer = allocate(EHCleanupScope::getSize());
+ // The new scope records the current fixup depth and the previous
+ // innermost cleanups so they can be restored on pop.
+ new (Buffer) EHCleanupScope(BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHCleanup,
+ NormalEntry, NormalExit, EHEntry, EHExit);
+ if (NormalEntry)
+ InnermostNormalCleanup = stable_begin();
+ if (EHEntry)
+ InnermostEHCleanup = stable_begin();
+}
+
+/// Pop the innermost cleanup scope, restoring the innermost-cleanup
+/// links it saved and trimming branch fixups that are now resolved.
+void EHScopeStack::popCleanup() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHCleanup = Cleanup.getEnclosingEHCleanup();
+ StartOfData += EHCleanupScope::getSize();
+
+ // Check whether we can shrink the branch-fixups stack.
+ if (!BranchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups())
+ BranchFixups.clear();
+
+ // Otherwise we can still trim out unnecessary nulls.
+ else
+ popNullFixups();
+ }
+}
+
+/// Push an exception-specification filter scope with room for the
+/// given number of filter types; the caller fills the types in.
+EHFilterScope *EHScopeStack::pushFilter(unsigned NumFilters) {
+ char *Buffer = allocate(EHFilterScope::getSizeForNumFilters(NumFilters));
+ CatchDepth++;
+ return new (Buffer) EHFilterScope(NumFilters);
+}
+
+/// Pop the innermost filter scope off the stack.
+void EHScopeStack::popFilter() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ // The scope's size depends on how many filter types it holds.
+ EHFilterScope &Filter = cast<EHFilterScope>(*begin());
+ StartOfData += EHFilterScope::getSizeForNumFilters(Filter.getNumFilters());
+
+ assert(CatchDepth > 0 && "mismatched filter push/pop");
+ CatchDepth--;
+}
+
+/// Push a catch scope with room for the given number of handlers;
+/// the caller fills the handlers in.
+EHCatchScope *EHScopeStack::pushCatch(unsigned NumHandlers) {
+ char *Buffer = allocate(EHCatchScope::getSizeForNumHandlers(NumHandlers));
+ CatchDepth++;
+ return new (Buffer) EHCatchScope(NumHandlers);
+}
+
+/// Push a terminate scope onto the EH stack.  Counts as a catch
+/// scope for CatchDepth purposes.
+void EHScopeStack::pushTerminate() {
+ char *Buffer = allocate(EHTerminateScope::getSize());
+ CatchDepth++;
+ new (Buffer) EHTerminateScope();
+}
+
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ assert(hasNormalCleanups());
+
+ // The innermost normal cleanup's fixup depth bounds how far we may pop.
+ EHScopeStack::iterator it = find(InnermostNormalCleanup);
+ unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
+ assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
+
+ // Discard resolved (null-destination) fixups from the top.
+ while (BranchFixups.size() > MinSize &&
+ BranchFixups.back().Destination == 0)
+ BranchFixups.pop_back();
+}
+
+/// Null out every branch fixup whose destination is the given block,
+/// marking it resolved, then trim resolved fixups off the top of the
+/// stack via popNullFixups().
+void EHScopeStack::resolveBranchFixups(llvm::BasicBlock *Dest) {
+ assert(Dest && "null block passed to resolveBranchFixups");
+
+ if (BranchFixups.empty()) return;
+ assert(hasNormalCleanups() &&
+ "branch fixups exist with no normal cleanups on stack");
+
+ for (unsigned I = 0, E = BranchFixups.size(); I != E; ++I)
+ if (BranchFixups[I].Destination == Dest)
+ BranchFixups[I].Destination = 0;
+
+ popNullFixups();
+}
+
static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
// void *__cxa_allocate_exception(size_t thrown_size);
const llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
}
+/// Builds a declaration of the Itanium ABI runtime entry point
+/// __cxa_get_exception_ptr.
+static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
+ // void *__cxa_get_exception_ptr(void*);
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+ std::vector<const llvm::Type*> Args(1, Int8PtrTy);
+
+ const llvm::FunctionType *FTy =
+ llvm::FunctionType::get(Int8PtrTy, Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
+}
+
static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
- // void* __cxa_begin_catch();
+ // void *__cxa_begin_catch(void*);
const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
std::vector<const llvm::Type*> Args(1, Int8PtrTy);
CGF.CGM.getLangOptions().CPlusPlus ? "_ZSt9terminatev" : "abort");
}
-static llvm::Constant *getPersonalityFn(CodeGenModule &CGM) {
- const char *PersonalityFnName = "__gcc_personality_v0";
- LangOptions Opts = CGM.getLangOptions();
- if (Opts.CPlusPlus) {
- if (Opts.SjLjExceptions)
- PersonalityFnName = "__gxx_personality_sj0";
+/// Returns the name of the personality function used for plain C code.
+static const char *getCPersonalityFn(CodeGenFunction &CGF) {
+ return "__gcc_personality_v0";
+}
+
+/// Returns the name of the personality function used for Objective-C
+/// code; the choice depends on the runtime and ABI in use.
+static const char *getObjCPersonalityFn(CodeGenFunction &CGF) {
+ if (CGF.CGM.getLangOptions().NeXTRuntime) {
+ if (CGF.CGM.getLangOptions().ObjCNonFragileABI)
+ return "__objc_personality_v0";
+ else
+ // The fragile NeXT ABI falls back to the plain C personality.
+ return getCPersonalityFn(CGF);
+ } else {
+ return "__gnu_objc_personality_v0";
+ }
+}
+
+/// Returns the name of the personality function used for C++ code,
+/// selecting the SJLJ variant when SJLJ exceptions are enabled.
+static const char *getCXXPersonalityFn(CodeGenFunction &CGF) {
+ if (CGF.CGM.getLangOptions().SjLjExceptions)
+ return "__gxx_personality_sj0";
+ else
+ return "__gxx_personality_v0";
+}
+
+/// Determines the personality function to use when both C++
+/// and Objective-C exceptions are being caught.
+static const char *getObjCXXPersonalityFn(CodeGenFunction &CGF) {
+ // The ObjC personality defers to the C++ personality for non-ObjC
+ // handlers. Unlike the C++ case, we use the same personality
+ // function on targets using (backend-driven) SJLJ EH.
+ if (CGF.CGM.getLangOptions().NeXTRuntime) {
+ if (CGF.CGM.getLangOptions().ObjCNonFragileABI)
+ return "__objc_personality_v0";
+
+ // In the fragile ABI, just use C++ exception handling and hope
+ // they're not doing crazy exception mixing.
else
- PersonalityFnName = "__gxx_personality_v0";
- } else if (Opts.ObjC1) {
- if (Opts.NeXTRuntime) {
- if (Opts.ObjCNonFragileABI)
- PersonalityFnName = "__gcc_personality_v0";
- } else
- PersonalityFnName = "__gnu_objc_personality_v0";
+ return getCXXPersonalityFn(CGF);
}
+ // I'm pretty sure the GNU runtime doesn't support mixed EH.
+ // TODO: we don't necessarily need mixed EH here; remember what
+ // kind of exceptions we actually try to catch in this function.
+ CGF.CGM.ErrorUnsupported(CGF.CurCodeDecl,
+ "the GNU Objective C runtime does not support "
+ "catching C++ and Objective C exceptions in the "
+ "same function");
+ // Use the C++ personality just to avoid returning null.
+ return getCXXPersonalityFn(CGF);
+}
+
+static llvm::Constant *getPersonalityFn(CodeGenFunction &CGF) {
+ const char *Name;
+ const LangOptions &Opts = CGF.CGM.getLangOptions();
+ if (Opts.CPlusPlus && Opts.ObjC1)
+ Name = getObjCXXPersonalityFn(CGF);
+ else if (Opts.CPlusPlus)
+ Name = getCXXPersonalityFn(CGF);
+ else if (Opts.ObjC1)
+ Name = getObjCPersonalityFn(CGF);
+ else
+ Name = getCPersonalityFn(CGF);
+
llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(
- CGM.getLLVMContext()),
- true),
- PersonalityFnName);
- return llvm::ConstantExpr::getBitCast(Personality, CGM.PtrToInt8Ty);
+ CGF.CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ llvm::Type::getInt32Ty(
+ CGF.CGM.getLLVMContext()),
+ true),
+ Name);
+ return llvm::ConstantExpr::getBitCast(Personality, CGF.CGM.PtrToInt8Ty);
+}
+
+/// Returns the value to inject into a selector to indicate the
+/// presence of a catch-all.  Here that is a null i8*.
+static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
+ // Possibly we should use @llvm.eh.catch.all.value here.
+ return llvm::ConstantPointerNull::get(CGF.CGM.PtrToInt8Ty);
+}
+
+/// Returns the value to inject into a selector to indicate the
+/// presence of a cleanup.  Here that is the i32 constant zero.
+static llvm::Constant *getCleanupValue(CodeGenFunction &CGF) {
+ return llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
+}
// Emits an exception expression into the given location. This
llvm::AllocaInst *ExnLocVar =
CGF.CreateTempAlloca(ExnLoc->getType(), "exnobj.var");
- llvm::BasicBlock *SavedInvokeDest = CGF.getInvokeDest();
+ // Make sure the exception object is cleaned up if there's an
+ // exception during initialization.
+ // FIXME: StmtExprs probably force this to include a non-EH
+ // handler.
{
- CodeGenFunction::EHCleanupBlock Cleanup(CGF);
+ CodeGenFunction::CleanupBlock Cleanup(CGF, CodeGenFunction::EHCleanup);
llvm::BasicBlock *FreeBB = CGF.createBasicBlock("free-exnobj");
llvm::BasicBlock *DoneBB = CGF.createBasicBlock("free-exnobj.done");
CGF.Builder.CreateCondBr(ShouldFree, FreeBB, DoneBB);
CGF.EmitBlock(FreeBB);
llvm::Value *ExnLocLocal = CGF.Builder.CreateLoad(ExnLocVar, "exnobj");
- CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal);
+ CGF.Builder.CreateCall(getFreeExceptionFn(CGF), ExnLocLocal)
+ ->setDoesNotThrow();
CGF.EmitBlock(DoneBB);
}
- llvm::BasicBlock *Cleanup = CGF.getInvokeDest();
+ EHScopeStack::stable_iterator Cleanup = CGF.EHStack.stable_begin();
CGF.Builder.CreateStore(ExnLoc, ExnLocVar);
CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(CGF.getLLVMContext()),
CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
ShouldFreeVar);
- // Pop the cleanup block if it's still the top of the cleanup stack.
- // Otherwise, temporaries have been created and our cleanup will get
- // properly removed in time.
- // TODO: this is not very resilient.
- if (CGF.getInvokeDest() == Cleanup)
- CGF.setInvokeDest(SavedInvokeDest);
-}
-
-// CopyObject - Utility to copy an object. Calls copy constructor as necessary.
-// N is casted to the right type.
-static void CopyObject(CodeGenFunction &CGF, QualType ObjectType,
- bool WasPointer, bool WasPointerReference,
- llvm::Value *E, llvm::Value *N) {
- // Store the throw exception in the exception object.
- if (WasPointer || !CGF.hasAggregateLLVMType(ObjectType)) {
- llvm::Value *Value = E;
- if (!WasPointer)
- Value = CGF.Builder.CreateLoad(Value);
- const llvm::Type *ValuePtrTy = Value->getType()->getPointerTo(0);
- if (WasPointerReference) {
- llvm::Value *Tmp = CGF.CreateTempAlloca(Value->getType(), "catch.param");
- CGF.Builder.CreateStore(Value, Tmp);
- Value = Tmp;
- ValuePtrTy = Value->getType()->getPointerTo(0);
- }
- N = CGF.Builder.CreateBitCast(N, ValuePtrTy);
- CGF.Builder.CreateStore(Value, N);
- } else {
- const llvm::Type *Ty = CGF.ConvertType(ObjectType)->getPointerTo(0);
- const CXXRecordDecl *RD;
- RD = cast<CXXRecordDecl>(ObjectType->getAs<RecordType>()->getDecl());
- llvm::Value *This = CGF.Builder.CreateBitCast(N, Ty);
- if (RD->hasTrivialCopyConstructor()) {
- CGF.EmitAggregateCopy(This, E, ObjectType);
- } else if (CXXConstructorDecl *CopyCtor
- = RD->getCopyConstructor(CGF.getContext(), 0)) {
- llvm::Value *Src = E;
-
- // Stolen from EmitClassAggrMemberwiseCopy
- llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor,
- Ctor_Complete);
- CallArgList CallArgs;
- CallArgs.push_back(std::make_pair(RValue::get(This),
- CopyCtor->getThisType(CGF.getContext())));
-
- // Push the Src ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Src),
- CopyCtor->getParamDecl(0)->getType()));
-
- const FunctionProtoType *FPT
- = CopyCtor->getType()->getAs<FunctionProtoType>();
- CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
- Callee, ReturnValueSlot(), CallArgs, CopyCtor);
- } else
- llvm_unreachable("uncopyable object");
+ // Technically, the exception object is like a temporary; it has to
+ // be cleaned up when its full-expression is complete.
+ // Unfortunately, the AST represents full-expressions by creating a
+ // CXXExprWithTemporaries, which it only does when there are actually
+ // temporaries.
+ //
+ // If any cleanups have been added since we pushed ours, they must
+ // be from temporaries; this will get popped at the same time.
+ // Otherwise we need to pop ours off. FIXME: this is very brittle.
+ if (Cleanup == CGF.EHStack.stable_begin())
+ CGF.PopCleanupBlock();
+}
+
+/// Lazily creates (on first use) and returns the function-wide
+/// i8* alloca used to hold the current exception pointer.
+llvm::Value *CodeGenFunction::getExceptionSlot() {
+ if (!ExceptionSlot) {
+ const llvm::Type *i8p = llvm::Type::getInt8PtrTy(getLLVMContext());
+ ExceptionSlot = CreateTempAlloca(i8p, "exn.slot");
+ }
+ return ExceptionSlot;
+}
void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
if (!E->getSubExpr()) {
if (getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getReThrowFn(*this), Cont, getInvokeDest())
+ Builder.CreateInvoke(getReThrowFn(*this),
+ getUnreachableBlock(),
+ getInvokeDest())
->setDoesNotReturn();
- EmitBlock(Cont);
- } else
+ } else {
Builder.CreateCall(getReThrowFn(*this))->setDoesNotReturn();
- Builder.CreateUnreachable();
+ Builder.CreateUnreachable();
+ }
// Clear the insertion point to indicate we are in unreachable code.
Builder.ClearInsertionPoint();
uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
- llvm::Value *ExceptionPtr =
+ llvm::CallInst *ExceptionPtr =
Builder.CreateCall(AllocExceptionFn,
llvm::ConstantInt::get(SizeTy, TypeSize),
"exception");
+ ExceptionPtr->setDoesNotThrow();
EmitAnyExprToExn(*this, E->getSubExpr(), ExceptionPtr);
if (!Dtor) Dtor = llvm::Constant::getNullValue(Int8PtrTy);
if (getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
llvm::InvokeInst *ThrowCall =
- Builder.CreateInvoke3(getThrowFn(*this), Cont, getInvokeDest(),
+ Builder.CreateInvoke3(getThrowFn(*this),
+ getUnreachableBlock(), getInvokeDest(),
ExceptionPtr, TypeInfo, Dtor);
ThrowCall->setDoesNotReturn();
- EmitBlock(Cont);
} else {
llvm::CallInst *ThrowCall =
Builder.CreateCall3(getThrowFn(*this), ExceptionPtr, TypeInfo, Dtor);
ThrowCall->setDoesNotReturn();
+ Builder.CreateUnreachable();
}
- Builder.CreateUnreachable();
// Clear the insertion point to indicate we are in unreachable code.
Builder.ClearInsertionPoint();
if (!Proto->hasExceptionSpec())
return;
- llvm::Constant *Personality = getPersonalityFn(CGM);
- llvm::Value *llvm_eh_exception =
- CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- const llvm::IntegerType *Int8Ty;
- const llvm::PointerType *PtrToInt8Ty;
- Int8Ty = llvm::Type::getInt8Ty(VMContext);
- // C string type. Used in lots of places.
- PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
- llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
- llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
-
- llvm::BasicBlock *PrevLandingPad = getInvokeDest();
- llvm::BasicBlock *EHSpecHandler = createBasicBlock("ehspec.handler");
- llvm::BasicBlock *Match = createBasicBlock("match");
- llvm::BasicBlock *Unwind = 0;
-
- assert(PrevLandingPad == 0 && "EHSpec has invoke context");
- (void)PrevLandingPad;
-
- llvm::BasicBlock *Cont = createBasicBlock("cont");
-
- EmitBranchThroughCleanup(Cont);
-
- // Emit the statements in the try {} block
- setInvokeDest(EHSpecHandler);
-
- EmitBlock(EHSpecHandler);
- // Exception object
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
-
- SelectorArgs.push_back(Exc);
- SelectorArgs.push_back(Personality);
- SelectorArgs.push_back(llvm::ConstantInt::get(Int32Ty,
- Proto->getNumExceptions()+1));
-
- for (unsigned i = 0; i < Proto->getNumExceptions(); ++i) {
- QualType Ty = Proto->getExceptionType(i);
- QualType ExceptType
- = Ty.getNonReferenceType().getUnqualifiedType();
- llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true);
- SelectorArgs.push_back(EHType);
- }
- if (Proto->getNumExceptions())
- SelectorArgs.push_back(Null);
-
- // Find which handler was matched.
- llvm::Value *Selector
- = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(),
- SelectorArgs.end(), "selector");
- if (Proto->getNumExceptions()) {
- Unwind = createBasicBlock("Unwind");
-
- Builder.CreateStore(Exc, RethrowPtr);
- Builder.CreateCondBr(Builder.CreateICmpSLT(Selector,
- llvm::ConstantInt::get(Int32Ty, 0)),
- Match, Unwind);
-
- EmitBlock(Match);
- }
- Builder.CreateCall(getUnexpectedFn(*this), Exc)->setDoesNotReturn();
- Builder.CreateUnreachable();
+ unsigned NumExceptions = Proto->getNumExceptions();
+ EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
- if (Proto->getNumExceptions()) {
- EmitBlock(Unwind);
- Builder.CreateCall(getUnwindResumeOrRethrowFn(),
- Builder.CreateLoad(RethrowPtr));
- Builder.CreateUnreachable();
+ for (unsigned I = 0; I != NumExceptions; ++I) {
+ QualType Ty = Proto->getExceptionType(I);
+ QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
+ llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType, true);
+ Filter->setFilter(I, EHType);
}
-
- EmitBlock(Cont);
}
void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
if (!Proto->hasExceptionSpec())
return;
- setInvokeDest(0);
+ EHStack.popFilter();
}
void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
CodeGenFunction::CXXTryStmtInfo
CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S) {
- CXXTryStmtInfo Info;
- Info.SavedLandingPad = getInvokeDest();
- Info.HandlerBlock = createBasicBlock("try.handler");
- Info.FinallyBlock = createBasicBlock("finally");
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers);
+
+ for (unsigned I = 0; I != NumHandlers; ++I) {
+ const CXXCatchStmt *C = S.getHandler(I);
+
+ llvm::BasicBlock *Handler = createBasicBlock("catch");
+ if (C->getExceptionDecl()) {
+ // FIXME: Dropping the reference type on the type info makes it
+ // impossible to correctly implement catch-by-reference
+ // semantics for pointers. Unfortunately, this is what all
+ // existing compilers do, and it's not clear that the standard
+ // personality routine is capable of doing this right. See C++ DR 388:
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388
+ QualType CaughtType = C->getCaughtType();
+ CaughtType = CaughtType.getNonReferenceType().getUnqualifiedType();
+ llvm::Value *TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, true);
+ CatchScope->setHandler(I, TypeInfo, Handler);
+ } else {
+ // No exception decl indicates '...', a catch-all.
+ CatchScope->setCatchAllHandler(I, Handler);
+ }
+ }
+
+ return CXXTryStmtInfo();
+}
+
+/// Check whether this is a non-EH scope, i.e. a scope which doesn't
+/// affect exception handling. Currently, the only non-EH scopes are
+/// normal-only cleanup scopes.
+static bool isNonEHScope(const EHScope &S) {
+ return isa<EHCleanupScope>(S) && !cast<EHCleanupScope>(S).isEHCleanup();
+}
- PushCleanupBlock(Info.FinallyBlock);
- setInvokeDest(Info.HandlerBlock);
+llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
+ assert(EHStack.requiresLandingPad());
+ assert(!EHStack.empty());
- return Info;
+ // Check the innermost scope for a cached landing pad. If this is
+ // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
+ llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
+ if (LP) return LP;
+
+ // Build the landing pad for this scope.
+ LP = EmitLandingPad();
+ assert(LP);
+
+ // Cache the landing pad on the innermost scope. If this is a
+ // non-EH scope, cache the landing pad on the enclosing scope, too.
+ for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) {
+ ir->setCachedLandingPad(LP);
+ if (!isNonEHScope(*ir)) break;
+ }
+
+ return LP;
}
-void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S,
- CXXTryStmtInfo TryInfo) {
- // Pointer to the personality function
- llvm::Constant *Personality = getPersonalityFn(CGM);
- llvm::Value *llvm_eh_exception =
- CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
-
- llvm::BasicBlock *PrevLandingPad = TryInfo.SavedLandingPad;
- llvm::BasicBlock *TryHandler = TryInfo.HandlerBlock;
- llvm::BasicBlock *FinallyBlock = TryInfo.FinallyBlock;
- llvm::BasicBlock *FinallyRethrow = createBasicBlock("finally.throw");
- llvm::BasicBlock *FinallyEnd = createBasicBlock("finally.end");
-
- // Jump to end if there is no exception
- EmitBranchThroughCleanup(FinallyEnd);
-
- llvm::BasicBlock *TerminateHandler = getTerminateHandler();
-
- // Emit the handlers
- EmitBlock(TryHandler);
-
- const llvm::IntegerType *Int8Ty;
- const llvm::PointerType *PtrToInt8Ty;
- Int8Ty = llvm::Type::getInt8Ty(VMContext);
- // C string type. Used in lots of places.
- PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
- llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
- llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
- llvm::Value *llvm_eh_typeid_for =
- CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
- // Exception object
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
-
- SelectorArgs.push_back(Exc);
- SelectorArgs.push_back(Personality);
-
- bool HasCatchAll = false;
- for (unsigned i = 0; i<S.getNumHandlers(); ++i) {
- const CXXCatchStmt *C = S.getHandler(i);
- VarDecl *CatchParam = C->getExceptionDecl();
- if (CatchParam) {
- // C++ [except.handle]p3 indicates that top-level cv-qualifiers
- // are ignored.
- QualType CaughtType = C->getCaughtType().getNonReferenceType();
- llvm::Value *EHTypeInfo
- = CGM.GetAddrOfRTTIDescriptor(CaughtType.getUnqualifiedType(), true);
- SelectorArgs.push_back(EHTypeInfo);
- } else {
- // null indicates catch all
- SelectorArgs.push_back(Null);
- HasCatchAll = true;
+llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
+ assert(EHStack.requiresLandingPad());
+
+ // This function contains a hack to work around a design flaw in
+ // LLVM's EH IR which breaks semantics after inlining. This same
+ // hack is implemented in llvm-gcc.
+ //
+ // The LLVM EH abstraction is basically a thin veneer over the
+ // traditional GCC zero-cost design: for each range of instructions
+ // in the function, there is (at most) one "landing pad" with an
+ // associated chain of EH actions. A language-specific personality
+ // function interprets this chain of actions and (1) decides whether
+ // or not to resume execution at the landing pad and (2) if so,
+ // provides an integer indicating why it's stopping. In LLVM IR,
+ // the association of a landing pad with a range of instructions is
+ // achieved via an invoke instruction, the chain of actions becomes
+ // the arguments to the @llvm.eh.selector call, and the selector
+ // call returns the integer indicator. Other than the required
+ // presence of two intrinsic function calls in the landing pad,
+ // the IR exactly describes the layout of the output code.
+ //
+ // A principal advantage of this design is that it is completely
+ // language-agnostic; in theory, the LLVM optimizers can treat
+ // landing pads neutrally, and targets need only know how to lower
+ // the intrinsics to have a functioning exceptions system (assuming
+ // that platform exceptions follow something approximately like the
+ // GCC design). Unfortunately, landing pads cannot be combined in a
+ // language-agnostic way: given selectors A and B, there is no way
+ // to make a single landing pad which faithfully represents the
+ // semantics of propagating an exception first through A, then
+ // through B, without knowing how the personality will interpret the
+ // (lowered form of the) selectors. This means that inlining has no
+ // choice but to crudely chain invokes (i.e., to ignore invokes in
+ // the inlined function, but to turn all unwindable calls into
+ // invokes), which is only semantically valid if every unwind stops
+ // at every landing pad.
+ //
+ // Therefore, the invoke-inline hack is to guarantee that every
+ // landing pad has a catch-all.
+ const bool UseInvokeInlineHack = true;
+
+ for (EHScopeStack::iterator ir = EHStack.begin(); ; ) {
+ assert(ir != EHStack.end() &&
+ "stack requiring landing pad is nothing but non-EH scopes?");
+
+ // If this is a terminate scope, just use the singleton terminate
+ // landing pad.
+ if (isa<EHTerminateScope>(*ir))
+ return getTerminateLandingPad();
+
+ // If this isn't an EH scope, iterate; otherwise break out.
+ if (!isNonEHScope(*ir)) break;
+ ++ir;
+
+ // We haven't checked this scope for a cached landing pad yet.
+ if (llvm::BasicBlock *LP = ir->getCachedLandingPad())
+ return LP;
+ }
+
+ // Save the current IR generation state.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // Create and configure the landing pad.
+ llvm::BasicBlock *LP = createBasicBlock("lpad");
+ EmitBlock(LP);
+
+ // Save the exception pointer. It's safe to use a single exception
+ // pointer per function because EH cleanups can never have nested
+ // try/catches.
+ llvm::CallInst *Exn =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn");
+ Exn->setDoesNotThrow();
+ Builder.CreateStore(Exn, getExceptionSlot());
+
+ // Build the selector arguments.
+ llvm::SmallVector<llvm::Value*, 8> EHSelector;
+ EHSelector.push_back(Exn);
+ EHSelector.push_back(getPersonalityFn(*this));
+
+ // Accumulate all the handlers in scope.
+ llvm::DenseMap<llvm::Value*, JumpDest> EHHandlers;
+ JumpDest CatchAll;
+ bool HasEHCleanup = false;
+ bool HasEHFilter = false;
+ llvm::SmallVector<llvm::Value*, 8> EHFilters;
+ for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end();
+ I != E; ++I) {
+
+ switch (I->getKind()) {
+ case EHScope::Cleanup:
+ if (!HasEHCleanup)
+ HasEHCleanup = cast<EHCleanupScope>(*I).isEHCleanup();
+ // We otherwise don't care about cleanups.
+ continue;
+
+ case EHScope::Filter: {
+ assert(I.next() == EHStack.end() && "EH filter is not end of EH stack");
+ assert(!CatchAll.Block && "EH filter reached after catch-all");
+
+ // Filter scopes get added to the selector in weird ways.
+ EHFilterScope &Filter = cast<EHFilterScope>(*I);
+ HasEHFilter = true;
+
+ // Add all the filter values which we aren't already explicitly
+ // catching.
+ for (unsigned I = 0, E = Filter.getNumFilters(); I != E; ++I) {
+ llvm::Value *FV = Filter.getFilter(I);
+ if (!EHHandlers.count(FV))
+ EHFilters.push_back(FV);
+ }
+ goto done;
+ }
+
+ case EHScope::Terminate:
+ // Terminate scopes are basically catch-alls.
+ assert(!CatchAll.Block);
+ CatchAll.Block = getTerminateHandler();
+ CatchAll.ScopeDepth = EHStack.getEnclosingEHCleanup(I);
+ goto done;
+
+ case EHScope::Catch:
+ break;
+ }
+
+ EHCatchScope &Catch = cast<EHCatchScope>(*I);
+ for (unsigned HI = 0, HE = Catch.getNumHandlers(); HI != HE; ++HI) {
+ EHCatchScope::Handler Handler = Catch.getHandler(HI);
+
+ // Catch-all. We should only have one of these per catch.
+ if (!Handler.Type) {
+ assert(!CatchAll.Block);
+ CatchAll.Block = Handler.Block;
+ CatchAll.ScopeDepth = EHStack.getEnclosingEHCleanup(I);
+ continue;
+ }
+
+ // Check whether we already have a handler for this type.
+ JumpDest &Dest = EHHandlers[Handler.Type];
+ if (Dest.Block) continue;
+
+ EHSelector.push_back(Handler.Type);
+ Dest.Block = Handler.Block;
+ Dest.ScopeDepth = EHStack.getEnclosingEHCleanup(I);
}
+
+ // Stop if we found a catch-all.
+ if (CatchAll.Block) break;
}
- // We use a cleanup unless there was already a catch all.
- if (!HasCatchAll) {
- SelectorArgs.push_back(Null);
+ done:
+ unsigned LastToEmitInLoop = EHSelector.size();
+
+ // If we have a catch-all, add null to the selector.
+ if (CatchAll.Block) {
+ EHSelector.push_back(getCatchAllValue(CGF));
+
+ // If we have an EH filter, we need to add those handlers in the
+ // right place in the selector, which is to say, at the end.
+ } else if (HasEHFilter) {
+ // Create a filter expression: an integer constant saying how many
+ // filters there are (+1 to avoid ambiguity with 0 for cleanup),
+ // followed by the filter types. The personality routine only
+ // lands here if the filter doesn't match.
+ EHSelector.push_back(llvm::ConstantInt::get(Builder.getInt32Ty(),
+ EHFilters.size() + 1));
+ EHSelector.append(EHFilters.begin(), EHFilters.end());
+
+ // Also check whether we need a cleanup.
+ if (UseInvokeInlineHack || HasEHCleanup)
+ EHSelector.push_back(UseInvokeInlineHack
+ ? getCatchAllValue(CGF)
+ : getCleanupValue(CGF));
+
+ // Otherwise, signal that we at least have cleanups.
+ } else if (UseInvokeInlineHack || HasEHCleanup) {
+ EHSelector.push_back(UseInvokeInlineHack
+ ? getCatchAllValue(CGF)
+ : getCleanupValue(CGF));
+ } else {
+ assert(LastToEmitInLoop > 2);
+ LastToEmitInLoop--;
}
- // Find which handler was matched.
- llvm::Value *Selector
- = Builder.CreateCall(llvm_eh_selector, SelectorArgs.begin(),
- SelectorArgs.end(), "selector");
- for (unsigned i = 0; i<S.getNumHandlers(); ++i) {
- const CXXCatchStmt *C = S.getHandler(i);
- VarDecl *CatchParam = C->getExceptionDecl();
- Stmt *CatchBody = C->getHandlerBlock();
-
- llvm::BasicBlock *Next = 0;
-
- if (SelectorArgs[i+2] != Null) {
- llvm::BasicBlock *Match = createBasicBlock("match");
- Next = createBasicBlock("catch.next");
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
- llvm::Value *Id
- = Builder.CreateCall(llvm_eh_typeid_for,
- Builder.CreateBitCast(SelectorArgs[i+2],
- Int8PtrTy));
- Builder.CreateCondBr(Builder.CreateICmpEQ(Selector, Id),
- Match, Next);
+ assert(EHSelector.size() >= 3 && "selector call has only two arguments!");
+
+ // Tell the backend how to generate the landing pad.
+ llvm::CallInst *Selection =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
+ EHSelector.begin(), EHSelector.end(), "eh.selector");
+ Selection->setDoesNotThrow();
+
+ // Select the right handler.
+ llvm::Value *llvm_eh_typeid_for =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+
+ // The results of llvm_eh_typeid_for aren't reliable --- at least
+ // not locally --- so we basically have to do this as an 'if' chain.
+ // We walk through the first N-1 catch clauses, testing and chaining,
+ // and then fall into the final clause (which is either a cleanup, a
+ // filter (possibly with a cleanup), a catch-all, or another catch).
+ for (unsigned I = 2; I != LastToEmitInLoop; ++I) {
+ llvm::Value *Type = EHSelector[I];
+ JumpDest Dest = EHHandlers[Type];
+ assert(Dest.Block && "no handler entry for value in selector?");
+
+ // Figure out where to branch on a match. As a debug code-size
+ // optimization, if the scope depth matches the innermost cleanup,
+ // we branch directly to the catch handler.
+ llvm::BasicBlock *Match = Dest.Block;
+ bool MatchNeedsCleanup = Dest.ScopeDepth != EHStack.getInnermostEHCleanup();
+ if (MatchNeedsCleanup)
+ Match = createBasicBlock("eh.match");
+
+ llvm::BasicBlock *Next = createBasicBlock("eh.next");
+
+ // Check whether the exception matches.
+ llvm::CallInst *Id
+ = Builder.CreateCall(llvm_eh_typeid_for,
+ Builder.CreateBitCast(Type, CGM.PtrToInt8Ty));
+ Id->setDoesNotThrow();
+ Builder.CreateCondBr(Builder.CreateICmpEQ(Selection, Id),
+ Match, Next);
+
+ // Emit match code if necessary.
+ if (MatchNeedsCleanup) {
EmitBlock(Match);
+ EmitBranchThroughEHCleanup(Dest);
}
- llvm::BasicBlock *MatchEnd = createBasicBlock("match.end");
- llvm::BasicBlock *MatchHandler = createBasicBlock("match.handler");
-
- PushCleanupBlock(MatchEnd);
- setInvokeDest(MatchHandler);
-
- llvm::Value *ExcObject = Builder.CreateCall(getBeginCatchFn(*this), Exc);
-
- {
- CleanupScope CatchScope(*this);
- // Bind the catch parameter if it exists.
- if (CatchParam) {
- QualType CatchType = CatchParam->getType().getNonReferenceType();
- setInvokeDest(TerminateHandler);
- bool WasPointer = true;
- bool WasPointerReference = false;
- CatchType = CGM.getContext().getCanonicalType(CatchType);
- if (CatchType.getTypePtr()->isPointerType()) {
- if (isa<ReferenceType>(CatchParam->getType()))
- WasPointerReference = true;
- } else {
- if (!isa<ReferenceType>(CatchParam->getType()))
- WasPointer = false;
- CatchType = getContext().getPointerType(CatchType);
- }
- ExcObject = Builder.CreateBitCast(ExcObject, ConvertType(CatchType));
- EmitLocalBlockVarDecl(*CatchParam);
- // FIXME: we need to do this sooner so that the EH region for the
- // cleanup doesn't start until after the ctor completes, use a decl
- // init?
- CopyObject(*this, CatchParam->getType().getNonReferenceType(),
- WasPointer, WasPointerReference, ExcObject,
- GetAddrOfLocalVar(CatchParam));
- setInvokeDest(MatchHandler);
+ // Continue to the next match.
+ EmitBlock(Next);
+ }
+
+ // Emit the final case in the selector.
+ // This might be a catch-all....
+ if (CatchAll.Block) {
+ assert(isa<llvm::ConstantPointerNull>(EHSelector.back()));
+ EmitBranchThroughEHCleanup(CatchAll);
+
+ // ...or an EH filter...
+ } else if (HasEHFilter) {
+ llvm::Value *SavedSelection = Selection;
+
+ // First, unwind out to the outermost scope if necessary.
+ if (EHStack.hasEHCleanups()) {
+ // The end here might not dominate the beginning, so we might need to
+ // save the selector if we need it.
+ llvm::AllocaInst *SelectorVar = 0;
+ if (HasEHCleanup) {
+ SelectorVar = CreateTempAlloca(Builder.getInt32Ty(), "selector.var");
+ Builder.CreateStore(Selection, SelectorVar);
}
- EmitStmt(CatchBody);
+ llvm::BasicBlock *CleanupContBB = createBasicBlock("ehspec.cleanup.cont");
+ EmitBranchThroughEHCleanup(JumpDest(CleanupContBB, EHStack.stable_end()));
+ EmitBlock(CleanupContBB);
+
+ if (HasEHCleanup)
+ SavedSelection = Builder.CreateLoad(SelectorVar, "ehspec.saved-selector");
}
- EmitBranchThroughCleanup(FinallyEnd);
+ // If there was a cleanup, we'll need to actually check whether we
+ // landed here because the filter triggered.
+ if (UseInvokeInlineHack || HasEHCleanup) {
+ llvm::BasicBlock *RethrowBB = createBasicBlock("cleanup");
+ llvm::BasicBlock *UnexpectedBB = createBasicBlock("ehspec.unexpected");
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(Builder.getInt32Ty(), 0);
+ llvm::Value *FailsFilter =
+ Builder.CreateICmpSLT(SavedSelection, Zero, "ehspec.fails");
+ Builder.CreateCondBr(FailsFilter, UnexpectedBB, RethrowBB);
+
+ // The rethrow block is where we land if this was a cleanup.
+ // TODO: can this be _Unwind_Resume if the InvokeInlineHack is off?
+ EmitBlock(RethrowBB);
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
- EmitBlock(MatchHandler);
+ EmitBlock(UnexpectedBB);
+ }
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- llvm::Value *Args[] = {
- Exc, Personality, llvm::ConstantInt::getNullValue(Int32Ty)
- };
- Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
- Builder.CreateStore(Exc, RethrowPtr);
- EmitBranchThroughCleanup(FinallyRethrow);
+ // Call __cxa_call_unexpected. This doesn't need to be an invoke
+ // because __cxa_call_unexpected magically filters exceptions
+ // according to the last landing pad the exception was thrown
+ // into. Seriously.
+ Builder.CreateCall(getUnexpectedFn(*this),
+ Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
- CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock();
+ // ...or a normal catch handler...
+ } else if (!UseInvokeInlineHack && !HasEHCleanup) {
+ llvm::Value *Type = EHSelector.back();
+ EmitBranchThroughEHCleanup(EHHandlers[Type]);
- EmitBlock(MatchEnd);
+ // ...or a cleanup.
+ } else {
+ // We emit a jump to a notional label at the outermost unwind state.
+ llvm::BasicBlock *Unwind = createBasicBlock("eh.resume");
+ JumpDest Dest(Unwind, EHStack.stable_end());
+ EmitBranchThroughEHCleanup(Dest);
+
+ // The unwind block. We have to reload the exception here because
+ // we might have unwound through arbitrary blocks, so the landing
+ // pad might not dominate.
+ EmitBlock(Unwind);
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getEndCatchFn(*this),
- Cont, TerminateHandler,
- &Args[0], &Args[0]);
- EmitBlock(Cont);
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
+ // This can always be a call because we necessarily didn't find
+ // anything on the EH stack which needs our help.
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotReturn();
+ Builder.CreateUnreachable();
+ }
+
+ // Restore the old IR generation state.
+ Builder.restoreIP(SavedIP);
+
+ return LP;
+}
- Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- Builder.CreateStore(Exc, RethrowPtr);
- EmitBranchThroughCleanup(FinallyRethrow);
+/// Emits a call to __cxa_begin_catch and enters a cleanup to call
+/// __cxa_end_catch.
+static llvm::Value *CallBeginCatch(CodeGenFunction &CGF, llvm::Value *Exn) {
+ llvm::CallInst *Call = CGF.Builder.CreateCall(getBeginCatchFn(CGF), Exn);
+ Call->setDoesNotThrow();
- if (Next)
- EmitBlock(Next);
+ {
+ CodeGenFunction::CleanupBlock EndCatchCleanup(CGF,
+ CodeGenFunction::NormalAndEHCleanup);
+
+ // __cxa_end_catch never throws, so this can just be a call.
+ CGF.Builder.CreateCall(getEndCatchFn(CGF))->setDoesNotThrow();
}
- if (!HasCatchAll) {
- Builder.CreateStore(Exc, RethrowPtr);
- EmitBranchThroughCleanup(FinallyRethrow);
+
+ return Call;
+}
+
+/// A "special initializer" callback for initializing a catch
+/// parameter during catch initialization.
+static void InitCatchParam(CodeGenFunction &CGF,
+ const VarDecl &CatchParam,
+ llvm::Value *ParamAddr) {
+ // Load the exception from where the landing pad saved it.
+ llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn");
+
+ CanQualType CatchType =
+ CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
+ const llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
+
+ // If we're catching by reference, we can just cast the object
+ // pointer to the appropriate pointer.
+ if (isa<ReferenceType>(CatchType)) {
+ // __cxa_begin_catch returns the adjusted object pointer.
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn);
+ llvm::Value *ExnCast =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
+ CGF.Builder.CreateStore(ExnCast, ParamAddr);
+ return;
}
- CodeGenFunction::CleanupBlockInfo Info = PopCleanupBlock();
+ // Non-aggregates (plus complexes).
+ bool IsComplex = false;
+ if (!CGF.hasAggregateLLVMType(CatchType) ||
+ (IsComplex = CatchType->isAnyComplexType())) {
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn);
+
+ // If the catch type is a pointer type, __cxa_begin_catch returns
+ // the pointer by value.
+ if (CatchType->hasPointerRepresentation()) {
+ llvm::Value *CastExn =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
+ CGF.Builder.CreateStore(CastExn, ParamAddr);
+ return;
+ }
- setInvokeDest(PrevLandingPad);
+ // Otherwise, it returns a pointer into the exception object.
- EmitBlock(FinallyBlock);
+ const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
+ if (IsComplex) {
+ CGF.StoreComplexToAddr(CGF.LoadComplexFromAddr(Cast, /*volatile*/ false),
+ ParamAddr, /*volatile*/ false);
+ } else {
+ llvm::Value *ExnLoad = CGF.Builder.CreateLoad(Cast, "exn.scalar");
+ CGF.EmitStoreOfScalar(ExnLoad, ParamAddr, /*volatile*/ false, CatchType);
+ }
+ return;
+ }
- // Branch around the rethrow code.
- EmitBranch(FinallyEnd);
+ // FIXME: this *really* needs to be done via a proper, Sema-emitted
+ // initializer expression.
- EmitBlock(FinallyRethrow);
- // FIXME: Eventually we can chain the handlers together and just do a call
- // here.
- if (getInvokeDest()) {
- llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getUnwindResumeOrRethrowFn(), Cont,
- getInvokeDest(),
- Builder.CreateLoad(RethrowPtr));
- EmitBlock(Cont);
- } else
- Builder.CreateCall(getUnwindResumeOrRethrowFn(),
- Builder.CreateLoad(RethrowPtr));
+ CXXRecordDecl *RD = CatchType.getTypePtr()->getAsCXXRecordDecl();
+ assert(RD && "aggregate catch type was not a record!");
- Builder.CreateUnreachable();
+ const llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+
+ if (RD->hasTrivialCopyConstructor()) {
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn);
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+ CGF.EmitAggregateCopy(ParamAddr, Cast, CatchType);
+ return;
+ }
+
+ // We have to call __cxa_get_exception_ptr to get the adjusted
+ // pointer before copying.
+ llvm::CallInst *AdjustedExn =
+ CGF.Builder.CreateCall(getGetExceptionPtrFn(CGF), Exn);
+ AdjustedExn->setDoesNotThrow();
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+
+ CXXConstructorDecl *CD = RD->getCopyConstructor(CGF.getContext(), 0);
+ assert(CD && "record has no copy constructor!");
+ llvm::Value *CopyCtor = CGF.CGM.GetAddrOfCXXConstructor(CD, Ctor_Complete);
+
+ CallArgList CallArgs;
+ CallArgs.push_back(std::make_pair(RValue::get(ParamAddr),
+ CD->getThisType(CGF.getContext())));
+ CallArgs.push_back(std::make_pair(RValue::get(Cast),
+ CD->getParamDecl(0)->getType()));
+
+ const FunctionProtoType *FPT
+ = CD->getType()->getAs<FunctionProtoType>();
+
+ // Call the copy ctor in a terminate scope.
+ CGF.EHStack.pushTerminate();
+ CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
+ CopyCtor, ReturnValueSlot(), CallArgs, CD);
+ CGF.EHStack.popTerminate();
+
+ // Finally we can call __cxa_begin_catch.
+ CallBeginCatch(CGF, Exn);
+}
+
+/// Begins a catch statement by initializing the catch variable and
+/// calling __cxa_begin_catch.
+static void BeginCatch(CodeGenFunction &CGF,
+ const CXXCatchStmt *S) {
+ // We have to be very careful with the ordering of cleanups here:
+ // C++ [except.throw]p4:
+ // The destruction [of the exception temporary] occurs
+ // immediately after the destruction of the object declared in
+ // the exception-declaration in the handler.
+ //
+ // So the precise ordering is:
+ // 1. Construct catch variable.
+ // 2. __cxa_begin_catch
+ // 3. Enter __cxa_end_catch cleanup
+ // 4. Enter dtor cleanup
+ //
+ // We do this by initializing the exception variable with a
+ // "special initializer", InitCatchParam. Delegation sequence:
+ // - ExitCXXTryStmt opens a RunCleanupsScope
+ // - EmitLocalBlockVarDecl creates the variable and debug info
+ // - InitCatchParam initializes the variable from the exception
+ // - CallBeginCatch calls __cxa_begin_catch
+ // - CallBeginCatch enters the __cxa_end_catch cleanup
+ // - EmitLocalBlockVarDecl enters the variable destructor cleanup
+ // - EmitCXXTryStmt emits the code for the catch body
+ // - EmitCXXTryStmt closes the RunCleanupsScope
+
+ VarDecl *CatchParam = S->getExceptionDecl();
+ if (!CatchParam) {
+ llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot(), "exn");
+ CallBeginCatch(CGF, Exn);
+ return;
+ }
+
+ // Emit the local.
+ CGF.EmitLocalBlockVarDecl(*CatchParam, &InitCatchParam);
+}
+
+void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S,
+ CXXTryStmtInfo TryInfo) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
+ assert(CatchScope.getNumHandlers() == NumHandlers);
+
+ // Copy the handler blocks off before we pop the EH stack. Emitting
+ // the handlers might scribble on this memory.
+ llvm::SmallVector<EHCatchScope::Handler, 8> Handlers(NumHandlers);
+ memcpy(Handlers.data(), CatchScope.begin(),
+ NumHandlers * sizeof(EHCatchScope::Handler));
+ EHStack.popCatch();
+
+ // The fall-through block.
+ llvm::BasicBlock *ContBB = createBasicBlock("try.cont");
+
+ // We just emitted the body of the try; jump to the continue block.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+
+ for (unsigned I = 0; I != NumHandlers; ++I) {
+ llvm::BasicBlock *CatchBlock = Handlers[I].Block;
+ EmitBlock(CatchBlock);
+
+ // Catch the exception if this isn't a catch-all.
+ const CXXCatchStmt *C = S.getHandler(I);
+
+ // Enter a cleanup scope, including the catch variable and the
+ // end-catch.
+ RunCleanupsScope CatchScope(*this);
+
+ // Initialize the catch variable and set up the cleanups.
+ BeginCatch(*this, C);
+
+ // Perform the body of the catch.
+ EmitStmt(C->getHandlerBlock());
+
+ // Fall out through the catch cleanups.
+ CatchScope.ForceCleanup();
+
+ // Branch out of the try.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+ }
- EmitBlock(FinallyEnd);
-}
-
-CodeGenFunction::EHCleanupBlock::~EHCleanupBlock() {
- CGF.setInvokeDest(PreviousInvokeDest);
-
- llvm::BasicBlock *EndOfCleanup = CGF.Builder.GetInsertBlock();
-
- // Jump to the beginning of the cleanup.
- CGF.Builder.SetInsertPoint(CleanupHandler, CleanupHandler->begin());
-
- // The libstdc++ personality function.
- // TODO: generalize to work with other libraries.
- llvm::Constant *Personality = getPersonalityFn(CGF.CGM);
-
- // %exception = call i8* @llvm.eh.exception()
- // Magic intrinsic which tells gives us a handle to the caught
- // exception.
- llvm::Value *llvm_eh_exception =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
-
- llvm::Constant *Null = llvm::ConstantPointerNull::get(CGF.PtrToInt8Ty);
-
- // %ignored = call i32 @llvm.eh.selector(i8* %exception,
- // i8* @__gxx_personality_v0,
- // i8* null)
- // Magic intrinsic which tells LLVM that this invoke landing pad is
- // just a cleanup block.
- llvm::Value *Args[] = { Exc, Personality, Null };
- llvm::Value *llvm_eh_selector =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- CGF.Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
-
- // And then we fall through into the code that the user put there.
- // Jump back to the end of the cleanup.
- CGF.Builder.SetInsertPoint(EndOfCleanup);
-
- // Rethrow the exception.
- if (CGF.getInvokeDest()) {
- llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
- CGF.Builder.CreateInvoke(CGF.getUnwindResumeOrRethrowFn(), Cont,
- CGF.getInvokeDest(), Exc);
- CGF.EmitBlock(Cont);
- } else
- CGF.Builder.CreateCall(CGF.getUnwindResumeOrRethrowFn(), Exc);
+ EmitBlock(ContBB);
+}
+
+/// Enters a finally block for an implementation using zero-cost
+/// exceptions. This is mostly general, but hard-codes some
+/// language/ABI-specific behavior in the catch-all sections.
+CodeGenFunction::FinallyInfo
+CodeGenFunction::EnterFinallyBlock(const Stmt *Body,
+ llvm::Constant *BeginCatchFn,
+ llvm::Constant *EndCatchFn,
+ llvm::Constant *RethrowFn) {
+ assert((BeginCatchFn != 0) == (EndCatchFn != 0) &&
+ "begin/end catch functions not paired");
+ assert(RethrowFn && "rethrow function is required");
+
+ // The rethrow function has one of the following two types:
+ // void (*)()
+ // void (*)(void*)
+ // In the latter case we need to pass it the exception object.
+ // But we can't use the exception slot because the @finally might
+ // have a landing pad (which would overwrite the exception slot).
+ const llvm::FunctionType *RethrowFnTy =
+ cast<llvm::FunctionType>(
+ cast<llvm::PointerType>(RethrowFn->getType())
+ ->getElementType());
+ llvm::Value *SavedExnVar = 0;
+ if (RethrowFnTy->getNumParams())
+ SavedExnVar = CreateTempAlloca(Builder.getInt8PtrTy(), "finally.exn");
+
+ // A finally block is a statement which must be executed on any edge
+ // out of a given scope. Unlike a cleanup, the finally block may
+ // contain arbitrary control flow leading out of itself. In
+ // addition, finally blocks should always be executed, even if there
+ // are no catch handlers higher on the stack. Therefore, we
+ // surround the protected scope with a combination of a normal
+ // cleanup (to catch attempts to break out of the block via normal
+ // control flow) and an EH catch-all (semantically "outside" any try
+ // statement to which the finally block might have been attached).
+ // The finally block itself is generated in the context of a cleanup
+ // which conditionally leaves the catch-all.
+
+ FinallyInfo Info;
+
+ // Jump destination for performing the finally block on an exception
+ // edge. We'll never actually reach this block, so unreachable is
+ // fine.
+ JumpDest RethrowDest = getJumpDestInCurrentScope(getUnreachableBlock());
+
+ // Whether the finally block is being executed for EH purposes.
+ // Use our own Builder: there is no 'CGF' in scope inside a
+ // CodeGenFunction member function.
+ llvm::AllocaInst *ForEHVar = CreateTempAlloca(Builder.getInt1Ty(),
+ "finally.for-eh");
+ InitTempAlloca(ForEHVar, llvm::ConstantInt::getFalse(getLLVMContext()));
+
+ // Enter a normal cleanup which will perform the @finally block.
+ {
+ CodeGenFunction::CleanupBlock
+ NormalCleanup(*this, CodeGenFunction::NormalCleanup);
+
+ // Enter a cleanup to call the end-catch function if one was provided.
+ if (EndCatchFn) {
+ CodeGenFunction::CleanupBlock
+ FinallyExitCleanup(*this, CodeGenFunction::NormalAndEHCleanup);
+
+ llvm::BasicBlock *EndCatchBB = createBasicBlock("finally.endcatch");
+ llvm::BasicBlock *CleanupContBB = createBasicBlock("finally.cleanup.cont");
+
+ llvm::Value *ShouldEndCatch =
+ Builder.CreateLoad(ForEHVar, "finally.endcatch");
+ Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
+ EmitBlock(EndCatchBB);
+ Builder.CreateCall(EndCatchFn)->setDoesNotThrow();
+ EmitBlock(CleanupContBB);
+ }
+
+ // Emit the finally block.
+ EmitStmt(Body);
+
+ // If the end of the finally is reachable, check whether this was
+ // for EH. If so, rethrow.
+ if (HaveInsertPoint()) {
+ llvm::BasicBlock *RethrowBB = createBasicBlock("finally.rethrow");
+ llvm::BasicBlock *ContBB = createBasicBlock("finally.cont");
+
+ llvm::Value *ShouldRethrow =
+ Builder.CreateLoad(ForEHVar, "finally.shouldthrow");
+ Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
+
+ EmitBlock(RethrowBB);
+ if (SavedExnVar) {
+ llvm::Value *Args[] = { Builder.CreateLoad(SavedExnVar) };
+ EmitCallOrInvoke(RethrowFn, Args, Args+1);
+ } else {
+ EmitCallOrInvoke(RethrowFn, 0, 0);
+ }
+ Builder.CreateUnreachable();
+
+ EmitBlock(ContBB);
+ }
+
+ // Leave the end-catch cleanup. As an optimization, pretend that
+ // the fallthrough path was inaccessible; we've dynamically proven
+ // that we're not in the EH case along that path.
+ if (EndCatchFn) {
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ PopCleanupBlock();
+ Builder.restoreIP(SavedIP);
+ }
+
+ // Now make sure we actually have an insertion point or the
+ // cleanup gods will hate us.
+ EnsureInsertPoint();
+ }
+
+ // Enter a catch-all scope.
+ llvm::BasicBlock *CatchAllBB = createBasicBlock("finally.catchall");
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
+ Builder.SetInsertPoint(CatchAllBB);
+
+ // If there's a begin-catch function, call it.
+ if (BeginCatchFn) {
+ Builder.CreateCall(BeginCatchFn, Builder.CreateLoad(getExceptionSlot()))
+ ->setDoesNotThrow();
+ }
+
+ // If we need to remember the exception pointer to rethrow later, do so.
+ if (SavedExnVar) {
+ llvm::Value *SavedExn = Builder.CreateLoad(getExceptionSlot());
+ Builder.CreateStore(SavedExn, SavedExnVar);
+ }
+
+ // Tell the finally block that we're in EH.
+ Builder.CreateStore(llvm::ConstantInt::getTrue(getLLVMContext()), ForEHVar);
+
+ // Thread a jump through the finally cleanup.
+ EmitBranchThroughCleanup(RethrowDest);
+
+ Builder.restoreIP(SavedIP);
+
+ EHCatchScope *CatchScope = EHStack.pushCatch(1);
+ CatchScope->setCatchAllHandler(0, CatchAllBB);
+
+ return Info;
+}
+
+/// Exits a finally block entered via EnterFinallyBlock: pops the
+/// catch-all scope pushed there, pops the enclosing normal cleanup,
+/// and emits the catch-all block at the current end of the function.
+/// NOTE(review): Info is currently unused here -- confirm whether
+/// FinallyInfo is meant to carry state between enter and exit.
+void CodeGenFunction::ExitFinallyBlock(FinallyInfo &Info) {
+ // Leave the finally catch-all.
+ EHCatchScope &Catch = cast<EHCatchScope>(*EHStack.begin());
+ llvm::BasicBlock *CatchAllBB = Catch.getHandler(0).Block;
+ EHStack.popCatch();
+
+ // And leave the normal cleanup.
+ PopCleanupBlock();
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ EmitBlock(CatchAllBB, true);
+
+ Builder.restoreIP(SavedIP);
+}
+
+/// Lazily creates and caches a landing-pad block that calls the
+/// terminate function and never returns.
+llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
+ if (TerminateLandingPad)
+ return TerminateLandingPad;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // This will get inserted at the end of the function.
+ TerminateLandingPad = createBasicBlock("terminate.lpad");
+ Builder.SetInsertPoint(TerminateLandingPad);
+
+ // Tell the backend that this is a landing pad.
+ llvm::CallInst *Exn =
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_exception), "exn");
+ Exn->setDoesNotThrow();
+
+ // Tell the backend what the exception table should be:
+ // nothing but a catch-all.
+ llvm::Value *Args[3] = { Exn, getPersonalityFn(*this),
+ getCatchAllValue(*this) };
+ Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::eh_selector),
+ Args, Args+3, "eh.selector")
+ ->setDoesNotThrow();
+
+ llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
+ TerminateCall->setDoesNotReturn();
+ TerminateCall->setDoesNotThrow();
+ // Use our own Builder: 'CGF' is not in scope inside a CodeGenFunction
+ // member function (the shared context line below belonged to the
+ // deleted EHCleanupBlock destructor, where CGF was a member).
- CGF.Builder.CreateUnreachable();
+ Builder.CreateUnreachable();
- // Resume inserting where we started, but put the new cleanup
- // handler in place.
- if (PreviousInsertionBlock)
- CGF.Builder.SetInsertPoint(PreviousInsertionBlock);
- else
- CGF.Builder.ClearInsertionPoint();
+ // Restore the saved insertion state.
+ Builder.restoreIP(SavedIP);
- if (CGF.Exceptions)
- CGF.setInvokeDest(CleanupHandler);
+ return TerminateLandingPad;
}
+/// Lazily creates and caches a block that calls the terminate
+/// function and never returns; callers branch here when an exception
+/// must not propagate further.
llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
if (TerminateHandler)
return TerminateHandler;
- // We don't want to change anything at the current location, so
- // save it aside and clear the insert point.
- llvm::BasicBlock *SavedInsertBlock = Builder.GetInsertBlock();
- llvm::BasicBlock::iterator SavedInsertPoint = Builder.GetInsertPoint();
- Builder.ClearInsertionPoint();
-
- llvm::Constant *Personality = getPersonalityFn(CGM);
- llvm::Value *llvm_eh_exception =
- CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
- // Set up terminate handler
+ // Set up the terminate handler. This block is inserted at the very
+ // end of the function by FinishFunction.
TerminateHandler = createBasicBlock("terminate.handler");
- EmitBlock(TerminateHandler);
- llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- llvm::Value *Args[] = {
- Exc, Personality, llvm::ConstantInt::get(Int32Ty, 1)
- };
- Builder.CreateCall(llvm_eh_selector, &Args[0], llvm::array_endof(Args));
- llvm::CallInst *TerminateCall =
- Builder.CreateCall(getTerminateFn(*this));
+ Builder.SetInsertPoint(TerminateHandler);
+ llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
TerminateCall->setDoesNotReturn();
TerminateCall->setDoesNotThrow();
Builder.CreateUnreachable();
// Restore the saved insertion state.
- Builder.SetInsertPoint(SavedInsertBlock, SavedInsertPoint);
+ Builder.restoreIP(SavedIP);
return TerminateHandler;
}
+
+/// Begins writing a cleanup out-of-line: saves the current insertion
+/// point and redirects the builder into a fresh "cleanup" block.
+/// The cleanup is registered with the EH stack in the destructor.
+CodeGenFunction::CleanupBlock::CleanupBlock(CodeGenFunction &CGF,
+ CleanupKind Kind)
+ : CGF(CGF), SavedIP(CGF.Builder.saveIP()), NormalCleanupExitBB(0) {
+ llvm::BasicBlock *EntryBB = CGF.createBasicBlock("cleanup");
+ CGF.Builder.SetInsertPoint(EntryBB);
+
+ switch (Kind) {
+ case NormalAndEHCleanup:
+ // One shared entry block serves both the normal and EH paths.
+ NormalCleanupEntryBB = EHCleanupEntryBB = EntryBB;
+ break;
+
+ case NormalCleanup:
+ NormalCleanupEntryBB = EntryBB;
+ EHCleanupEntryBB = 0;
+ break;
+
+ case EHCleanup:
+ NormalCleanupEntryBB = 0;
+ EHCleanupEntryBB = EntryBB;
+ // Push a terminate scope for the duration of the EH cleanup;
+ // the matching popTerminate happens in the destructor.
+ CGF.EHStack.pushTerminate();
+ break;
+ }
+}
+
+/// Switches from writing the normal cleanup to writing a separate EH
+/// cleanup: records the normal exit block and redirects the builder
+/// into a fresh "eh.cleanup" block.
+void CodeGenFunction::CleanupBlock::beginEHCleanup() {
+ assert(EHCleanupEntryBB == 0 && "already started an EH cleanup");
+ NormalCleanupExitBB = CGF.Builder.GetInsertBlock();
+ assert(NormalCleanupExitBB && "end of normal cleanup is unreachable");
+
+ EHCleanupEntryBB = CGF.createBasicBlock("eh.cleanup");
+ CGF.Builder.SetInsertPoint(EHCleanupEntryBB);
+ // Terminate scope covers the EH cleanup; popped in the destructor.
+ CGF.EHStack.pushTerminate();
+}
+
+/// Finishes the cleanup: records the exit block(s), registers the
+/// cleanup on the EH stack, and restores the saved insertion point.
+CodeGenFunction::CleanupBlock::~CleanupBlock() {
+ llvm::BasicBlock *EHCleanupExitBB = 0;
+
+ // If we're currently writing the EH cleanup...
+ if (EHCleanupEntryBB) {
+ // Set the EH cleanup exit block.
+ EHCleanupExitBB = CGF.Builder.GetInsertBlock();
+ assert(EHCleanupExitBB && "end of EH cleanup is unreachable");
+
+ // If we're actually writing both at once, set the normal exit, too.
+ if (EHCleanupEntryBB == NormalCleanupEntryBB)
+ NormalCleanupExitBB = EHCleanupExitBB;
+
+ // Otherwise, we must have pushed a terminate handler.
+ else
+ CGF.EHStack.popTerminate();
+
+ // Otherwise, just set the normal cleanup exit block.
+ } else {
+ NormalCleanupExitBB = CGF.Builder.GetInsertBlock();
+ assert(NormalCleanupExitBB && "end of normal cleanup is unreachable");
+ }
+
+ CGF.EHStack.pushCleanup(NormalCleanupEntryBB, NormalCleanupExitBB,
+ EHCleanupEntryBB, EHCleanupExitBB);
+
+ CGF.Builder.restoreIP(SavedIP);
+}
+
--- /dev/null
+//===-- CGException.h - Classes for exceptions IR generation ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of LLVM IR for exceptions in
+// C++ and Objective C.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGEXCEPTION_H
+#define CLANG_CODEGEN_CGEXCEPTION_H
+
+/// EHScopeStack is defined in CodeGenFunction.h, but its
+/// implementation is in this file and in CGException.cpp.
+#include "CodeGenFunction.h"
+
+namespace llvm {
+ class Value;
+ class BasicBlock;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// A protected scope for zero-cost EH handling.
+class EHScope {
+ llvm::BasicBlock *CachedLandingPad;
+
+ // Discriminator for the four scope kinds; subclasses may use the
+ // remaining bits of this word (see BitsRemaining).
+ unsigned K : 2;
+
+protected:
+ enum { BitsRemaining = 30 };
+
+public:
+ enum Kind { Cleanup, Catch, Terminate, Filter };
+
+ EHScope(Kind K) : CachedLandingPad(0), K(K) {}
+
+ Kind getKind() const { return static_cast<Kind>(K); }
+
+ llvm::BasicBlock *getCachedLandingPad() const {
+ return CachedLandingPad;
+ }
+
+ void setCachedLandingPad(llvm::BasicBlock *Block) {
+ CachedLandingPad = Block;
+ }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective C @finally blocks are represented using a cleanup scope
+/// after the catch scope.
+class EHCatchScope : public EHScope {
+ // Number of handlers, stored in the bits EHScope leaves free.
+ unsigned NumHandlers : BitsRemaining;
+
+ // In effect, we have a flexible array member
+ // Handler Handlers[0];
+ // But that's only standard in C99, not C++, so we have to do
+ // annoying pointer arithmetic instead.
+
+public:
+ struct Handler {
+ /// A type info value, or null (C++ null, not an LLVM null pointer)
+ /// for a catch-all.
+ llvm::Value *Type;
+
+ /// The catch handler for this type.
+ llvm::BasicBlock *Block;
+
+ static Handler make(llvm::Value *Type, llvm::BasicBlock *Block) {
+ Handler Temp;
+ Temp.Type = Type;
+ Temp.Block = Block;
+ return Temp;
+ }
+ };
+
+private:
+ // The handlers live in memory immediately following this object.
+ Handler *getHandlers() {
+ return reinterpret_cast<Handler*>(this+1);
+ }
+
+ const Handler *getHandlers() const {
+ return reinterpret_cast<const Handler*>(this+1);
+ }
+
+public:
+ // Allocation size for a scope with N trailing handlers.
+ static size_t getSizeForNumHandlers(unsigned N) {
+ return sizeof(EHCatchScope) + N * sizeof(Handler);
+ }
+
+ EHCatchScope(unsigned NumHandlers)
+ : EHScope(Catch), NumHandlers(NumHandlers) {
+ }
+
+ unsigned getNumHandlers() const {
+ return NumHandlers;
+ }
+
+ // A null type marks the handler as a catch-all.
+ void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
+ setHandler(I, /*catchall*/ 0, Block);
+ }
+
+ void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) {
+ assert(I < getNumHandlers());
+ getHandlers()[I] = Handler::make(Type, Block);
+ }
+
+ const Handler &getHandler(unsigned I) const {
+ assert(I < getNumHandlers());
+ return getHandlers()[I];
+ }
+
+ typedef const Handler *iterator;
+ iterator begin() const { return getHandlers(); }
+ iterator end() const { return getHandlers() + getNumHandlers(); }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Catch;
+ }
+};
+
+/// A scope which needs to execute some code if we try to unwind ---
+/// either normally, via the EH mechanism, or both --- through it.
+class EHCleanupScope : public EHScope {
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned FixupDepth : BitsRemaining;
+
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingNormal;
+
+ /// The nearest EH cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingEH;
+
+ // Entry/exit blocks for the normal-control-flow and EH paths.
+ // A null entry means the cleanup does not apply on that path
+ // (see isNormalCleanup/isEHCleanup).
+ llvm::BasicBlock *NormalEntry;
+ llvm::BasicBlock *NormalExit;
+ llvm::BasicBlock *EHEntry;
+ llvm::BasicBlock *EHExit;
+
+public:
+ static size_t getSize() { return sizeof(EHCleanupScope); }
+
+ EHCleanupScope(unsigned FixupDepth,
+ EHScopeStack::stable_iterator EnclosingNormal,
+ EHScopeStack::stable_iterator EnclosingEH,
+ llvm::BasicBlock *NormalEntry, llvm::BasicBlock *NormalExit,
+ llvm::BasicBlock *EHEntry, llvm::BasicBlock *EHExit)
+ : EHScope(Cleanup), FixupDepth(FixupDepth),
+ EnclosingNormal(EnclosingNormal), EnclosingEH(EnclosingEH),
+ NormalEntry(NormalEntry), NormalExit(NormalExit),
+ EHEntry(EHEntry), EHExit(EHExit) {
+ assert((NormalEntry != 0) == (NormalExit != 0));
+ assert((EHEntry != 0) == (EHExit != 0));
+ }
+
+ bool isNormalCleanup() const { return NormalEntry != 0; }
+ bool isEHCleanup() const { return EHEntry != 0; }
+
+ llvm::BasicBlock *getNormalEntry() const { return NormalEntry; }
+ llvm::BasicBlock *getNormalExit() const { return NormalExit; }
+ llvm::BasicBlock *getEHEntry() const { return EHEntry; }
+ llvm::BasicBlock *getEHExit() const { return EHExit; }
+ unsigned getFixupDepth() const { return FixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return EnclosingNormal;
+ }
+ EHScopeStack::stable_iterator getEnclosingEHCleanup() const {
+ return EnclosingEH;
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Cleanup;
+ }
+};
+
+/// An exceptions scope which filters exceptions thrown through it.
+/// Only exceptions matching the filter types will be permitted to be
+/// thrown.
+///
+/// This is used to implement C++ exception specifications.
+class EHFilterScope : public EHScope {
+ unsigned NumFilters : BitsRemaining;
+
+ // Essentially ends in a flexible array member:
+ // llvm::Value *FilterTypes[0];
+
+ // The filter values live in memory immediately following this object.
+ llvm::Value **getFilters() {
+ return reinterpret_cast<llvm::Value**>(this+1);
+ }
+
+ llvm::Value * const *getFilters() const {
+ return reinterpret_cast<llvm::Value* const *>(this+1);
+ }
+
+public:
+ EHFilterScope(unsigned NumFilters) :
+ EHScope(Filter), NumFilters(NumFilters) {}
+
+ // Allocation size for a scope with the given trailing filter count.
+ static size_t getSizeForNumFilters(unsigned NumFilters) {
+ return sizeof(EHFilterScope) + NumFilters * sizeof(llvm::Value*);
+ }
+
+ unsigned getNumFilters() const { return NumFilters; }
+
+ void setFilter(unsigned I, llvm::Value *FilterValue) {
+ assert(I < getNumFilters());
+ getFilters()[I] = FilterValue;
+ }
+
+ llvm::Value *getFilter(unsigned I) const {
+ assert(I < getNumFilters());
+ return getFilters()[I];
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Filter;
+ }
+};
+
+/// An exceptions scope which calls std::terminate if any exception
+/// reaches it.
+class EHTerminateScope : public EHScope {
+public:
+ // Carries no payload; the kind alone identifies it.
+ EHTerminateScope() : EHScope(Terminate) {}
+ static size_t getSize() { return sizeof(EHTerminateScope); }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Terminate;
+ }
+};
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+ // Raw pointer into the stack's buffer; scopes are variably sized,
+ // so advancing requires inspecting the current scope's kind.
+ char *Ptr;
+
+ friend class EHScopeStack;
+ explicit iterator(char *Ptr) : Ptr(Ptr) {}
+
+public:
+ iterator() : Ptr(0) {}
+
+ EHScope *get() const {
+ return reinterpret_cast<EHScope*>(Ptr);
+ }
+
+ EHScope *operator->() const { return get(); }
+ EHScope &operator*() const { return *get(); }
+
+ // Advance past the current scope record by its dynamic size.
+ iterator &operator++() {
+ switch (get()->getKind()) {
+ case EHScope::Catch:
+ Ptr += EHCatchScope::getSizeForNumHandlers(
+ static_cast<const EHCatchScope*>(get())->getNumHandlers());
+ break;
+
+ case EHScope::Filter:
+ Ptr += EHFilterScope::getSizeForNumFilters(
+ static_cast<const EHFilterScope*>(get())->getNumFilters());
+ break;
+
+ case EHScope::Cleanup:
+ Ptr += EHCleanupScope::getSize();
+ break;
+
+ case EHScope::Terminate:
+ Ptr += EHTerminateScope::getSize();
+ break;
+ }
+
+ return *this;
+ }
+
+ iterator next() {
+ iterator copy = *this;
+ ++copy;
+ return copy;
+ }
+
+ iterator operator++(int) {
+ iterator copy = *this;
+ operator++();
+ return copy;
+ }
+
+ bool operator==(iterator other) const { return Ptr == other.Ptr; }
+ bool operator!=(iterator other) const { return Ptr != other.Ptr; }
+};
+
+// The innermost (most recently pushed) scope.
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+ return iterator(StartOfData);
+}
+
+// One past the outermost scope.
+inline EHScopeStack::iterator EHScopeStack::end() const {
+ return iterator(EndOfBuffer);
+}
+
+// Pops the innermost scope, which must be a catch scope.
+inline void EHScopeStack::popCatch() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCatchScope>(*begin()));
+ StartOfData += EHCatchScope::getSizeForNumHandlers(
+ cast<EHCatchScope>(*begin()).getNumHandlers());
+
+ assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
+ CatchDepth--;
+}
+
+// Pops the innermost scope, which must be a terminate scope.
+inline void EHScopeStack::popTerminate() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHTerminateScope>(*begin()));
+ StartOfData += EHTerminateScope::getSize();
+
+ assert(CatchDepth > 0 && "mismatched catch/terminate push/pop");
+ CatchDepth--;
+}
+
+// Turns a stable_iterator (an offset from the end of the buffer)
+// back into a direct iterator.
+inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
+ assert(sp.isValid() && "finding invalid savepoint");
+ assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
+ return iterator(EndOfBuffer - sp.Size);
+}
+
+// Converts a direct iterator into a stable_iterator that remains
+// valid across pushes and pops above it.
+inline EHScopeStack::stable_iterator
+EHScopeStack::stabilize(iterator ir) const {
+ assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
+ return stable_iterator(EndOfBuffer - ir.Ptr);
+}
+
+}
+}
+
+#endif
E = DAE->getExpr();
if (const CXXExprWithTemporaries *TE = dyn_cast<CXXExprWithTemporaries>(E)) {
- CodeGenFunction::CXXTemporariesCleanupScope Scope(CGF);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
return EmitExprForReferenceBinding(CGF, TE->getSubExpr(),
ReferenceTemporary,
}
}
- {
- DelayedCleanupBlock Scope(*this);
- EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete,
- /*ForVirtualBase=*/false, ReferenceTemporary);
+ CleanupBlock Cleanup(*this, NormalCleanup);
+ EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, ReferenceTemporary);
- // Make sure to jump to the exit block.
- EmitBranch(Scope.getCleanupExitBlock());
- }
-
if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
+ Cleanup.beginEHCleanup();
EmitCXXDestructorCall(ReferenceTemporaryDtor, Dtor_Complete,
/*ForVirtualBase=*/false, ReferenceTemporary);
}
LValue
CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
LValue LV = EmitLValue(E->getSubExpr());
- PushCXXTemporary(E->getTemporary(), LV.getAddress());
+ // Register the temporary's destructor with the new cleanup machinery.
+ EmitCXXTemporary(E->getTemporary(), LV.getAddress());
return LV;
}
// Don't make this a live temporary if we're emitting an initializer expr.
if (!IsInitializer)
- CGF.PushCXXTemporary(E->getTemporary(), Val);
+ CGF.EmitCXXTemporary(E->getTemporary(), Val);
}
void
llvm::ConstantInt::get(UnsignedLongLTy, 1));
Builder.CreateStore(Counter, CounterPtr);
- llvm::BasicBlock *LoopEnd = createBasicBlock("loopend");
- llvm::BasicBlock *AfterBody = createBasicBlock("afterbody");
+ JumpDest LoopEnd = getJumpDestInCurrentScope("loopend");
+ JumpDest AfterBody = getJumpDestInCurrentScope("afterbody");
BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
BreakContinueStack.pop_back();
- EmitBlock(AfterBody);
+ EmitBlock(AfterBody.Block);
llvm::BasicBlock *FetchMore = createBasicBlock("fetchmore");
LV.getAddress());
}
- EmitBlock(LoopEnd);
+ EmitBlock(LoopEnd.Block);
}
void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
- CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+ // @try now has a dedicated runtime entry point.
+ CGM.getObjCRuntime().EmitTryStmt(*this, S);
}
void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
void CodeGenFunction::EmitObjCAtSynchronizedStmt(
const ObjCAtSynchronizedStmt &S) {
- CGM.getObjCRuntime().EmitTryOrSynchronizedStmt(*this, S);
+ // @synchronized now has a dedicated runtime entry point.
+ CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
}
+// Out-of-line, empty destructor; the abstract runtime interface owns
+// no resources of its own.
CGObjCRuntime::~CGObjCRuntime() {}
+
+
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
+#include "CGException.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
virtual llvm::Function *GetCopyStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
- virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S);
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S);
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
-void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S) {
- // Pointer to the personality function
- llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(VMContext),
- true),
- "__gnu_objc_personality_v0");
- Personality = llvm::ConstantExpr::getBitCast(Personality, PtrTy);
- std::vector<const llvm::Type*> Params;
- Params.push_back(PtrTy);
- llvm::Value *RethrowFn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- Params, false), "_Unwind_Resume");
-
- bool isTry = isa<ObjCAtTryStmt>(S);
- llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
- llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
- llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
- llvm::BasicBlock *CatchInCatch = CGF.createBasicBlock("catch.rethrow");
- llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
- llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
- llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
-
- // @synchronized()
- if (!isTry) {
- std::vector<const llvm::Type*> Args(1, IdTy);
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
- llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
- llvm::Value *SyncArg =
- CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
- CGF.Builder.CreateCall(SyncEnter, SyncArg);
- }
+// Emits @synchronized for the GNU runtime: evaluates the lock object,
+// calls objc_sync_enter on it, registers an all-paths cleanup that
+// calls objc_sync_exit, and then emits the body.
+void CGObjCGNU::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ // Evaluate the lock operand. This should dominate the cleanup.
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(S.getSynchExpr());
- // Push an EH context entry, used for handling rethrows and jumps
- // through finally.
- CGF.PushCleanupBlock(FinallyBlock);
-
- // Emit the statements in the @try {} block
- CGF.setInvokeDest(TryHandler);
-
- CGF.EmitBlock(TryBlock);
- CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
- : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
-
- // Jump to @finally if there is no exception
- CGF.EmitBranchThroughCleanup(FinallyEnd);
-
- // Emit the handlers
- CGF.EmitBlock(TryHandler);
-
- // Get the correct versions of the exception handling intrinsics
- llvm::Value *llvm_eh_exception =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- llvm::Value *llvm_eh_typeid_for =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
-
- // Exception object
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
-
- llvm::SmallVector<llvm::Value*, 8> ESelArgs;
- llvm::SmallVector<std::pair<const VarDecl*, const Stmt*>, 8> Handlers;
-
- ESelArgs.push_back(Exc);
- ESelArgs.push_back(Personality);
-
- bool HasCatchAll = false;
- // Only @try blocks are allowed @catch blocks, but both can have @finally
- if (isTry) {
- if (cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
- const ObjCAtTryStmt &AtTry = cast<ObjCAtTryStmt>(S);
- CGF.setInvokeDest(CatchInCatch);
-
- for (unsigned I = 0, N = AtTry.getNumCatchStmts(); I != N; ++I) {
- const ObjCAtCatchStmt *CatchStmt = AtTry.getCatchStmt(I);
- const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
- Handlers.push_back(std::make_pair(CatchDecl,
- CatchStmt->getCatchBody()));
-
- // @catch() and @catch(id) both catch any ObjC exception
- if (!CatchDecl || CatchDecl->getType()->isObjCIdType()
- || CatchDecl->getType()->isObjCQualifiedIdType()) {
- // Use i8* null here to signal this is a catch all, not a cleanup.
- ESelArgs.push_back(NULLPtr);
- HasCatchAll = true;
- // No further catches after this one will ever by reached
- break;
- }
-
- // All other types should be Objective-C interface pointer types.
- const ObjCObjectPointerType *OPT =
- CatchDecl->getType()->getAs<ObjCObjectPointerType>();
- assert(OPT && "Invalid @catch type.");
- const ObjCInterfaceDecl *IDecl =
- OPT->getObjectType()->getInterface();
- assert(IDecl && "Invalid @catch type.");
- llvm::Value *EHType =
- MakeConstantString(IDecl->getNameAsString());
- ESelArgs.push_back(EHType);
- }
- }
- }
+ // Acquire the lock.
+ llvm::Value *SyncEnter = CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncEnter, SyncArg);
- // We use a cleanup unless there was already a catch all.
- if (!HasCatchAll) {
- ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
- Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+ // Register an all-paths cleanup to release the lock.
+ {
+ CodeGenFunction::CleanupBlock
+ ReleaseScope(CGF, CodeGenFunction::NormalAndEHCleanup);
+
+ llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ // NOTE(review): SyncArg was already bitcast to IdTy above; this
+ // second bitcast appears redundant -- confirm it folds away.
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
+ CGF.Builder.CreateCall(SyncExit, SyncArg);
}
- // Find which handler was matched.
- llvm::Value *ESelector = CGF.Builder.CreateCall(llvm_eh_selector,
- ESelArgs.begin(), ESelArgs.end(), "selector");
+ // Emit the body of the statement.
+ CGF.EmitStmt(S.getSynchBody());
- for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
- const VarDecl *CatchParam = Handlers[i].first;
- const Stmt *CatchBody = Handlers[i].second;
+ // Pop the lock-release cleanup.
+ CGF.PopCleanupBlock();
+}
- llvm::BasicBlock *Next = 0;
+namespace {
+ // Per-@catch bookkeeping collected before pushing the catch scope.
+ struct CatchHandler {
+ // The @catch parameter declaration, or null for @catch(...).
+ const VarDecl *Variable;
+ // The handler body to emit.
+ const Stmt *Body;
+ // The basic block the handler is emitted into.
+ llvm::BasicBlock *Block;
+ // Runtime type value matched against; null marks a catch-all.
+ llvm::Value *TypeInfo;
+ };
+}
- // The last handler always matches.
- if (i + 1 != e) {
- assert(CatchParam && "Only last handler can be a catch all.");
+void CGObjCGNU::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+ // Unlike the Apple non-fragile runtimes, which also uses
+ // unwind-based zero cost exceptions, the GNU Objective C runtime's
+ // EH support isn't a veneer over C++ EH. Instead, exception
+ // objects are created by __objc_exception_throw and destroyed by
+ // the personality function; this avoids the need for bracketing
+ // catch handlers with calls to __blah_begin_catch/__blah_end_catch
+ // (or even _Unwind_DeleteException), but probably doesn't
+ // interoperate very well with foreign exceptions.
+
+ // Jump destination for falling out of catch bodies.
+ CodeGenFunction::JumpDest Cont;
+ if (S.getNumCatchStmts())
+ Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+
+ // We handle @finally statements by pushing them as a cleanup
+ // before entering the catch.
+ CodeGenFunction::FinallyInfo FinallyInfo;
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt()) {
+ std::vector<const llvm::Type*> Args(1, IdTy);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
+ llvm::Constant *Rethrow =
+ CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
- // Test whether this block matches the type for the selector and branch
- // to Match if it does, or to the next BB if it doesn't.
- llvm::BasicBlock *Match = CGF.createBasicBlock("match");
- Next = CGF.createBasicBlock("catch.next");
- llvm::Value *Id = CGF.Builder.CreateCall(llvm_eh_typeid_for,
- CGF.Builder.CreateBitCast(ESelArgs[i+2], PtrTy));
- CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(ESelector, Id), Match,
- Next);
+ FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(), 0, 0,
+ Rethrow);
+ }
- CGF.EmitBlock(Match);
- }
+ llvm::SmallVector<CatchHandler, 8> Handlers;
- if (CatchBody) {
- llvm::Value *ExcObject = CGF.Builder.CreateBitCast(Exc,
- CGF.ConvertType(CatchParam->getType()));
-
- // Bind the catch parameter if it exists.
- if (CatchParam) {
- // CatchParam is a ParmVarDecl because of the grammar
- // construction used to handle this, but for codegen purposes
- // we treat this as a local decl.
- CGF.EmitLocalBlockVarDecl(*CatchParam);
- CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
- }
+ // Enter the catch, if there is one.
+ if (S.getNumCatchStmts()) {
+ for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
- CGF.ObjCEHValueStack.push_back(ExcObject);
- CGF.EmitStmt(CatchBody);
- CGF.ObjCEHValueStack.pop_back();
+ Handlers.push_back(CatchHandler());
+ CatchHandler &Handler = Handlers.back();
+ Handler.Variable = CatchDecl;
+ Handler.Body = CatchStmt->getCatchBody();
+ Handler.Block = CGF.createBasicBlock("catch");
- CGF.EmitBranchThroughCleanup(FinallyEnd);
+ // @catch() and @catch(id) both catch any ObjC exception.
+ // Treat them as catch-alls.
+ // FIXME: this is what this code was doing before, but should 'id'
+ // really be catching foreign exceptions?
+ if (!CatchDecl
+ || CatchDecl->getType()->isObjCIdType()
+ || CatchDecl->getType()->isObjCQualifiedIdType()) {
- if (Next)
- CGF.EmitBlock(Next);
- } else {
- assert(!Next && "catchup should be last handler.");
+ Handler.TypeInfo = 0; // catch-all
+
+ // Don't consider any other catches.
+ break;
+ }
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *OPT =
+ CatchDecl->getType()->getAs<ObjCObjectPointerType>();
+ assert(OPT && "Invalid @catch type.");
+ const ObjCInterfaceDecl *IDecl =
+ OPT->getObjectType()->getInterface();
+ assert(IDecl && "Invalid @catch type.");
+ Handler.TypeInfo = MakeConstantString(IDecl->getNameAsString());
}
+
+ EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
+ Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
}
- // The @finally block is a secondary landing pad for any exceptions thrown in
- // @catch() blocks
- CGF.EmitBlock(CatchInCatch);
- Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- ESelArgs.clear();
- ESelArgs.push_back(Exc);
- ESelArgs.push_back(Personality);
- // If there is a @catch or @finally clause in outside of this one then we
- // need to make sure that we catch and rethrow it.
- if (PrevLandingPad) {
- ESelArgs.push_back(NULLPtr);
- } else {
- ESelArgs.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 0));
- }
- CGF.Builder.CreateCall(llvm_eh_selector, ESelArgs.begin(), ESelArgs.end(),
- "selector");
- CGF.Builder.CreateCall(llvm_eh_typeid_for,
- CGF.Builder.CreateIntToPtr(ESelArgs[2], PtrTy));
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ // Emit the try body.
+ CGF.EmitStmt(S.getTryBody());
- CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
+ // Leave the try.
+ if (S.getNumCatchStmts())
+ CGF.EHStack.popCatch();
- CGF.setInvokeDest(PrevLandingPad);
+ // Remember where we were.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
- CGF.EmitBlock(FinallyBlock);
+ // Emit the handlers.
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
+ CatchHandler &Handler = Handlers[I];
+ CGF.EmitBlock(Handler.Block);
+ llvm::Value *Exn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
- if (isTry) {
- if (const ObjCAtFinallyStmt* FinallyStmt =
- cast<ObjCAtTryStmt>(S).getFinallyStmt())
- CGF.EmitStmt(FinallyStmt->getFinallyBody());
- } else {
- // Emit 'objc_sync_exit(expr)' as finally's sole statement for
- // @synchronized.
- std::vector<const llvm::Type*> Args(1, IdTy);
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), Args, false);
- llvm::Value *SyncExit = CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
- llvm::Value *SyncArg =
- CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, IdTy);
- CGF.Builder.CreateCall(SyncExit, SyncArg);
- }
+ // Bind the catch parameter if it exists.
+ if (const VarDecl *CatchParam = Handler.Variable) {
+ const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ Exn = CGF.Builder.CreateBitCast(Exn, CatchType);
- if (Info.SwitchBlock)
- CGF.EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- CGF.EmitBlock(Info.EndBlock);
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(Exn, CGF.GetAddrOfLocalVar(CatchParam));
+ }
- // Branch around the rethrow code.
- CGF.EmitBranch(FinallyEnd);
+ CGF.ObjCEHValueStack.push_back(Exn);
+ CGF.EmitStmt(Handler.Body);
+ CGF.ObjCEHValueStack.pop_back();
- CGF.EmitBlock(FinallyRethrow);
+ CGF.EmitBranchThroughCleanup(Cont);
+ }
- llvm::Value *ExceptionObject = CGF.Builder.CreateLoad(RethrowPtr);
- llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
- if (!UnwindBB) {
- CGF.Builder.CreateCall(RethrowFn, ExceptionObject);
- // Exception always thrown, next instruction is never reached.
- CGF.Builder.CreateUnreachable();
- } else {
- // If there is a @catch block outside this scope, we invoke instead of
- // calling because we may return to this function. This is very slow, but
- // some people still do it. It would be nice to add an optimised path for
- // this.
- CGF.Builder.CreateInvoke(RethrowFn, UnwindBB, UnwindBB, &ExceptionObject,
- &ExceptionObject+1);
- }
+ // Go back to the try-statement fallthrough.
+ CGF.Builder.restoreIP(SavedIP);
- CGF.EmitBlock(FinallyEnd);
+ // Pop out of the finally.
+ if (S.getFinallyStmt())
+ CGF.ExitFinallyBlock(FinallyInfo);
+
+ if (Cont.Block) {
+ if (Cont.Block->use_empty())
+ delete Cont.Block;
+ else {
+ CGF.EmitBranch(Cont.Block);
+ CGF.EmitBlock(Cont.Block);
+ }
+ }
}
void CGObjCGNU::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
+#include "CGException.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
virtual llvm::Constant *GetCopyStructFunction();
virtual llvm::Constant *EnumerationMutationFunction();
- virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S);
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
+ void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S);
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
return ObjCTypes.getEnumerationMutationFn();
}
- virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S);
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S);
virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
return ObjCTypes.getEnumerationMutationFn();
}
+void CGObjCMac::EmitTryStmt(CodeGenFunction &CGF, const ObjCAtTryStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
+void CGObjCMac::EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
/*
Objective-C setjmp-longjmp (sjlj) Exception Handling
--
+ A catch buffer is a setjmp buffer plus:
+ - a pointer to the exception that was caught
+ - a pointer to the previous exception data buffer
+ - two pointers of reserved storage
+ Therefore catch buffers form a stack, with a pointer to the top
+ of the stack kept in thread-local storage.
+
+ objc_exception_try_enter pushes a catch buffer onto the EH stack.
+ objc_exception_try_exit pops the given catch buffer, which is
+ required to be the top of the EH stack.
+ objc_exception_throw pops the top of the EH stack, writes the
+ thrown exception into the appropriate field, and longjmps
+ to the setjmp buffer. It crashes the process (with a printf
+ and an abort()) if there are no catch buffers on the stack.
+ objc_exception_extract just reads the exception pointer out of the
+ catch buffer.
+
+  There's no reason an implementation couldn't use a lightweight
+  setjmp here --- something like __builtin_setjmp, but API-compatible
+ with the heavyweight setjmp. This will be more important if we ever
+ want to implement correct ObjC/C++ exception interactions for the
+ fragile ABI.
+
+ Note that for this use of setjmp/longjmp to be correct, we may need
+ to mark some local variables volatile: if a non-volatile local
+ variable is modified between the setjmp and the longjmp, it has
+ indeterminate value. For the purposes of LLVM IR, it may be
+ sufficient to make loads and stores within the @try (to variables
+ declared outside the @try) volatile. This is necessary for
+ optimized correctness, but is not currently being done; this is
+ being tracked as rdar://problem/8160285
+
The basic framework for a @try-catch-finally is as follows:
{
objc_exception_data d;
Rethrows and Jumps-Through-Finally
--
- Support for implicit rethrows and jumping through the finally block is
- handled by storing the current exception-handling context in
- ObjCEHStack.
-
- In order to implement proper @finally semantics, we support one basic
- mechanism for jumping through the finally block to an arbitrary
- destination. Constructs which generate exits from a @try or @catch
- block use this mechanism to implement the proper semantics by chaining
- jumps, as necessary.
-
- This mechanism works like the one used for indirect goto: we
- arbitrarily assign an ID to each destination and store the ID for the
- destination in a variable prior to entering the finally block. At the
- end of the finally block we simply create a switch to the proper
- destination.
-
- Code gen for @synchronized(expr) stmt;
- Effectively generating code for:
- objc_sync_enter(expr);
- @try stmt @finally { objc_sync_exit(expr); }
+ '@throw;' is supported by pushing the currently-caught exception
+ onto ObjCEHStack while the @catch blocks are emitted.
+
+ Branches through the @finally block are handled with an ordinary
+ normal cleanup. We do not register an EH cleanup; fragile-ABI ObjC
+ exceptions are not compatible with C++ exceptions, and this is
+ hardly the only place where this will go wrong.
+
+ @synchronized(expr) { stmt; } is emitted as if it were:
+ id synch_value = expr;
+ objc_sync_enter(synch_value);
+ @try { stmt; } @finally { objc_sync_exit(synch_value); }
*/
void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const Stmt &S) {
bool isTry = isa<ObjCAtTryStmt>(S);
- // Create various blocks we refer to for handling @finally.
- llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
- llvm::BasicBlock *FinallyExit = CGF.createBasicBlock("finally.exit");
- llvm::BasicBlock *FinallyNoExit = CGF.createBasicBlock("finally.noexit");
- llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
- llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+
+ // A destination for the fall-through edges of the catch handlers to
+ // jump to.
+ CodeGenFunction::JumpDest FinallyEnd =
+ CGF.getJumpDestInCurrentScope("finally.end");
+
+ // A destination for the rethrow edge of the catch handlers to jump
+ // to.
+ CodeGenFunction::JumpDest FinallyRethrow =
+ CGF.getJumpDestInCurrentScope("finally.rethrow");
// For @synchronized, call objc_sync_enter(sync.expr). The
// evaluation of the expression must occur before we enter the
SyncArg =
CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
- CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg)
+ ->setDoesNotThrow();
}
- // Push an EH context entry, used for handling rethrows and jumps
- // through finally.
- CGF.PushCleanupBlock(FinallyBlock);
-
- if (CGF.ObjCEHValueStack.empty())
- CGF.ObjCEHValueStack.push_back(0);
- // If This is a nested @try, caught exception is that of enclosing @try.
- else
- CGF.ObjCEHValueStack.push_back(CGF.ObjCEHValueStack.back());
// Allocate memory for the exception data and rethrow pointer.
llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
"exceptiondata.ptr");
llvm::Value *RethrowPtr = CGF.CreateTempAlloca(ObjCTypes.ObjectPtrTy,
"_rethrow");
- llvm::Value *CallTryExitPtr = CGF.CreateTempAlloca(
- llvm::Type::getInt1Ty(VMContext),
+
+ // Create a flag indicating whether the cleanup needs to call
+ // objc_exception_try_exit. This is true except when
+ // - no catches match and we're branching through the cleanup
+ // just to rethrow the exception, or
+ // - a catch matched and we're falling out of the catch handler.
+ llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
"_call_try_exit");
CGF.Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext),
- CallTryExitPtr);
+ CallTryExitVar);
+
+ // Push a normal cleanup to leave the try scope.
+ {
+ CodeGenFunction::CleanupBlock
+ FinallyScope(CGF, CodeGenFunction::NormalCleanup);
+
+ // Check whether we need to call objc_exception_try_exit.
+ // In optimized code, this branch will always be folded.
+ llvm::BasicBlock *FinallyCallExit =
+ CGF.createBasicBlock("finally.call_exit");
+ llvm::BasicBlock *FinallyNoCallExit =
+ CGF.createBasicBlock("finally.no_call_exit");
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CallTryExitVar),
+ FinallyCallExit, FinallyNoCallExit);
+
+ CGF.EmitBlock(FinallyCallExit);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ CGF.EmitBlock(FinallyNoCallExit);
+
+ if (isTry) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt())
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+
+ // ~CleanupBlock requires there to be an exit block.
+ CGF.EnsureInsertPoint();
+ } else {
+ // Emit objc_sync_exit(expr); as finally's sole statement for
+ // @synchronized.
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
+ ->setDoesNotThrow();
+ }
+ }
- // Enter a new try block and call setjmp.
- CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
- llvm::Value *JmpBufPtr = CGF.Builder.CreateStructGEP(ExceptionData, 0,
- "jmpbufarray");
- JmpBufPtr = CGF.Builder.CreateStructGEP(JmpBufPtr, 0, "tmp");
- llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
- JmpBufPtr, "result");
+ // Enter a try block:
+ // - Call objc_exception_try_enter to push ExceptionData on top of
+ // the EH stack.
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
+ // - Call setjmp on the exception data buffer.
+ llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
+ llvm::Value *GEPIndexes[] = { Zero, Zero, Zero };
+ llvm::Value *SetJmpBuffer =
+ CGF.Builder.CreateGEP(ExceptionData, GEPIndexes, GEPIndexes+3, "setjmp_buffer");
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
+ SetJmpResult->setDoesNotThrow();
+
+ // If setjmp returned 0, enter the protected block; otherwise,
+ // branch to the handler.
llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
- CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(SetJmpResult, "threw"),
- TryHandler, TryBlock);
+ llvm::Value *DidCatch =
+ CGF.Builder.CreateIsNull(SetJmpResult, "did_catch_exception");
+ CGF.Builder.CreateCondBr(DidCatch, TryBlock, TryHandler);
- // Emit the @try block.
+ // Emit the protected block.
CGF.EmitBlock(TryBlock);
CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
- : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
CGF.EmitBranchThroughCleanup(FinallyEnd);
- // Emit the "exception in @try" block.
+ // Emit the exception handler block.
CGF.EmitBlock(TryHandler);
// Retrieve the exception object. We may emit multiple blocks but
// nothing can cross this so the value is already in SSA form.
- llvm::Value *Caught =
+ llvm::CallInst *Caught =
CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
ExceptionData, "caught");
- CGF.ObjCEHValueStack.back() = Caught;
- if (!isTry) {
- CGF.Builder.CreateStore(Caught, RethrowPtr);
+ Caught->setDoesNotThrow();
+
+ // Remember the exception to rethrow.
+ CGF.Builder.CreateStore(Caught, RethrowPtr);
+
+ // Note: at this point, objc_exception_throw already popped the
+ // catch handler, so anything that branches to the cleanup needs
+ // to set CallTryExitVar to false.
+
+ // For a @synchronized (or a @try with no catches), just branch
+ // through the cleanup to the rethrow block.
+ if (!isTry || !cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+ // Tell the cleanup not to re-pop the exit.
CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
- CallTryExitPtr);
+ CallTryExitVar);
+
CGF.EmitBranchThroughCleanup(FinallyRethrow);
- } else if (cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+
+ // Otherwise, we have to match against the caught exceptions.
+ } else {
+ // Push the exception to rethrow onto the EH value stack for the
+ // benefit of any @throws in the handlers.
+ CGF.ObjCEHValueStack.push_back(Caught);
+
const ObjCAtTryStmt* AtTryStmt = cast<ObjCAtTryStmt>(&S);
// Enter a new exception try block (in case a @catch block throws
- // an exception).
- CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData);
+ // an exception). Now CallTryExitVar (currently true) is back in
+    // sync with reality.
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer,
+ "setjmp.result");
+ SetJmpResult->setDoesNotThrow();
- llvm::Value *SetJmpResult = CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(),
- JmpBufPtr, "result");
- llvm::Value *Threw = CGF.Builder.CreateIsNotNull(SetJmpResult, "threw");
+ llvm::Value *Threw =
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
llvm::BasicBlock *CatchBlock = CGF.createBasicBlock("catch");
- llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch.handler");
+ llvm::BasicBlock *CatchHandler = CGF.createBasicBlock("catch_for_catch");
CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);
CGF.EmitBlock(CatchBlock);
bool AllMatched = false;
for (unsigned I = 0, N = AtTryStmt->getNumCatchStmts(); I != N; ++I) {
const ObjCAtCatchStmt *CatchStmt = AtTryStmt->getCatchStmt(I);
- llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch");
const VarDecl *CatchParam = CatchStmt->getCatchParamDecl();
const ObjCObjectPointerType *OPT = 0;
} else {
OPT = CatchParam->getType()->getAs<ObjCObjectPointerType>();
- // catch(id e) always matches.
+ // catch(id e) always matches under this ABI, since only
+ // ObjC exceptions end up here in the first place.
// FIXME: For the time being we also match id<X>; this should
// be rejected by Sema instead.
if (OPT && (OPT->isObjCIdType() || OPT->isObjCQualifiedIdType()))
AllMatched = true;
}
+ // If this is a catch-all, we don't need to test anything.
if (AllMatched) {
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
if (CatchParam) {
CGF.EmitLocalBlockVarDecl(*CatchParam);
assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ // These types work out because ConvertType(id) == i8*.
CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam));
}
CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // The scope of the catch variable ends right here.
+ CatchVarCleanups.ForceCleanup();
+
CGF.EmitBranchThroughCleanup(FinallyEnd);
break;
}
assert(OPT && "Unexpected non-object pointer type in @catch");
const ObjCObjectType *ObjTy = OPT->getObjectType();
+
+ // FIXME: @catch (Class c) ?
ObjCInterfaceDecl *IDecl = ObjTy->getInterface();
assert(IDecl && "Catch parameter must have Objective-C type!");
// Check if the @catch block matches the exception object.
llvm::Value *Class = EmitClassRef(CGF.Builder, IDecl);
- llvm::Value *Match =
+ llvm::CallInst *Match =
CGF.Builder.CreateCall2(ObjCTypes.getExceptionMatchFn(),
Class, Caught, "match");
+ Match->setDoesNotThrow();
- llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("matched");
+ llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("match");
+ llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch.next");
CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"),
MatchedBlock, NextCatchBlock);
// Emit the @catch block.
CGF.EmitBlock(MatchedBlock);
+
+ // Collect any cleanups for the catch variable. The scope lasts until
+ // the end of the catch body.
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
CGF.EmitLocalBlockVarDecl(*CatchParam);
assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+ // Initialize the catch variable.
llvm::Value *Tmp =
CGF.Builder.CreateBitCast(Caught,
CGF.ConvertType(CatchParam->getType()),
CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam));
CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // We're done with the catch variable.
+ CatchVarCleanups.ForceCleanup();
+
CGF.EmitBranchThroughCleanup(FinallyEnd);
CGF.EmitBlock(NextCatchBlock);
}
+ CGF.ObjCEHValueStack.pop_back();
+
if (!AllMatched) {
// None of the handlers caught the exception, so store it to be
// rethrown at the end of the @finally block.
// Emit the exception handler for the @catch blocks.
CGF.EmitBlock(CatchHandler);
- CGF.Builder.CreateStore(
- CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
- ExceptionData),
- RethrowPtr);
- CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
- CallTryExitPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
- } else {
+
+ // Rethrow the new exception, not the old one.
+ Caught = CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData);
+ Caught->setDoesNotThrow();
CGF.Builder.CreateStore(Caught, RethrowPtr);
+
+ // Don't pop the catch handler; the throw already did.
CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext),
- CallTryExitPtr);
+ CallTryExitVar);
CGF.EmitBranchThroughCleanup(FinallyRethrow);
}
- // Pop the exception-handling stack entry. It is important to do
- // this now, because the code in the @finally block is not in this
- // context.
- CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
-
- CGF.ObjCEHValueStack.pop_back();
-
- // Emit the @finally block.
- CGF.EmitBlock(FinallyBlock);
- llvm::Value* CallTryExit = CGF.Builder.CreateLoad(CallTryExitPtr, "tmp");
+ // Pop the cleanup.
+ CGF.PopCleanupBlock();
+ CGF.EmitBlock(FinallyEnd.Block);
- CGF.Builder.CreateCondBr(CallTryExit, FinallyExit, FinallyNoExit);
-
- CGF.EmitBlock(FinallyExit);
- CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData);
-
- CGF.EmitBlock(FinallyNoExit);
- if (isTry) {
- if (const ObjCAtFinallyStmt* FinallyStmt =
- cast<ObjCAtTryStmt>(S).getFinallyStmt())
- CGF.EmitStmt(FinallyStmt->getFinallyBody());
- } else {
- // Emit objc_sync_exit(expr); as finally's sole statement for
- // @synchronized.
- CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
+ // Emit the rethrow block.
+ CGF.Builder.ClearInsertionPoint();
+ CGF.EmitBlock(FinallyRethrow.Block, true);
+ if (CGF.HaveInsertPoint()) {
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(),
+ CGF.Builder.CreateLoad(RethrowPtr))
+ ->setDoesNotThrow();
+ CGF.Builder.CreateUnreachable();
}
- // Emit the switch block
- if (Info.SwitchBlock)
- CGF.EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- CGF.EmitBlock(Info.EndBlock);
-
- CGF.EmitBlock(FinallyRethrow);
- CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(),
- CGF.Builder.CreateLoad(RethrowPtr));
- CGF.Builder.CreateUnreachable();
-
- CGF.EmitBlock(FinallyEnd);
+ CGF.Builder.SetInsertPoint(FinallyEnd.Block);
}
void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
ExceptionAsObject = CGF.ObjCEHValueStack.back();
}
- CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject)
+ ->setDoesNotReturn();
CGF.Builder.CreateUnreachable();
// Clear the insertion point to indicate we are in unreachable code.
}
void
-CGObjCNonFragileABIMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S) {
- bool isTry = isa<ObjCAtTryStmt>(S);
- llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
- llvm::BasicBlock *PrevLandingPad = CGF.getInvokeDest();
- llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
- llvm::BasicBlock *FinallyBlock = CGF.createBasicBlock("finally");
- llvm::BasicBlock *FinallyRethrow = CGF.createBasicBlock("finally.throw");
- llvm::BasicBlock *FinallyEnd = CGF.createBasicBlock("finally.end");
+CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ // Evaluate the lock operand. This should dominate the cleanup.
+ llvm::Value *SyncArg = CGF.EmitScalarExpr(S.getSynchExpr());
- // For @synchronized, call objc_sync_enter(sync.expr). The
- // evaluation of the expression must occur before we enter the
- // @synchronized. We can safely avoid a temp here because jumps into
- // @synchronized are illegal & this will dominate uses.
- llvm::Value *SyncArg = 0;
- if (!isTry) {
- SyncArg =
- CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
- SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
- CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg);
- }
+ // Acquire the lock.
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg)
+ ->setDoesNotThrow();
- // Push an EH context entry, used for handling rethrows and jumps
- // through finally.
- CGF.PushCleanupBlock(FinallyBlock);
+ // Register an all-paths cleanup to release the lock.
+ {
+ CodeGenFunction::CleanupBlock
+ ReleaseScope(CGF, CodeGenFunction::NormalAndEHCleanup);
- CGF.setInvokeDest(TryHandler);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
+ ->setDoesNotThrow();
+ }
- CGF.EmitBlock(TryBlock);
- CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
- : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
- CGF.EmitBranchThroughCleanup(FinallyEnd);
+ // Emit the body of the statement.
+ CGF.EmitStmt(S.getSynchBody());
- // Emit the exception handler.
+ // Pop the lock-release cleanup.
+ CGF.PopCleanupBlock();
+}
- CGF.EmitBlock(TryHandler);
+namespace {
+ struct CatchHandler {
+ const VarDecl *Variable;
+ const Stmt *Body;
+ llvm::BasicBlock *Block;
+ llvm::Value *TypeInfo;
+ };
+}
- llvm::Value *llvm_eh_exception =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
- llvm::Value *llvm_eh_selector =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- llvm::Value *llvm_eh_typeid_for =
- CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- llvm::Value *RethrowPtr = CGF.CreateTempAlloca(Exc->getType(), "_rethrow");
-
- llvm::SmallVector<llvm::Value*, 8> SelectorArgs;
- SelectorArgs.push_back(Exc);
- SelectorArgs.push_back(ObjCTypes.getEHPersonalityPtr());
-
- // Construct the lists of (type, catch body) to handle.
- llvm::SmallVector<std::pair<const VarDecl*, const Stmt*>, 8> Handlers;
- bool HasCatchAll = false;
- if (isTry) {
- const ObjCAtTryStmt &AtTry = cast<ObjCAtTryStmt>(S);
- for (unsigned I = 0, N = AtTry.getNumCatchStmts(); I != N; ++I) {
- const ObjCAtCatchStmt *CatchStmt = AtTry.getCatchStmt(I);
+void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+ // Jump destination for falling out of catch bodies.
+ CodeGenFunction::JumpDest Cont;
+ if (S.getNumCatchStmts())
+ Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+
+ CodeGenFunction::FinallyInfo FinallyInfo;
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
+ FinallyInfo = CGF.EnterFinallyBlock(Finally->getFinallyBody(),
+ ObjCTypes.getObjCBeginCatchFn(),
+ ObjCTypes.getObjCEndCatchFn(),
+ ObjCTypes.getExceptionRethrowFn());
+
+ llvm::SmallVector<CatchHandler, 8> Handlers;
+
+ // Enter the catch, if there is one.
+ if (S.getNumCatchStmts()) {
+ for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
- Handlers.push_back(std::make_pair(CatchDecl, CatchStmt->getCatchBody()));
- // catch(...) always matches.
+ Handlers.push_back(CatchHandler());
+ CatchHandler &Handler = Handlers.back();
+ Handler.Variable = CatchDecl;
+ Handler.Body = CatchStmt->getCatchBody();
+ Handler.Block = CGF.createBasicBlock("catch");
+
+ // @catch(...) always matches.
if (!CatchDecl) {
- // Use i8* null here to signal this is a catch all, not a cleanup.
- llvm::Value *Null = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
- SelectorArgs.push_back(Null);
- HasCatchAll = true;
+ Handler.TypeInfo = 0; // catch-all
+ // Don't consider any other catches.
break;
}
+ // There's a particular fixed type info for 'id'.
if (CatchDecl->getType()->isObjCIdType() ||
CatchDecl->getType()->isObjCQualifiedIdType()) {
llvm::Value *IDEHType =
false,
llvm::GlobalValue::ExternalLinkage,
0, "OBJC_EHTYPE_id");
- SelectorArgs.push_back(IDEHType);
+ Handler.TypeInfo = IDEHType;
} else {
// All other types should be Objective-C interface pointer types.
const ObjCObjectPointerType *PT =
assert(PT && "Invalid @catch type.");
const ObjCInterfaceType *IT = PT->getInterfaceType();
assert(IT && "Invalid @catch type.");
- llvm::Value *EHType = GetInterfaceEHType(IT->getDecl(), false);
- SelectorArgs.push_back(EHType);
+ Handler.TypeInfo = GetInterfaceEHType(IT->getDecl(), false);
}
}
- }
- // We use a cleanup unless there was already a catch all.
- if (!HasCatchAll) {
- // Even though this is a cleanup, treat it as a catch all to avoid the C++
- // personality behavior of terminating the process if only cleanups are
- // found in the exception handling stack.
- SelectorArgs.push_back(llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy));
- Handlers.push_back(std::make_pair((const ParmVarDecl*) 0, (const Stmt*) 0));
+ EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
+ Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
}
+
+ // Emit the try body.
+ CGF.EmitStmt(S.getTryBody());
- llvm::Value *Selector =
- CGF.Builder.CreateCall(llvm_eh_selector,
- SelectorArgs.begin(), SelectorArgs.end(),
- "selector");
- for (unsigned i = 0, e = Handlers.size(); i != e; ++i) {
- const VarDecl *CatchParam = Handlers[i].first;
- const Stmt *CatchBody = Handlers[i].second;
+ // Leave the try.
+ if (S.getNumCatchStmts())
+ CGF.EHStack.popCatch();
- llvm::BasicBlock *Next = 0;
+ // Remember where we were.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
- // The last handler always matches.
- if (i + 1 != e) {
- assert(CatchParam && "Only last handler can be a catch all.");
+ // Emit the handlers.
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
+ CatchHandler &Handler = Handlers[I];
- llvm::BasicBlock *Match = CGF.createBasicBlock("match");
- Next = CGF.createBasicBlock("catch.next");
- llvm::Value *Id =
- CGF.Builder.CreateCall(llvm_eh_typeid_for,
- CGF.Builder.CreateBitCast(SelectorArgs[i+2],
- ObjCTypes.Int8PtrTy));
- CGF.Builder.CreateCondBr(CGF.Builder.CreateICmpEQ(Selector, Id),
- Match, Next);
+ CGF.EmitBlock(Handler.Block);
+ llvm::Value *RawExn = CGF.Builder.CreateLoad(CGF.getExceptionSlot());
- CGF.EmitBlock(Match);
- }
+ // Enter the catch.
+ llvm::CallInst *Exn =
+ CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), RawExn,
+ "exn.adjusted");
+ Exn->setDoesNotThrow();
- if (CatchBody) {
- llvm::BasicBlock *MatchEnd = CGF.createBasicBlock("match.end");
-
- // Cleanups must call objc_end_catch.
- CGF.PushCleanupBlock(MatchEnd);
-
- llvm::Value *ExcObject =
- CGF.Builder.CreateCall(ObjCTypes.getObjCBeginCatchFn(), Exc);
-
- // Bind the catch parameter if it exists.
- if (CatchParam) {
- ExcObject =
- CGF.Builder.CreateBitCast(ExcObject,
- CGF.ConvertType(CatchParam->getType()));
- // CatchParam is a ParmVarDecl because of the grammar
- // construction used to handle this, but for codegen purposes
- // we treat this as a local decl.
- CGF.EmitLocalBlockVarDecl(*CatchParam);
- CGF.Builder.CreateStore(ExcObject, CGF.GetAddrOfLocalVar(CatchParam));
- }
+ // Add a cleanup to leave the catch.
+ {
+ CodeGenFunction::CleanupBlock
+ EndCatchBlock(CGF, CodeGenFunction::NormalAndEHCleanup);
- // Exceptions inside the catch block must be rethrown. We set a special
- // purpose invoke destination for this which just collects the thrown
- // exception and overwrites the object in RethrowPtr, branches through the
- // match.end to make sure we call objc_end_catch, before branching to the
- // rethrow handler.
- llvm::BasicBlock *MatchHandler = CGF.createBasicBlock("match.handler");
- CGF.setInvokeDest(MatchHandler);
- CGF.ObjCEHValueStack.push_back(ExcObject);
- CGF.EmitStmt(CatchBody);
- CGF.ObjCEHValueStack.pop_back();
- CGF.setInvokeDest(0);
+ // __objc_end_catch never throws.
+ CGF.Builder.CreateCall(ObjCTypes.getObjCEndCatchFn())
+ ->setDoesNotThrow();
+ }
- CGF.EmitBranchThroughCleanup(FinallyEnd);
+ // Bind the catch parameter if it exists.
+ if (const VarDecl *CatchParam = Handler.Variable) {
+ const llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
- // Don't emit the extra match handler if there we no unprotected calls in
- // the catch block.
- if (MatchHandler->use_empty()) {
- delete MatchHandler;
- } else {
- CGF.EmitBlock(MatchHandler);
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- CGF.Builder.CreateCall3(llvm_eh_selector,
- Exc, ObjCTypes.getEHPersonalityPtr(),
- llvm::ConstantInt::get(CGF.Int32Ty, 0),
- "unused_eh_selector");
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
- }
+ CGF.EmitLocalBlockVarDecl(*CatchParam);
+ CGF.Builder.CreateStore(CastExn, CGF.GetAddrOfLocalVar(CatchParam));
+ }
- CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
-
- CGF.EmitBlock(MatchEnd);
-
- // Unfortunately, we also have to generate another EH frame here
- // in case this throws.
- llvm::BasicBlock *MatchEndHandler =
- CGF.createBasicBlock("match.end.handler");
- llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
- CGF.Builder.CreateInvoke(ObjCTypes.getObjCEndCatchFn(),
- Cont, MatchEndHandler);
-
- CGF.EmitBlock(Cont);
- if (Info.SwitchBlock)
- CGF.EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- CGF.EmitBlock(Info.EndBlock);
-
- CGF.EmitBlock(MatchEndHandler);
- llvm::Value *Exc = CGF.Builder.CreateCall(llvm_eh_exception, "exc");
- // We are required to emit this call to satisfy LLVM, even
- // though we don't use the result.
- CGF.Builder.CreateCall3(llvm_eh_selector,
- Exc, ObjCTypes.getEHPersonalityPtr(),
- llvm::ConstantInt::get(CGF.Int32Ty, 0),
- "unused_eh_selector");
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ CGF.ObjCEHValueStack.push_back(Exn);
+ CGF.EmitStmt(Handler.Body);
+ CGF.ObjCEHValueStack.pop_back();
- if (Next)
- CGF.EmitBlock(Next);
- } else {
- assert(!Next && "catchup should be last handler.");
+ // Leave the earlier cleanup.
+ CGF.PopCleanupBlock();
- CGF.Builder.CreateStore(Exc, RethrowPtr);
- CGF.EmitBranchThroughCleanup(FinallyRethrow);
- }
- }
+ CGF.EmitBranchThroughCleanup(Cont);
+ }
- // Pop the cleanup entry, the @finally is outside this cleanup
- // scope.
- CodeGenFunction::CleanupBlockInfo Info = CGF.PopCleanupBlock();
- CGF.setInvokeDest(PrevLandingPad);
+ // Go back to the try-statement fallthrough.
+ CGF.Builder.restoreIP(SavedIP);
- CGF.EmitBlock(FinallyBlock);
-
- if (isTry) {
- if (const ObjCAtFinallyStmt* FinallyStmt =
- cast<ObjCAtTryStmt>(S).getFinallyStmt())
- CGF.EmitStmt(FinallyStmt->getFinallyBody());
- } else {
- // Emit 'objc_sync_exit(expr)' as finally's sole statement for
- // @synchronized.
- CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg);
- }
-
- if (Info.SwitchBlock)
- CGF.EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- CGF.EmitBlock(Info.EndBlock);
-
- // Branch around the rethrow code.
- CGF.EmitBranch(FinallyEnd);
-
- // Generate the rethrow code, taking care to use an invoke if we are in a
- // nested exception scope.
- CGF.EmitBlock(FinallyRethrow);
- if (PrevLandingPad) {
- llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
- CGF.Builder.CreateInvoke(ObjCTypes.getUnwindResumeOrRethrowFn(),
- Cont, PrevLandingPad,
- CGF.Builder.CreateLoad(RethrowPtr));
- CGF.EmitBlock(Cont);
- } else {
- CGF.Builder.CreateCall(ObjCTypes.getUnwindResumeOrRethrowFn(),
- CGF.Builder.CreateLoad(RethrowPtr));
- }
- CGF.Builder.CreateUnreachable();
+ // Pop out of the normal cleanup on the finally.
+ if (S.getFinallyStmt())
+ CGF.ExitFinallyBlock(FinallyInfo);
- CGF.EmitBlock(FinallyEnd);
+ if (Cont.Block)
+ CGF.EmitBlock(Cont.Block);
}
/// EmitThrowStmt - Generate code for a throw statement.
CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy, "tmp");
llvm::BasicBlock *InvokeDest = CGF.getInvokeDest();
if (InvokeDest) {
- llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
CGF.Builder.CreateInvoke(FunctionThrowOrRethrow,
- Cont, InvokeDest,
+ CGF.getUnreachableBlock(), InvokeDest,
&ExceptionAsObject, &ExceptionAsObject + 1);
- CGF.EmitBlock(Cont);
- } else
- CGF.Builder.CreateCall(FunctionThrowOrRethrow, ExceptionAsObject);
- CGF.Builder.CreateUnreachable();
+ } else {
+ CGF.Builder.CreateCall(FunctionThrowOrRethrow, ExceptionAsObject)
+ ->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+ }
// Clear the insertion point to indicate we are in unreachable code.
CGF.Builder.ClearInsertionPoint();
/// compiler when a mutation is detected during foreach iteration.
virtual llvm::Constant *EnumerationMutationFunction() = 0;
- virtual void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
- const Stmt &S) = 0;
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) = 0;
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) = 0;
virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
const ObjCAtThrowStmt &S) = 0;
virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
// Expression emitters don't handle unreachable blocks yet, so look for one
// explicitly here. This handles the common case of a call to a noreturn
// function.
- // We can't erase blocks with an associated cleanup size here since the
- // memory might be reused, leaving the old cleanup info pointing at a new
- // block.
if (llvm::BasicBlock *CurBB = Builder.GetInsertBlock()) {
- if (CurBB->empty() && CurBB->use_empty() && !BlockScopes.count(CurBB)) {
+ if (CurBB->empty() && CurBB->use_empty()) {
CurBB->eraseFromParent();
Builder.ClearInsertionPoint();
}
}
// Keep track of the current cleanup stack depth.
- CleanupScope Scope(*this);
+ RunCleanupsScope Scope(*this);
for (CompoundStmt::const_body_iterator I = S.body_begin(),
E = S.body_end()-GetLast; I != E; ++I)
// If there is a cleanup stack, then we it isn't worth trying to
// simplify this block (we would need to remove it from the scope map
// and cleanup entry).
- if (!CleanupEntries.empty())
+ if (!EHStack.empty())
return;
// Can only simplify direct branches.
return;
}
- // If necessary, associate the block with the cleanup stack size.
- if (!CleanupEntries.empty()) {
- // Check if the basic block has already been inserted.
- BlockScopeMap::iterator I = BlockScopes.find(BB);
- if (I != BlockScopes.end()) {
- assert(I->second == CleanupEntries.size() - 1);
- } else {
- BlockScopes[BB] = CleanupEntries.size() - 1;
- CleanupEntries.back().Blocks.push_back(BB);
- }
- }
-
// Place the block after the current block, if possible, or else at
// the end of the function.
if (CurBB && CurBB->getParent())
Builder.ClearInsertionPoint();
}
+CodeGenFunction::JumpDest
+CodeGenFunction::getJumpDestForLabel(const LabelStmt *S) {
+ JumpDest &Dest = LabelMap[S];
+ if (Dest.Block) return Dest;
+
+ // Create, but don't insert, the new block.
+ Dest.Block = createBasicBlock(S->getName());
+ Dest.ScopeDepth = EHScopeStack::stable_iterator::invalid();
+ return Dest;
+}
+
void CodeGenFunction::EmitLabel(const LabelStmt &S) {
- EmitBlock(getBasicBlockForLabel(&S));
+ JumpDest &Dest = LabelMap[&S];
+
+ // If we didn't need a forward reference to this label, just go
+ // ahead and create a destination at the current scope.
+ if (!Dest.Block) {
+ Dest = getJumpDestInCurrentScope(S.getName());
+
+ // Otherwise, we need to give this label a target depth and remove
+ // it from the branch-fixups list.
+ } else {
+ assert(!Dest.ScopeDepth.isValid() && "already emitted label!");
+ Dest.ScopeDepth = EHStack.stable_begin();
+
+ EHStack.resolveBranchFixups(Dest.Block);
+ }
+
+ EmitBlock(Dest.Block);
}
if (HaveInsertPoint())
EmitStopPoint(&S);
- EmitBranchThroughCleanup(getBasicBlockForLabel(S.getLabel()));
+ EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// C99 6.8.4.1: The first substatement is executed if the expression compares
// unequal to 0. The condition must be a scalar type.
- CleanupScope ConditionScope(*this);
+ RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
EmitLocalBlockVarDecl(*S.getConditionVariable());
// This avoids emitting dead code and simplifies the CFG substantially.
if (!ContainsLabel(Skipped)) {
if (Executed) {
- CleanupScope ExecutedScope(*this);
+ RunCleanupsScope ExecutedScope(*this);
EmitStmt(Executed);
}
return;
// Emit the 'then' code.
EmitBlock(ThenBlock);
{
- CleanupScope ThenScope(*this);
+ RunCleanupsScope ThenScope(*this);
EmitStmt(S.getThen());
}
EmitBranch(ContBlock);
if (const Stmt *Else = S.getElse()) {
EmitBlock(ElseBlock);
{
- CleanupScope ElseScope(*this);
+ RunCleanupsScope ElseScope(*this);
EmitStmt(Else);
}
EmitBranch(ContBlock);
}
void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
- // Emit the header for the loop, insert it, which will create an uncond br to
- // it.
- llvm::BasicBlock *LoopHeader = createBasicBlock("while.cond");
- EmitBlock(LoopHeader);
-
- // Create an exit block for when the condition fails, create a block for the
- // body of the loop.
- llvm::BasicBlock *ExitBlock = createBasicBlock("while.end");
- llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
- llvm::BasicBlock *CleanupBlock = 0;
- llvm::BasicBlock *EffectiveExitBlock = ExitBlock;
+ // Emit the header for the loop, which will also become
+ // the continue target.
+ JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
+ EmitBlock(LoopHeader.Block);
+
+ // Create an exit block for when the condition fails, which will
+ // also become the break target.
+ JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
// Store the blocks to use for break and continue.
- BreakContinueStack.push_back(BreakContinue(ExitBlock, LoopHeader));
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
// C++ [stmt.while]p2:
// When the condition of a while statement is a declaration, the
// [...]
// The object created in a condition is destroyed and created
// with each iteration of the loop.
- CleanupScope ConditionScope(*this);
+ RunCleanupsScope ConditionScope(*this);
- if (S.getConditionVariable()) {
+ if (S.getConditionVariable())
EmitLocalBlockVarDecl(*S.getConditionVariable());
-
- // If this condition variable requires cleanups, create a basic
- // block to handle those cleanups.
- if (ConditionScope.requiresCleanups()) {
- CleanupBlock = createBasicBlock("while.cleanup");
- EffectiveExitBlock = CleanupBlock;
- }
- }
// Evaluate the conditional in the while header. C99 6.8.5.1: The
// evaluation of the controlling expression takes place before each
EmitBoolCondBranch = false;
// As long as the condition is true, go to the loop body.
- if (EmitBoolCondBranch)
- Builder.CreateCondBr(BoolCondVal, LoopBody, EffectiveExitBlock);
+ llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
+ if (EmitBoolCondBranch) {
+ llvm::BasicBlock *ExitBlock = LoopExit.Block;
+ if (ConditionScope.requiresCleanups())
+ ExitBlock = createBasicBlock("while.exit");
+
+ Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.Block) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+ }
- // Emit the loop body.
+ // Emit the loop body. We have to emit this in a cleanup scope
+ // because it might be a singleton DeclStmt.
{
- CleanupScope BodyScope(*this);
+ RunCleanupsScope BodyScope(*this);
EmitBlock(LoopBody);
EmitStmt(S.getBody());
}
BreakContinueStack.pop_back();
- if (CleanupBlock) {
- // If we have a cleanup block, jump there to perform cleanups
- // before looping.
- EmitBranch(CleanupBlock);
+ // Immediately force cleanup.
+ ConditionScope.ForceCleanup();
- // Emit the cleanup block, performing cleanups for the condition
- // and then jumping to either the loop header or the exit block.
- EmitBlock(CleanupBlock);
- ConditionScope.ForceCleanup();
- Builder.CreateCondBr(BoolCondVal, LoopHeader, ExitBlock);
- } else {
- // Cycle to the condition.
- EmitBranch(LoopHeader);
- }
+ // Branch to the loop header again.
+ EmitBranch(LoopHeader.Block);
// Emit the exit block.
- EmitBlock(ExitBlock, true);
-
+ EmitBlock(LoopExit.Block, true);
// The LoopHeader typically is just a branch if we skipped emitting
// a branch, try to erase it.
- if (!EmitBoolCondBranch && !CleanupBlock)
- SimplifyForwardingBlocks(LoopHeader);
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopHeader.Block);
}
void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
- // Emit the body for the loop, insert it, which will create an uncond br to
- // it.
- llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
- llvm::BasicBlock *AfterDo = createBasicBlock("do.end");
- EmitBlock(LoopBody);
-
- llvm::BasicBlock *DoCond = createBasicBlock("do.cond");
+ JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
+ JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
// Store the blocks to use for break and continue.
- BreakContinueStack.push_back(BreakContinue(AfterDo, DoCond));
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
- // Emit the body of the loop into the block.
- EmitStmt(S.getBody());
+ // Emit the body of the loop.
+ llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
+ EmitBlock(LoopBody);
+ {
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getBody());
+ }
BreakContinueStack.pop_back();
- EmitBlock(DoCond);
+ EmitBlock(LoopCond.Block);
// C99 6.8.5.2: "The evaluation of the controlling expression takes place
// after each execution of the loop body."
// As long as the condition is true, iterate the loop.
if (EmitBoolCondBranch)
- Builder.CreateCondBr(BoolCondVal, LoopBody, AfterDo);
+ Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.Block);
// Emit the exit block.
- EmitBlock(AfterDo);
+ EmitBlock(LoopExit.Block);
// The DoCond block typically is just a branch if we skipped
// emitting a branch, try to erase it.
if (!EmitBoolCondBranch)
- SimplifyForwardingBlocks(DoCond);
+ SimplifyForwardingBlocks(LoopCond.Block);
}
void CodeGenFunction::EmitForStmt(const ForStmt &S) {
- CleanupScope ForScope(*this);
+ JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
+
+ RunCleanupsScope ForScope(*this);
// Evaluate the first part before the loop.
if (S.getInit())
EmitStmt(S.getInit());
// Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
- llvm::BasicBlock *IncBlock = 0;
- llvm::BasicBlock *CondCleanup = 0;
- llvm::BasicBlock *EffectiveExitBlock = AfterFor;
+ // If there's an increment, the continue scope will be overwritten
+ // later.
+ JumpDest Continue = getJumpDestInCurrentScope("for.cond");
+ llvm::BasicBlock *CondBlock = Continue.Block;
EmitBlock(CondBlock);
// Create a cleanup scope for the condition variable cleanups.
- CleanupScope ConditionScope(*this);
+ RunCleanupsScope ConditionScope(*this);
llvm::Value *BoolCondVal = 0;
if (S.getCond()) {
// If the for statement has a condition scope, emit the local variable
// declaration.
+ llvm::BasicBlock *ExitBlock = LoopExit.Block;
if (S.getConditionVariable()) {
EmitLocalBlockVarDecl(*S.getConditionVariable());
-
- if (ConditionScope.requiresCleanups()) {
- CondCleanup = createBasicBlock("for.cond.cleanup");
- EffectiveExitBlock = CondCleanup;
- }
}
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ if (ForScope.requiresCleanups())
+ ExitBlock = createBasicBlock("for.cond.cleanup");
// As long as the condition is true, iterate the loop.
llvm::BasicBlock *ForBody = createBasicBlock("for.body");
// C99 6.8.5p2/p4: The first substatement is executed if the expression
// compares unequal to 0. The condition must be a scalar type.
BoolCondVal = EvaluateExprAsBool(S.getCond());
- Builder.CreateCondBr(BoolCondVal, ForBody, EffectiveExitBlock);
+ Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.Block) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
EmitBlock(ForBody);
} else {
}
// If the for loop doesn't have an increment we can just use the
- // condition as the continue block.
- llvm::BasicBlock *ContinueBlock;
+ // condition as the continue block. Otherwise we'll need to create
+ // a block for it (in the current scope, i.e. in the scope of the
+ // condition), and that will become our continue block.
if (S.getInc())
- ContinueBlock = IncBlock = createBasicBlock("for.inc");
- else
- ContinueBlock = CondBlock;
+ Continue = getJumpDestInCurrentScope("for.inc");
// Store the blocks to use for break and continue.
- BreakContinueStack.push_back(BreakContinue(AfterFor, ContinueBlock));
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
- // If the condition is true, execute the body of the for stmt.
CGDebugInfo *DI = getDebugInfo();
if (DI) {
DI->setLocation(S.getSourceRange().getBegin());
{
// Create a separate cleanup scope for the body, in case it is not
// a compound statement.
- CleanupScope BodyScope(*this);
+ RunCleanupsScope BodyScope(*this);
EmitStmt(S.getBody());
}
// If there is an increment, emit it next.
if (S.getInc()) {
- EmitBlock(IncBlock);
+ EmitBlock(Continue.Block);
EmitStmt(S.getInc());
}
BreakContinueStack.pop_back();
-
- // Finally, branch back up to the condition for the next iteration.
- if (CondCleanup) {
- // Branch to the cleanup block.
- EmitBranch(CondCleanup);
-
- // Emit the cleanup block, which branches back to the loop body or
- // outside of the for statement once it is done.
- EmitBlock(CondCleanup);
- ConditionScope.ForceCleanup();
- Builder.CreateCondBr(BoolCondVal, CondBlock, AfterFor);
- } else
- EmitBranch(CondBlock);
+
+ ConditionScope.ForceCleanup();
+ EmitBranch(CondBlock);
+
+ ForScope.ForceCleanup();
+
if (DI) {
DI->setLocation(S.getSourceRange().getEnd());
DI->EmitRegionEnd(CurFn, Builder);
}
// Emit the fall-through block.
- EmitBlock(AfterFor, true);
+ EmitBlock(LoopExit.Block, true);
}
void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
if (HaveInsertPoint())
EmitStopPoint(&S);
- llvm::BasicBlock *Block = BreakContinueStack.back().BreakBlock;
+ JumpDest Block = BreakContinueStack.back().BreakBlock;
EmitBranchThroughCleanup(Block);
}
if (HaveInsertPoint())
EmitStopPoint(&S);
- llvm::BasicBlock *Block = BreakContinueStack.back().ContinueBlock;
+ JumpDest Block = BreakContinueStack.back().ContinueBlock;
EmitBranchThroughCleanup(Block);
}
}
void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
- CleanupScope ConditionScope(*this);
+ JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
+
+ RunCleanupsScope ConditionScope(*this);
if (S.getConditionVariable())
EmitLocalBlockVarDecl(*S.getConditionVariable());
// statement. We also need to create a default block now so that
// explicit case ranges tests can have a place to jump to on
// failure.
- llvm::BasicBlock *NextBlock = createBasicBlock("sw.epilog");
llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
CaseRangeBlock = DefaultBlock;
// All break statements jump to NextBlock. If BreakContinueStack is non empty
// then reuse last ContinueBlock.
- llvm::BasicBlock *ContinueBlock = 0;
+ JumpDest OuterContinue;
if (!BreakContinueStack.empty())
- ContinueBlock = BreakContinueStack.back().ContinueBlock;
+ OuterContinue = BreakContinueStack.back().ContinueBlock;
- // Ensure any vlas created between there and here, are undone
- BreakContinueStack.push_back(BreakContinue(NextBlock, ContinueBlock));
+ BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
// Emit switch body.
EmitStmt(S.getBody());
// been chained on top.
SwitchInsn->setSuccessor(0, CaseRangeBlock);
- // If a default was never emitted then reroute any jumps to it and
- // discard.
+ // If a default was never emitted:
if (!DefaultBlock->getParent()) {
- DefaultBlock->replaceAllUsesWith(NextBlock);
- delete DefaultBlock;
+ // If we have cleanups, emit the default block so that there's a
+ // place to jump through the cleanups from.
+ if (ConditionScope.requiresCleanups()) {
+ EmitBlock(DefaultBlock);
+
+ // Otherwise, just forward the default block to the switch end.
+ } else {
+ DefaultBlock->replaceAllUsesWith(SwitchExit.Block);
+ delete DefaultBlock;
+ }
}
// Emit continuation.
- EmitBlock(NextBlock, true);
+ EmitBlock(SwitchExit.Block, true);
SwitchInsn = SavedSwitchInsn;
CaseRangeBlock = SavedCRBlock;
using namespace clang;
using namespace CodeGen;
-void CodeGenFunction::PushCXXTemporary(const CXXTemporary *Temporary,
- llvm::Value *Ptr) {
- assert((LiveTemporaries.empty() ||
- LiveTemporaries.back().ThisPtr != Ptr ||
- ConditionalBranchLevel) &&
- "Pushed the same temporary twice; AST is likely wrong");
- llvm::BasicBlock *DtorBlock = createBasicBlock("temp.dtor");
+static void EmitTemporaryCleanup(CodeGenFunction &CGF,
+ const CXXTemporary *Temporary,
+ llvm::Value *Addr,
+ llvm::Value *CondPtr) {
+ llvm::BasicBlock *CondEnd = 0;
+
+ // If this is a conditional temporary, we need to check the condition
+ // boolean and only call the destructor if it's true.
+ if (CondPtr) {
+ llvm::BasicBlock *CondBlock = CGF.createBasicBlock("temp.cond-dtor.call");
+ CondEnd = CGF.createBasicBlock("temp.cond-dtor.cont");
+
+ llvm::Value *Cond = CGF.Builder.CreateLoad(CondPtr);
+ CGF.Builder.CreateCondBr(Cond, CondBlock, CondEnd);
+ CGF.EmitBlock(CondBlock);
+ }
+
+ CGF.EmitCXXDestructorCall(Temporary->getDestructor(),
+ Dtor_Complete, /*ForVirtualBase=*/false,
+ Addr);
+
+ if (CondPtr) {
+ // Reset the condition to false.
+ CGF.Builder.CreateStore(llvm::ConstantInt::getFalse(CGF.getLLVMContext()),
+ CondPtr);
+ CGF.EmitBlock(CondEnd);
+ }
+}
+/// Emits all the code to cause the given temporary to be cleaned up.
+void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
+ llvm::Value *Ptr) {
llvm::AllocaInst *CondPtr = 0;
// Check if temporaries need to be conditional. If so, we'll create a
Builder.CreateStore(llvm::ConstantInt::getTrue(VMContext), CondPtr);
}
- LiveTemporaries.push_back(CXXLiveTemporaryInfo(Temporary, Ptr, DtorBlock,
- CondPtr));
-
- PushCleanupBlock(DtorBlock);
+ CleanupBlock Cleanup(*this, NormalCleanup);
+ EmitTemporaryCleanup(*this, Temporary, Ptr, CondPtr);
if (Exceptions) {
- const CXXLiveTemporaryInfo& Info = LiveTemporaries.back();
- llvm::BasicBlock *CondEnd = 0;
-
- EHCleanupBlock Cleanup(*this);
-
- // If this is a conditional temporary, we need to check the condition
- // boolean and only call the destructor if it's true.
- if (Info.CondPtr) {
- llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
- CondEnd = createBasicBlock("cond.dtor.end");
-
- llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
- Builder.CreateCondBr(Cond, CondBlock, CondEnd);
- EmitBlock(CondBlock);
- }
-
- EmitCXXDestructorCall(Info.Temporary->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false,
- Info.ThisPtr);
-
- if (CondEnd) {
- // Reset the condition. to false.
- Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr);
- EmitBlock(CondEnd);
- }
- }
-}
-
-void CodeGenFunction::PopCXXTemporary() {
- const CXXLiveTemporaryInfo& Info = LiveTemporaries.back();
-
- CleanupBlockInfo CleanupInfo = PopCleanupBlock();
- assert(CleanupInfo.CleanupBlock == Info.DtorBlock &&
- "Cleanup block mismatch!");
- assert(!CleanupInfo.SwitchBlock &&
- "Should not have a switch block for temporary cleanup!");
- assert(!CleanupInfo.EndBlock &&
- "Should not have an end block for temporary cleanup!");
-
- llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
- if (CurBB && !CurBB->getTerminator() &&
- Info.DtorBlock->getNumUses() == 0) {
- CurBB->getInstList().splice(CurBB->end(), Info.DtorBlock->getInstList());
- delete Info.DtorBlock;
- } else
- EmitBlock(Info.DtorBlock);
-
- llvm::BasicBlock *CondEnd = 0;
-
- // If this is a conditional temporary, we need to check the condition
- // boolean and only call the destructor if it's true.
- if (Info.CondPtr) {
- llvm::BasicBlock *CondBlock = createBasicBlock("cond.dtor.call");
- CondEnd = createBasicBlock("cond.dtor.end");
-
- llvm::Value *Cond = Builder.CreateLoad(Info.CondPtr);
- Builder.CreateCondBr(Cond, CondBlock, CondEnd);
- EmitBlock(CondBlock);
- }
-
- EmitCXXDestructorCall(Info.Temporary->getDestructor(),
- Dtor_Complete, /*ForVirtualBase=*/false, Info.ThisPtr);
-
- if (CondEnd) {
- // Reset the condition. to false.
- Builder.CreateStore(llvm::ConstantInt::getFalse(VMContext), Info.CondPtr);
- EmitBlock(CondEnd);
+ Cleanup.beginEHCleanup();
+ EmitTemporaryCleanup(*this, Temporary, Ptr, CondPtr);
}
-
- LiveTemporaries.pop_back();
}
RValue
llvm::Value *AggLoc,
bool IsAggLocVolatile,
bool IsInitializer) {
- // Keep track of the current cleanup stack depth.
- size_t CleanupStackDepth = CleanupEntries.size();
- (void) CleanupStackDepth;
-
RValue RV;
-
{
- CXXTemporariesCleanupScope Scope(*this);
+ RunCleanupsScope Scope(*this);
RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
/*IgnoreResult=*/false, IsInitializer);
}
- assert(CleanupEntries.size() == CleanupStackDepth &&
- "Cleanup size mismatch!");
-
return RV;
}
LValue CodeGenFunction::EmitCXXExprWithTemporariesLValue(
const CXXExprWithTemporaries *E) {
- // Keep track of the current cleanup stack depth.
- size_t CleanupStackDepth = CleanupEntries.size();
- (void) CleanupStackDepth;
-
- unsigned OldNumLiveTemporaries = LiveTemporaries.size();
-
- LValue LV = EmitLValue(E->getSubExpr());
-
- // Pop temporaries.
- while (LiveTemporaries.size() > OldNumLiveTemporaries)
- PopCXXTemporary();
-
- assert(CleanupEntries.size() == CleanupStackDepth &&
- "Cleanup size mismatch!");
+ LValue LV;
+ {
+ RunCleanupsScope Scope(*this);
+ LV = EmitLValue(E->getSubExpr());
+ }
return LV;
}
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGDebugInfo.h"
+#include "CGException.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
: BlockFunction(cgm, *this, Builder), CGM(cgm),
Target(CGM.getContext().Target),
Builder(cgm.getModule().getContext()),
- DebugInfo(0), IndirectBranch(0),
+ ExceptionSlot(0), DebugInfo(0), IndirectBranch(0),
SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
+ DidCallStackSave(false), UnreachableBlock(0),
CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
- ConditionalBranchLevel(0), TerminateHandler(0), TrapBB(0) {
+ ConditionalBranchLevel(0), TerminateLandingPad(0), TerminateHandler(0),
+ TrapBB(0) {
// Get some frequently used types.
LLVMPointerWidth = Target.getPointerWidth(0);
}
-llvm::BasicBlock *CodeGenFunction::getBasicBlockForLabel(const LabelStmt *S) {
- llvm::BasicBlock *&BB = LabelMap[S];
- if (BB) return BB;
-
- // Create, but don't insert, the new block.
- return BB = createBasicBlock(S->getName());
-}
-
llvm::Value *CodeGenFunction::GetAddrOfLocalVar(const VarDecl *VD) {
llvm::Value *Res = LocalDeclMap[VD];
assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
// We have a valid insert point, reuse it if it is empty or there are no
// explicit jumps to the return block.
- if (CurBB->empty() || ReturnBlock->use_empty()) {
- ReturnBlock->replaceAllUsesWith(CurBB);
- delete ReturnBlock;
+ if (CurBB->empty() || ReturnBlock.Block->use_empty()) {
+ ReturnBlock.Block->replaceAllUsesWith(CurBB);
+ delete ReturnBlock.Block;
} else
- EmitBlock(ReturnBlock);
+ EmitBlock(ReturnBlock.Block);
return;
}
// Otherwise, if the return block is the target of a single direct
// branch then we can just put the code in that block instead. This
// cleans up functions which started with a unified return block.
- if (ReturnBlock->hasOneUse()) {
+ if (ReturnBlock.Block->hasOneUse()) {
llvm::BranchInst *BI =
- dyn_cast<llvm::BranchInst>(*ReturnBlock->use_begin());
- if (BI && BI->isUnconditional() && BI->getSuccessor(0) == ReturnBlock) {
+ dyn_cast<llvm::BranchInst>(*ReturnBlock.Block->use_begin());
+ if (BI && BI->isUnconditional() &&
+ BI->getSuccessor(0) == ReturnBlock.Block) {
// Reset insertion point and delete the branch.
Builder.SetInsertPoint(BI->getParent());
BI->eraseFromParent();
- delete ReturnBlock;
+ delete ReturnBlock.Block;
return;
}
}
// unless it has uses. However, we still need a place to put the debug
// region.end for now.
- EmitBlock(ReturnBlock);
+ EmitBlock(ReturnBlock.Block);
+}
+
+static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
+ if (!BB) return;
+ if (!BB->use_empty())
+ return CGF.CurFn->getBasicBlockList().push_back(BB);
+ delete BB;
}
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
assert(BreakContinueStack.empty() &&
"mismatched push/pop in break/continue stack!");
- assert(BlockScopes.empty() &&
- "did not remove all blocks from block scope map!");
- assert(CleanupEntries.empty() &&
- "mismatched push/pop in cleanup stack!");
// Emit function epilog (to return).
EmitReturnBlock();
EmitFunctionEpilog(*CurFnInfo);
EmitEndEHSpec(CurCodeDecl);
+ assert(EHStack.empty() &&
+ "did not remove all scopes from cleanup stack!");
+
// If someone did an indirect goto, emit the indirect goto block at the end of
// the function.
if (IndirectBranch) {
PN->eraseFromParent();
}
}
+
+ EmitIfUsed(*this, TerminateLandingPad);
+ EmitIfUsed(*this, TerminateHandler);
+ EmitIfUsed(*this, UnreachableBlock);
}
/// ShouldInstrumentFunction - Return true if the current function should be
if (Builder.isNamePreserving())
AllocaInsertPt->setName("allocapt");
- ReturnBlock = createBasicBlock("return");
+ ReturnBlock = getJumpDestInCurrentScope("return");
Builder.SetInsertPoint(EntryBB);
if (IndirectBranch == 0)
GetIndirectGotoBlock();
- llvm::BasicBlock *BB = getBasicBlockForLabel(L);
+ llvm::BasicBlock *BB = getJumpDestForLabel(L).Block;
// Make sure the indirect branch includes all of the address-taken blocks.
IndirectBranch->addDestination(BB);
return EmitLValue(E).getAddress();
}
-void CodeGenFunction::PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock,
- llvm::BasicBlock *CleanupExitBlock,
- llvm::BasicBlock *PreviousInvokeDest,
- bool EHOnly) {
- CleanupEntries.push_back(CleanupEntry(CleanupEntryBlock, CleanupExitBlock,
- PreviousInvokeDest, EHOnly));
+/// Pops cleanup blocks until the given savepoint is reached.
+void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
+ assert(Old.isValid());
+
+ EHScopeStack::iterator E = EHStack.find(Old);
+ while (EHStack.begin() != E)
+ PopCleanupBlock();
}
-void CodeGenFunction::EmitCleanupBlocks(size_t OldCleanupStackSize) {
- assert(CleanupEntries.size() >= OldCleanupStackSize &&
- "Cleanup stack mismatch!");
+/// Destroys a cleanup if it was unused.
+static void DestroyCleanup(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry,
+ llvm::BasicBlock *Exit) {
+ assert(Entry->use_empty() && "destroying cleanup with uses!");
+ assert(Exit->getTerminator() == 0 &&
+ "exit has terminator but entry has no predecessors!");
+
+ // This doesn't always remove the entire cleanup, but it's much
+ // safer as long as we don't know what blocks belong to the cleanup.
+ // A *much* better approach if we care about this inefficiency would
+ // be to lazily emit the cleanup.
+
+ // If the exit block is distinct from the entry, give it a branch to
+ // an unreachable destination. This preserves the well-formedness
+ // of the IR.
+ if (Entry != Exit)
+ llvm::BranchInst::Create(CGF.getUnreachableBlock(), Exit);
+
+ assert(!Entry->getParent() && "cleanup entry already positioned?");
+ delete Entry;
+}
- while (CleanupEntries.size() > OldCleanupStackSize)
- EmitCleanupBlock();
+/// Creates a switch instruction to thread branches out of the given
+/// block (which is the exit block of a cleanup).
+static void CreateCleanupSwitch(CodeGenFunction &CGF,
+ llvm::BasicBlock *Block) {
+ if (Block->getTerminator()) {
+ assert(isa<llvm::SwitchInst>(Block->getTerminator()) &&
+ "cleanup block already has a terminator, but it isn't a switch");
+ return;
+ }
+
+ llvm::Value *DestCodePtr
+ = CGF.CreateTempAlloca(CGF.Builder.getInt32Ty(), "cleanup.dst");
+ CGBuilderTy Builder(Block);
+ llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
+
+ // Create a switch instruction to determine where to jump next.
+ Builder.CreateSwitch(DestCode, CGF.getUnreachableBlock());
}
-CodeGenFunction::CleanupBlockInfo CodeGenFunction::PopCleanupBlock() {
- CleanupEntry &CE = CleanupEntries.back();
+/// Attempts to reduce a cleanup's entry block to a fallthrough. This
+/// is basically llvm::MergeBlockIntoPredecessor, except
+/// simplified/optimized for the tighter constraints on cleanup
+/// blocks.
+static void SimplifyCleanupEntry(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry) {
+ llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
+ if (!Pred) return;
- llvm::BasicBlock *CleanupEntryBlock = CE.CleanupEntryBlock;
+ llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
+ if (!Br || Br->isConditional()) return;
+ assert(Br->getSuccessor(0) == Entry);
- std::vector<llvm::BasicBlock *> Blocks;
- std::swap(Blocks, CE.Blocks);
+ // If we were previously inserting at the end of the cleanup entry
+ // block, we'll need to continue inserting at the end of the
+ // predecessor.
+ bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
+ assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
- std::vector<llvm::BranchInst *> BranchFixups;
- std::swap(BranchFixups, CE.BranchFixups);
+ // Kill the branch.
+ Br->eraseFromParent();
- bool EHOnly = CE.EHOnly;
+ // Merge the blocks.
+ Pred->getInstList().splice(Pred->end(), Entry->getInstList());
- setInvokeDest(CE.PreviousInvokeDest);
+ // Kill the entry block.
+ Entry->eraseFromParent();
- CleanupEntries.pop_back();
+ if (WasInsertBlock)
+ CGF.Builder.SetInsertPoint(Pred);
+}
- // Check if any branch fixups pointed to the scope we just popped. If so,
- // we can remove them.
- for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
- llvm::BasicBlock *Dest = BranchFixups[i]->getSuccessor(0);
- BlockScopeMap::iterator I = BlockScopes.find(Dest);
+/// Attempts to reduce a cleanup's exit switch to an unconditional
+/// branch.
+static void SimplifyCleanupExit(llvm::BasicBlock *Exit) {
+ llvm::TerminatorInst *Terminator = Exit->getTerminator();
+ assert(Terminator && "completed cleanup exit has no terminator");
- if (I == BlockScopes.end())
- continue;
+ llvm::SwitchInst *Switch = dyn_cast<llvm::SwitchInst>(Terminator);
+ if (!Switch) return;
+ if (Switch->getNumCases() != 2) return; // default + 1
- assert(I->second <= CleanupEntries.size() && "Invalid branch fixup!");
+ llvm::LoadInst *Cond = cast<llvm::LoadInst>(Switch->getCondition());
+ llvm::AllocaInst *CondVar = cast<llvm::AllocaInst>(Cond->getPointerOperand());
- if (I->second == CleanupEntries.size()) {
- // We don't need to do this branch fixup.
- BranchFixups[i] = BranchFixups.back();
- BranchFixups.pop_back();
- i--;
- e--;
- continue;
- }
- }
+ // Replace the switch instruction with an unconditional branch.
+ llvm::BasicBlock *Dest = Switch->getSuccessor(1); // default is 0
+ Switch->eraseFromParent();
+ llvm::BranchInst::Create(Dest, Exit);
- llvm::BasicBlock *SwitchBlock = CE.CleanupExitBlock;
- llvm::BasicBlock *EndBlock = 0;
- if (!BranchFixups.empty()) {
- if (!SwitchBlock)
- SwitchBlock = createBasicBlock("cleanup.switch");
- EndBlock = createBasicBlock("cleanup.end");
+ // Delete all uses of the condition variable.
+ Cond->eraseFromParent();
+ while (!CondVar->use_empty())
+ cast<llvm::StoreInst>(*CondVar->use_begin())->eraseFromParent();
- llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+ // Delete the condition variable itself.
+ CondVar->eraseFromParent();
+}
- Builder.SetInsertPoint(SwitchBlock);
+/// Threads a branch fixup through a cleanup block.
+static void ThreadFixupThroughCleanup(CodeGenFunction &CGF,
+ BranchFixup &Fixup,
+ llvm::BasicBlock *Entry,
+ llvm::BasicBlock *Exit) {
+ if (!Exit->getTerminator())
+ CreateCleanupSwitch(CGF, Exit);
- llvm::Value *DestCodePtr = CreateTempAlloca(Int32Ty, "cleanup.dst");
- llvm::Value *DestCode = Builder.CreateLoad(DestCodePtr, "tmp");
+ // Find the switch and its destination index alloca.
+ llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Exit->getTerminator());
+ llvm::Value *DestCodePtr =
+ cast<llvm::LoadInst>(Switch->getCondition())->getPointerOperand();
- // Create a switch instruction to determine where to jump next.
- llvm::SwitchInst *SI = Builder.CreateSwitch(DestCode, EndBlock,
- BranchFixups.size());
+ // Compute the index of the new case we're adding to the switch.
+ unsigned Index = Switch->getNumCases();
- // Restore the current basic block (if any)
- if (CurBB) {
- Builder.SetInsertPoint(CurBB);
+ const llvm::IntegerType *i32 = llvm::Type::getInt32Ty(CGF.getLLVMContext());
+ llvm::ConstantInt *IndexV = llvm::ConstantInt::get(i32, Index);
- // If we had a current basic block, we also need to emit an instruction
- // to initialize the cleanup destination.
- Builder.CreateStore(llvm::Constant::getNullValue(Int32Ty),
- DestCodePtr);
- } else
- Builder.ClearInsertionPoint();
-
- for (size_t i = 0, e = BranchFixups.size(); i != e; ++i) {
- llvm::BranchInst *BI = BranchFixups[i];
- llvm::BasicBlock *Dest = BI->getSuccessor(0);
-
- // Fixup the branch instruction to point to the cleanup block.
- BI->setSuccessor(0, CleanupEntryBlock);
-
- if (CleanupEntries.empty()) {
- llvm::ConstantInt *ID;
-
- // Check if we already have a destination for this block.
- if (Dest == SI->getDefaultDest())
- ID = llvm::ConstantInt::get(Int32Ty, 0);
- else {
- ID = SI->findCaseDest(Dest);
- if (!ID) {
- // No code found, get a new unique one by using the number of
- // switch successors.
- ID = llvm::ConstantInt::get(Int32Ty, SI->getNumSuccessors());
- SI->addCase(ID, Dest);
- }
- }
-
- // Store the jump destination before the branch instruction.
- new llvm::StoreInst(ID, DestCodePtr, BI);
- } else {
- // We need to jump through another cleanup block. Create a pad block
- // with a branch instruction that jumps to the final destination and add
- // it as a branch fixup to the current cleanup scope.
-
- // Create the pad block.
- llvm::BasicBlock *CleanupPad = createBasicBlock("cleanup.pad", CurFn);
-
- // Create a unique case ID.
- llvm::ConstantInt *ID
- = llvm::ConstantInt::get(Int32Ty, SI->getNumSuccessors());
-
- // Store the jump destination before the branch instruction.
- new llvm::StoreInst(ID, DestCodePtr, BI);
-
- // Add it as the destination.
- SI->addCase(ID, CleanupPad);
-
- // Create the branch to the final destination.
- llvm::BranchInst *BI = llvm::BranchInst::Create(Dest);
- CleanupPad->getInstList().push_back(BI);
-
- // And add it as a branch fixup.
- CleanupEntries.back().BranchFixups.push_back(BI);
- }
+ // Set the index in the origin block.
+ new llvm::StoreInst(IndexV, DestCodePtr, Fixup.Origin);
+
+ // Add a case to the switch.
+ Switch->addCase(IndexV, Fixup.Destination);
+
+ // Change the last branch to point to the cleanup entry block.
+ Fixup.LatestBranch->setSuccessor(Fixup.LatestBranchIndex, Entry);
+
+ // And finally, update the fixup.
+ Fixup.LatestBranch = Switch;
+ Fixup.LatestBranchIndex = Index;
+}
+
+/// Try to simplify both the entry and exit edges of a cleanup.
+static void SimplifyCleanupEdges(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry,
+ llvm::BasicBlock *Exit) {
+
+ // Given their current implementations, it's important to run these
+ // in this order: SimplifyCleanupEntry will delete Entry if it can
+ // be merged into its predecessor, which will then break
+ // SimplifyCleanupExit if (as is common) Entry == Exit.
+
+ SimplifyCleanupExit(Exit);
+ SimplifyCleanupEntry(CGF, Entry);
+}
+
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
+void CodeGenFunction::PopCleanupBlock() {
+ assert(!EHStack.empty() && "cleanup stack is empty!");
+ assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+ assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+
+ // Handle the EH cleanup if (1) there is one and (2) it's different
+ // from the normal cleanup.
+ if (Scope.isEHCleanup() &&
+ Scope.getEHEntry() != Scope.getNormalEntry()) {
+ llvm::BasicBlock *EHEntry = Scope.getEHEntry();
+ llvm::BasicBlock *EHExit = Scope.getEHExit();
+
+ if (EHEntry->use_empty()) {
+ DestroyCleanup(*this, EHEntry, EHExit);
+ } else {
+ // TODO: this isn't really the ideal location to put this EH
+ // cleanup, but lazy emission is a better solution than trying
+ // to pick a better spot.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ EmitBlock(EHEntry);
+ Builder.restoreIP(SavedIP);
+
+ SimplifyCleanupEdges(*this, EHEntry, EHExit);
}
}
- // Remove all blocks from the block scope map.
- for (size_t i = 0, e = Blocks.size(); i != e; ++i) {
- assert(BlockScopes.count(Blocks[i]) &&
- "Did not find block in scope map!");
+ // If we only have an EH cleanup, we don't really need to do much
+ // here. Branch fixups just naturally drop down to the enclosing
+ // cleanup scope.
+ if (!Scope.isNormalCleanup()) {
+ EHStack.popCleanup();
+ assert(EHStack.getNumBranchFixups() == 0 || EHStack.hasNormalCleanups());
+ return;
+ }
- BlockScopes.erase(Blocks[i]);
+ // Check whether the scope has any fixups that need to be threaded.
+ unsigned FixupDepth = Scope.getFixupDepth();
+ bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
+
+ // Grab the entry and exit blocks.
+ llvm::BasicBlock *Entry = Scope.getNormalEntry();
+ llvm::BasicBlock *Exit = Scope.getNormalExit();
+
+ // Check whether anything's been threaded through the cleanup already.
+ assert((Exit->getTerminator() == 0) == Entry->use_empty() &&
+ "cleanup entry/exit mismatch");
+ bool HasExistingBranches = !Entry->use_empty();
+
+ // Check whether we need to emit a "fallthrough" branch through the
+ // cleanup for the current insertion point.
+ llvm::BasicBlock *FallThrough = Builder.GetInsertBlock();
+ if (FallThrough && FallThrough->getTerminator())
+ FallThrough = 0;
+
+ // If *nothing* is using the cleanup, kill it.
+ if (!FallThrough && !HasFixups && !HasExistingBranches) {
+ EHStack.popCleanup();
+ DestroyCleanup(*this, Entry, Exit);
+ return;
}
- return CleanupBlockInfo(CleanupEntryBlock, SwitchBlock, EndBlock, EHOnly);
-}
+ // Otherwise, add the block to the function.
+ EmitBlock(Entry);
-void CodeGenFunction::EmitCleanupBlock() {
- CleanupBlockInfo Info = PopCleanupBlock();
+ if (FallThrough)
+ Builder.SetInsertPoint(Exit);
+ else
+ Builder.ClearInsertionPoint();
- if (Info.EHOnly) {
- // FIXME: Add this to the exceptional edge
- if (Info.CleanupBlock->getNumUses() == 0)
- delete Info.CleanupBlock;
- return;
+ // Fast case: if we don't have to add any fixups, and either
+ // we don't have a fallthrough or the cleanup wasn't previously
+ // used, then the setup above is sufficient.
+ if (!HasFixups) {
+ if (!FallThrough) {
+ assert(HasExistingBranches && "no reason for cleanup but didn't kill before");
+ EHStack.popCleanup();
+ SimplifyCleanupEdges(*this, Entry, Exit);
+ return;
+ } else if (!HasExistingBranches) {
+ assert(FallThrough && "no reason for cleanup but didn't kill before");
+ // We can't simplify the exit edge in this case because we're
+ // already inserting at the end of the exit block.
+ EHStack.popCleanup();
+ SimplifyCleanupEntry(*this, Entry);
+ return;
+ }
}
- // Scrub debug location info.
- for (llvm::BasicBlock::iterator LBI = Info.CleanupBlock->begin(),
- LBE = Info.CleanupBlock->end(); LBI != LBE; ++LBI)
- Builder.SetInstDebugLocation(LBI);
+ // Otherwise we're going to have to thread things through the cleanup.
+ llvm::SmallVector<BranchFixup*, 8> Fixups;
- llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
- if (CurBB && !CurBB->getTerminator() &&
- Info.CleanupBlock->getNumUses() == 0) {
- CurBB->getInstList().splice(CurBB->end(), Info.CleanupBlock->getInstList());
- delete Info.CleanupBlock;
- } else
- EmitBlock(Info.CleanupBlock);
+ // Synthesize a fixup for the current insertion point.
+ BranchFixup Cur;
+ if (FallThrough) {
+ Cur.Destination = createBasicBlock("cleanup.cont");
+ Cur.LatestBranch = FallThrough->getTerminator();
+ Cur.LatestBranchIndex = 0;
+ Cur.Origin = Cur.LatestBranch;
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
-}
+ // Restore fixup invariant. EmitBlock added a branch to the cleanup
+ // which we need to redirect to the destination.
+ cast<llvm::BranchInst>(Cur.LatestBranch)->setSuccessor(0, Cur.Destination);
-void CodeGenFunction::AddBranchFixup(llvm::BranchInst *BI) {
- assert(!CleanupEntries.empty() &&
- "Trying to add branch fixup without cleanup block!");
+ Fixups.push_back(&Cur);
+ } else {
+ Cur.Destination = 0;
+ }
+
+ // Collect any "real" fixups we need to thread.
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I != E; ++I)
+ if (EHStack.getBranchFixup(I).Destination)
+ Fixups.push_back(&EHStack.getBranchFixup(I));
+
+ assert(!Fixups.empty() && "no fixups, invariants broken!");
+
+ // If there's only a single fixup to thread through, do so with
+ // unconditional branches. This only happens if there's a single
+ // branch and no fallthrough.
+ if (Fixups.size() == 1 && !HasExistingBranches) {
+ Fixups[0]->LatestBranch->setSuccessor(Fixups[0]->LatestBranchIndex, Entry);
+ llvm::BranchInst *Br =
+ llvm::BranchInst::Create(Fixups[0]->Destination, Exit);
+ Fixups[0]->LatestBranch = Br;
+ Fixups[0]->LatestBranchIndex = 0;
+
+ // Otherwise, force a switch statement and thread everything through
+ // the switch.
+ } else {
+ CreateCleanupSwitch(*this, Exit);
+ for (unsigned I = 0, E = Fixups.size(); I != E; ++I)
+ ThreadFixupThroughCleanup(*this, *Fixups[I], Entry, Exit);
+ }
+
+ // Emit the fallthrough destination block if necessary.
+ if (Cur.Destination)
+ EmitBlock(Cur.Destination);
- // FIXME: We could be more clever here and check if there's already a branch
- // fixup for this destination and recycle it.
- CleanupEntries.back().BranchFixups.push_back(BI);
+ // We're finally done with the cleanup.
+ EHStack.popCleanup();
}
-void CodeGenFunction::EmitBranchThroughCleanup(llvm::BasicBlock *Dest) {
+void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
if (!HaveInsertPoint())
return;
- llvm::BranchInst* BI = Builder.CreateBr(Dest);
-
- Builder.ClearInsertionPoint();
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);
- // The stack is empty, no need to do any cleanup.
- if (CleanupEntries.empty())
+ // If we're not in a cleanup scope, we don't need to worry about
+ // fixups.
+ if (!EHStack.hasNormalCleanups()) {
+ Builder.ClearInsertionPoint();
return;
+ }
- if (!Dest->getParent()) {
- // We are trying to branch to a block that hasn't been inserted yet.
- AddBranchFixup(BI);
+ // Initialize a fixup.
+ BranchFixup Fixup;
+ Fixup.Destination = Dest.Block;
+ Fixup.Origin = BI;
+ Fixup.LatestBranch = BI;
+ Fixup.LatestBranchIndex = 0;
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope.
+ if (!Dest.ScopeDepth.isValid()) {
+ EHStack.addBranchFixup() = Fixup;
+ Builder.ClearInsertionPoint();
return;
}
- BlockScopeMap::iterator I = BlockScopes.find(Dest);
- if (I == BlockScopes.end()) {
- // We are trying to jump to a block that is outside of any cleanup scope.
- AddBranchFixup(BI);
- return;
+ for (EHScopeStack::iterator I = EHStack.begin(),
+ E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
+ if (isa<EHCleanupScope>(*I)) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
+ if (Scope.isNormalCleanup())
+ ThreadFixupThroughCleanup(*this, Fixup, Scope.getNormalEntry(),
+ Scope.getNormalExit());
+ }
}
+
+ Builder.ClearInsertionPoint();
+}
- assert(I->second < CleanupEntries.size() &&
- "Trying to branch into cleanup region");
+void CodeGenFunction::EmitBranchThroughEHCleanup(JumpDest Dest) {
+ if (!HaveInsertPoint())
+ return;
- if (I->second == CleanupEntries.size() - 1) {
- // We have a branch to a block in the same scope.
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.Block);
+
+ // If we're not in a cleanup scope, we don't need to worry about
+ // fixups.
+ if (!EHStack.hasEHCleanups()) {
+ Builder.ClearInsertionPoint();
return;
}
- AddBranchFixup(BI);
+ // Initialize a fixup.
+ BranchFixup Fixup;
+ Fixup.Destination = Dest.Block;
+ Fixup.Origin = BI;
+ Fixup.LatestBranch = BI;
+ Fixup.LatestBranchIndex = 0;
+
+ // We should never get invalid scope depths for these: invalid scope
+ // depths only arise for as-yet-unemitted labels, and we can't do an
+ // EH-unwind to one of those.
+ assert(Dest.ScopeDepth.isValid() && "invalid scope depth on EH dest?");
+
+ for (EHScopeStack::iterator I = EHStack.begin(),
+ E = EHStack.find(Dest.ScopeDepth); I != E; ++I) {
+ if (isa<EHCleanupScope>(*I)) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*I);
+ if (Scope.isEHCleanup())
+ ThreadFixupThroughCleanup(*this, Fixup, Scope.getEHEntry(),
+ Scope.getEHExit());
+ }
+ }
+
+ Builder.ClearInsertionPoint();
}
class SwitchInst;
class Twine;
class Value;
+ class CallSite;
}
namespace clang {
class CGRecordLayout;
class CGBlockInfo;
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The origin of the branch. Any switch-index stores required by
+ /// cleanup threading are added before this instruction.
+ llvm::Instruction *Origin;
+
+ /// The destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ llvm::BasicBlock *Destination;
+
+ /// The last branch of the fixup. It is an invariant that
+ /// LatestBranch->getSuccessor(LatestBranchIndex) == Destination.
+ ///
+ /// The branch is always either a BranchInst or a SwitchInst.
+ llvm::TerminatorInst *LatestBranch;
+ unsigned LatestBranchIndex;
+};
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+public:
+ /// A saved depth on the scope stack. This is necessary because
+ /// pushing scopes onto the stack invalidates iterators.
+ class stable_iterator {
+ friend class EHScopeStack;
+
+ /// Offset from StartOfData to EndOfBuffer.
+ ptrdiff_t Size;
+
+ stable_iterator(ptrdiff_t Size) : Size(Size) {}
+
+ public:
+ static stable_iterator invalid() { return stable_iterator(-1); }
+ stable_iterator() : Size(-1) {}
+
+ bool isValid() const { return Size >= 0; }
+
+ friend bool operator==(stable_iterator A, stable_iterator B) {
+ return A.Size == B.Size;
+ }
+ friend bool operator!=(stable_iterator A, stable_iterator B) {
+ return A.Size != B.Size;
+ }
+ };
+
+private:
+ // The implementation for this class is in CGException.h and
+ // CGException.cpp; the definition is here because it's used as a
+ // member of CodeGenFunction.
+
+ /// The start of the scope-stack buffer, i.e. the allocated pointer
+ /// for the buffer. All of these pointers are either simultaneously
+ /// null or simultaneously valid.
+ char *StartOfBuffer;
+
+ /// The end of the buffer.
+ char *EndOfBuffer;
+
+ /// The first valid entry in the buffer.
+ char *StartOfData;
+
+ /// The innermost normal cleanup on the stack.
+ stable_iterator InnermostNormalCleanup;
+
+ /// The innermost EH cleanup on the stack.
+ stable_iterator InnermostEHCleanup;
+
+ /// The number of catches on the stack.
+ unsigned CatchDepth;
+
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ llvm::SmallVector<BranchFixup, 8> BranchFixups;
+
+ char *allocate(size_t Size);
+
+ void popNullFixups();
+
+public:
+ EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
+ InnermostNormalCleanup(stable_end()),
+ InnermostEHCleanup(stable_end()),
+ CatchDepth(0) {}
+ ~EHScopeStack() { delete[] StartOfBuffer; }
+
+ /// Push a cleanup on the stack.
+ void pushCleanup(llvm::BasicBlock *NormalEntry,
+ llvm::BasicBlock *NormalExit,
+ llvm::BasicBlock *EHEntry,
+ llvm::BasicBlock *EHExit);
+
+ /// Pops a cleanup scope off the stack. This should only be called
+ /// by CodeGenFunction::PopCleanupBlock.
+ void popCleanup();
+
+ /// Push a set of catch handlers on the stack. The catch is
+ /// uninitialized and will need to have the given number of handlers
+ /// set on it.
+ class EHCatchScope *pushCatch(unsigned NumHandlers);
+
+ /// Pops a catch scope off the stack.
+ void popCatch();
+
+ /// Push an exceptions filter on the stack.
+ class EHFilterScope *pushFilter(unsigned NumFilters);
+
+ /// Pops an exceptions filter off the stack.
+ void popFilter();
+
+ /// Push a terminate handler on the stack.
+ void pushTerminate();
+
+ /// Pops a terminate handler off the stack.
+ void popTerminate();
+
+ /// Determines whether the exception-scopes stack is empty.
+ bool empty() const { return StartOfData == EndOfBuffer; }
+
+ bool requiresLandingPad() const {
+ return (CatchDepth || hasEHCleanups());
+ }
+
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return InnermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return InnermostNormalCleanup;
+ }
+
+ /// Determines whether there are any EH cleanups on the stack.
+ bool hasEHCleanups() const {
+ return InnermostEHCleanup != stable_end();
+ }
+
+ /// Returns the innermost EH cleanup on the stack, or stable_end()
+ /// if there are no EH cleanups.
+ stable_iterator getInnermostEHCleanup() const {
+ return InnermostEHCleanup;
+ }
+
+ /// An unstable reference to a scope-stack depth. Invalidated by
+ /// pushes but not pops.
+ class iterator;
+
+ /// Returns an iterator pointing to the innermost EH scope.
+ iterator begin() const;
+
+ /// Returns an iterator pointing to the outermost EH scope.
+ iterator end() const;
+
+ /// Create a stable reference to the top of the EH stack. The
+ /// returned reference is valid until that scope is popped off the
+ /// stack.
+ stable_iterator stable_begin() const {
+ return stable_iterator(EndOfBuffer - StartOfData);
+ }
+
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() {
+ return stable_iterator(0);
+ }
+
+ /// Translates an iterator into a stable_iterator.
+ stable_iterator stabilize(iterator it) const;
+
+ /// Finds the nearest cleanup enclosing the given iterator.
+ /// Returns stable_iterator::invalid() if there are no such cleanups.
+ stable_iterator getEnclosingEHCleanup(iterator it) const;
+
+  /// Turn a stable reference to a scope depth into an unstable pointer
+ /// to the EH stack.
+ iterator find(stable_iterator save) const;
+
+ /// Removes the cleanup pointed to by the given stable_iterator.
+ void removeCleanup(stable_iterator save);
+
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ BranchFixups.push_back(BranchFixup());
+ return BranchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return BranchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned I) {
+ assert(I < getNumBranchFixups());
+ return BranchFixups[I];
+ }
+
+ /// Mark any branch fixups leading to the given block as resolved.
+ void resolveBranchFixups(llvm::BasicBlock *Dest);
+};
+
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
class CodeGenFunction : public BlockFunction {
CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
public:
+ /// A jump destination is a pair of a basic block and a cleanup
+ /// depth. They are used to implement direct jumps across cleanup
+ /// scopes, e.g. goto, break, continue, and return.
+ struct JumpDest {
+ JumpDest() : Block(0), ScopeDepth() {}
+ JumpDest(llvm::BasicBlock *Block, EHScopeStack::stable_iterator Depth)
+ : Block(Block), ScopeDepth(Depth) {}
+
+ llvm::BasicBlock *Block;
+ EHScopeStack::stable_iterator ScopeDepth;
+ };
+
CodeGenModule &CGM; // Per-module state.
const TargetInfo &Target;
GlobalDecl CurGD;
/// ReturnBlock - Unified return block.
- llvm::BasicBlock *ReturnBlock;
+ JumpDest ReturnBlock;
+
/// ReturnValue - The temporary alloca to hold the return value. This is null
/// iff the function has no return value.
llvm::Value *ReturnValue;
/// \brief A mapping from NRVO variables to the flags used to indicate
/// when the NRVO has been applied to this variable.
llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
-
-public:
- /// ObjCEHValueStack - Stack of Objective-C exception values, used for
- /// rethrows.
- llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
-
- /// PushCleanupBlock - Push a new cleanup entry on the stack and set the
- /// passed in block as the cleanup block.
- void PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock,
- llvm::BasicBlock *CleanupExitBlock,
- llvm::BasicBlock *PreviousInvokeDest,
- bool EHOnly = false);
- void PushCleanupBlock(llvm::BasicBlock *CleanupEntryBlock) {
- PushCleanupBlock(CleanupEntryBlock, 0, getInvokeDest(), false);
- }
- /// CleanupBlockInfo - A struct representing a popped cleanup block.
- struct CleanupBlockInfo {
- /// CleanupEntryBlock - the cleanup entry block
- llvm::BasicBlock *CleanupBlock;
+ EHScopeStack EHStack;
- /// SwitchBlock - the block (if any) containing the switch instruction used
- /// for jumping to the final destination.
- llvm::BasicBlock *SwitchBlock;
+ /// The exception slot. All landing pads write the current
+ /// exception pointer into this alloca.
+ llvm::Value *ExceptionSlot;
- /// EndBlock - the default destination for the switch instruction.
- llvm::BasicBlock *EndBlock;
+ /// Emits a landing pad for the current EH stack.
+ llvm::BasicBlock *EmitLandingPad();
- /// EHOnly - True iff this cleanup should only be performed on the
- /// exceptional edge.
- bool EHOnly;
+ llvm::BasicBlock *getInvokeDestImpl();
- CleanupBlockInfo(llvm::BasicBlock *cb, llvm::BasicBlock *sb,
- llvm::BasicBlock *eb, bool ehonly = false)
- : CleanupBlock(cb), SwitchBlock(sb), EndBlock(eb), EHOnly(ehonly) {}
- };
+public:
+ /// ObjCEHValueStack - Stack of Objective-C exception values, used for
+ /// rethrows.
+ llvm::SmallVector<llvm::Value*, 8> ObjCEHValueStack;
- /// EHCleanupBlock - RAII object that will create a cleanup block for the
- /// exceptional edge and set the insert point to that block. When destroyed,
- /// it creates the cleanup edge and sets the insert point to the previous
- /// block.
- class EHCleanupBlock {
- CodeGenFunction& CGF;
- llvm::BasicBlock *PreviousInsertionBlock;
- llvm::BasicBlock *CleanupHandler;
- llvm::BasicBlock *PreviousInvokeDest;
- public:
- EHCleanupBlock(CodeGenFunction &cgf)
- : CGF(cgf),
- PreviousInsertionBlock(CGF.Builder.GetInsertBlock()),
- CleanupHandler(CGF.createBasicBlock("ehcleanup", CGF.CurFn)),
- PreviousInvokeDest(CGF.getInvokeDest()) {
- llvm::BasicBlock *TerminateHandler = CGF.getTerminateHandler();
- CGF.Builder.SetInsertPoint(CleanupHandler);
- CGF.setInvokeDest(TerminateHandler);
- }
- ~EHCleanupBlock();
+ // A struct holding information about a finally block's IR
+ // generation. For now, doesn't actually hold anything.
+ struct FinallyInfo {
};
- /// PopCleanupBlock - Will pop the cleanup entry on the stack, process all
- /// branch fixups and return a block info struct with the switch block and end
- /// block. This will also reset the invoke handler to the previous value
- /// from when the cleanup block was created.
- CleanupBlockInfo PopCleanupBlock();
-
- /// DelayedCleanupBlock - RAII object that will create a cleanup block and set
- /// the insert point to that block. When destructed, it sets the insert point
- /// to the previous block and pushes a new cleanup entry on the stack.
- class DelayedCleanupBlock {
- CodeGenFunction& CGF;
- llvm::BasicBlock *CurBB;
- llvm::BasicBlock *CleanupEntryBB;
- llvm::BasicBlock *CleanupExitBB;
- llvm::BasicBlock *CurInvokeDest;
- bool EHOnly;
+ FinallyInfo EnterFinallyBlock(const Stmt *Stmt,
+ llvm::Constant *BeginCatchFn,
+ llvm::Constant *EndCatchFn,
+ llvm::Constant *RethrowFn);
+ void ExitFinallyBlock(FinallyInfo &FinallyInfo);
+
+ enum CleanupKind { NormalAndEHCleanup, EHCleanup, NormalCleanup };
+
+ /// PushDestructorCleanup - Push a cleanup to call the
+ /// complete-object destructor of an object of the given type at the
+ /// given address. Does nothing if T is not a C++ class type with a
+ /// non-trivial destructor.
+ void PushDestructorCleanup(QualType T, llvm::Value *Addr);
+
+ /// PopCleanupBlock - Will pop the cleanup entry on the stack and
+ /// process all branch fixups.
+ void PopCleanupBlock();
+
+ /// CleanupBlock - RAII object that will create a cleanup block and
+ /// set the insert point to that block. When destructed, it sets the
+ /// insert point to the previous block and pushes a new cleanup
+ /// entry on the stack.
+ class CleanupBlock {
+ CodeGenFunction &CGF;
+ CGBuilderTy::InsertPoint SavedIP;
+ llvm::BasicBlock *NormalCleanupEntryBB;
+ llvm::BasicBlock *NormalCleanupExitBB;
+ llvm::BasicBlock *EHCleanupEntryBB;
public:
- DelayedCleanupBlock(CodeGenFunction &cgf, bool ehonly = false)
- : CGF(cgf), CurBB(CGF.Builder.GetInsertBlock()),
- CleanupEntryBB(CGF.createBasicBlock("cleanup")),
- CleanupExitBB(0),
- CurInvokeDest(CGF.getInvokeDest()),
- EHOnly(ehonly) {
- CGF.Builder.SetInsertPoint(CleanupEntryBB);
- }
+ CleanupBlock(CodeGenFunction &CGF, CleanupKind Kind);
- llvm::BasicBlock *getCleanupExitBlock() {
- if (!CleanupExitBB)
- CleanupExitBB = CGF.createBasicBlock("cleanup.exit");
- return CleanupExitBB;
- }
+ /// If we're currently writing a normal cleanup, tie that off and
+ /// start writing an EH cleanup.
+ void beginEHCleanup();
- ~DelayedCleanupBlock() {
- CGF.PushCleanupBlock(CleanupEntryBB, CleanupExitBB, CurInvokeDest,
- EHOnly);
- // FIXME: This is silly, move this into the builder.
- if (CurBB)
- CGF.Builder.SetInsertPoint(CurBB);
- else
- CGF.Builder.ClearInsertionPoint();
- }
+ ~CleanupBlock();
};
- /// \brief Enters a new scope for capturing cleanups, all of which will be
- /// executed once the scope is exited.
- class CleanupScope {
+ /// \brief Enters a new scope for capturing cleanups, all of which
+ /// will be executed once the scope is exited.
+ class RunCleanupsScope {
CodeGenFunction& CGF;
- size_t CleanupStackDepth;
+ EHScopeStack::stable_iterator CleanupStackDepth;
bool OldDidCallStackSave;
bool PerformCleanup;
- CleanupScope(const CleanupScope &); // DO NOT IMPLEMENT
- CleanupScope &operator=(const CleanupScope &); // DO NOT IMPLEMENT
+ RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
+ RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
public:
/// \brief Enter a new cleanup scope.
- explicit CleanupScope(CodeGenFunction &CGF)
+ explicit RunCleanupsScope(CodeGenFunction &CGF)
: CGF(CGF), PerformCleanup(true)
{
- CleanupStackDepth = CGF.CleanupEntries.size();
+ CleanupStackDepth = CGF.EHStack.stable_begin();
OldDidCallStackSave = CGF.DidCallStackSave;
}
/// \brief Exit this cleanup scope, emitting any accumulated
/// cleanups.
- ~CleanupScope() {
+ ~RunCleanupsScope() {
if (PerformCleanup) {
CGF.DidCallStackSave = OldDidCallStackSave;
- CGF.EmitCleanupBlocks(CleanupStackDepth);
+ CGF.PopCleanupBlocks(CleanupStackDepth);
}
}
/// \brief Determine whether this scope requires any cleanups.
bool requiresCleanups() const {
- return CGF.CleanupEntries.size() > CleanupStackDepth;
+ return CGF.EHStack.stable_begin() != CleanupStackDepth;
}
/// \brief Force the emission of cleanups now, instead of waiting
void ForceCleanup() {
assert(PerformCleanup && "Already forced cleanup");
CGF.DidCallStackSave = OldDidCallStackSave;
- CGF.EmitCleanupBlocks(CleanupStackDepth);
+ CGF.PopCleanupBlocks(CleanupStackDepth);
PerformCleanup = false;
}
};
- /// CXXTemporariesCleanupScope - Enters a new scope for catching live
- /// temporaries, all of which will be popped once the scope is exited.
- class CXXTemporariesCleanupScope {
- CodeGenFunction &CGF;
- size_t NumLiveTemporaries;
-
- // DO NOT IMPLEMENT
- CXXTemporariesCleanupScope(const CXXTemporariesCleanupScope &);
- CXXTemporariesCleanupScope &operator=(const CXXTemporariesCleanupScope &);
-
- public:
- explicit CXXTemporariesCleanupScope(CodeGenFunction &CGF)
- : CGF(CGF), NumLiveTemporaries(CGF.LiveTemporaries.size()) { }
-
- ~CXXTemporariesCleanupScope() {
- while (CGF.LiveTemporaries.size() > NumLiveTemporaries)
- CGF.PopCXXTemporary();
- }
- };
+ /// PopCleanupBlocks - Takes the old cleanup stack size and emits
+ /// the cleanup blocks that have been added.
+ void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
- /// EmitCleanupBlocks - Takes the old cleanup stack size and emits the cleanup
- /// blocks that have been added.
- void EmitCleanupBlocks(size_t OldCleanupStackSize);
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) const {
+ return JumpDest(Target, EHStack.stable_begin());
+ }
- /// EmitBranchThroughCleanup - Emit a branch from the current insert block
- /// through the cleanup handling code (if any) and then on to \arg Dest.
- ///
- /// FIXME: Maybe this should really be in EmitBranch? Don't we always want
- /// this behavior for branches?
- void EmitBranchThroughCleanup(llvm::BasicBlock *Dest);
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(const char *Name = 0) {
+ return JumpDest(createBasicBlock(Name), EHStack.stable_begin());
+ }
+
+ /// EmitBranchThroughCleanup - Emit a branch from the current insert
+ /// block through the normal cleanup handling code (if any) and then
+ /// on to \arg Dest.
+ void EmitBranchThroughCleanup(JumpDest Dest);
+
+ /// EmitBranchThroughEHCleanup - Emit a branch from the current
+ /// insert block through the EH cleanup handling code (if any) and
+ /// then on to \arg Dest.
+ void EmitBranchThroughEHCleanup(JumpDest Dest);
/// BeginConditionalBranch - Should be called before a conditional part of an
/// expression is emitted. For example, before the RHS of the expression below
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
/// LabelMap - This keeps track of the LLVM basic block for each C label.
- llvm::DenseMap<const LabelStmt*, llvm::BasicBlock*> LabelMap;
+ llvm::DenseMap<const LabelStmt*, JumpDest> LabelMap;
// BreakContinueStack - This keeps track of where break and continue
// statements should jump to.
struct BreakContinue {
- BreakContinue(llvm::BasicBlock *bb, llvm::BasicBlock *cb)
- : BreakBlock(bb), ContinueBlock(cb) {}
+ BreakContinue(JumpDest Break, JumpDest Continue)
+ : BreakBlock(Break), ContinueBlock(Continue) {}
- llvm::BasicBlock *BreakBlock;
- llvm::BasicBlock *ContinueBlock;
+ JumpDest BreakBlock;
+ JumpDest ContinueBlock;
};
llvm::SmallVector<BreakContinue, 8> BreakContinueStack;
/// calling llvm.stacksave for multiple VLAs in the same scope.
bool DidCallStackSave;
- struct CleanupEntry {
- /// CleanupEntryBlock - The block of code that does the actual cleanup.
- llvm::BasicBlock *CleanupEntryBlock;
-
- /// CleanupExitBlock - The cleanup exit block.
- llvm::BasicBlock *CleanupExitBlock;
-
- /// Blocks - Basic blocks that were emitted in the current cleanup scope.
- std::vector<llvm::BasicBlock *> Blocks;
-
- /// BranchFixups - Branch instructions to basic blocks that haven't been
- /// inserted into the current function yet.
- std::vector<llvm::BranchInst *> BranchFixups;
-
- /// PreviousInvokeDest - The invoke handler from the start of the cleanup
- /// region.
- llvm::BasicBlock *PreviousInvokeDest;
-
- /// EHOnly - Perform this only on the exceptional edge, not the main edge.
- bool EHOnly;
-
- explicit CleanupEntry(llvm::BasicBlock *CleanupEntryBlock,
- llvm::BasicBlock *CleanupExitBlock,
- llvm::BasicBlock *PreviousInvokeDest,
- bool ehonly)
- : CleanupEntryBlock(CleanupEntryBlock),
- CleanupExitBlock(CleanupExitBlock),
- PreviousInvokeDest(PreviousInvokeDest),
- EHOnly(ehonly) {}
- };
-
- /// CleanupEntries - Stack of cleanup entries.
- llvm::SmallVector<CleanupEntry, 8> CleanupEntries;
-
- typedef llvm::DenseMap<llvm::BasicBlock*, size_t> BlockScopeMap;
-
- /// BlockScopes - Map of which "cleanup scope" scope basic blocks have.
- BlockScopeMap BlockScopes;
+ /// A block containing a single 'unreachable' instruction. Created
+ /// lazily by getUnreachableBlock().
+ llvm::BasicBlock *UnreachableBlock;
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
ImplicitParamDecl *CXXVTTDecl;
llvm::Value *CXXVTTValue;
- /// CXXLiveTemporaryInfo - Holds information about a live C++ temporary.
- struct CXXLiveTemporaryInfo {
- /// Temporary - The live temporary.
- const CXXTemporary *Temporary;
-
- /// ThisPtr - The pointer to the temporary.
- llvm::Value *ThisPtr;
-
- /// DtorBlock - The destructor block.
- llvm::BasicBlock *DtorBlock;
-
- /// CondPtr - If this is a conditional temporary, this is the pointer to the
- /// condition variable that states whether the destructor should be called
- /// or not.
- llvm::Value *CondPtr;
-
- CXXLiveTemporaryInfo(const CXXTemporary *temporary,
- llvm::Value *thisptr, llvm::BasicBlock *dtorblock,
- llvm::Value *condptr)
- : Temporary(temporary), ThisPtr(thisptr), DtorBlock(dtorblock),
- CondPtr(condptr) { }
- };
-
- llvm::SmallVector<CXXLiveTemporaryInfo, 4> LiveTemporaries;
-
/// ConditionalBranchLevel - Contains the nesting level of the current
/// conditional branch. This is used so that we know if a temporary should be
/// destroyed conditionally.
/// number that holds the value.
unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
+ llvm::BasicBlock *TerminateLandingPad;
llvm::BasicBlock *TerminateHandler;
llvm::BasicBlock *TrapBB;
ASTContext &getContext() const;
CGDebugInfo *getDebugInfo() { return DebugInfo; }
- llvm::BasicBlock *getInvokeDest() { return InvokeDest; }
- void setInvokeDest(llvm::BasicBlock *B) { InvokeDest = B; }
+ /// Returns a pointer to the function's exception object slot, which
+ /// is assigned in every landing pad.
+ llvm::Value *getExceptionSlot();
+
+ llvm::BasicBlock *getUnreachableBlock() {
+ if (!UnreachableBlock) {
+ UnreachableBlock = createBasicBlock("unreachable");
+ new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
+ }
+ return UnreachableBlock;
+ }
+
+ llvm::BasicBlock *getInvokeDest() {
+ if (!EHStack.requiresLandingPad()) return 0;
+ return getInvokeDestImpl();
+ }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
/// EmitEndEHSpec - Emit the end of the exception spec.
void EmitEndEHSpec(const Decl *D);
- /// getTerminateHandler - Return a handler that just calls terminate.
+ /// getTerminateLandingPad - Return a landing pad that just calls terminate.
+ llvm::BasicBlock *getTerminateLandingPad();
+
+ /// getTerminateHandler - Return a handler (not a landing pad, just
+ /// a catch handler) that just calls terminate. This is used when
+ /// a terminate scope encloses a try.
llvm::BasicBlock *getTerminateHandler();
const llvm::Type *ConvertTypeForMem(QualType T);
/// getBasicBlockForLabel - Return the LLVM basicblock that the specified
/// label maps to.
- llvm::BasicBlock *getBasicBlockForLabel(const LabelStmt *S);
+ JumpDest getJumpDestForLabel(const LabelStmt *S);
/// SimplifyForwardingBlocks - If the given basic block is only a branch to
/// another basic block, simplify it. This assumes that no other code could
void EmitNewArrayInitializer(const CXXNewExpr *E, llvm::Value *NewPtr,
llvm::Value *NumElements);
- void PushCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
- void PopCXXTemporary();
+ void EmitCXXTemporary(const CXXTemporary *Temporary, llvm::Value *Ptr);
llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
/// This function can be called with a null (unreachable) insert point.
void EmitBlockVarDecl(const VarDecl &D);
+ typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
+ llvm::Value *Address);
+
/// EmitLocalBlockVarDecl - Emit a local block variable declaration.
///
/// This function can be called with a null (unreachable) insert point.
- void EmitLocalBlockVarDecl(const VarDecl &D);
+ void EmitLocalBlockVarDecl(const VarDecl &D, SpecialInitFn *SpecialInit = 0);
void EmitStaticBlockVarDecl(const VarDecl &D,
llvm::GlobalValue::LinkageTypes Linkage);
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
llvm::Constant *getUnwindResumeOrRethrowFn();
- struct CXXTryStmtInfo {
- llvm::BasicBlock *SavedLandingPad;
- llvm::BasicBlock *HandlerBlock;
- llvm::BasicBlock *FinallyBlock;
- };
+ struct CXXTryStmtInfo {};
CXXTryStmtInfo EnterCXXTryStmt(const CXXTryStmt &S);
void ExitCXXTryStmt(const CXXTryStmt &S, CXXTryStmtInfo Info);
RValue EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue = ReturnValueSlot());
+ llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
+ llvm::Value * const *ArgBegin,
+ llvm::Value * const *ArgEnd,
+ const llvm::Twine &Name = "");
+
llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
const llvm::Type *Ty);
llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
RValue EmitDelegateCallArg(const VarDecl *Param);
private:
-
void EmitReturnOfRValue(RValue RV, QualType Ty);
/// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
const TargetInfo::ConstraintInfo &Info,
const Expr *InputExpr, std::string &ConstraintStr);
- /// EmitCleanupBlock - emits a single cleanup block.
- void EmitCleanupBlock();
-
- /// AddBranchFixup - adds a branch instruction to the list of fixups for the
- /// current cleanup scope.
- void AddBranchFixup(llvm::BranchInst *BI);
-
/// EmitCallArgs - Emit call arguments for a function.
/// The CallArgTypeInfo parameter is used for iterating over the known
/// argument types of the function being called.
void test(X2 x2, X3 x3, X5 x5) {
// CHECK: define linkonce_odr void @_ZN2X2C1ERKS_
- // CHECK-NOT: define
- // CHECK: call void @__cxa_call_unexpected
- // CHECK-NOT: define
+ // CHECK-NOT: }
// CHECK: ret void
+ // CHECK-NOT: }
+ // CHECK: call void @__cxa_call_unexpected
+ // CHECK: }
X2 x2a(x2);
// CHECK: define linkonce_odr void @_ZN2X3C1ERKS_
- // CHECK-NOT: define
- // CHECK: call void @__cxa_call_unexpected
- // CHECK-NOT: define
+ // CHECK-NOT: }
// CHECK: ret void
+ // CHECK-NOT: }
+ // CHECK: call void @__cxa_call_unexpected
+ // CHECK: }
X3 x3a(x3);
// CHECK: define linkonce_odr void @_ZN2X5C1ERS_
// CHECK-NOT: call void @__cxa_call_unexpected
void test() {
// CHECK: define linkonce_odr void @_ZN2X8C1Ev
- // CHECK-NOT: define
- // CHECK: call void @__cxa_call_unexpected
- // CHECK-NOT: define
+ // CHECK-NOT: }
// CHECK: ret void
+ // CHECK-NOT: }
+ // CHECK: call void @__cxa_call_unexpected
+ // CHECK: }
X8();
// CHECK: define linkonce_odr void @_ZN2X9C1Ev
// CHECK-NOT: call void @__cxa_call_unexpected
int foo();
+// CHECK: define void @_Z14while_destructi
void while_destruct(int z) {
- // CHECK: define void @_Z14while_destructi
- // CHECK: {{while.cond:|:3}}
+ // CHECK: [[Z:%.*]] = alloca i32
+ // CHECK: [[CLEANUPDEST:%.*]] = alloca i32
while (X x = X()) {
// CHECK: call void @_ZN1XC1Ev
+ // CHECK-NEXT: [[COND:%.*]] = call zeroext i1 @_ZN1XcvbEv
+ // CHECK-NEXT: br i1 [[COND]]
- // CHECK: {{while.body:|:5}}
- // CHECK: store i32 21
+ // Loop-exit staging block.
+ // CHECK: store i32 1, i32* [[CLEANUPDEST]]
+ // CHECK-NEXT: br
+
+ // While body.
+ // CHECK: store i32 21, i32* [[Z]]
+ // CHECK: store i32 2, i32* [[CLEANUPDEST]]
+ // CHECK-NEXT: br
z = 21;
- // CHECK: {{while.cleanup:|:6}}
+ // Cleanup.
// CHECK: call void @_ZN1XD1Ev
+ // CHECK-NEXT: [[DEST:%.*]] = load i32* [[CLEANUPDEST]]
+ // CHECK-NEXT: switch i32 [[DEST]]
}
- // CHECK: {{while.end|:8}}
- // CHECK: store i32 22
+
+ // CHECK: store i32 22, i32* [[Z]]
z = 22;
// CHECK: call void @_Z4getXv
- // CHECK: call zeroext i1 @_ZN1XcvbEv
- // CHECK: call void @_ZN1XD1Ev
- // CHECK: br
+ // CHECK-NEXT: call zeroext i1 @_ZN1XcvbEv
+ // CHECK-NEXT: call void @_ZN1XD1Ev
+ // CHECK-NEXT: br
while(getX()) { }
- // CHECK: store i32 25
+ // CHECK: store i32 25, i32* [[Z]]
z = 25;
// CHECK: ret
}
+// CHECK: define void @_Z12for_destructi(
void for_destruct(int z) {
- // CHECK: define void @_Z12for_destruct
+ // CHECK: [[Z:%.*]] = alloca i32
+ // CHECK: [[XDEST:%.*]] = alloca i32
+ // CHECK: [[I:%.*]] = alloca i32
// CHECK: call void @_ZN1YC1Ev
- for(Y y = Y(); X x = X(); ++z)
- // CHECK: {{for.cond:|:4}}
+ // CHECK-NEXT: br
+ // -> %for.cond
+
+ for(Y y = Y(); X x = X(); ++z) {
+ // %for.cond: The loop condition.
// CHECK: call void @_ZN1XC1Ev
- // CHECK: {{for.body:|:6}}
- // CHECK: store i32 23
+ // CHECK-NEXT: [[COND:%.*]] = call zeroext i1 @_ZN1XcvbEv(
+ // CHECK-NEXT: br i1 [[COND]]
+ // -> %for.body, %for.cond.cleanup
+
+ // %for.cond.cleanup: Exit cleanup staging.
+ // CHECK: store i32 1, i32* [[XDEST]]
+ // CHECK-NEXT: br
+ // -> %cleanup
+
+ // %for.body:
+ // CHECK: store i32 23, i32* [[Z]]
+ // CHECK-NEXT: br
+ // -> %for.inc
z = 23;
- // CHECK: {{for.inc:|:7}}
- // CHECK: br label %{{for.cond.cleanup|10}}
- // CHECK: {{for.cond.cleanup:|:10}}
+
+ // %for.inc:
+ // CHECK: [[TMP:%.*]] = load i32* [[Z]]
+ // CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP]], 1
+ // CHECK-NEXT: store i32 [[INC]], i32* [[Z]]
+ // CHECK-NEXT: store i32 2, i32* [[XDEST]]
+ // CHECK-NEXT: br
+ // -> %cleanup
+
+ // %cleanup: Destroys X.
// CHECK: call void @_ZN1XD1Ev
- // CHECK: {{for.end:|:12}}
- // CHECK: call void @_ZN1YD1Ev
+ // CHECK-NEXT: [[YDESTTMP:%.*]] = load i32* [[XDEST]]
+ // CHECK-NEXT: switch i32 [[YDESTTMP]]
+ // 1 -> %cleanup4, 2 -> %cleanup.cont
+
+ // %cleanup.cont: (eliminable)
+ // CHECK: br
+ // -> %for.cond
+
+ // %cleanup4: Destroys Y.
+ // CHECK: call void @_ZN1YD1Ev(
+ // CHECK-NEXT: br
+ // -> %for.end
+ }
+
+ // %for.end:
// CHECK: store i32 24
z = 24;
+ // CHECK-NEXT: store i32 0, i32* [[I]]
+ // CHECK-NEXT: br
+ // -> %for.cond6
+
+ // %for.cond6:
// CHECK: call void @_Z4getXv
- // CHECK: call zeroext i1 @_ZN1XcvbEv
- // CHECK: call void @_ZN1XD1Ev
+ // CHECK-NEXT: call zeroext i1 @_ZN1XcvbEv
+ // CHECK-NEXT: call void @_ZN1XD1Ev
+ // CHECK-NEXT: br
+ // -> %for.body10, %for.end16
+
+ // %for.body10:
// CHECK: br
+ // -> %for.inc11
+
+ // %for.inc11:
// CHECK: call void @_Z4getXv
- // CHECK: load
- // CHECK: add
- // CHECK: call void @_ZN1XD1Ev
+ // CHECK-NEXT: load i32* [[I]]
+ // CHECK-NEXT: add
+ // CHECK-NEXT: store
+ // CHECK-NEXT: call void @_ZN1XD1Ev
+ // CHECK-NEXT: br
+ // -> %for.cond6
int i = 0;
for(; getX(); getX(), ++i) { }
- z = 26;
+
+ // %for.end16
// CHECK: store i32 26
- // CHECK: ret
+ z = 26;
+
+ // CHECK-NEXT: ret void
}
void do_destruct(int z) {
// Checked at top of file:
// @_ZN5test312_GLOBAL__N_11CD1Ev = alias internal {{.*}} @_ZN5test312_GLOBAL__N_11CD2Ev
+ // More checks at end of file.
+
+}
+
+namespace test4 {
+ struct A { ~A(); };
+
+ // CHECK: define void @_ZN5test43fooEv()
+ // CHECK: call void @_ZN5test41AD1Ev
+ // CHECK: ret void
+ void foo() {
+ {
+ A a;
+ goto failure;
+ }
+
+ failure:
+ return;
+ }
+
+ // CHECK: define void @_ZN5test43barEi(
+ // CHECK: [[X:%.*]] = alloca i32
+ // CHECK-NEXT: [[A:%.*]] = alloca
+ // CHECK: br label
+ // CHECK: [[TMP:%.*]] = load i32* [[X]]
+ // CHECK-NEXT: [[CMP:%.*]] = icmp ne i32 [[TMP]], 0
+ // CHECK-NEXT: br i1
+ // CHECK: call void @_ZN5test41AD1Ev(
+ // CHECK: br label
+ // CHECK: [[TMP:%.*]] = load i32* [[X]]
+ // CHECK: [[TMP2:%.*]] = add nsw i32 [[TMP]], -1
+ // CHECK: store i32 [[TMP2]], i32* [[X]]
+ // CHECK: br label
+ // CHECK: ret void
+ void bar(int x) {
+ for (A a; x; ) {
+ x--;
+ }
+ }
+}
+
+// Checks from test3:
+
// CHECK: define internal void @_ZN5test312_GLOBAL__N_11CD2Ev(
// CHECK: call void @_ZN5test31BD2Ev(
// CHECK: call void @_ZN5test31AD2Ev(
// CHECK: ret void
// CHECK: define internal void @_ZN5test312_GLOBAL__N_11DD0Ev(
- // CHECK: call void @_ZN5test312_GLOBAL__N_11DD1Ev(
+ // CHECK: invoke void @_ZN5test312_GLOBAL__N_11DD1Ev(
// CHECK: call void @_ZdlPv(
// CHECK: ret void
+ // CHECK: call i8* @llvm.eh.exception(
+ // CHECK: invoke void @_ZdlPv
+ // CHECK: call void @_Unwind_Resume_or_Rethrow
+ // CHECK: call i8* @llvm.eh.exception(
+ // CHECK: call void @_ZSt9terminatev(
// Checked at top of file:
// @_ZN5test312_GLOBAL__N_11DD1Ev = alias internal {{.*}} @_ZN5test312_GLOBAL__N_11DD2Ev
// CHECK: declare void @_ZN5test31AD2Ev(
// CHECK: define internal void @_ZN5test312_GLOBAL__N_11CD0Ev(
- // CHECK: call void @_ZN5test312_GLOBAL__N_11CD1Ev(
+ // CHECK: invoke void @_ZN5test312_GLOBAL__N_11CD1Ev(
// CHECK: call void @_ZdlPv(
// CHECK: ret void
+ // CHECK: call i8* @llvm.eh.exception()
+ // CHECK: invoke void @_ZdlPv(
+ // CHECK: call void @_Unwind_Resume_or_Rethrow(
+ // CHECK: call i8* @llvm.eh.exception()
+ // CHECK: call void @_ZSt9terminatev()
// CHECK: define internal void @_ZThn8_N5test312_GLOBAL__N_11CD1Ev(
// CHECK: getelementptr inbounds i8* {{.*}}, i64 -8
// CHECK: getelementptr inbounds i8* {{.*}}, i64 -8
// CHECK: call void @_ZN5test312_GLOBAL__N_11CD0Ev(
// CHECK: ret void
-}
// CHECK: define void @_Z5test2v()
// CHECK: [[FREEVAR:%.*]] = alloca i1
// CHECK-NEXT: [[EXNOBJVAR:%.*]] = alloca i8*
+// CHECK-NEXT: [[EXNSLOTVAR:%.*]] = alloca i8*
// CHECK-NEXT: store i1 false, i1* [[FREEVAR]]
// CHECK-NEXT: [[EXNOBJ:%.*]] = call i8* @__cxa_allocate_exception(i64 16)
// CHECK-NEXT: store i8* [[EXNOBJ]], i8** [[EXNOBJVAR]]
// : [[HANDLER]]: (can't check this in Release-Asserts builds)
// CHECK: {{%.*}} = call i32 @llvm.eh.typeid.for(i8* bitcast ({{%.*}}* @_ZTIN5test51AE to i8*))
}
+
+namespace test6 {
+ template <class T> struct allocator {
+ ~allocator() throw() { }
+ };
+
+ void foo() {
+ allocator<int> a;
+ }
+}
+
+// PR7127
+namespace test7 {
+// CHECK: define i32 @_ZN5test73fooEv()
+ int foo() {
+// CHECK: [[FREEEXNOBJ:%.*]] = alloca i1
+// CHECK-NEXT: [[EXNALLOCVAR:%.*]] = alloca i8*
+// CHECK-NEXT: [[CAUGHTEXNVAR:%.*]] = alloca i8*
+// CHECK-NEXT: [[INTCATCHVAR:%.*]] = alloca i32
+// CHECK-NEXT: store i1 false, i1* [[FREEEXNOBJ]]
+ try {
+ try {
+// CHECK-NEXT: [[EXNALLOC:%.*]] = call i8* @__cxa_allocate_exception
+// CHECK-NEXT: store i8* [[EXNALLOC]], i8** [[EXNALLOCVAR]]
+// CHECK-NEXT: store i1 true, i1* [[FREEEXNOBJ]]
+// CHECK-NEXT: bitcast i8* [[EXNALLOC]] to i32*
+// CHECK-NEXT: store i32 1, i32*
+// CHECK-NEXT: store i1 false, i1* [[FREEEXNOBJ]]
+// CHECK-NEXT: invoke void @__cxa_throw(i8* [[EXNALLOC]], i8* bitcast (i8** @_ZTIi to i8*), i8* null
+ throw 1;
+ }
+// This cleanup ends up here for no good reason. It's actually unused.
+// CHECK: load i8** [[EXNALLOCVAR]]
+// CHECK-NEXT: call void @__cxa_free_exception(
+
+// CHECK: [[CAUGHTEXN:%.*]] = call i8* @llvm.eh.exception()
+// CHECK-NEXT: store i8* [[CAUGHTEXN]], i8** [[CAUGHTEXNVAR]]
+// CHECK-NEXT: call i32 (i8*, i8*, ...)* @llvm.eh.selector(i8* [[CAUGHTEXN]], i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*), i8* bitcast (i8** @_ZTIi to i8*), i8* null)
+// CHECK-NEXT: call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+// CHECK-NEXT: icmp eq
+// CHECK-NEXT: br i1
+// CHECK: load i8** [[CAUGHTEXNVAR]]
+// CHECK-NEXT: call i8* @__cxa_begin_catch
+// CHECK: invoke void @__cxa_rethrow
+ catch (int) {
+ throw;
+ }
+ }
+// CHECK: [[CAUGHTEXN:%.*]] = call i8* @llvm.eh.exception()
+// CHECK-NEXT: store i8* [[CAUGHTEXN]], i8** [[CAUGHTEXNVAR]]
+// CHECK-NEXT: call i32 (i8*, i8*, ...)* @llvm.eh.selector(i8* [[CAUGHTEXN]], i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*), i8* null)
+// CHECK-NEXT: call void @__cxa_end_catch()
+// CHECK-NEXT: br label
+// CHECK: load i8** [[CAUGHTEXNVAR]]
+// CHECK-NEXT: call i8* @__cxa_begin_catch
+// CHECK-NEXT: call void @__cxa_end_catch
+ catch (...) {
+ }
+// CHECK: ret i32 0
+ return 0;
+ }
+}
+
+// Ordering of destructors in a catch handler.
+namespace test8 {
+ struct A { A(const A&); ~A(); };
+ void bar();
+
+ // CHECK: define void @_ZN5test83fooEv()
+ void foo() {
+ try {
+ // CHECK: invoke void @_ZN5test83barEv()
+ bar();
+ } catch (A a) {
+ // CHECK: call i8* @__cxa_get_exception_ptr
+ // CHECK-NEXT: bitcast
+ // CHECK-NEXT: invoke void @_ZN5test81AC1ERKS0_(
+ // CHECK: call i8* @__cxa_begin_catch
+ // CHECK-NEXT: invoke void @_ZN5test81AD1Ev(
+
+ // CHECK: call void @__cxa_end_catch()
+ // CHECK-NEXT: load
+ // CHECK-NEXT: switch
+
+ // CHECK: ret void
+ }
+ }
+}
// RUN: %clang_cc1 -emit-llvm -O1 -o - %s | FileCheck %s
-// RUN: %clang_cc1 -emit-llvm -fexceptions -o - %s | FileCheck --check-prefix=CHECK-EH %s
+// RUN: %clang_cc1 -emit-llvm -O1 -fexceptions -o - %s | FileCheck --check-prefix=CHECK-EH %s
// Test code generation for the named return value optimization.
class X {
// CHECK-EH: define void @_Z5test0v
X test0() {
X x;
- // CHECK-NOT: call void @_ZN1XD1Ev
- // CHECK: ret void
- // CHECK-EH: br label
- // CHECK-EH: call void @_ZN1XD1Ev
- // CHECK-EH: br label
- // CHECK-EH: invoke void @_ZN1XD1Ev
- // CHECK-EH: ret void
+ // CHECK: call void @_ZN1XC1Ev
+ // CHECK-NEXT: ret void
+
+ // CHECK-EH: call void @_ZN1XC1Ev
+ // CHECK-EH-NEXT: ret void
return x;
}
// CHECK: define void @_Z5test1b(
+// CHECK-EH: define void @_Z5test1b(
X test1(bool B) {
- // CHECK: call void @_ZN1XC1Ev
+ // CHECK: tail call void @_ZN1XC1Ev
+ // CHECK-NEXT: ret void
X x;
- // CHECK-NOT: call void @_ZN1XD1Ev
- // CHECK: ret void
if (B)
return (x);
return x;
- // CHECK-EH: invoke void @_ZN1XD1Ev
+ // CHECK-EH: tail call void @_ZN1XC1Ev
+ // CHECK-EH-NEXT: ret void
}
// CHECK: define void @_Z5test2b
// CHECK-EH: define void @_Z5test2b
X test2(bool B) {
- // No NRVO
- // CHECK: call void @_ZN1XC1Ev
+ // No NRVO.
+
X x;
- // CHECK: call void @_ZN1XC1Ev
X y;
- // CHECK: call void @_ZN1XC1ERKS_
- // CHECK-EH: invoke void @_ZN1XC1ERKS_
if (B)
return y;
- // CHECK: call void @_ZN1XC1ERKS_
- // CHECK-EH: invoke void @_ZN1XC1ERKS_
return x;
+
+ // CHECK: call void @_ZN1XC1Ev
+ // CHECK-NEXT: call void @_ZN1XC1Ev
+ // CHECK: call void @_ZN1XC1ERKS_
+ // CHECK: call void @_ZN1XC1ERKS_
// CHECK: call void @_ZN1XD1Ev
// CHECK: call void @_ZN1XD1Ev
// CHECK: ret void
+
+ // The block ordering in the -fexceptions IR is unfortunate.
+
+ // CHECK-EH: call void @_ZN1XC1Ev
+ // CHECK-EH-NEXT: invoke void @_ZN1XC1Ev
+ // -> %invoke.cont1, %lpad
+
+ // %invoke.cont1:
+ // CHECK-EH: br i1
+ // -> %if.then, %if.end
+
+ // %if.then: returning 'x'
+ // CHECK-EH: invoke void @_ZN1XC1ERKS_
+ // -> %cleanup, %lpad5
+
+ // %invoke.cont: rethrow block for %eh.cleanup.
+ // This really should be elsewhere in the function.
+ // CHECK-EH: call void @_Unwind_Resume_or_Rethrow
+ // CHECK-EH-NEXT: unreachable
+
+ // %lpad: landing pad for ctor of 'y', dtor of 'y'
+ // CHECK-EH: call i8* @llvm.eh.exception()
+ // CHECK-EH: call i32 (i8*, i8*, ...)* @llvm.eh.selector
+ // CHECK-EH-NEXT: br label
+ // -> %eh.cleanup
+
+ // %invoke.cont2: normal cleanup for 'x'
+ // CHECK-EH: call void @_ZN1XD1Ev
+ // CHECK-EH-NEXT: ret void
+
+ // %lpad5: landing pad for return copy ctors, EH cleanup for 'y'
+ // CHECK-EH: invoke void @_ZN1XD1Ev
+ // -> %eh.cleanup, %terminate.lpad
+
+ // %if.end: returning 'y'
+ // CHECK-EH: invoke void @_ZN1XC1ERKS_
+ // -> %cleanup, %lpad5
+
+ // %cleanup: normal cleanup for 'y'
// CHECK-EH: invoke void @_ZN1XD1Ev
+ // -> %invoke.cont2, %lpad
+
+ // %eh.cleanup: EH cleanup for 'x'
// CHECK-EH: invoke void @_ZN1XD1Ev
+ // -> %invoke.cont, %terminate.lpad
+
+ // %terminate.lpad: terminate landing pad.
+ // CHECK-EH: call i8* @llvm.eh.exception()
+ // CHECK-EH-NEXT: call i32 (i8*, i8*, ...)* @llvm.eh.selector
+ // CHECK-EH-NEXT: call void @_ZSt9terminatev()
+ // CHECK-EH-NEXT: unreachable
+
}
X test3(bool B) {
// CHECK: call i32 @__cxa_guard_acquire(i64* @_ZGVZ1fvE1x)
// CHECK: invoke void @_ZN1XC1Ev
// CHECK: call void @__cxa_guard_release(i64* @_ZGVZ1fvE1x)
- // CHECK: call i32 @__cxa_atexit
+ // CHECK-NEXT: call i32 @__cxa_atexit
// CHECK: br
static X x;
+
+ // CHECK: call i8* @__cxa_allocate_exception
+ // CHECK: invoke void @__cxa_throw
+ throw Y();
+
+ // Finally, the landing pad.
// CHECK: call i8* @llvm.eh.exception()
// CHECK: call i32 (i8*, i8*, ...)* @llvm.eh.selector
// CHECK: call void @__cxa_guard_abort(i64* @_ZGVZ1fvE1x)
// CHECK: call void @_Unwind_Resume_or_Rethrow
// CHECK: unreachable
-
- // CHECK: call i8* @__cxa_allocate_exception
- throw Y();
}
-// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -o %t %s
+// RUN: %clang_cc1 -triple x86_64-apple-darwin10 -emit-llvm -fexceptions -O2 -o - %s | FileCheck %s
//
// <rdar://problem/7471679> [irgen] [eh] Exception code built with clang (x86_64) crashes
// Just check that we don't emit any dead blocks.
-//
-// RUN: grep 'No predecessors' %t | count 0
-
@interface NSArray @end
void f0() {
@try {
} @catch (id e) {
}
}
+
+// CHECK: define void @f1()
+void f1() {
+ extern void foo(void);
+
+ while (1) {
+ // CHECK: call void @objc_exception_try_enter
+ // CHECK-NEXT: getelementptr
+ // CHECK-NEXT: call i32 @_setjmp(
+ // CHECK-NEXT: icmp
+ // CHECK-NEXT: br i1
+ @try {
+ // CHECK: call void @foo()
+ foo();
+ // CHECK: call void @objc_exception_try_exit
+ // CHECK-NEXT: ret void
+
+ // CHECK: call i8* @objc_exception_extract
+ // CHECK-NEXT: ret void
+ } @finally {
+ break;
+ }
+ }
+}