BLOCK_HAS_DESCRIPTOR = (1 << 29)
};
-llvm::Constant *CodeGenFunction::BuildDescriptorBlockDecl() {
- // FIXME: Push up.
- bool BlockHasCopyDispose = false;
-
+llvm::Constant *CodeGenFunction::BuildDescriptorBlockDecl(uint64_t Size) {
const llvm::PointerType *PtrToInt8Ty
= llvm::PointerType::getUnqual(llvm::Type::Int8Ty);
const llvm::Type *UnsignedLongTy
Elts.push_back(C);
// Size
- int sz;
- if (!BlockHasCopyDispose)
- sz = CGM.getTargetData()
- .getTypeStoreSizeInBits(CGM.getGenericBlockLiteralType()) / 8;
- else
- sz = CGM.getTargetData()
- .getTypeStoreSizeInBits(CGM.getGenericExtendedBlockLiteralType()) / 8;
- C = llvm::ConstantInt::get(UnsignedLongTy, sz);
+ C = llvm::ConstantInt::get(UnsignedLongTy, Size);
Elts.push_back(C);
if (BlockHasCopyDispose) {
// copy_func_helper_decl
+ // FIXME: implement
C = llvm::ConstantInt::get(UnsignedLongTy, 0);
C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
Elts.push_back(C);
// destroy_func_decl
+ // FIXME: implement
C = llvm::ConstantInt::get(UnsignedLongTy, 0);
C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
Elts.push_back(C);
// FIXME: Push most into CGM, passing down a few bits, like current
// function name.
llvm::Constant *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
- // FIXME: Push up
- bool BlockHasCopyDispose = false;
bool insideFunction = false;
bool BlockRefDeclList = false;
bool BlockByrefDeclList = false;
if (ND->getIdentifier())
Name = ND->getNameAsCString();
BlockInfo Info(0, Name);
- llvm::Function *Fn = CodeGenFunction(*this).GenerateBlockFunction(BE, Info);
+ uint64_t subBlockSize;
+ llvm::Function *Fn
+ = CodeGenFunction(*this).GenerateBlockFunction(BE, Info, subBlockSize);
Elts.push_back(Fn);
// __descriptor
- Elts.push_back(BuildDescriptorBlockDecl());
+ Elts.push_back(BuildDescriptorBlockDecl(subBlockSize));
// FIXME: Add block_original_ref_decl_list and block_byref_decl_list.
}
UnsignedLongTy,
NULL);
+ // FIXME: This breaks an unrelated testcase in the testsuite, we
+ // _want_ llvm to not use structural equality, sometimes. What
+ // should we do, modify the testcase and do this anyway, or...
+#if 0
getModule().addTypeName("struct.__block_descriptor",
BlockDescriptorType);
+#endif
return BlockDescriptorType;
}
BlockDescPtrTy,
NULL);
+ // FIXME: See struct.__block_descriptor
getModule().addTypeName("struct.__block_literal_generic",
GenericBlockLiteralType);
Int8PtrTy,
NULL);
+ // FIXME: See struct.__block_descriptor
getModule().addTypeName("struct.__block_literal_extended_generic",
GenericExtendedBlockLiteralType);
// Get the function pointer from the literal.
llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");
+ // FIXME: second argument should be false?
llvm::Value *Func = Builder.CreateLoad(FuncPtr, FuncPtr, "tmp");
// Cast the function pointer to the right type.
llvm::Constant *LiteralFields[5];
CodeGenFunction::BlockInfo Info(0, n);
- llvm::Function *Fn = CodeGenFunction(*this).GenerateBlockFunction(BE, Info);
+ uint64_t subBlockSize;
+ llvm::Function *Fn
+ = CodeGenFunction(*this).GenerateBlockFunction(BE, Info, subBlockSize);
+ assert(subBlockSize == BlockLiteralSize
+ && "no imports allowed for global block");
// isa
LiteralFields[0] = getNSConcreteGlobalBlock();
return BlockLiteral;
}
+/// LoadBlockStruct - Load the pointer to the enclosing block literal from the
+/// implicit block-struct argument (BlockStructDecl, bound to the first
+/// parameter in GenerateBlockFunction) so references to variables captured by
+/// the block can be computed as offsets from it.
+llvm::Value *CodeGenFunction::LoadBlockStruct() {
+  return Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()], "self");
+}
+
llvm::Function *CodeGenFunction::GenerateBlockFunction(const BlockExpr *Expr,
- const BlockInfo& Info)
-{
+ const BlockInfo& Info,
+ uint64_t &Size) {
const FunctionTypeProto *FTy =
cast<FunctionTypeProto>(Expr->getFunctionType());
getContext().getPointerType(getContext().VoidTy));
Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
+ BlockStructDecl = SelfDecl;
for (BlockDecl::param_iterator i = BD->param_begin(),
e = BD->param_end(); i != e; ++i)
EmitStmt(Expr->getBody());
FinishFunction(cast<CompoundStmt>(Expr->getBody())->getRBracLoc());
+ Size = BlockOffset;
+
return Fn;
}
#include "llvm/Intrinsics.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/CFG.h"
+#include "llvm/Target/TargetData.h"
#include <cstdarg>
using namespace clang;
}
Value *VisitStmtExpr(const StmtExpr *E);
+
+ Value *VisitBlockDeclRefExpr(BlockDeclRefExpr *E);
// Unary Operators.
Value *VisitPrePostIncDec(const UnaryOperator *E, bool isInc, bool isPre);
!E->getType()->isVoidType()).getScalarVal();
}
+/// VisitBlockDeclRefExpr - Emit a load of a variable referenced from inside a
+/// block: assign (or look up) the variable's byte offset within the block
+/// literal, GEP from the implicit block-struct pointer, cast to the right
+/// pointer type, and load.  __block (byref) variables are not yet supported.
+Value *ScalarExprEmitter::VisitBlockDeclRefExpr(BlockDeclRefExpr *E) {
+  if (E->isByRef()) {
+    // FIXME: Add codegen for __block variables.
+    return VisitExpr(E);
+  }
+
+  // FIXME: We have most of the easy codegen for the helper, but we need to
+  // ensure we don't need copy/dispose, and we need to add the variables into
+  // the block literal still.
+  CGF.ErrorUnsupported(E, "scalar expression");
+
+  // Reference into the offset map; writing through this updates the map.
+  uint64_t &offset = CGF.BlockDecls[E->getDecl()];
+
+  const llvm::Type *Ty;
+  Ty = CGF.CGM.getTypes().ConvertType(E->getDecl()->getType());
+
+  // See if we have already allocated an offset for this variable.
+  // 0 works as the "unassigned" sentinel because BlockOffset is seeded with
+  // the block-literal header size in the CodeGenFunction constructor, so no
+  // real capture offset can be 0.
+  if (offset == 0) {
+    int Size = CGF.CGM.getTargetData().getTypeStoreSizeInBits(Ty) / 8;
+
+    // Align is in bits here: getBlockOffset converts with >> 3 and asserts
+    // bit-multiples of 8.  NOTE(review): assumes AA->getAlignment() is also
+    // expressed in bits — TODO confirm against AlignedAttr.
+    unsigned Align = CGF.CGM.getContext().getTypeAlign(E->getDecl()->getType());
+    if (const AlignedAttr* AA = E->getDecl()->getAttr<AlignedAttr>())
+      Align = std::max(Align, AA->getAlignment());
+
+    // if not, allocate one now.
+    offset = CGF.getBlockOffset(Size, Align);
+  }
+
+  // Address of the capture slot = block literal base + byte offset.
+  llvm::Value *BlockLiteral = CGF.LoadBlockStruct();
+  llvm::Value *V = Builder.CreateGEP(BlockLiteral,
+                                     llvm::ConstantInt::get(llvm::Type::Int64Ty,
+                                                            offset),
+                                     "tmp");
+  Ty = llvm::PointerType::get(Ty, 0);
+  // NOTE(review): the two isByRef() guards below are unreachable — the byref
+  // case took the early return at the top.  Presumably kept as scaffolding
+  // for the upcoming __block (extra-indirection) support; verify intent.
+  if (E->isByRef())
+    Ty = llvm::PointerType::get(Ty, 0);
+  V = Builder.CreateBitCast(V, Ty);
+  V = Builder.CreateLoad(V, false, "tmp");
+  if (E->isByRef())
+    V = Builder.CreateLoad(V, false, "tmp");
+  return V;
+}
//===----------------------------------------------------------------------===//
// Unary Operators
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "llvm/Support/CFG.h"
+#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
  : CGM(cgm), Target(CGM.getContext().Target), DebugInfo(0), SwitchInsn(0),
-    CaseRangeBlock(0) {
-  LLVMIntTy = ConvertType(getContext().IntTy);
-  LLVMPointerWidth = Target.getPointerWidth(0);
+    CaseRangeBlock(0) {
+  LLVMIntTy = ConvertType(getContext().IntTy);
+  LLVMPointerWidth = Target.getPointerWidth(0);
+
+  // FIXME: We need to rearrange the code for copy/dispose so we have this
+  // sooner, so we can calculate offsets correctly.
+  BlockHasCopyDispose = false;
+  // Seed BlockOffset with the byte size of the fixed block-literal header;
+  // captured variables are laid out after it (see getBlockOffset).  While
+  // BlockHasCopyDispose is pinned to false above, the extended-literal
+  // branch below is never taken.
+  if (!BlockHasCopyDispose)
+    BlockOffset = CGM.getTargetData()
+      .getTypeStoreSizeInBits(CGM.getGenericBlockLiteralType()) / 8;
+  else
+    BlockOffset = CGM.getTargetData()
+      .getTypeStoreSizeInBits(CGM.getGenericExtendedBlockLiteralType()) / 8;
}
ASTContext &CodeGenFunction::getContext() const {
const llvm::Type *LLVMIntTy;
uint32_t LLVMPointerWidth;
- llvm::Constant *BuildBlockLiteralTmp(const BlockExpr *);
- llvm::Constant *BuildDescriptorBlockDecl();
-
public:
/// ObjCEHValueStack - Stack of Objective-C exception values, used for
/// rethrows.
void GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID);
+ //===--------------------------------------------------------------------===//
+ // Block Bits
+ //===--------------------------------------------------------------------===//
+
+ llvm::Constant *BuildBlockLiteralTmp(const BlockExpr *);
+ llvm::Constant *BuildDescriptorBlockDecl(uint64_t Size);
+
/// BlockInfo - Information to generate a block literal.
struct BlockInfo {
/// BlockLiteralTy - The type of the block literal.
};
llvm::Function *GenerateBlockFunction(const BlockExpr *Expr,
- const BlockInfo& Info);
+ const BlockInfo& Info,
+ uint64_t &Size);
+
+  /// BlockStructDecl - The implicit first parameter of a generated block
+  /// function, carrying the pointer to the enclosing block literal (set in
+  /// GenerateBlockFunction).
+  ImplicitParamDecl *BlockStructDecl;
+
+  ImplicitParamDecl *getBlockStructDecl() { return BlockStructDecl; }
+
+  llvm::Value *LoadBlockStruct();
+
+  /// BlockHasCopyDispose - True iff the block uses copy/dispose.
+  bool BlockHasCopyDispose;
+
+  /// BlockOffset - Running byte size of the block literal under construction;
+  /// seeded with the block-literal header size in the constructor.
+  uint64_t BlockOffset;
+  /// getBlockOffset - Offset for next allocated variable use in a BlockExpr.
+  /// Size is in bytes, Align is in bits; returns the aligned byte offset
+  /// reserved for the variable and advances BlockOffset past it.
+  uint64_t getBlockOffset(uint64_t Size, uint64_t Align) {
+    assert (((Align >> 3) > 0) && "alignment must be 1 byte or more");
+    assert (((Align & 7) == 0)
+            && "alignment must be on at least byte boundaries");
+    // Ensure proper alignment, even if it means we have to have a gap
+    if (BlockOffset % (Align >> 3)) {
+      BlockOffset += (Align >> 3) - (BlockOffset % (Align >> 3));
+      assert ((BlockOffset % (Align >> 3)) == 0
+              && "alignment calculation is wrong");
+    }
+
+    BlockOffset += Size;
+    return BlockOffset-Size;
+  }
+  /// BlockDecls - Byte offset assigned to each variable captured into the
+  /// current block literal; 0 means "not yet assigned" (see
+  /// VisitBlockDeclRefExpr).
+  std::map<Decl*, uint64_t> BlockDecls;
void GenerateCode(const FunctionDecl *FD,
llvm::Function *Fn);