const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
+ // FIXME: We'd like to put these into a mergeable-by-content section, with
+ // internal linkage.
std::string Name = std::string("__copy_helper_block_");
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
const CGFunctionInfo &FI =
CGM.getTypes().getFunctionInfo(R, Args);
+ // FIXME: We'd like to put these into a mergeable-by-content section, with
+ // internal linkage.
std::string Name = std::string("__destroy_helper_block_");
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ // FIXME: We'd like to put these into a mergeable-by-content section, with
+ // internal linkage.
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
Name,
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
+ // FIXME: We'd like to put these into a mergeable-by-content section, with
+ // internal linkage.
llvm::Function *Fn =
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
Name,
}
llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
- int flag) {
- return CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, flag);
+ int flag, unsigned Align) {
+ // All alignments below that of pointer alignment collapse down to just
+ // pointer alignment, as we always have at least that much alignment to begin
+ // with.
+ Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+ // As an optimization, we only generate a single function of each kind we
+ // might need. We need a different one for each alignment and for each
+ // setting of flags. We mix Align and flag to get the kind.
+ uint64_t kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + flag;
+ llvm::Constant *& Entry = CGM.AssignCache[kind];
+ if (Entry)
+ return Entry;
+ // NOTE(review): the cache key incorporates Align, but the generator below is
+ // still called with only (T, flag) — confirm the emitted copy helper really
+ // is alignment-independent, or thread Align through to it as well.
+ return Entry=CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, flag);
}
llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
- int flag) {
- return CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, flag);
+ int flag,
+ unsigned Align) {
+ // All alignments below that of pointer alignment collapse down to just
+ // pointer alignment, as we always have at least that much alignment to begin
+ // with.
+ Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
+ // As an optimization, we only generate a single function of each kind we
+ // might need. We need a different one for each alignment and for each
+ // setting of flags. We mix Align and flag to get the kind.
+ uint64_t kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + flag;
+ llvm::Constant *& Entry = CGM.DestroyCache[kind];
+ if (Entry)
+ return Entry;
+ // NOTE(review): as with the copy helper, the cache key incorporates Align
+ // but the generator below receives only (T, flag) — confirm the destroy
+ // helper is alignment-independent, or pass Align through.
+ return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, flag);
}
llvm::Value *BlockFunction::getBlockObjectDispose() {
llvm::Value *BlockObjectDispose;
const llvm::Type *PtrToInt8Ty;
+ std::map<uint64_t, llvm::Constant *> AssignCache;
+ std::map<uint64_t, llvm::Constant *> DestroyCache;
+
BlockModule(ASTContext &C, llvm::Module &M, const llvm::TargetData &TD,
CodeGenTypes &T, CodeGenModule &CodeGen)
: Context(C), TheModule(M), TheTargetData(TD), Types(T),
variable */
BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
helpers */
- BLOCK_BYREF_CALLER = 128 /* called from __block (byref) copy/dispose
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
};
/// BlockInfo - Information to generate a block literal.
llvm::Constant *GeneratebyrefCopyHelperFunction(const llvm::Type *, int flag);
llvm::Constant *GeneratebyrefDestroyHelperFunction(const llvm::Type *T, int);
- llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T, int flag);
- llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag);
+ llvm::Constant *BuildbyrefCopyHelper(const llvm::Type *T, int flag,
+ unsigned Align);
+ llvm::Constant *BuildbyrefDestroyHelper(const llvm::Type *T, int flag,
+ unsigned Align);
llvm::Value *getBlockObjectAssign();
llvm::Value *getBlockObjectDispose();
Types[4] = PtrToInt8Ty;
Types[5] = PtrToInt8Ty;
}
- // FIXME: Align this on at least an Align boundary.
+ // FIXME: Align this on at least an Align boundary, assert if we can't.
+ assert((Align <= unsigned(Target.getPointerAlign(0))/8)
+ && "Can't align more thqn pointer yet");
Types[needsCopyDispose*2 + 4] = LTy;
return llvm::StructType::get(Types, false);
}
QualType Ty = D.getType();
bool isByRef = D.hasAttr<BlocksAttr>();
bool needsDispose = false;
+ unsigned Align = 0;
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
// A normal fixed sized variable becomes an alloca in the entry block.
const llvm::Type *LTy = ConvertTypeForMem(Ty);
+ Align = getContext().getDeclAlignInBytes(&D);
if (isByRef)
- LTy = BuildByRefType(Ty, getContext().getDeclAlignInBytes(&D));
+ LTy = BuildByRefType(Ty, Align);
llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
Alloc->setName(D.getNameAsString().c_str());
if (isByRef)
- Alloc->setAlignment(std::max(getContext().getDeclAlignInBytes(&D),
- unsigned(Target.getPointerAlign(0) / 8)));
- else
- Alloc->setAlignment(getContext().getDeclAlignInBytes(&D));
+ Align = std::max(Align, unsigned(Target.getPointerAlign(0) / 8));
+ Alloc->setAlignment(Align);
DeclPtr = Alloc;
} else {
// Targets that don't support recursion emit locals as globals.
if (flags & BLOCK_HAS_COPY_DISPOSE) {
BlockHasCopyDispose = true;
llvm::Value *copy_helper = Builder.CreateStructGEP(DeclPtr, 4);
- Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag),
+ Builder.CreateStore(BuildbyrefCopyHelper(DeclPtr->getType(), flag, Align),
copy_helper);
llvm::Value *destroy_helper = Builder.CreateStructGEP(DeclPtr, 5);
- Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag),
+ Builder.CreateStore(BuildbyrefDestroyHelper(DeclPtr->getType(), flag,
+ Align),
destroy_helper);
}
}
--- /dev/null
+// RUN: clang-cc -triple x86_64-apple-darwin9 -emit-llvm -fblocks -o %t %s &&
+// RUN: grep 'object_assign' %t | count 11 &&
+// RUN: grep 'object_dispose' %t | count 29
+
+// Exercise the per-(alignment, flag) byref helper caching: __block variables
+// with several distinct alignments, plus a __block block pointer, so the
+// grep counts above cover the different helper kinds that get emitted.
+int main() {
+ typedef id aid __attribute__((aligned(1)));
+ __block aid a1;
+ __block id a2 __attribute__((aligned(2)));
+ __block id a3 __attribute__((aligned(4)));
+ __block id a4 __attribute__((aligned(8)));
+ __block id a5, a6, a7;
+ __block void (^b)();
+ ^{ a1=a2=a3=a4=a5=a6=a7=0; b = 0; }();
+ return 0;
+}