1 //===--- CGBlocks.cpp - Emit LLVM Code for declarations -------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This contains code to emit blocks.
12 //===----------------------------------------------------------------------===//
14 #include "CGDebugInfo.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "clang/AST/DeclObjC.h"
18 #include "llvm/Module.h"
19 #include "llvm/Target/TargetData.h"
23 using namespace clang;
24 using namespace CodeGen;
// BuildDescriptorBlockDecl - Emit the constant block descriptor
// (reserved word, block size, and — when needed — copy/dispose helper
// pointers) as an internal global named "__block_descriptor_tmp".
// NOTE(review): this listing is elided; the Elts.push_back calls for the
// reserved/size constants and the closing return appear to be on lines
// missing from this view — confirm against the full file.
26 llvm::Constant *CodeGenFunction::
27 BuildDescriptorBlockDecl(bool BlockHasCopyDispose, uint64_t Size,
28 const llvm::StructType* Ty,
29 std::vector<HelperInfo> *NoteForHelper) {
30 const llvm::Type *UnsignedLongTy
31 = CGM.getTypes().ConvertType(getContext().UnsignedLongTy);
33 std::vector<llvm::Constant*> Elts;
// reserved field — always zero.
36 C = llvm::ConstantInt::get(UnsignedLongTy, 0);
40 // FIXME: What is the right way to say this doesn't fit? We should give
41 // a user diagnostic in that case. Better fix would be to change the
// Size field: total byte size of the block literal.
43 C = llvm::ConstantInt::get(UnsignedLongTy, Size);
46 if (BlockHasCopyDispose) {
47 // copy_func_helper_decl
48 Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));
// destroy_func_decl
51 Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
// Wrap the fields in a constant struct and hoist it into an internal,
// constant global so the descriptor can be referenced by address.
54 C = llvm::ConstantStruct::get(VMContext, Elts, false);
56 C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
57 llvm::GlobalValue::InternalLinkage,
58 C, "__block_descriptor_tmp");
// getNSConcreteGlobalBlock - Lazily create (once) and return the runtime
// symbol "_NSConcreteGlobalBlock", the isa pointer for global blocks.
62 llvm::Constant *BlockModule::getNSConcreteGlobalBlock() {
63 if (NSConcreteGlobalBlock == 0)
64 NSConcreteGlobalBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
65 "_NSConcreteGlobalBlock");
66 return NSConcreteGlobalBlock;
// getNSConcreteStackBlock - Lazily create (once) and return the runtime
// symbol "_NSConcreteStackBlock", the isa pointer for stack blocks.
69 llvm::Constant *BlockModule::getNSConcreteStackBlock() {
70 if (NSConcreteStackBlock == 0)
71 NSConcreteStackBlock = CGM.CreateRuntimeVariable(PtrToInt8Ty,
72 "_NSConcreteStackBlock");
73 return NSConcreteStackBlock;
// CollectBlockDeclRefInfo - Recursively walk the statement tree rooted at S,
// recording in Info.DeclRefs every BlockDeclRefExpr whose referenced decl
// escapes (i.e. is declared outside the inner block contexts accumulated in
// InnerContexts). Nested block literals are descended into so nested
// captures are found too.
76 static void CollectBlockDeclRefInfo(
77 const Stmt *S, CodeGenFunction::BlockInfo &Info,
78 llvm::SmallSet<const DeclContext *, 16> &InnerContexts) {
// Recurse over every child statement first.
79 for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
82 CollectBlockDeclRefInfo(*I, Info, InnerContexts);
84 // We want to ensure we walk down into block literals so we can find
85 // all nested BlockDeclRefExprs.
86 if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
87 InnerContexts.insert(cast<DeclContext>(BE->getBlockDecl()));
88 CollectBlockDeclRefInfo(BE->getBody(), Info, InnerContexts);
91 if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
92 // FIXME: Handle enums.
// Function references need no capture slot — skip them.
93 if (isa<FunctionDecl>(BDRE->getDecl()))
96 // Only Decls that escape are added.
97 if (!InnerContexts.count(BDRE->getDecl()->getDeclContext()))
98 Info.DeclRefs.push_back(BDRE);
102 /// CanBlockBeGlobal - Given a BlockInfo struct, determines if a block can be
103 /// declared as a global variable instead of on the stack.
/// A block with no captured decl references needs no per-instance state.
104 static bool CanBlockBeGlobal(const CodeGenFunction::BlockInfo &Info) {
105 return Info.DeclRefs.empty();
108 /// AllocateAllBlockDeclRefs - Preallocate all nested BlockDeclRefExprs to
109 /// ensure we can generate the debug information for the parameter for the block
/// invoke function (offsets must be assigned before the block struct type is
/// finalized).
111 static void AllocateAllBlockDeclRefs(const CodeGenFunction::BlockInfo &Info,
112 CodeGenFunction *CGF) {
113 // Always allocate self, as it is often handy in the debugger, even if there
114 // is no codegen in the block that uses it. This is also useful to always do
115 // this as if we didn't, we'd have to figure out all code that uses a self
116 // pointer, including implicit uses.
117 if (const ObjCMethodDecl *OMD
118 = dyn_cast_or_null<ObjCMethodDecl>(CGF->CurFuncDecl)) {
119 ImplicitParamDecl *SelfDecl = OMD->getSelfDecl();
// Synthesize a by-value (not byref) capture of 'self'.
120 BlockDeclRefExpr *BDRE = new (CGF->getContext())
121 BlockDeclRefExpr(SelfDecl,
122 SelfDecl->getType(), SourceLocation(), false);
123 CGF->AllocateBlockDecl(BDRE);
126 // FIXME: Also always forward the this pointer in C++ as well.
// Assign an offset in the block literal for each collected capture.
128 for (size_t i = 0; i < Info.DeclRefs.size(); ++i)
129 CGF->AllocateBlockDecl(Info.DeclRefs[i]);
132 // FIXME: Push most into CGM, passing down a few bits, like current function
// BuildBlockLiteralTmp - Emit a stack block literal for BE: collect the
// captured decls, generate the invoke function, then either fold the whole
// literal to a constant global (no captures) or materialize an alloca and
// store isa/flags/reserved/invoke/descriptor plus one slot per capture.
// NOTE(review): this listing is elided — several statements (pushes,
// closing braces, else-arms) fall on lines missing from this view, so
// comments below describe only what is visible.
134 llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
136 std::string Name = CurFn->getName();
137 CodeGenFunction::BlockInfo Info(0, Name.c_str());
138 llvm::SmallSet<const DeclContext *, 16> InnerContexts;
139 InnerContexts.insert(BE->getBlockDecl());
140 CollectBlockDeclRefInfo(BE->getBody(), Info, InnerContexts);
142 // Check if the block can be global.
143 // FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like
144 // to just have one code path. We should move this function into CGM and pass
145 // CGF, then we can just check to see if CGF is 0.
// Deliberately disabled ("0 &&") — see FIXME above.
146 if (0 && CanBlockBeGlobal(Info))
147 return CGM.GetAddrOfGlobalBlock(BE, Name.c_str());
// Standard block header is 5 fields; introspection adds a 6th (type
// encoding string) — presumably the elided line bumps BlockFields.
149 size_t BlockFields = 5;
151 bool hasIntrospection = CGM.getContext().getLangOptions().BlockIntrospection;
153 if (hasIntrospection) {
156 std::vector<llvm::Constant*> Elts(BlockFields);
158 if (hasIntrospection) {
159 std::string BlockTypeEncoding;
160 CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
162 Elts[5] = llvm::ConstantExpr::getBitCast(
163 CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty);
170 // C = BuildBlockStructInitlist();
171 unsigned int flags = BLOCK_HAS_DESCRIPTOR;
173 if (hasIntrospection)
174 flags |= BLOCK_HAS_OBJC_TYPE;
176 // We run this first so that we set BlockHasCopyDispose from the entire
// block body before emitting the literal's flags.
179 uint64_t subBlockSize, subBlockAlign;
180 llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
181 bool subBlockHasCopyDispose = false;
// Generate the invoke function in a fresh CodeGenFunction.
183 = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
187 subBlockDeclRefDecls,
188 subBlockHasCopyDispose);
189 BlockHasCopyDispose |= subBlockHasCopyDispose;
192 // FIXME: Don't use BlockHasCopyDispose, it is set more often then
193 // necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
194 if (subBlockHasCopyDispose)
195 flags |= BLOCK_HAS_COPY_DISPOSE;
// __isa: stack blocks point at _NSConcreteStackBlock.
198 C = CGM.getNSConcreteStackBlock();
199 C = llvm::ConstantExpr::getBitCast(C, PtrToInt8Ty);
// __flags
203 const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
204 CGM.getTypes().ConvertType(CGM.getContext().IntTy));
205 C = llvm::ConstantInt::get(IntTy, flags);
// __reserved
209 C = llvm::ConstantInt::get(IntTy, 0);
// No captures: the literal is entirely constant — emit it as a global.
212 if (subBlockDeclRefDecls.size() == 0) {
214 Elts[4] = BuildDescriptorBlockDecl(subBlockHasCopyDispose, subBlockSize,
217 // Optimize to being a global block.
218 Elts[0] = CGM.getNSConcreteGlobalBlock();
219 Elts[1] = llvm::ConstantInt::get(IntTy, flags|BLOCK_IS_GLOBAL);
221 C = llvm::ConstantStruct::get(VMContext, Elts, false);
224 sprintf(Name, "__block_holder_tmp_%d", CGM.getGlobalUniqueCount());
225 C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
226 llvm::GlobalValue::InternalLinkage,
228 QualType BPT = BE->getType();
229 C = llvm::ConstantExpr::getBitCast(C, ConvertType(BPT));
// Otherwise build the concrete literal type: header fields followed by
// one field per captured decl (byref captures are pointers to the byref
// struct).
233 std::vector<const llvm::Type *> Types(BlockFields+subBlockDeclRefDecls.size());
234 for (int i=0; i<4; ++i)
235 Types[i] = Elts[i]->getType();
236 Types[4] = PtrToInt8Ty;
237 if (hasIntrospection)
238 Types[5] = PtrToInt8Ty;
240 for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) {
241 const Expr *E = subBlockDeclRefDecls[i];
242 const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
243 QualType Ty = E->getType();
244 if (BDRE && BDRE->isByRef()) {
245 Types[i+BlockFields] = llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
247 Types[i+BlockFields] = ConvertType(Ty);
// Packed struct: offsets were computed by hand in getBlockOffset.
250 llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);
252 llvm::AllocaInst *A = CreateTempAlloca(Ty);
253 A->setAlignment(subBlockAlign);
256 std::vector<HelperInfo> NoteForHelper(subBlockDeclRefDecls.size());
// Store the constant header fields into the alloca.
259 for (unsigned i=0; i<4; ++i)
260 Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));
261 if (hasIntrospection)
262 Builder.CreateStore(Elts[5], Builder.CreateStructGEP(V, 5, "block.tmp"));
// Now emit a store for each captured value.
264 for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i)
266 // FIXME: Push const down.
267 Expr *E = const_cast<Expr*>(subBlockDeclRefDecls[i]);
271 DR = dyn_cast<DeclRefExpr>(E);
275 BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
276 VD = BDRE->getDecl();
278 llvm::Value* Addr = Builder.CreateStructGEP(V, i+BlockFields, "tmp");
279 NoteForHelper[helpersize].index = i+5;
280 NoteForHelper[helpersize].RequiresCopying
281 = BlockRequiresCopying(VD->getType());
282 NoteForHelper[helpersize].flag
283 = (VD->getType()->isBlockPointerType()
284 ? BLOCK_FIELD_IS_BLOCK
285 : BLOCK_FIELD_IS_OBJECT);
// Capture of a local in the current function.
287 if (LocalDeclMap[VD]) {
288 if (BDRE->isByRef()) {
289 NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
290 // FIXME: Someone double check this.
291 (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
// Byref locals: store the forwarding pointer, not the local slot.
292 llvm::Value *Loc = LocalDeclMap[VD];
293 Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
294 Loc = Builder.CreateLoad(Loc);
295 Builder.CreateStore(Loc, Addr);
// Non-local capture: re-emit as a plain DeclRefExpr and copy the value.
299 E = new (getContext()) DeclRefExpr (VD,
303 if (BDRE->isByRef()) {
304 NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
305 // FIXME: Someone double check this.
306 (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
307 E = new (getContext())
308 UnaryOperator(E, UnaryOperator::AddrOf,
309 getContext().getPointerType(E->getType()),
314 RValue r = EmitAnyExpr(E, Addr, false);
// Scalar result — may need the byref-struct fixup below.
316 llvm::Value *Loc = r.getScalarVal();
317 const llvm::Type *Ty = Types[i+BlockFields];
318 if (BDRE->isByRef()) {
319 // E is now the address of the value field, instead, we want the
320 // address of the actual ByRef struct. We optimize this slightly
321 // compared to gcc by not grabbing the forwarding slot as this must
322 // be done during Block_copy for us, and we can postpone the work
// until then.
324 uint64_t offset = BlockDecls[BDRE->getDecl()];
326 llvm::Value *BlockLiteral = LoadBlockStruct();
// Index into the enclosing block literal by byte offset.
328 Loc = Builder.CreateGEP(BlockLiteral,
329 llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
332 Ty = llvm::PointerType::get(Ty, 0);
333 Loc = Builder.CreateBitCast(Loc, Ty);
334 Loc = Builder.CreateLoad(Loc);
335 // Loc = Builder.CreateBitCast(Loc, Ty);
337 Builder.CreateStore(Loc, Addr);
338 } else if (r.isComplex())
340 ErrorUnsupported(BE, "complex in block literal");
341 else if (r.isAggregate())
342 ; // Already created into the destination
344 assert (0 && "bad block variable");
345 // FIXME: Ensure that the offset created by the backend for
346 // the struct matches the previously computed offset in BlockDecls.
348 NoteForHelper.resize(helpersize);
// Finally emit and store the block descriptor pointer.
351 llvm::Value *Descriptor = BuildDescriptorBlockDecl(subBlockHasCopyDispose,
354 Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
355 Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
// Result is the literal cast to the block pointer type of BE.
358 QualType BPT = BE->getType();
359 return Builder.CreateBitCast(V, ConvertType(BPT));
// getBlockDescriptorType - Lazily build and cache the LLVM type for
// struct __block_descriptor { unsigned long reserved; unsigned long size; }.
363 const llvm::Type *BlockModule::getBlockDescriptorType() {
364 if (BlockDescriptorType)
365 return BlockDescriptorType;
367 const llvm::Type *UnsignedLongTy =
368 getTypes().ConvertType(getContext().UnsignedLongTy);
370 // struct __block_descriptor {
371 // unsigned long reserved;
372 // unsigned long block_size;
// };
374 BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
// Give it a readable name in the module's type table.
379 getModule().addTypeName("struct.__block_descriptor",
380 BlockDescriptorType);
382 return BlockDescriptorType;
// getGenericBlockLiteralType - Lazily build and cache the generic block
// literal type (isa, flags, reserved, invoke, descriptor [, types string
// when BlockIntrospection is on]) used to call through any block pointer.
385 const llvm::Type *BlockModule::getGenericBlockLiteralType() {
386 if (GenericBlockLiteralType)
387 return GenericBlockLiteralType;
389 const llvm::Type *BlockDescPtrTy =
390 llvm::PointerType::getUnqual(getBlockDescriptorType());
392 const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
393 getTypes().ConvertType(getContext().IntTy));
395 // struct __block_literal_generic {
// void *__isa; int __flags; int __reserved;
399 // void (*__invoke)(void *);
400 // struct __block_descriptor *__descriptor;
401 // // GNU runtime only:
402 // const char *types;
// };
404 if (CGM.getContext().getLangOptions().BlockIntrospection)
405 GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
// else the 5-field variant without the types string.
414 GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
422 getModule().addTypeName("struct.__block_literal_generic",
423 GenericBlockLiteralType);
425 return GenericBlockLiteralType;
// getGenericExtendedBlockLiteralType - Like getGenericBlockLiteralType but
// with the two extra helper-function pointer fields present when the block
// has copy/dispose helpers.
428 const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
429 if (GenericExtendedBlockLiteralType)
430 return GenericExtendedBlockLiteralType;
432 const llvm::Type *BlockDescPtrTy =
433 llvm::PointerType::getUnqual(getBlockDescriptorType());
435 const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
436 getTypes().ConvertType(getContext().IntTy));
438 // struct __block_literal_generic {
// void *__isa; int __flags; int __reserved;
442 // void (*__invoke)(void *);
443 // struct __block_descriptor *__descriptor;
444 // void *__copy_func_helper_decl;
445 // void *__destroy_func_decl;
// };
447 GenericExtendedBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
457 getModule().addTypeName("struct.__block_literal_extended_generic",
458 GenericExtendedBlockLiteralType);
460 return GenericExtendedBlockLiteralType;
// EmitBlockCallExpr - Emit a call through a block pointer: bitcast the
// callee to the generic block literal type, load the __invoke slot (field
// 3), pass the literal itself as the implicit first argument, then call
// with the remaining user arguments.
463 RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
464 ReturnValueSlot ReturnValue) {
465 const BlockPointerType *BPT =
466 E->getCallee()->getType()->getAs<BlockPointerType>();
468 llvm::Value *Callee = EmitScalarExpr(E->getCallee());
470 // Get a pointer to the generic block literal.
471 const llvm::Type *BlockLiteralTy =
472 llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
474 // Bitcast the callee to a block literal.
475 llvm::Value *BlockLiteral =
476 Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
478 // Get the function pointer from the literal.
479 llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3, "tmp");
// The literal is passed to the invoke function as an i8*.
482 Builder.CreateBitCast(BlockLiteral,
483 llvm::Type::getInt8PtrTy(VMContext),
486 // Add the block literal.
487 QualType VoidPtrTy = getContext().getPointerType(getContext().VoidTy);
489 Args.push_back(std::make_pair(RValue::get(BlockLiteral), VoidPtrTy));
491 QualType FnType = BPT->getPointeeType();
493 // And the rest of the arguments.
494 EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
495 E->arg_begin(), E->arg_end());
497 // Load the function.
498 llvm::Value *Func = Builder.CreateLoad(FuncPtr, "tmp");
500 QualType ResultType = FnType->getAs<FunctionType>()->getResultType();
502 const CGFunctionInfo &FnInfo =
503 CGM.getTypes().getFunctionInfo(ResultType, Args);
505 // Cast the function pointer to the right type.
506 const llvm::Type *BlockFTy =
507 CGM.getTypes().GetFunctionType(FnInfo, false);
509 const llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
510 Func = Builder.CreateBitCast(Func, BlockFTyPtr);
512 // And call the block.
513 return EmitCall(FnInfo, Func, ReturnValue, Args);
// AllocateBlockDecl - Return the byte offset of E's decl within the block
// literal, allocating a new slot via getBlockOffset if this is the first
// reference. Also sets BlockHasCopyDispose when the capture needs helpers.
516 uint64_t CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
517 const ValueDecl *VD = E->getDecl();
// Note: operator[] default-constructs the entry (0) on first use.
518 uint64_t &offset = BlockDecls[VD];
520 // See if we have already allocated an offset for this variable.
524 // Don't run the expensive check, unless we have to.
525 if (!BlockHasCopyDispose)
// byref captures (elided condition, presumably) or copyable types
// require copy/dispose helpers.
527 || BlockRequiresCopying(E->getType()))
528 BlockHasCopyDispose = true;
530 // if not, allocate one now.
531 offset = getBlockOffset(E);
// GetAddrOfBlockDecl - From inside a block's invoke function, compute the
// address of a captured variable: index into the block struct by the
// decl's byte offset; for byref captures, chase the forwarding pointer
// into the byref struct's value field.
536 llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
537 const ValueDecl *VD = E->getDecl();
538 uint64_t offset = AllocateBlockDecl(E);
// GEP off the raw (i8*) block literal by byte offset.
541 llvm::Value *BlockLiteral = LoadBlockStruct();
542 llvm::Value *V = Builder.CreateGEP(BlockLiteral,
543 llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
// Byref path (the guarding condition appears elided from this listing).
547 const llvm::Type *PtrStructTy
548 = llvm::PointerType::get(BuildByRefType(VD), 0);
549 // The block literal will need a copy/destroy helper.
550 BlockHasCopyDispose = true;
552 const llvm::Type *Ty = PtrStructTy;
553 Ty = llvm::PointerType::get(Ty, 0);
554 V = Builder.CreateBitCast(V, Ty);
555 V = Builder.CreateLoad(V);
// Always go through the forwarding pointer, so moved (Block_copy'd)
// byref variables are found at their current location.
556 V = Builder.CreateStructGEP(V, 1, "forwarding");
557 V = Builder.CreateLoad(V);
558 V = Builder.CreateBitCast(V, PtrStructTy);
559 V = Builder.CreateStructGEP(V, getByRefValueLLVMField(VD),
560 VD->getNameAsString());
// Non-byref path: the slot holds the value directly.
562 const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());
564 Ty = llvm::PointerType::get(Ty, 0);
565 V = Builder.CreateBitCast(V, Ty);
// BlockForwardSelf - Inside a block in an ObjC method, rebind the local
// decl map entry for 'self' to its captured slot in the block literal.
570 void CodeGenFunction::BlockForwardSelf() {
571 const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
572 ImplicitParamDecl *SelfDecl = OMD->getSelfDecl();
573 llvm::Value *&DMEntry = LocalDeclMap[SelfDecl];
576 // FIXME - Eliminate BlockDeclRefExprs, clients don't need/want to care
// Synthesize a by-value capture expression for self.
577 BlockDeclRefExpr *BDRE = new (getContext())
578 BlockDeclRefExpr(SelfDecl,
579 SelfDecl->getType(), SourceLocation(), false);
580 DMEntry = GetAddrOfBlockDecl(BDRE);
// GetAddrOfGlobalBlock - Emit a block with no captures as a constant
// global literal ("__block_literal_global") plus its constant descriptor
// ("__block_descriptor_global").
// NOTE(review): the return type and several statements of this function
// fall on lines elided from this listing.
584 BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
585 // Generate the block descriptor.
586 const llvm::Type *UnsignedLongTy = Types.ConvertType(Context.UnsignedLongTy);
587 const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
588 getTypes().ConvertType(getContext().IntTy));
590 llvm::Constant *DescriptorFields[2];
// Reserved field is zero.
593 DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);
595 // Block literal size. For global blocks we just use the size of the generic
596 // block literal struct.
597 uint64_t BlockLiteralSize =
598 TheTargetData.getTypeStoreSizeInBits(getGenericBlockLiteralType()) / 8;
599 DescriptorFields[1] =
600 llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize);
602 llvm::Constant *DescriptorStruct =
603 llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 2, false);
605 llvm::GlobalVariable *Descriptor =
606 new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
607 llvm::GlobalVariable::InternalLinkage,
608 DescriptorStruct, "__block_descriptor_global");
611 // Generate the constants for the block literal.
// Introspection adds a 6th field for the type-encoding string.
612 if (CGM.getContext().getLangOptions().BlockIntrospection)
615 std::vector<llvm::Constant*> LiteralFields(FieldCount);
617 CodeGenFunction::BlockInfo Info(0, n);
618 uint64_t subBlockSize, subBlockAlign;
619 llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
620 bool subBlockHasCopyDispose = false;
621 llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
// Generate the invoke function (no enclosing function: OuterFuncDecl=0).
623 = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap,
626 subBlockDeclRefDecls,
627 subBlockHasCopyDispose);
628 assert(subBlockSize == BlockLiteralSize
629 && "no imports allowed for global block");
// isa
632 LiteralFields[0] = getNSConcreteGlobalBlock();
// __flags
635 LiteralFields[1] = CGM.getContext().getLangOptions().BlockIntrospection ?
636 llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR |
637 BLOCK_HAS_OBJC_TYPE) :
638 llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR);
// Reserved
641 LiteralFields[2] = llvm::Constant::getNullValue(IntTy);
// Function
644 LiteralFields[3] = Fn;
// Descriptor
647 LiteralFields[4] = Descriptor;
// Type encoding (GNU-runtime introspection only).
650 if (CGM.getContext().getLangOptions().BlockIntrospection) {
651 std::string BlockTypeEncoding;
652 CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
654 LiteralFields[5] = CGM.GetAddrOfConstantCString(BlockTypeEncoding);
657 llvm::Constant *BlockLiteralStruct =
658 llvm::ConstantStruct::get(VMContext, LiteralFields, false);
660 llvm::GlobalVariable *BlockLiteral =
661 new llvm::GlobalVariable(getModule(), BlockLiteralStruct->getType(), true,
662 llvm::GlobalVariable::InternalLinkage,
663 BlockLiteralStruct, "__block_literal_global");
// LoadBlockStruct - Load the block literal pointer (the invoke function's
// implicit first parameter) and return it as an i8* for byte-offset GEPs.
668 llvm::Value *CodeGenFunction::LoadBlockStruct() {
669 llvm::Value *V = Builder.CreateLoad(LocalDeclMap[getBlockStructDecl()],
671 // For now, we codegen based upon byte offsets.
672 return Builder.CreateBitCast(V, PtrToInt8Ty);
// GenerateBlockFunction - Emit the invoke function ("__<name>_block_invoke_")
// for a block literal: set up the implicit .block_descriptor parameter and
// the user parameters, allocate capture offsets, emit the body, emit debug
// info for captured decls at the entry point, and report the computed
// size/align/captures/copy-dispose state back through the out parameters.
// NOTE(review): return type and several statements are on lines elided
// from this listing.
676 CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
677 const BlockInfo& Info,
678 const Decl *OuterFuncDecl,
679 llvm::DenseMap<const Decl*, llvm::Value*> ldm,
682 llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
683 bool &subBlockHasCopyDispose) {
685 // Check if we should generate debug info for this block.
686 if (CGM.getDebugInfo())
687 DebugInfo = CGM.getDebugInfo();
689 // Arrange for local static and local extern declarations to appear
690 // to be local to this function as well, as they are directly referenced
// in a block.
692 for (llvm::DenseMap<const Decl *, llvm::Value*>::iterator i = ldm.begin();
695 const VarDecl *VD = dyn_cast<VarDecl>(i->first);
697 if (VD->getStorageClass() == VarDecl::Static || VD->hasExternalStorage())
698 LocalDeclMap[VD] = i->second;
// Captures start right after the generic block header.
701 BlockOffset = CGM.getTargetData()
702 .getTypeStoreSizeInBits(CGM.getGenericBlockLiteralType()) / 8;
703 BlockAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
705 const FunctionType *BlockFunctionType = BExpr->getFunctionType();
// Pull result type / variadic-ness from the block's function type.
708 if (const FunctionProtoType *FTy =
709 dyn_cast<FunctionProtoType>(BlockFunctionType)) {
710 ResultType = FTy->getResultType();
711 IsVariadic = FTy->isVariadic();
// K&R-style: no proto, not variadic.
714 ResultType = BlockFunctionType->getResultType();
718 FunctionArgList Args;
720 CurFuncDecl = OuterFuncDecl;
722 const BlockDecl *BD = BExpr->getBlockDecl();
724 IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
726 // Allocate all BlockDeclRefDecls, so we can calculate the right ParmTy below.
727 AllocateAllBlockDeclRefs(Info, this);
729 QualType ParmTy = getContext().getBlockParmType(BlockHasCopyDispose,
// Implicit first parameter: pointer to the block literal itself.
732 ImplicitParamDecl *SelfDecl =
733 ImplicitParamDecl::Create(getContext(), 0,
734 SourceLocation(), II,
737 Args.push_back(std::make_pair(SelfDecl, SelfDecl->getType()));
738 BlockStructDecl = SelfDecl;
740 for (BlockDecl::param_const_iterator i = BD->param_begin(),
741 e = BD->param_end(); i != e; ++i)
742 Args.push_back(std::make_pair(*i, (*i)->getType()));
744 const CGFunctionInfo &FI =
745 CGM.getTypes().getFunctionInfo(ResultType, Args);
747 std::string Name = std::string("__") + Info.Name + "_block_invoke_";
748 CodeGenTypes &Types = CGM.getTypes();
749 const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
752 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
756 CGM.SetInternalFunctionAttributes(BD, Fn, FI);
758 StartFunction(BD, ResultType, Fn, Args,
759 BExpr->getBody()->getLocEnd());
// StartFunction clobbered CurFuncDecl; restore the outer function.
761 CurFuncDecl = OuterFuncDecl;
764 // Save a spot to insert the debug information for all the BlockDeclRefDecls.
765 llvm::BasicBlock *entry = Builder.GetInsertBlock();
766 llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
769 EmitStmt(BExpr->getBody());
771 // Remember where we were...
772 llvm::BasicBlock *resume = Builder.GetInsertBlock();
774 // Go back to the entry.
776 Builder.SetInsertPoint(entry, entry_ptr);
778 if (CGDebugInfo *DI = getDebugInfo()) {
779 // Emit debug information for all the BlockDeclRefDecls.
780 for (unsigned i=0; i < BlockDeclRefDecls.size(); ++i) {
781 const Expr *E = BlockDeclRefDecls[i];
782 const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
784 const ValueDecl *D = BDRE->getDecl();
785 DI->setLocation(D->getLocation());
786 DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
787 LocalDeclMap[getBlockStructDecl()],
792 // And resume where we left off.
// (resume may be 0 if the body ended in an unreachable point.)
794 Builder.ClearInsertionPoint();
796 Builder.SetInsertPoint(resume);
798 FinishFunction(cast<CompoundStmt>(BExpr->getBody())->getRBracLoc());
800 // The runtime needs a minimum alignment of a void *.
801 uint64_t MinAlign = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
802 BlockOffset = llvm::RoundUpToAlignment(BlockOffset, MinAlign);
// Report computed layout and capture info back to the caller.
806 subBlockDeclRefDecls = BlockDeclRefDecls;
807 subBlockHasCopyDispose |= BlockHasCopyDispose;
// getBlockOffset - Assign the next slot in the block literal to BDRE's
// decl: pad to the decl's alignment (recording the padding as a synthetic
// char-array capture so layout stays explicit), bump BlockOffset and
// BlockAlign, and return the decl's starting offset.
811 uint64_t BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
812 const ValueDecl *D = dyn_cast<ValueDecl>(BDRE->getDecl());
814 uint64_t Size = getContext().getTypeSize(D->getType()) / 8;
815 uint64_t Align = getContext().getDeclAlignInBytes(D);
// Byref captures are stored as a pointer to the byref struct.
817 if (BDRE->isByRef()) {
818 Size = getContext().getTypeSize(getContext().VoidPtrTy) / 8;
819 Align = getContext().getTypeAlign(getContext().VoidPtrTy) / 8;
822 assert ((Align > 0) && "alignment must be 1 byte or more");
824 uint64_t OldOffset = BlockOffset;
826 // Ensure proper alignment, even if it means we have to have a gap
827 BlockOffset = llvm::RoundUpToAlignment(BlockOffset, Align);
828 BlockAlign = std::max(Align, BlockAlign);
// Any gap becomes an explicit pad field (the struct is packed).
830 uint64_t Pad = BlockOffset - OldOffset;
832 llvm::ArrayType::get(llvm::Type::getInt8Ty(VMContext), Pad);
833 QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
834 llvm::APInt(32, Pad),
835 ArrayType::Normal, 0);
836 ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(),
837 0, QualType(PadTy), 0, VarDecl::None);
839 E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
841 BlockDeclRefDecls.push_back(E);
// Record the capture itself; BlockOffset was advanced by Size on an
// elided line, so the decl's start is BlockOffset - Size.
843 BlockDeclRefDecls.push_back(BDRE);
846 return BlockOffset-Size;
// GenerateCopyHelperFunction - Emit "__copy_helper_block_": a
// void(void *dst, void *src) function that, for each captured field that
// is byref or requires copying (per NoteForHelperp), calls
// _Block_object_assign(dst_field, src_field, flag).
849 llvm::Constant *BlockFunction::
850 GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
851 std::vector<HelperInfo> *NoteForHelperp) {
852 QualType R = getContext().VoidTy;
854 FunctionArgList Args;
// void *dst — the destination block literal.
856 ImplicitParamDecl *Dst =
857 ImplicitParamDecl::Create(getContext(), 0,
859 getContext().getPointerType(getContext().VoidTy));
860 Args.push_back(std::make_pair(Dst, Dst->getType()));
// void *src — the source block literal.
861 ImplicitParamDecl *Src =
862 ImplicitParamDecl::Create(getContext(), 0,
864 getContext().getPointerType(getContext().VoidTy));
865 Args.push_back(std::make_pair(Src, Src->getType()));
867 const CGFunctionInfo &FI =
868 CGM.getTypes().getFunctionInfo(R, Args);
870 // FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
872 std::string Name = std::string("__copy_helper_block_");
873 CodeGenTypes &Types = CGM.getTypes();
874 const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
877 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
// Build a synthetic FunctionDecl so StartFunction has a decl to hang
// attributes/debug info on.
882 = &CGM.getContext().Idents.get("__copy_helper_block_");
884 FunctionDecl *FD = FunctionDecl::Create(getContext(),
885 getContext().getTranslationUnitDecl(),
886 SourceLocation(), II, R, 0,
887 FunctionDecl::Static, false,
889 CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
891 llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
894 if (NoteForHelperp) {
895 std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
// Reinterpret the void* params as pointers to the concrete block struct.
897 PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
898 SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
899 SrcObj = Builder.CreateLoad(SrcObj);
901 llvm::Value *DstObj = CGF.GetAddrOfLocalVar(Dst);
903 PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
904 DstObj = Builder.CreateBitCast(DstObj, PtrPtrT);
905 DstObj = Builder.CreateLoad(DstObj);
907 for (unsigned i=0; i < NoteForHelper.size(); ++i) {
908 int flag = NoteForHelper[i].flag;
909 int index = NoteForHelper[i].index;
// Only byref or copy-requiring fields get a runtime assign call.
911 if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
912 || NoteForHelper[i].RequiresCopying) {
913 llvm::Value *Srcv = SrcObj;
914 Srcv = Builder.CreateStructGEP(Srcv, index);
915 Srcv = Builder.CreateBitCast(Srcv,
916 llvm::PointerType::get(PtrToInt8Ty, 0));
917 Srcv = Builder.CreateLoad(Srcv);
919 llvm::Value *Dstv = Builder.CreateStructGEP(DstObj, index);
920 Dstv = Builder.CreateBitCast(Dstv, PtrToInt8Ty);
922 llvm::Value *N = llvm::ConstantInt::get(
923 llvm::Type::getInt32Ty(T->getContext()), flag);
924 llvm::Value *F = getBlockObjectAssign();
925 Builder.CreateCall3(F, Dstv, Srcv, N);
930 CGF.FinishFunction();
// Return the helper as an i8* for storage in the descriptor.
932 return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
// GenerateDestroyHelperFunction - Emit "__destroy_helper_block_": a
// void(void *src) function that releases (BuildBlockRelease) every
// captured field that is byref or requires copying.
935 llvm::Constant *BlockFunction::
936 GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
937 const llvm::StructType* T,
938 std::vector<HelperInfo> *NoteForHelperp) {
939 QualType R = getContext().VoidTy;
941 FunctionArgList Args;
// void *src — the block literal being destroyed.
943 ImplicitParamDecl *Src =
944 ImplicitParamDecl::Create(getContext(), 0,
946 getContext().getPointerType(getContext().VoidTy));
948 Args.push_back(std::make_pair(Src, Src->getType()));
950 const CGFunctionInfo &FI =
951 CGM.getTypes().getFunctionInfo(R, Args);
953 // FIXME: We'd like to put these into a mergable by content, with
// internal linkage.
955 std::string Name = std::string("__destroy_helper_block_");
956 CodeGenTypes &Types = CGM.getTypes();
957 const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
960 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
// Synthetic FunctionDecl for StartFunction.
965 = &CGM.getContext().Idents.get("__destroy_helper_block_");
967 FunctionDecl *FD = FunctionDecl::Create(getContext(),
968 getContext().getTranslationUnitDecl(),
969 SourceLocation(), II, R, 0,
970 FunctionDecl::Static, false,
972 CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
974 if (NoteForHelperp) {
975 std::vector<HelperInfo> &NoteForHelper = *NoteForHelperp;
977 llvm::Value *SrcObj = CGF.GetAddrOfLocalVar(Src);
// Reinterpret the void* param as a pointer to the concrete block struct.
979 PtrPtrT = llvm::PointerType::get(llvm::PointerType::get(T, 0), 0);
980 SrcObj = Builder.CreateBitCast(SrcObj, PtrPtrT);
981 SrcObj = Builder.CreateLoad(SrcObj);
983 for (unsigned i=0; i < NoteForHelper.size(); ++i) {
984 int flag = NoteForHelper[i].flag;
985 int index = NoteForHelper[i].index;
// Mirror of the copy helper: only byref/copyable fields are released.
987 if ((NoteForHelper[i].flag & BLOCK_FIELD_IS_BYREF)
988 || NoteForHelper[i].RequiresCopying) {
989 llvm::Value *Srcv = SrcObj;
990 Srcv = Builder.CreateStructGEP(Srcv, index);
991 Srcv = Builder.CreateBitCast(Srcv,
992 llvm::PointerType::get(PtrToInt8Ty, 0));
993 Srcv = Builder.CreateLoad(Srcv);
995 BuildBlockRelease(Srcv, flag);
1000 CGF.FinishFunction();
// Return the helper as an i8* for storage in the descriptor.
1002 return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
// BuildCopyHelper - Convenience wrapper: emit the copy helper in a fresh
// CodeGenFunction, forwarding the current BlockHasCopyDispose state.
1005 llvm::Constant *BlockFunction::BuildCopyHelper(const llvm::StructType *T,
1006 std::vector<HelperInfo> *NoteForHelper) {
1007 return CodeGenFunction(CGM).GenerateCopyHelperFunction(BlockHasCopyDispose,
// BuildDestroyHelper - Convenience wrapper: emit the destroy helper in a
// fresh CodeGenFunction, forwarding the current BlockHasCopyDispose state.
1011 llvm::Constant *BlockFunction::BuildDestroyHelper(const llvm::StructType *T,
1012 std::vector<HelperInfo> *NoteForHelperp) {
1013 return CodeGenFunction(CGM).GenerateDestroyHelperFunction(BlockHasCopyDispose,
// GeneratebyrefCopyHelperFunction - Emit "__Block_byref_id_object_copy_":
// a void(void *dst, void *src) helper for a __block variable's byref
// struct that copies the object stored at field 6 ("x") via
// _Block_object_assign with flag | BLOCK_BYREF_CALLER.
1017 llvm::Constant *BlockFunction::
1018 GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
1019 QualType R = getContext().VoidTy;
1021 FunctionArgList Args;
1022 // FIXME: This leaks
1023 ImplicitParamDecl *Dst =
1024 ImplicitParamDecl::Create(getContext(), 0,
1025 SourceLocation(), 0,
1026 getContext().getPointerType(getContext().VoidTy));
1027 Args.push_back(std::make_pair(Dst, Dst->getType()));
1029 // FIXME: This leaks
1030 ImplicitParamDecl *Src =
1031 ImplicitParamDecl::Create(getContext(), 0,
1032 SourceLocation(), 0,
1033 getContext().getPointerType(getContext().VoidTy));
1034 Args.push_back(std::make_pair(Src, Src->getType()));
1036 const CGFunctionInfo &FI =
1037 CGM.getTypes().getFunctionInfo(R, Args);
1039 std::string Name = std::string("__Block_byref_id_object_copy_");
1040 CodeGenTypes &Types = CGM.getTypes();
1041 const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
1043 // FIXME: We'd like to put these into a mergable by content, with
1044 // internal linkage.
1045 llvm::Function *Fn =
1046 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
// Synthetic FunctionDecl for StartFunction.
1051 = &CGM.getContext().Idents.get("__Block_byref_id_object_copy_");
1053 FunctionDecl *FD = FunctionDecl::Create(getContext(),
1054 getContext().getTranslationUnitDecl(),
1055 SourceLocation(), II, R, 0,
1056 FunctionDecl::Static, false,
1058 CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
// dst->x — address of the object slot in the destination byref struct.
1061 llvm::Value *V = CGF.GetAddrOfLocalVar(Dst);
1062 V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
1063 V = Builder.CreateLoad(V);
1064 V = Builder.CreateStructGEP(V, 6, "x");
1065 llvm::Value *DstObj = Builder.CreateBitCast(V, PtrToInt8Ty);
// src->x — the object pointer loaded from the source byref struct.
1068 V = CGF.GetAddrOfLocalVar(Src);
1069 V = Builder.CreateLoad(V);
1070 V = Builder.CreateBitCast(V, T);
1071 V = Builder.CreateStructGEP(V, 6, "x");
1072 V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
1073 llvm::Value *SrcObj = Builder.CreateLoad(V);
// Mark the call as coming from a byref helper.
1075 flag |= BLOCK_BYREF_CALLER;
1077 llvm::Value *N = llvm::ConstantInt::get(
1078 llvm::Type::getInt32Ty(T->getContext()), flag);
1079 llvm::Value *F = getBlockObjectAssign();
1080 Builder.CreateCall3(F, DstObj, SrcObj, N);
1082 CGF.FinishFunction();
1084 return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
// Emit the per-variable "byref dispose" helper (__Block_byref_id_object_dispose_)
// run when a __block variable holding an object/block goes out of scope.  The
// generated function takes one void* argument (the byref struct), loads the
// object pointer stored at struct field index 6, and releases it through
// BuildBlockRelease (i.e. the _Block_object_dispose runtime call).
// NOTE(review): this listing elides some original lines (1089, 1112-1115,
// 1117, 1122) -- the 'flag' parameter, the trailing arguments of
// Function::Create / FunctionDecl::Create, and the IdentifierInfo declaration
// are not visible here; confirm against the full file before editing.
1088 BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
// The helper returns void.
1090 QualType R = getContext().VoidTy;
// Single implicit parameter: void* pointing at the byref structure.
1092 FunctionArgList Args;
1093 // FIXME: This leaks
1094 ImplicitParamDecl *Src =
1095 ImplicitParamDecl::Create(getContext(), 0,
1096 SourceLocation(), 0,
1097 getContext().getPointerType(getContext().VoidTy));
1099 Args.push_back(std::make_pair(Src, Src->getType()));
// Compute the ABI-level function info and LLVM type for a non-variadic
// void(void*) function.
1101 const CGFunctionInfo &FI =
1102 CGM.getTypes().getFunctionInfo(R, Args);
1104 std::string Name = std::string("__Block_byref_id_object_dispose_");
1105 CodeGenTypes &Types = CGM.getTypes();
1106 const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
1108 // FIXME: We'd like to put these into a mergable by content, with
1109 // internal linkage.
// Create the LLVM function (internal linkage) plus a matching AST-level
// FunctionDecl so StartFunction can emit a normal prologue for it.
1110 llvm::Function *Fn =
1111 llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
1116 = &CGM.getContext().Idents.get("__Block_byref_id_object_dispose_");
1118 FunctionDecl *FD = FunctionDecl::Create(getContext(),
1119 getContext().getTranslationUnitDecl(),
1120 SourceLocation(), II, R, 0,
1121 FunctionDecl::Static, false,
1123 CGF.StartFunction(FD, R, Fn, Args, SourceLocation());
// Body: Src's local slot holds a void*; reinterpret it as T**, load the
// byref struct pointer, address field 6 (the captured object slot), and
// load the object pointer to be released.
1125 llvm::Value *V = CGF.GetAddrOfLocalVar(Src);
1126 V = Builder.CreateBitCast(V, llvm::PointerType::get(T, 0));
1127 V = Builder.CreateLoad(V);
1128 V = Builder.CreateStructGEP(V, 6, "x");
1129 V = Builder.CreateBitCast(V, llvm::PointerType::get(PtrToInt8Ty, 0));
1130 V = Builder.CreateLoad(V);
// Mark the release as coming from a byref helper for the runtime.
1132 flag |= BLOCK_BYREF_CALLER;
1133 BuildBlockRelease(V, flag);
1134 CGF.FinishFunction();
// Callers store the helper as a plain i8* in the byref layout.
1136 return llvm::ConstantExpr::getBitCast(Fn, PtrToInt8Ty);
// Return (creating and caching on first use) the byref copy helper for a
// __block variable of LLVM type T.  Helpers are memoized in CGM.AssignCache,
// keyed by (Align, Flag) packed into one integer, so each distinct
// alignment/flag combination gets exactly one emitted function.
1139 llvm::Constant *BlockFunction::BuildbyrefCopyHelper(const llvm::Type *T,
1140 int Flag, unsigned Align) {
1141 // All alignments below that of pointer alignment collapse down to just
1142 // pointer alignment, as we always have at least that much alignment to begin
// Normalize Align to units of the target pointer alignment (bytes).
1144 Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
1146 // As an optimization, we only generate a single function of each kind we
1147 // might need. We need a different one for each alignment and for each
1148 // setting of flags. We mix Align and flag to get the kind.
// BLOCK_BYREF_CURRENT_MAX bounds the flag values, making this packing unique.
1149 uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
1150 llvm::Constant *&Entry = CGM.AssignCache[Kind];
// NOTE(review): the original presumably returns Entry directly when the cache
// already has it (lines 1151-1152 are elided in this listing) -- confirm.
1153 return Entry = CodeGenFunction(CGM).GeneratebyrefCopyHelperFunction(T, Flag);
// Return (creating and caching on first use) the byref dispose helper for a
// __block variable of LLVM type T.  Mirrors BuildbyrefCopyHelper but memoizes
// in CGM.DestroyCache with the same (Align, Flag) packed key.
// NOTE(review): the parameter lines (original 1157-1158, declaring Flag and
// Align) are elided in this listing -- confirm against the full file.
1156 llvm::Constant *BlockFunction::BuildbyrefDestroyHelper(const llvm::Type *T,
1159 // All alignments below that of pointer alignment collpase down to just
1160 // pointer alignment, as we always have at least that much alignment to begin
// Normalize Align to units of the target pointer alignment (bytes).
1162 Align /= unsigned(CGF.Target.getPointerAlign(0)/8);
1164 // As an optimization, we only generate a single function of each kind we
1165 // might need. We need a different one for each alignment and for each
1166 // setting of flags. We mix Align and flag to get the kind.
1167 uint64_t Kind = (uint64_t)Align*BLOCK_BYREF_CURRENT_MAX + Flag;
1168 llvm::Constant *&Entry = CGM.DestroyCache[Kind];
// NOTE(review): the cache-hit early return (original lines 1169-1170) is
// elided in this listing -- confirm.
1171 return Entry=CodeGenFunction(CGM).GeneratebyrefDestroyHelperFunction(T, Flag);
// Lazily declare and cache the block runtime entry point
//   void _Block_object_dispose(i8* object, i32 flags);
// The declaration is created once per module and stored on CGM.
// NOTE(review): the closing brace of the 'if' (original line 1184) is elided
// in this listing.
1174 llvm::Value *BlockFunction::getBlockObjectDispose() {
1175 if (CGM.BlockObjectDispose == 0) {
// Build the function type: void (i8*, i32), non-variadic.
1176 const llvm::FunctionType *FTy;
1177 std::vector<const llvm::Type*> ArgTys;
1178 const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
1179 ArgTys.push_back(PtrToInt8Ty);
1180 ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
1181 FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
1182 CGM.BlockObjectDispose
1183 = CGM.CreateRuntimeFunction(FTy, "_Block_object_dispose");
1185 return CGM.BlockObjectDispose;
// Lazily declare and cache the block runtime entry point
//   void _Block_object_assign(i8* dst, i8* src, i32 flags);
// The declaration is created once per module and stored on CGM.
// NOTE(review): the closing brace of the 'if' (original line 1199) is elided
// in this listing.
1188 llvm::Value *BlockFunction::getBlockObjectAssign() {
1189 if (CGM.BlockObjectAssign == 0) {
// Build the function type: void (i8*, i8*, i32), non-variadic.
1190 const llvm::FunctionType *FTy;
1191 std::vector<const llvm::Type*> ArgTys;
1192 const llvm::Type *ResultType = llvm::Type::getVoidTy(VMContext);
1193 ArgTys.push_back(PtrToInt8Ty);
1194 ArgTys.push_back(PtrToInt8Ty);
1195 ArgTys.push_back(llvm::Type::getInt32Ty(VMContext));
1196 FTy = llvm::FunctionType::get(ResultType, ArgTys, false);
1197 CGM.BlockObjectAssign
1198 = CGM.CreateRuntimeFunction(FTy, "_Block_object_assign");
1200 return CGM.BlockObjectAssign;
// Emit a call to _Block_object_dispose(V, flag) to release a block or a
// __block byref structure at the current insertion point.
// NOTE(review): the declaration of N (original line 1205, likely
// 'llvm::Value *N;') and the closing brace are elided in this listing.
1203 void BlockFunction::BuildBlockRelease(llvm::Value *V, int flag) {
1204 llvm::Value *F = getBlockObjectDispose();
// The runtime takes the object as an i8* plus an i32 flags word.
1206 V = Builder.CreateBitCast(V, PtrToInt8Ty);
1207 N = llvm::ConstantInt::get(llvm::Type::getInt32Ty(V->getContext()), flag);
1208 Builder.CreateCall2(F, V, N);
1211 ASTContext &BlockFunction::getContext() const { return CGM.getContext(); }
1213 BlockFunction::BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf,
1215 : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()), Builder(B) {
1216 PtrToInt8Ty = llvm::PointerType::getUnqual(
1217 llvm::Type::getInt8Ty(VMContext));
1219 BlockHasCopyDispose = false;