//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/OpenCL.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
  : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
    Builder(cgm.getModule().getContext()),
    SanitizePerformTypeCheck(CGM.getSanOpts().Null |
                             CGM.getSanOpts().Alignment |
                             CGM.getSanOpts().ObjectSize |
                             CGM.getSanOpts().Vptr),
    SanOpts(&CGM.getSanOpts()),
    AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
    LambdaThisCaptureField(0), NormalCleanupDest(0), NextCleanupDestIndex(1),
    FirstBlockInfo(0), EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
    DebugInfo(0), DisableDebugInfo(false), CalleeWithThisReturn(0),
    DidCallStackSave(false),
    IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
    NumReturnExprs(0), NumSimpleReturnExprs(0),
    CXXABIThisDecl(0), CXXABIThisValue(0), CXXThisValue(0),
    CXXDefaultInitExprThis(0),
    CXXStructorImplicitParamDecl(0), CXXStructorImplicitParamValue(0),
    OutermostConditional(0), CurLexicalScope(0), TerminateLandingPad(0),
    TerminateHandler(0), TrapBB(0) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setUnsafeAlgebra();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  Builder.SetFastMathFlags(FMF);
}

CodeGenFunction::~CodeGenFunction() {
  // If there are any unclaimed block infos, go ahead and destroy them
  // now.  This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

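// Examples: 'int', 'float *', enums, and pointers to members are TEK_Scalar;
// '_Complex double' is TEK_Complex; 'struct S', 'int[4]', and Objective-C
// object types are TEK_Aggregate; '_Atomic(T)' is classified by T.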
TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
      llvm_unreachable("undeduced auto type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Reset insertion point, including debug location, and delete the
      // branch.  This is really subtle and only works because the next change
      // in location will hit the caching in CGDebugInfo::EmitLocation and not
      // override it.
      Builder.SetCurrentDebugLocation(BI->getDebugLoc());
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs;
  // If the function contains only a simple return statement, the
  // cleanup code may become the first breakpoint in the function. To
  // be safe, set the debug location for it to the location of the
  // return statement. Otherwise point it to end of the function's
  // lexical scope.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool EmitRetDbgLoc = true;
  if (EHStack.stable_begin() != PrologueCleanupDepth) {
    PopCleanupBlocks(PrologueCleanupDepth, EndLoc);

    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    EmitRetDbgLoc = false;

    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
  }

  // Emit function epilog (to return).
  EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->EmitFunctionEnd(Builder);
  }

  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = 0;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
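/// For example, building with -finstrument-functions makes the prologue call
/// the user-provided hook
///   void __cyg_profile_func_enter(void *this_fn, void *call_site);
/// and the epilogue call __cyg_profile_func_exit with the same arguments.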
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  llvm::PointerType *PointerTy = Int8PtrTy;
  llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  llvm::FunctionType *FunctionTy =
    llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
    CallSite
  };

  EmitNounwindRuntimeCall(F, args);
}

void CodeGenFunction::EmitMCountInstrumentation() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  llvm::Constant *MCountFn =
    CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName());
  EmitNounwindRuntimeCall(MCountFn);
}

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
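// For example, a kernel declared as
//   kernel void foo(global float *a, read_only image2d_t img)
// gets one value per argument in each of the "kernel_arg_addr_space",
// "kernel_arg_access_qual", "kernel_arg_type", "kernel_arg_type_qual" and
// "kernel_arg_name" nodes built below.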
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 SmallVector <llvm::Value*, 5> &kernelMDArgs,
                                 CGBuilderTy& Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list in the form of "key", N number of values which is
  // the same number of values as there are kernel arguments.

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Value*, 8> addressQuals;
  addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space"));

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Value*, 8> accessQuals;
  accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual"));

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Value*, 8> argTypeNames;
  argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type"));

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Value*, 8> argTypeQuals;
  argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual"));

  // MDNode for the kernel argument names.
  SmallVector<llvm::Value*, 8> argNames;
  argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(Builder.getInt32(ASTCtx.getTargetAddressSpace(
        pointeeTy.getAddressSpace())));

      // Get argument type name.
      std::string typeName = pointeeTy.getUnqualifiedType().getAsString() + "*";

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      addressQuals.push_back(Builder.getInt32(0));

      // Get argument type name.
      std::string typeName = ty.getUnqualifiedType().getAsString();

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Get argument type qualifiers:
      if (ty.isConstQualified())
        typeQuals = "const";
      if (ty.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image access qualifier:
    if (ty->isImageType()) {
      if (parm->hasAttr<OpenCLImageAccessAttr>() &&
          parm->getAttr<OpenCLImageAccessAttr>()->getAccess() == CLIA_write_only)
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
}

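// Emits the per-kernel entry of the module-level "opencl.kernels" named
// metadata: the kernel's llvm::Function, optionally the argument metadata
// built above, and any vec_type_hint, work_group_size_hint or
// reqd_work_group_size attributes.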
void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn)
{
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  SmallVector <llvm::Value*, 5> kernelMDArgs;
  kernelMDArgs.push_back(Fn);

  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs,
                         Builder, getContext());

  if (FD->hasAttr<VecTypeHintAttr>()) {
    VecTypeHintAttr *attr = FD->getAttr<VecTypeHintAttr>();
    QualType hintQTy = attr->getTypeHint();
    const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
    bool isSignedInteger =
      hintQTy->isSignedIntegerType() ||
      (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Value *attrMDArgs[] = {
      llvm::MDString::get(Context, "vec_type_hint"),
      llvm::UndefValue::get(CGM.getTypes().ConvertType(attr->getTypeHint())),
      llvm::ConstantInt::get(
          llvm::IntegerType::get(Context, 32),
          llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0)))
    };
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (FD->hasAttr<WorkGroupSizeHintAttr>()) {
    WorkGroupSizeHintAttr *attr = FD->getAttr<WorkGroupSizeHintAttr>();
    llvm::Value *attrMDArgs[] = {
      llvm::MDString::get(Context, "work_group_size_hint"),
      Builder.getInt32(attr->getXDim()),
      Builder.getInt32(attr->getYDim()),
      Builder.getInt32(attr->getZDim())
    };
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
    ReqdWorkGroupSizeAttr *attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
    llvm::Value *attrMDArgs[] = {
      llvm::MDString::get(Context, "reqd_work_group_size"),
      Builder.getInt32(attr->getXDim()),
      Builder.getInt32(attr->getYDim()),
      Builder.getInt32(attr->getZDim())
    };
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
  llvm::NamedMDNode *OpenCLKernelMetadata =
    CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
  OpenCLKernelMetadata->addOperand(kernelMDNode);
}

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  CurFuncDecl = (D ? D->getNonClosureContext() : 0);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  if (CGM.getSanitizerBlacklist().isIn(*Fn)) {
    SanOpts = &SanitizerOptions::Disabled;
    SanitizePerformTypeCheck = false;
  }

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.
  if (!CGM.getCodeGenOpts().NoInline)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
             RE = FD->redecls_end(); RI != RE; ++RI)
        if (RI->isInlineSpecified()) {
          Fn->addFnAttr(llvm::Attribute::InlineHint);
          break;
        }

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entryblock
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    SmallVector<QualType, 16> ArgTypes;
    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      ArgTypes.push_back((*i)->getType());
    }

    QualType FnType =
      getContext().getFunctionType(RetTy, ArgTypes,
                                   FunctionProtoType::ExtProtoInfo());

    DI->setLocation(StartLoc);
    DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = 0;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    ReturnValue = CurFn->arg_begin();
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If this lambda captures this, load it.
        LValue ThisLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
  assert(FD->getBody());
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(FD->getBody()))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(FD->getBody());
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
        if (!Call->doesNotThrow())
          return;
      } else if (isa<llvm::ResumeInst>(&*BI)) {
        return;
      }
  F->setDoesNotThrow();
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (!FD->hasAttr<NoDebugAttr>())
    maybeInitializeDebugInfo();

  FunctionArgList Args;
  QualType ResTy = FD->getResultType();

  CurGD = GD;
  if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
    CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
    Args.push_back(FD->getParamDecl(i));

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();

  // CalleeWithThisReturn keeps track of the last callee inside this function
  // that returns 'this'.  Before starting the function, we set it to null.
  CalleeWithThisReturn = 0;

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());

  // Generate the body of the function.
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !CGM.getCodeGenOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
  else if (isa<CXXConversionDecl>(FD) &&
           cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
    // The lambda conversion to block pointer is special; the semantics can't be
    // expressed in the AST, so IRGen needs to special-case it.
    EmitLambdaToBlockPointerBody(Args);
  } else if (isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda "__invoke" function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator()) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else
    EmitFunctionBody(Args);

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior in
  //   a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() &&
      !FD->getResultType()->isVoidType() && Builder.GetInsertBlock()) {
    if (SanOpts->Return)
      EmitCheck(Builder.getFalse(), "missing_return",
                EmitCheckSourceLocation(FD->getLocation()),
                ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
    else if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap));
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());
  // CalleeWithThisReturn keeps track of the last callee inside this function
  // that returns 'this'.  After finishing the function, we set it to null.
  CalleeWithThisReturn = 0;

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, it not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...   foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a label!
  if (S == 0) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (containsBreak(*I))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the boolean result in Result.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
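/// For example, for "if (a && b)" the LHS is emitted as its own conditional
/// branch (to a "land.lhs.true" block or to the false destination) and the RHS
/// is then emitted in that block; "a || b" is handled symmetrically via a
/// "lor.lhs.false" block.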
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot)
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);

    cond.begin(*this);
    EmitBlock(LHSBlock);
    EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
                                       bool OmitOnError) {
  CGM.ErrorUnsupported(S, Type, OmitOnError);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               llvm::Value *dest, llvm::Value *src,
                               llvm::Value *sizeInChars) {
  std::pair<CharUnits,CharUnits> baseSizeAndAlign
    = CGF.getContext().getTypeInfoInChars(baseType);

  CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());

  llvm::Type *i8p = Builder.getInt8PtrTy();

  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
  llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
  cur->addIncoming(begin, originBB);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(cur, src, baseSizeInChars,
                       baseSizeAndAlign.second.getQuantity(),
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP);

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);
  CharUnits Size = TypeInfo.first;
  CharUnits Align = TypeInfo.second;

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (Size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
          dyn_cast_or_null<VariableArrayType>(
                                          getContext().getAsArrayType(Ty))) {
      QualType eltType;
      llvm::Value *numElts;
      llvm::tie(numElts, eltType) = getVLASize(vlaType);

      SizeVal = numElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(Size);
    vla = 0;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    llvm::Value *SrcPtr =
      Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
                       Align.getQuantity(), false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (IndirectBranch == 0)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

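// GetIndirectGotoBlock - Get (or lazily create) the single shared block that
// all indirect gotos in the function branch through.  This supports the GNU
// labels-as-values extension, e.g. "void *p = &&target; ... goto *p;".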
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
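/// For example, for a local "int a[3][5]" this returns a count of 15, sets the
/// base type to 'int', and rewrites 'addr' to point at the first int element;
/// any VLA dimensions are folded in via getVLASize.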
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              llvm::Value *&addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = 0;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(
      cast<llvm::PointerType>(addr->getType())->getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out-of-synch");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
        cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    unsigned AddressSpace = addr->getType()->getPointerAddressSpace();
    llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
    addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

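// For a multi-dimensional VLA such as 'int a[n][m]' this multiplies the
// recorded dimension sizes (n * m, as size_t values) and reports 'int' as the
// element type.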
std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = 0;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fcatch-undefined-behavior to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return std::pair<llvm::Value*,QualType>(numElements, elementType);
}

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a value
          //   greater than zero.
          if (SanOpts->VLABound &&
              size->getType()->isSignedIntegerType()) {
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
              EmitCheckSourceLocation(size->getLocStart()),
              EmitCheckTypeDescriptor(size->getType())
            };
            EmitCheck(Builder.CreateICmpSGT(Size, Zero),
                      "vla_bound_not_positive", StaticArgs, Size,
                      CRK_Recoverable);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getResultType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
      // Stop walking: nothing to do.
      break;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      break;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert (Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

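// EmitVarAnnotations - Emit an llvm.var.annotation call for each
// __attribute__((annotate("..."))) on a local variable declaration.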
void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (specific_attr_iterator<AnnotateAttr>
       ai = D->specific_attr_begin<AnnotateAttr>(),
       ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       (*ai)->getAnnotation(), D->getLocation());
}

llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                                   llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (specific_attr_iterator<AnnotateAttr>
       ai = D->specific_attr_begin<AnnotateAttr>(),
       ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, (*ai)->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);