//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
using namespace clang;
using namespace CodeGen;

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      CapturedStmtInfo(nullptr), SanOpts(&CGM.getLangOpts().Sanitize),
      IsSanitizerScope(false), AutoreleaseResult(false), BlockInfo(nullptr),
      BlockPointer(nullptr), LambdaThisCaptureField(nullptr),
      NormalCleanupDest(nullptr), NextCleanupDestIndex(1),
      FirstBlockInfo(nullptr), EHResumeBlock(nullptr), ExceptionSlot(nullptr),
      EHSelectorSlot(nullptr), DebugInfo(CGM.getModuleDebugInfo()),
      DisableDebugInfo(false), DidCallStackSave(false), IndirectBranch(nullptr),
      PGO(cgm), SwitchInsn(nullptr), SwitchWeights(nullptr),
      CaseRangeBlock(nullptr), UnreachableBlock(nullptr), NumReturnExprs(0),
      NumSimpleReturnExprs(0), CXXABIThisDecl(nullptr),
      CXXABIThisValue(nullptr), CXXThisValue(nullptr),
      CXXDefaultInitExprThis(nullptr), CXXStructorImplicitParamDecl(nullptr),
      CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
      CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
      TerminateHandler(nullptr), TrapBB(nullptr) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setUnsafeAlgebra();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  Builder.SetFastMathFlags(FMF);
}
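
// Note that -ffast-math maps to the "unsafe algebra" fast-math flag and
// -ffinite-math-only to the no-NaNs/no-infs flags; every floating-point
// instruction subsequently created through Builder inherits these flags.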

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now.  This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP) {
    CGM.getOpenMPRuntime().FunctionFinished(*this);
  }
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
      llvm_unreachable("undeduced auto type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}
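
// For example: 'int' and 'int *' are TEK_Scalar, '_Complex double' is
// TEK_Complex, and a struct, union, or array type is TEK_Aggregate; an
// _Atomic(T) is classified according to its underlying type T.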

void CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return;
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead.  This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
      dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Reset insertion point, including debug location, and delete the
      // branch.  This is really subtle and only works because the next change
      // in location will hit the caching in CGDebugInfo::EmitLocation and not
      // override this.
      Builder.SetCurrentDebugLocation(BI->getDebugLoc());
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code.  If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code.  To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement.  Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool EmitRetDbgLoc = true;
  if (EHStack.stable_begin() != PrologueCleanupDepth) {
    PopCleanupBlocks(PrologueCleanupDepth);

    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    EmitRetDbgLoc = false;

    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
  }

  // Emit function epilog (to return).
  EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo()) {
    DI->EmitFunctionEnd(Builder);
  }

  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
         I = DeferredReplacements.begin(),
         E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  llvm::PointerType *PointerTy = Int8PtrTy;
  llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  llvm::FunctionType *FunctionTy =
    llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
    CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
    llvm::ConstantInt::get(Int32Ty, 0),
    "callsite");

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
    CallSite
  };

  EmitNounwindRuntimeCall(F, args);
}
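
// The emitted IR is roughly:
//   %callsite = call i8* @llvm.returnaddress(i32 0)
//   call void @__cyg_profile_func_enter(i8* <this function>, i8* %callsite)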

void CodeGenFunction::EmitMCountInstrumentation() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  llvm::Constant *MCountFn =
    CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName());
  EmitNounwindRuntimeCall(MCountFn);
}

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 SmallVector<llvm::Value*, 5> &kernelMDArgs,
                                 CGBuilderTy &Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list of the form: "key", followed by one value per
  // kernel argument.

  const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Value*, 8> addressQuals;
  addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space"));

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Value*, 8> accessQuals;
  accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual"));

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Value*, 8> argTypeNames;
  argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type"));

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Value*, 8> argTypeQuals;
  argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual"));

  // MDNode for the kernel argument names.
  SmallVector<llvm::Value*, 8> argNames;
  argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(Builder.getInt32(ASTCtx.getTargetAddressSpace(
        pointeeTy.getAddressSpace())));

      // Get argument type name.
      std::string typeName =
          pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      uint32_t AddrSpc = 0;
      if (ty->isImageType())
        AddrSpc =
          CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);

      addressQuals.push_back(Builder.getInt32(AddrSpc));

      // Get argument type name.
      std::string typeName = ty.getUnqualifiedType().getAsString(Policy);

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Get argument type qualifiers:
      if (ty.isConstQualified())
        typeQuals = "const";
      if (ty.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image access qualifier:
    if (ty->isImageType()) {
      const OpenCLImageAccessAttr *A = parm->getAttr<OpenCLImageAccessAttr>();
      if (A && A->isWriteOnly())
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
      // FIXME: what about read_write?
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
}
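
// As an illustration, a kernel such as 'kernel void foo(global int *in)'
// produces operands roughly of the form:
//   !{!"kernel_arg_addr_space", i32 1}
//   !{!"kernel_arg_access_qual", !"none"}
//   !{!"kernel_arg_type", !"int*"}
//   !{!"kernel_arg_type_qual", !""}
//   !{!"kernel_arg_name", !"in"}
// (the exact address-space number is target-dependent).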

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn)
{
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  SmallVector<llvm::Value*, 5> kernelMDArgs;
  kernelMDArgs.push_back(Fn);

  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs,
                         Builder, getContext());

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType hintQTy = A->getTypeHint();
    const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
    bool isSignedInteger =
        hintQTy->isSignedIntegerType() ||
        (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Value *attrMDArgs[] = {
      llvm::MDString::get(Context, "vec_type_hint"),
      llvm::UndefValue::get(CGM.getTypes().ConvertType(A->getTypeHint())),
      llvm::ConstantInt::get(
          llvm::IntegerType::get(Context, 32),
          llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0)))
    };
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Value *attrMDArgs[] = {
      llvm::MDString::get(Context, "work_group_size_hint"),
      Builder.getInt32(A->getXDim()),
      Builder.getInt32(A->getYDim()),
      Builder.getInt32(A->getZDim())
    };
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Value *attrMDArgs[] = {
      llvm::MDString::get(Context, "reqd_work_group_size"),
      Builder.getInt32(A->getXDim()),
      Builder.getInt32(A->getYDim()),
      Builder.getInt32(A->getZDim())
    };
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
  llvm::NamedMDNode *OpenCLKernelMetadata =
    CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
  OpenCLKernelMetadata->addOperand(kernelMDNode);
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  if (CGM.getSanitizerBlacklist().isIn(*Fn))
    SanOpts = &SanitizerOptions::Disabled;

  // Pass inline keyword to optimizer if it appears explicitly on any
  // declaration.  Also, in the case of -fno-inline attach NoInline
  // attribute to all functions that are not marked AlwaysInline.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (!CGM.getCodeGenOpts().NoInline) {
      for (auto RI : FD->redecls())
        if (RI->isInlineSpecified()) {
          Fn->addFnAttr(llvm::Attribute::InlineHint);
          break;
        }
    } else if (!FD->hasAttr<AlwaysInlineAttr>())
      Fn->addFnAttr(llvm::Attribute::NoInline);
  }

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prefix data.
  if (getLangOpts().CPlusPlus && SanOpts->Function) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrefixSig =
              CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
        llvm::Constant *PrefixStructElems[] = { PrefixSig, FTRTTIConst };
        llvm::Constant *PrefixStructConst =
            llvm::ConstantStruct::getAnon(PrefixStructElems, /*Packed=*/true);
        Fn->setPrefixData(PrefixStructConst);
      }
    }
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    SmallVector<QualType, 16> ArgTypes;
    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      ArgTypes.push_back((*i)->getType());
    }

    QualType FnType =
      getContext().getFunctionType(RetTy, ArgTypes,
                                   FunctionProtoType::ExtProtoInfo());
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = nullptr;

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = AI;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(EI, Idx);
    ReturnValue = Builder.CreateLoad(Addr, "agg.result");
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If this lambda captures this, load it.
        LValue ThisLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        CXXThisValue = EmitLoadOfLValue(ThisLValue,
                                        SourceLocation()).getScalarVal();
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
                                       const Stmt *Body) {
  RegionCounter Cnt = getPGORegionCounter(Body);
  Cnt.beginRegion(Builder);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code.  When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               RegionCounter &Cnt) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().ProfileInstrGenerate) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  Cnt.beginRegion(Builder, /*AddIncomingFallThrough=*/true);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}
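
// In the instrumented case the CFG is roughly:
//   <fall-through predecessor> --> skipcount
//   BB: <increment region counter> --> skipcount
// so the fall-through edge bypasses the counter increment in BB.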

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it.  We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
        if (!Call->doesNotThrow())
          return;
      } else if (isa<llvm::ResumeInst>(&*BI)) {
        return;
      }
  F->setDoesNotThrow();
}

static void EmitSizedDeallocationFunction(CodeGenFunction &CGF,
                                          const FunctionDecl *UnsizedDealloc) {
  // This is a weak discardable definition of the sized deallocation function.
  CGF.CurFn->setLinkage(llvm::Function::LinkOnceAnyLinkage);

  // Call the unsized deallocation function and forward the first argument
  // unchanged.
  llvm::Constant *Unsized = CGF.CGM.GetAddrOfFunction(UnsizedDealloc);
  CGF.Builder.CreateCall(Unsized, &*CGF.CurFn->arg_begin());
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  FunctionArgList Args;
  QualType ResTy = FD->getReturnType();

  CurGD = GD;
  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType(getContext());
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
    Args.push_back(FD->getParamDecl(i));

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located.  By default use the location
  // of the declaration as the location for the subprogram.  A function
  // may lack a declaration in the source code if it is created by code
  // gen.  (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
  PGO.assignRegionCounters(GD.getDecl(), CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !CGM.getCodeGenOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
  else if (isa<CXXConversionDecl>(FD) &&
           cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
    // The lambda conversion to block pointer is special; the semantics can't
    // be expressed in the AST, so IRGen needs to special-case it.
    EmitLambdaToBlockPointerBody(Args);
  } else if (isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Stmt *Body = FD->getBody()) {
    EmitFunctionBody(Args, Body);
  } else if (FunctionDecl *UnsizedDealloc =
                 FD->getCorrespondingUnsizedGlobalDeallocationFunction()) {
    // Global sized deallocation functions get an implicit weak definition if
    // they don't have an explicit definition.
    EmitSizedDeallocationFunction(*this, UnsizedDealloc);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior
  //   in a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    if (SanOpts->Return) {
      SanitizerScope SanScope(this);
      EmitCheck(Builder.getFalse(), "missing_return",
                EmitCheckSourceLocation(FD->getLocation()),
                ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
    } else if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap));
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);

  PGO.emitInstrumentationData();
  PGO.destroyRegionCounters();
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, it not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, no break!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (containsBreak(*I))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the boolean result in Result.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      RegionCounter Cnt = getPGORegionCounter(CondBOp);

      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        Cnt.beginRegion(Builder);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = Cnt.getCount();

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      Cnt.beginRegion(Builder);
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      RegionCounter Cnt = getPGORegionCounter(CondBOp);

      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        Cnt.beginRegion(Builder);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up True count between the short circuit and
      // the RHS.
      uint64_t LHSCount = Cnt.getParentCount() - Cnt.getCount();
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      Cnt.beginRegion(Builder);
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);

      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = PGO.getCurrentRegionCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    RegionCounter Cnt = getPGORegionCounter(CondOp);
    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock, Cnt.getCount());

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio = Cnt.getCount() / (double) Cnt.getParentCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    Cnt.beginRegion(Builder);
    EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                         LHSScaledTrueCount);
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(PGO.getCurrentRegionCount(), TrueCount);
  llvm::MDNode *Weights = PGO.createBranchWeights(TrueCount,
                                                  CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights);
}
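
// For example, 'if (a && b)' is lowered without materializing a boolean:
//   br(a, land.lhs.true, <false-dest>)
// land.lhs.true:
//   br(b, <true-dest>, <false-dest>)
// '||', '!', and the conditional operator get analogous short-circuit CFGs.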

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               llvm::Value *dest, llvm::Value *src,
                               llvm::Value *sizeInChars) {
  std::pair<CharUnits,CharUnits> baseSizeAndAlign
    = CGF.getContext().getTypeInfoInChars(baseType);

  CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());

  llvm::Type *i8p = Builder.getInt8PtrTy();

  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
  llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
  cur->addIncoming(begin, originBB);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(cur, src, baseSizeInChars,
                       baseSizeAndAlign.second.getQuantity(),
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
    cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP);

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
    getContext().getTypeInfoInChars(Ty);
  CharUnits Size = TypeInfo.first;
  CharUnits Align = TypeInfo.second;

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (Size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
          dyn_cast_or_null<VariableArrayType>(
            getContext().getAsArrayType(Ty))) {
      QualType eltType;
      llvm::Value *numElts;
      std::tie(numElts, eltType) = getVLASize(vlaType);

      SizeVal = numElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(Size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    llvm::Value *SrcPtr =
      Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
                       Align.getQuantity(), false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              llvm::Value *&addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(
      cast<llvm::PointerType>(addr->getType())->getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out-of-synch");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
        cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    unsigned AddressSpace = addr->getType()->getPointerAddressSpace();
    llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
    addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}
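
// For example, for 'int arr[n][3]' this returns (roughly) a count of n * 3,
// sets baseType to 'int', and adjusts 'addr' to point at the first element.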

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return std::pair<llvm::Value*,QualType>(numElements, elementType);
}
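
// For example, for the type 'int[n][m]' this yields numElements = n * m
// (computed with NUW multiplies) and an element type of 'int'.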

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a value
          //   greater than zero.
          if (SanOpts->VLABound &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
              EmitCheckSourceLocation(size->getLocStart()),
              EmitCheckTypeDescriptor(size->getType())
            };
            EmitCheck(Builder.CreateICmpSGT(Size, Zero),
                      "vla_bound_not_positive", StaticArgs, Size,
                      CRK_Recoverable);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;
    }
  } while (type->isVariablyModifiedType());
}
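
// For example, a declaration such as 'int (*p)[n]' walks through the pointer
// to the VLA type and evaluates (and caches in VLASizeMap) the size
// expression 'n', so later uses of the type can look it up.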

llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert (Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}

llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                                   llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return V;
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope) {
    I->setMetadata(
        CGM.getModule().getMDKindID("nosanitize"),
        llvm::MDNode::get(CGM.getLLVMContext(), ArrayRef<llvm::Value *>()));
  }
}

template <bool PreserveNames>
void CGBuilderInserter<PreserveNames>::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter<PreserveNames>::InsertHelper(I, Name, BB,
                                                              InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

#ifdef NDEBUG
#define PreserveNames false
#else
#define PreserveNames true
#endif
template void CGBuilderInserter<PreserveNames>::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const;
#undef PreserveNames