//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Disable lifetime markers in msan builds.
  // FIXME: Remove this when msan works with lifetime markers.
  if (LangOpts.Sanitize.has(SanitizerKind::Memory))
    return false;

  // Asan uses markers for use-after-scope checks.
  if (CGOpts.SanitizeAddressUseAfterScope)
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

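// When the markers are enabled, each scoped local is bracketed in the
// emitted IR roughly like:
//
//   %0 = bitcast i32* %x to i8*
//   call void @llvm.lifetime.start.p0i8(i64 4, i8* %0)
//   ...
//   call void @llvm.lifetime.end.p0i8(i64 4, i8* %0)
//
// where the i64 operand is the object's size in bytes.
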
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
      PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
                    CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setFast();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
    FMF.setNoNaNs();
  }
  if (CGM.getCodeGenOpts().NoSignedZeros) {
    FMF.setNoSignedZeros();
  }
  if (CGM.getCodeGenOpts().ReciprocalMath) {
    FMF.setAllowReciprocal();
  }
  if (CGM.getCodeGenOpts().Reassociate) {
    FMF.setAllowReassoc();
  }
  Builder.setFastMathFlags(FMF);
}

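// These flags land on individual floating-point instructions; e.g. under
// -ffast-math an addition is emitted roughly as
//
//   %add = fadd fast float %a, %b
//
// while -freciprocal-math alone would only mark divisions with 'arcp'.
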
CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now. This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);
}

CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
                                                          LValueBaseInfo *BaseInfo,
                                                          TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}

CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
                                                   LValueBaseInfo *BaseInfo,
                                                   TBAAAccessInfo *TBAAInfo,
                                                   bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = CGM.getTBAAAccessInfo(T);

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(Align);
    }
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  if (T->isIncompleteType()) {
    Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
  } else {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    const CXXRecordDecl *RD;
    if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
      Alignment = CGM.getClassPointerAlignment(RD);
    } else {
      Alignment = getContext().getTypeAlignInChars(T);
      if (T.getQualifiers().hasUnaligned())
        Alignment = CharUnits::One();
    }

    // Cap to the global maximum type alignment unless the alignment
    // was somehow explicit on the type.
    if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
      if (Alignment.getQuantity() > MaxAlign &&
          !getContext().isAlignmentRequired(T))
        Alignment = CharUnits::fromQuantity(MaxAlign);
    }
  }
  return Alignment;
}

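// For example, given
//
//   typedef int AlignedInt __attribute__((aligned(16)));
//
// the 16-byte attributed alignment is returned instead of the natural
// 4-byte alignment of 'int', and it is honored even on incomplete types.
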
LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
                          TBAAInfo);
}

/// Given a value of type T* that may not be to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                            /* forPointeeType= */ true);
  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

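// For example: 'int' and 'float *' are TEK_Scalar, '_Complex double' is
// TEK_Complex, and 'struct S { int a, b; };' is TEK_Aggregate, while
// '_Atomic(int)' is classified by its underlying 'int'.
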
llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

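// For instance, a body whose only return branches directly to the return
// block, e.g.
//
//   entry:
//     br label %return
//   return:
//     ret void
//
// is folded so that the 'ret' ends up directly in %entry.
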
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should
    // be no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // because the lifetime of the CleanupDestSlot alloca makes correct
  // coroutine frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Add the min-legal-vector-width attribute.
  if (LargestVectorWidth != 0)
    CurFn->addFnAttr("min-legal-vector-width",
                     llvm::utostr(LargestVectorWidth));
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::Constant *
CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
                                            llvm::Constant *Addr) {
  // Addresses stored in prologue data can't require run-time fixups and must
  // be PC-relative. Run-time fixups are undesirable because they necessitate
  // writable text segments, which are unsafe. And absolute addresses are
  // undesirable because they break PIE mode.

  // Add a layer of indirection through a private global. Taking its address
  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
                                      /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Addr);

  // Create a PC-relative address.
  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
  return (IntPtrTy == Int32Ty)
             ? PCRelAsInt
             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
}

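// The encoding stores the 32-bit offset GV - F; DecodeAddrUsedInPrologue
// below computes the inverse, recovering the global as
// GV = F + signext(EncodedAddr) and loading the original Addr back out of it.
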
llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
                            "decoded_addr");
}

static void removeImageAccessQualifier(std::string& TyName) {
  std::string ReadOnlyQual("__read_only");
  std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
  if (ReadOnlyPos != std::string::npos)
    // "+ 1" for the space after access qualifier.
    TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
  else {
    std::string WriteOnlyQual("__write_only");
    std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
    if (WriteOnlyPos != std::string::npos)
      TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
    else {
      std::string ReadWriteQual("__read_write");
      std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
      if (ReadWritePos != std::string::npos)
        TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
    }
  }
}

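// E.g. "__read_only image2d_t" becomes "image2d_t"; the "+ 1" above also
// eats the space that followed the qualifier.
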
// Returns the address space id that should be produced in the
// kernel_arg_addr_space metadata. These ids are always fixed to the ones
// specified by the SPIR 2.0 specification so that, for example, a
// clGetKernelArgInfo() implementation can tell the OpenCL address spaces
// apart on targets that lack a unique mapping to them (basically all
// single-address-space CPUs).
static unsigned ArgInfoAddressSpace(LangAS AS) {
  switch (AS) {
  case LangAS::opencl_global:   return 1;
  case LangAS::opencl_constant: return 2;
  case LangAS::opencl_local:    return 3;
  case LangAS::opencl_generic:  return 4; // Not in SPIR 2.0 specs.
  default:
    return 0; // Assume private.
  }
}

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 CGBuilderTy &Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list in the form of "key", N number of values which is
  // the same number of values as there are kernel arguments.

  const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
          ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));

      // Get argument type name.
      std::string typeName =
          pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pointeeTy.isCanonical() && pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      std::string baseTypeName =
          pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
              Policy) +
          "*";

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos+1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      uint32_t AddrSpc = 0;
      bool isPipe = ty->isPipeType();
      if (ty->isImageType() || isPipe)
        AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);

      addressQuals.push_back(
          llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));

      // Get argument type name.
      std::string typeName;
      if (isPipe)
        typeName = ty.getCanonicalType()->getAs<PipeType>()->getElementType()
                       .getAsString(Policy);
      else
        typeName = ty.getUnqualifiedType().getAsString(Policy);

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (ty.isCanonical() && pos != std::string::npos)
        typeName.erase(pos+1, 8);

      std::string baseTypeName;
      if (isPipe)
        baseTypeName = ty.getCanonicalType()->getAs<PipeType>()
                           ->getElementType().getCanonicalType()
                           .getAsString(Policy);
      else
        baseTypeName =
            ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);

      // Remove access qualifiers on images
      // (as they are inseparable from type in clang implementation,
      // but OpenCL spec provides a special query to get access qualifier
      // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
      if (ty->isImageType()) {
        removeImageAccessQualifier(typeName);
        removeImageAccessQualifier(baseTypeName);
      }

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos+1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      if (isPipe)
        typeQuals = "pipe";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image and pipe access qualifier:
    if (ty->isImageType() || ty->isPipeType()) {
      const Decl *PDecl = parm;
      if (auto *TD = dyn_cast<TypedefType>(ty))
        PDecl = TD->getDecl();
      const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
      if (A && A->isWriteOnly())
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else if (A && A->isReadWrite())
        accessQuals.push_back(llvm::MDString::get(Context, "read_write"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  Fn->setMetadata("kernel_arg_addr_space",
                  llvm::MDNode::get(Context, addressQuals));
  Fn->setMetadata("kernel_arg_access_qual",
                  llvm::MDNode::get(Context, accessQuals));
  Fn->setMetadata("kernel_arg_type",
                  llvm::MDNode::get(Context, argTypeNames));
  Fn->setMetadata("kernel_arg_base_type",
                  llvm::MDNode::get(Context, argBaseTypeNames));
  Fn->setMetadata("kernel_arg_type_qual",
                  llvm::MDNode::get(Context, argTypeQuals));
  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    Fn->setMetadata("kernel_arg_name",
                    llvm::MDNode::get(Context, argNames));
}

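// As an illustration, a kernel such as
//
//   kernel void foo(global float *x, read_only image2d_t img)
//
// is annotated roughly as:
//
//   !kernel_arg_addr_space !{i32 1, i32 1}
//   !kernel_arg_access_qual !{!"none", !"read_only"}
//   !kernel_arg_type !{!"float*", !"image2d_t"}
//   !kernel_arg_type_qual !{!"", !""}
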
void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn)
{
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  GenOpenCLArgMetadata(FD, Fn, CGM, Context, Builder, getContext());

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

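// For example, __attribute__((reqd_work_group_size(8, 8, 1))) on a kernel
// shows up on the function as:
//
//   !reqd_work_group_size !{i32 8, i32 8, i32 1}
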
/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

static void markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
  Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

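// In practice this accepts the usual allocator member function shapes, e.g.
//
//   T *allocate(size_t n);
//   T *allocate(size_t n, const void *hint);
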
/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
    if (FD->usesSEHTry())
      CurSEHParent = FD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function has been blacklisted for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
      SanOpts.set(SanitizerKind::ID, false);
#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (0);

  if (D) {
    // Apply the no_sanitize* attributes to SanOpts.
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
      SanitizerMask mask = Attr->getMask();
      SanOpts.Mask &= ~mask;
      if (mask & SanitizerKind::Address)
        SanOpts.set(SanitizerKind::KernelAddress, false);
      if (mask & SanitizerKind::KernelAddress)
        SanOpts.set(SanitizerKind::Address, false);
      if (mask & SanitizerKind::HWAddress)
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
      if (mask & SanitizerKind::KernelHWAddress)
        SanOpts.set(SanitizerKind::HWAddress, false);
    }
  }

  // Apply sanitizer attributes to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                       SanitizerKind::KernelHWAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
  if (SanOpts.has(SanitizerKind::Thread))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  if (SanOpts.has(SanitizerKind::Memory))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    } else if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      IdentifierInfo *II = FD->getIdentifier();
      if (II && II->isStr("__destroy_helper_block_"))
        markAsIgnoreThreadCheckingAtRuntime(Fn);
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::.
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Apply xray attributes to the function (as a string, for now).
  bool InstrumentXray = ShouldXRayInstrumentFunction() &&
                        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
                            XRayInstrKind::Function);
  if (D && InstrumentXray) {
    if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
      if (XRayAttr->alwaysXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-always");
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>()) {
        Fn->addFnAttr("xray-log-args",
                      llvm::utostr(LogArgs->getArgumentCount()));
      }
    } else {
      if (!CGM.imbueXRayAttrs(Fn, Loc))
        Fn->addFnAttr(
            "xray-instruction-threshold",
            llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
    }
  }

  // Add no-jump-tables value.
  Fn->addFnAttr("no-jump-tables",
                llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
        // Remove any (C++17) exception specifications, to allow calling e.g. a
        // noexcept function through a non-noexcept pointer.
        auto ProtoTy =
            getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
                                                          EST_None);
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
        llvm::Constant *FTRTTIConstEncoded =
            EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
        llvm::Constant *PrologueStructElems[] = {PrologueSig,
                                                 FTRTTIConstEncoded};
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  if (getLangOpts().CPlusPlus)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->isMain())
        Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entryblock
  // later. Don't create this with the builder, because we don't want it
  // inserted.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    CallingConv CC = CallingConv::CC_C;
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
        CC = SrcFnTy->getCallConv();
    SmallVector<QualType, 16> ArgTypes;
    for (const VarDecl *VD : Args)
      ArgTypes.push_back(VD->getType());
    QualType FnType = getContext().getFunctionType(
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
                          Builder);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as
  // function inlining, we just add an attribute to insert a mcount call in
  // the backend. The attribute value is set to the mcount function name,
  // which is architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
    }
  }

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
    Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either
        // by value or by reference, make sure CXXThisValue points to the
        // correct object.

        // Get the lvalue for the field (which is a copy of the enclosing
        // object or contains the address of the enclosing object).
        LValue ThisFieldLValue =
            EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address.
          CXXThisValue = ThisFieldLValue.getAddress().getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
                  .getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType(getContext());

      // If this is the call operator of a lambda with no capture-default, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) &&
          MD->getParent()->getLambdaCaptureDefault() == LCD_None)
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                                : TCK_MemberCall,
                    Loc, CXXABIThisValue, ThisTy,
                    getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
                    SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }

  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);

  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();
}

void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
                                       const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}

QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType(getContext());
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

static bool
shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
                                             const ASTContext &Context) {
  QualType T = FD->getReturnType();
  // Avoid the optimization for functions that return a record type with a
  // trivial destructor or another trivially copyable type.
  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return !ClassDecl->hasTrivialDestructor();
  }
  return !T.isTriviallyCopyableType(Context);
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  // Initialize helper which will detect jumps which can cause invalid
  // lifetime markers.
  if (Body && ShouldEmitLifetimeMarkers)
    Bypasses.Init(Body);

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
  PGO.assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Body)
    EmitFunctionBody(Args, Body);
  else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior
  //   in a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    bool ShouldEmitUnreachable =
        CGM.getCodeGenOpts().StrictReturn ||
        shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                SanitizerHandler::MissingReturn,
                EmitCheckSourceLocation(FD->getLocation()), None);
    } else if (ShouldEmitUnreachable) {
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
        EmitTrapCall(llvm::Intrinsic::trap);
    }
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();
    }
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally, it not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (const Stmt *SubStmt : S->children())
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a break!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return true;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (const Stmt *SubStmt : S->children())
    if (containsBreak(SubStmt))
      return true;

  return false;
}

bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
  if (!S) return false;

  // Some statement kinds add a scope and thus never add a decl to the current
  // scope. Note, this list is longer than the list of statements that might
  // have an unscoped decl nested within them, but this way is conservatively
  // correct even if more statement kinds are added.
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
    return false;

  if (isa<DeclStmt>(S))
    return true;

  for (const Stmt *SubStmt : S->children())
    if (mightAddDeclToScope(SubStmt))
      return true;

  return false;
}

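// E.g. 'while (c) { int x; }' cannot add 'x' to the enclosing scope, but a
// bare 'int x = 0;' DeclStmt might.
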
/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the boolean result in Result.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool,
                                                   bool AllowLabels) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   llvm::APSInt &ResultInt,
                                                   bool AllowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false; // Not foldable, not integer or not fully evaluatable.

  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
    return false; // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks. Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
/// TrueCount is the number of times we expect the condition to evaluate to
/// true based on PGO data.
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code. "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional. If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
        EmitBlock(LHSTrue);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code. "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional. If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up True count between the short circuit
      // and the RHS.
      uint64_t LHSCount =
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
        EmitBlock(LHSFalse);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
      eval.end(*this);

      return;
    }
  }
  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
                         getProfileCount(CondOp));

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio =
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    incrementProfileCounter(CondOp);
    {
      ApplyDebugLocation DL(*this, Cond);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                           LHSScaledTrueCount);
    }
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  llvm::MDNode *Unpredictable = nullptr;
  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      Unpredictable = MDHelper.createUnpredictable();
    }
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
  llvm::MDNode *Weights =
      createProfileWeights(TrueCount, CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV;
  {
    ApplyDebugLocation DL(*this, Cond);
    CondV = EvaluateExprAsBool(Cond);
  }
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
}

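// Illustrative shape of the output: for "if (a && b)" the code above emits
// roughly the following CFG, expressing the short-circuit purely as control
// flow and hanging the profile weights on the final conditional branch:
//
//     br i1 %a, label %land.lhs.true, label %if.else
//   land.lhs.true:
//     br i1 %b, label %if.then, label %if.else, !prof !N
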
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               Address dest, Address src,
                               llvm::Value *sizeInChars) {
  CGBuilderTy &Builder = CGF.Builder;

  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());

  Address begin =
    Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
  llvm::Value *end =
    Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
  cur->addIncoming(begin.getPointer(), originBB);

  CharUnits curAlign =
    dest.getAlignment().alignmentOfArrayElement(baseSize);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

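// Sketch of the loop this emits (assuming a VLA whose element null value is
// a non-zero pattern, e.g. member pointers under the Itanium ABI):
//
//   vla-init.loop:
//     %vla.cur = phi i8* [ %vla.begin, %entry ], [ %vla.next, %vla-init.loop ]
//     ; memcpy one element's bit-pattern from %src to %vla.cur
//     %vla.next = getelementptr inbounds i8, i8* %vla.cur, i64 <elt size>
//     %vla-init.isdone = icmp eq i8* %vla.next, %vla.end
//     br i1 %vla-init.isdone, label %vla-init.cont, label %vla-init.loop
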
void
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (DestPtr.getElementType() != Int8Ty)
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);

  // Get size and alignment info for this aggregate.
  CharUnits size = getContext().getTypeSizeInChars(Ty);

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
            dyn_cast_or_null<VariableArrayType>(
                getContext().getAsArrayType(Ty))) {
      auto VlaSize = getVLASize(vlaType);
      SizeVal = VlaSize.NumElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    CharUnits NullAlign = DestPtr.getAlignment();
    NullVariable->setAlignment(NullAlign.getQuantity());
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
                   NullAlign);

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
}

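// Illustrative example of the non-zero path: under the Itanium ABI a null
// pointer to data member is all-ones, so for
//
//   struct S { int S::*p; };
//   S s = S();
//
// isZeroInitializable(S) is false, and the code above copies from a private
// global holding the -1 pattern instead of emitting a memset of zero.
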
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

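// Illustrative note: the shared funnel block serves GNU computed gotos, e.g.
//
//   void *dest = cond ? &&L1 : &&L2;
//   goto *dest;
//
// Every "goto *x" jumps to the single "indirectgoto" block after adding its
// destination value to the PHI, and GetAddrOfLabel registers each
// address-taken label as a possible indirectbr destination.
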
/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              Address &addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(addr.getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out-of-synch");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    llvm::Type *baseType = ConvertType(eltType);
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
                                             gepIndices, "array.begin"),
                   addr.getAlignment());
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

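// Worked example: for "int a[n][4]", the VLA walk above yields
// numVLAElements = n and leaves addr with LLVM type [4 x i32]*; the
// constant-length walk then computes countFromCLAs = 4 and GEPs down to an
// i32* "array.begin", so the function returns n * 4 with baseType = int.
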
CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return { numElements, elementType };
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLAElements1D(vla);
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
  assert(VlaSize && "no size for VLA!");
  assert(VlaSize->getType() == SizeTy);
  return { VlaSize, Vla->getElementType() };
}

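// Usage note: for "int m[a][b]", getVLASize returns the fully multiplied
// pair { a*b, int }, whereas getVLAElements1D peels only the outermost
// dimension and returns { a, int[b] } -- the right shape for callers that
// iterate row by row.
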
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a value
          //   greater than zero.
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
              EmitCheckSourceLocation(size->getLocStart()),
              EmitCheckTypeDescriptor(size->getType())
            };
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

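// Illustrative example of the caching behavior: in
//
//   typedef int T[n];   // evaluates 'n' and records it in VLASizeMap
//   T *p;               // revisits the same size expression via Pointer
//
// the VLASizeMap entry keyed on the size expression guarantees each VLA
// bound is evaluated exactly once, no matter how often the walk above
// encounters the type.
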
Address CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitPointerWithAlignment(E);
  return EmitLValue(E).getAddress();
}

Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              const APValue &Init) {
  assert(!Init.isUninit() && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

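// Illustrative note: the placeholder matters for conditionally evaluated
// scalars that arrive as a zext, e.g.
//
//   unsigned short us = ...;
//   unsigned u = us;            // %1 = zext i16 %0 to i32
//
// The extra bitcast gives the zext an additional use while the surrounding
// IR is still being built, preventing the trunc(zext) peephole mentioned
// above from folding it away prematurely; unprotectFromPeepholes erases the
// placeholder once that is safe.
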
llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}

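// Illustrative example: for
//
//   int x __attribute__((annotate("my_tag")));
//
// the loop above emits one @llvm.var.annotation call per annotate attribute,
// passing x's address as an i8*, the "my_tag" string, and the location.
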
Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                              Address Addr) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Value *V = Addr.getPointer();
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return Address(V, Addr.getAlignment());
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

void CGBuilderInserter::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
                                CodeGenModule &CGM, const FunctionDecl *FD,
                                std::string &FirstMissing) {
  // If there aren't any required features listed then go ahead and return.
  if (ReqFeatures.empty())
    return false;

  // Now build up the set of caller features and verify that all the required
  // features are there.
  llvm::StringMap<bool> CallerFeatureMap;
  CGM.getFunctionFeatureMap(CallerFeatureMap, FD);

  // Every comma-separated entry in ReqFeatures must be satisfied; within an
  // entry, '|'-separated alternatives are satisfied by any one of them.
  return std::all_of(
      ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
        SmallVector<StringRef, 1> OrFeatures;
        Feature.split(OrFeatures, '|');
        return std::any_of(OrFeatures.begin(), OrFeatures.end(),
                           [&](StringRef Feature) {
                             if (!CallerFeatureMap.lookup(Feature)) {
                               FirstMissing = Feature.str();
                               return false;
                             }
                             return true;
                           });
      });
}

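// Illustrative note on the feature grammar this checks: the caller splits a
// required-features string on ',' and each entry may hold '|'-separated
// alternatives. For a hypothetical list "avx512vl,avx512bw|avx512dq", the
// all_of/any_of pair above requires avx512vl AND at least one of avx512bw
// or avx512dq in the caller's feature map.
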
// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
                                          const FunctionDecl *TargetDecl) {
  // Early exit if this is an indirect call.
  if (!E)
    return;

  // Get the current enclosing function if it exists. If it doesn't
  // we can't check the target features anyhow.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
  if (!FD)
    return;

  // Grab the required features for the call. For a builtin this is listed in
  // the td file with the default cpu, for an always_inline function this is
  // any listed cpu and any listed features.
  unsigned BuiltinID = TargetDecl->getBuiltinID();
  std::string MissingFeature;
  if (BuiltinID) {
    SmallVector<StringRef, 1> ReqFeatures;
    const char *FeatureList =
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
    // Return if the builtin doesn't have any required features.
    if (!FeatureList || StringRef(FeatureList) == "")
      return;
    StringRef(FeatureList).split(ReqFeatures, ',');
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(E->getLocStart(), diag::err_builtin_needs_feature)
          << TargetDecl->getDeclName()
          << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);

  } else if (TargetDecl->hasAttr<TargetAttr>() ||
             TargetDecl->hasAttr<CPUSpecificAttr>()) {
    // Get the required features for the callee.

    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
    TargetAttr::ParsedTargetAttr ParsedAttr = CGM.filterFunctionTargetAttrs(TD);

    SmallVector<StringRef, 1> ReqFeatures;
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);

    for (const auto &F : ParsedAttr.Features) {
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
        ReqFeatures.push_back(StringRef(F).substr(1));
    }

    for (const auto &F : CalleeFeatureMap) {
      // Only positive features are "required".
      if (F.getValue())
        ReqFeatures.push_back(F.getKey());
    }
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(E->getLocStart(), diag::err_function_needs_feature)
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
  }
}

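// Illustrative example of the diagnostic this enables: calling an AVX
// intrinsic wrapper (an always_inline user of a builtin) such as
//
//   __m256d g(__m256d a, __m256d b) { return _mm256_add_pd(a, b); }
//
// in a translation unit compiled without -mavx reports
// err_builtin_needs_feature, because the underlying builtin's required
// feature "avx" is missing from the caller's feature map.
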
void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
  if (!CGM.getCodeGenOpts().SanitizeStats)
    return;

  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
  CGM.getSanStats().create(IRB, SSK);
}

llvm::Value *CodeGenFunction::FormResolverCondition(
    const TargetMultiVersionResolverOption &RO) {
  llvm::Value *TrueCondition = nullptr;
  if (!RO.ParsedAttribute.Architecture.empty())
    TrueCondition = EmitX86CpuIs(RO.ParsedAttribute.Architecture);

  if (!RO.ParsedAttribute.Features.empty()) {
    SmallVector<StringRef, 8> FeatureList;
    llvm::for_each(RO.ParsedAttribute.Features,
                   [&FeatureList](const std::string &Feature) {
                     FeatureList.push_back(StringRef{Feature}.substr(1));
                   });
    llvm::Value *FeatureCmp = EmitX86CpuSupports(FeatureList);
    TrueCondition = TrueCondition ? Builder.CreateAnd(TrueCondition, FeatureCmp)
                                  : FeatureCmp;
  }
  return TrueCondition;
}

void CodeGenFunction::EmitTargetMultiVersionResolver(
    llvm::Function *Resolver,
    ArrayRef<TargetMultiVersionResolverOption> Options) {
  assert((getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86 ||
          getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86_64) &&
         "Only implemented for x86 targets");

  // Main function's basic block.
  llvm::BasicBlock *CurBlock = createBasicBlock("entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitX86CpuInit();

  llvm::Function *DefaultFunc = nullptr;
  for (const TargetMultiVersionResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *TrueCondition = FormResolverCondition(RO);

    if (!TrueCondition) {
      DefaultFunc = RO.Function;
    } else {
      llvm::BasicBlock *RetBlock = createBasicBlock("ro_ret", Resolver);
      llvm::IRBuilder<> RetBuilder(RetBlock);
      RetBuilder.CreateRet(RO.Function);
      CurBlock = createBasicBlock("ro_else", Resolver);
      Builder.CreateCondBr(TrueCondition, RetBlock, CurBlock);
    }
  }

  assert(DefaultFunc && "No default version?");
  // Emit return from the 'else-ist' block.
  Builder.SetInsertPoint(CurBlock);
  Builder.CreateRet(DefaultFunc);
}

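// Illustrative example: given function multiversions such as
//
//   __attribute__((target("avx2")))    int foo(void) { return 2; }
//   __attribute__((target("default"))) int foo(void) { return 0; }
//
// the resolver emitted above runs the CPU-init hook, tests the avx2
// condition, returns the avx2 body from an "ro_ret" block on success, and
// otherwise falls through to return the "default" version.
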
void CodeGenFunction::EmitCPUDispatchMultiVersionResolver(
    llvm::Function *Resolver,
    ArrayRef<CPUDispatchMultiVersionResolverOption> Options) {
  assert((getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86 ||
          getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86_64) &&
         "Only implemented for x86 targets");

  // Main function's basic block.
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitX86CpuInit();

  for (const CPUDispatchMultiVersionResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);

    // "generic" case should catch-all.
    if (RO.FeatureMask == 0) {
      Builder.CreateRet(RO.Function);
      return;
    }
    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    llvm::IRBuilder<> RetBuilder(RetBlock);
    RetBuilder.CreateRet(RO.Function);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    llvm::Value *TrueCondition = EmitX86CpuSupports(RO.FeatureMask);
    Builder.CreateCondBr(TrueCondition, RetBlock, CurBlock);
  }

  // If no "generic" (mask 0) option was provided, fail hard with a trap.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}

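// Illustrative example: for a dispatcher such as
//
//   __attribute__((cpu_dispatch(ivybridge, generic))) void bar(void);
//
// each non-zero feature mask gets a "resolver_return"/"resolver_else" pair;
// a mask of 0 ("generic") becomes the unconditional catch-all return, and
// the trailing trap is reachable only when no generic version exists.
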
llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
  if (CGDebugInfo *DI = getDebugInfo())
    return DI->SourceLocToDebugLoc(Location);

  return llvm::DebugLoc();
}