//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"

using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

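// Editor's note (illustration, not part of the original source): when markers
// are emitted, a block-scoped local such as
//   { int Buf[16]; use(Buf); }
// is bracketed in the IR with llvm.lifetime.start/llvm.lifetime.end intrinsic
// calls around the alloca's live range, which lets later passes (e.g. stack
// coloring) reuse the stack slot outside that range.
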
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  SetFastMathFlags(CurFPFeatures);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  // time of the CodeGenModule, because we have to ensure the IR has not yet
  // been "emitted" to the outside, thus, modifications are still sensible.
  if (CGM.getLangOpts().OpenMPIRBuilder)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize();
}

// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {

  switch (Kind) {
  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
  }
  llvm_unreachable("Unsupported FP Exception Behavior");
}

void CodeGenFunction::SetFPModel() {
  llvm::RoundingMode RM = getLangOpts().getFPRoundingMode();
  auto fpExceptionBehavior = ToConstrainedExceptMD(
                               getLangOpts().getFPExceptionMode());

  Builder.setDefaultConstrainedRounding(RM);
  Builder.setDefaultConstrainedExcept(fpExceptionBehavior);
  Builder.setIsFPConstrained(fpExceptionBehavior != llvm::fp::ebIgnore ||
                             RM != llvm::RoundingMode::NearestTiesToEven);
}

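// Editor's note (illustration, not part of the original source): once the
// builder is FP-constrained (e.g. under -ffp-exception-behavior=strict or
// '#pragma STDC FENV_ACCESS ON'), it emits constrained intrinsics such as
// llvm.experimental.constrained.fadd, carrying explicit rounding and
// exception-behavior arguments, instead of a plain 'fadd'. That keeps the
// optimizer from moving FP operations across FP-environment changes.
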
void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}

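// Editor's note (illustration, not part of the original source): under
// -ffast-math all of these flags are set, so an expression like 'a * b + c'
// may be emitted as 'fadd fast (fmul fast a, b), c', licensing reassociation,
// reciprocal transforms, and FMA formation in later passes.
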
CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  FPOptions FPFeatures)
    : CGF(CGF), OldFPFeatures(CGF.CurFPFeatures) {
  CGF.CurFPFeatures = FPFeatures;

  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior =
      static_cast<llvm::RoundingMode>(FPFeatures.getRoundingMode());
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
          FPFeatures.getFPExceptionMode()));
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue =
        CGF.CurFn->getFnAttribute(Name).getValueAsString() == "true";
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };

  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
  mergeFnAttrValue("unsafe-fp-math", FPFeatures.getAllowFPReassociate() &&
                                         FPFeatures.getAllowReciprocal() &&
                                         FPFeatures.getAllowApproxFunc() &&
                                         FPFeatures.getNoSignedZero());
}

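// Editor's note (hypothetical usage sketch, not part of the original source):
// callers typically scope this RAII object to a single expression, roughly
//   CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, ExprFPFeatures);
// where ExprFPFeatures stands for the FPOptions computed for that expression
// (the exact accessor varies by Clang version). Pragma-level FP state, e.g.
// '#pragma clang fp contract(fast)', then applies only while that expression
// is emitted and is restored by the destructor afterwards.
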
CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
                          TBAAInfo);
}

/// Given a value of type T* that may not be to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                                /* forPointeeType= */ true);
  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::ExtInt:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // unreachable code.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be used
      // later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // no location for the cleanup code is needed.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;

  // Make sure the line table doesn't jump back into the body for
  // the ret after it's been at EndLoc.
  Optional<ApplyDebugLocation> AL;
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, EndLoc);
    else
      // We may not have a valid end location. Try to apply it anyway, and
      // fall back to an artificial location if needed.
      AL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
  }

  PopCleanupBlocks(PrologueCleanupDepth);

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }

  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in slight increase in numbers of instructions
  // if compiled with no optimizations. We do it for coroutines as the lifetime
  // of the CleanupDestSlot alloca makes correct coroutine frame building very
  // complicated.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Add the required-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by this
  //    function.
  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

/// AlwaysEmitXRayTypedEvents - Return true if we should emit IR for calls to
/// the __xray_typedevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::Constant *
CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
                                            llvm::Constant *Addr) {
  // Addresses stored in prologue data can't require run-time fixups and must
  // be PC-relative. Run-time fixups are undesirable because they necessitate
  // writable text segments, which are unsafe. And absolute addresses are
  // undesirable because they break PIE mode.

  // Add a layer of indirection through a private global. Taking its address
  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
                                      /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Addr);

  // Create a PC-relative address.
  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
  return (IntPtrTy == Int32Ty)
             ? PCRelAsInt
             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
}

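// Editor's note (worked example, not part of the original source): the
// encoder stores 'GV - F' as an i32. The decoder below reverses it: given
// the runtime function address F, it computes GV = F + encoded, then loads
// the original pointer back out of GV, so no text-segment relocation is
// ever required.
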
llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
                            "decoded_addr");
}

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenOpenCLArgMetadata(Fn, FD, this);

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

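// Editor's note (illustration, not part of the original source): an OpenCL
// kernel declared as
//   __attribute__((reqd_work_group_size(8, 8, 1))) __kernel void k(...);
// is emitted with '!reqd_work_group_size !{i32 8, i32 8, i32 1}' attached to
// the kernel function, which device compilers use when scheduling work-groups.
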
/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

/// Check if the return value of this function requires sanitization.
bool CodeGenFunction::requiresReturnValueCheck() const {
  return requiresReturnValueNullabilityCheck() ||
         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
    if (FD->usesSEHTry())
      CurSEHParent = FD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function has been blacklisted for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (0);

  if (D) {
    // Apply the no_sanitize* attributes to SanOpts.
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
      SanitizerMask mask = Attr->getMask();
      SanOpts.Mask &= ~mask;
      if (mask & SanitizerKind::Address)
        SanOpts.set(SanitizerKind::KernelAddress, false);
      if (mask & SanitizerKind::KernelAddress)
        SanOpts.set(SanitizerKind::Address, false);
      if (mask & SanitizerKind::HWAddress)
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
      if (mask & SanitizerKind::KernelHWAddress)
        SanOpts.set(SanitizerKind::HWAddress, false);
    }
  }

  // Apply sanitizer attributes to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                       SanitizerKind::KernelHWAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
  if (SanOpts.has(SanitizerKind::MemTag))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
  if (SanOpts.has(SanitizerKind::Thread))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::.
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Ignore null checks in coroutine functions since the coroutines passes
  // are not aware of how to move the extra UBSan instructions across the split
  // coroutine boundaries.
  if (D && SanOpts.has(SanitizerKind::Null))
    if (const auto *FD = dyn_cast<FunctionDecl>(D))
      if (FD->getBody() &&
          FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
        SanOpts.Mask &= ~SanitizerKind::Null;

  // Apply xray attributes to the function (as a string, for now)
  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry) ||
        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit)) {
      if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
        Fn->addFnAttr("function-instrument", "xray-always");
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
        if (ShouldXRayInstrumentFunction())
          Fn->addFnAttr("xray-log-args",
                        llvm::utostr(LogArgs->getArgumentCount()));
    }
  } else {
    if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
      Fn->addFnAttr(
          "xray-instruction-threshold",
          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
  }

  if (ShouldXRayInstrumentFunction()) {
    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
      Fn->addFnAttr("xray-ignore-loops");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit))
      Fn->addFnAttr("xray-skip-exit");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry))
      Fn->addFnAttr("xray-skip-entry");
  }

  unsigned Count, Offset;
  if (const auto *Attr =
          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
    Count = Attr->getCount();
    Offset = Attr->getOffset();
  } else {
    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
  }
  if (Count && Offset <= Count) {
    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
    if (Offset)
      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
  }

  // Add no-jump-tables value.
  Fn->addFnAttr("no-jump-tables",
                llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));

  // Add no-inline-line-tables value.
  if (CGM.getCodeGenOpts().NoInlineLineTables)
    Fn->addFnAttr("no-inline-line-tables");

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
    Fn->addFnAttr("use-sample-profile");

  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
    Fn->addFnAttr("cfi-canonical-jump-table");

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
        // Remove any (C++17) exception specifications, to allow calling e.g. a
        // noexcept function through a non-noexcept pointer.
        auto ProtoTy = getContext().getFunctionTypeWithExceptionSpec(
            FD->getType(), EST_None);
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
        llvm::Constant *FTRTTIConstEncoded =
            EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
        llvm::Constant *PrologueStructElems[] = {PrologueSig,
                                                 FTRTTIConstEncoded};
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  //
  // OpenCL C 2.0 v2.2-11 s6.9.i:
  //   Recursion is not supported.
  //
  // SYCL v1.2.1 s3.10:
  //   kernels cannot include RTTI information, exception classes,
  //   recursive code, virtual functions or make use of C++ libraries that
  //   are not compiled for the device.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
        getLangOpts().SYCLIsDevice ||
        (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>()))
      Fn->addFnAttr(llvm::Attribute::NoRecurse);
  }

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    Builder.setIsFPConstrained(FD->usesFPIntrin());
    if (FD->usesFPIntrin())
      Fn->addFnAttr(llvm::Attribute::StrictFP);
  }

  // If a custom alignment is used, force realigning to this alignment on
  // any main function which certainly will need it.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
        CGM.getCodeGenOpts().StackAlignment)
      Fn->addFnAttr("stackrealign");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    CallingConv CC = CallingConv::CC_C;
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
        CC = SrcFnTy->getCallConv();
    SmallVector<QualType, 16> ArgTypes;
    for (const VarDecl *VD : Args)
      ArgTypes.push_back(VD->getType());
    QualType FnType = getContext().getFunctionType(
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
                          Builder.GetInsertBlock());
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as function
  // inlining, we just add an attribute to insert a mcount call in backend.
  // The attribute "counting-function" is set to mcount function name which is
  // architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
      if (CGM.getCodeGenOpts().MNopMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mnop-mcount" << "-mfentry";
        Fn->addFnAttr("mnop-mcount");
      }

      if (CGM.getCodeGenOpts().RecordMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mrecord-mcount" << "-mfentry";
        Fn->addFnAttr("mrecord-mcount");
      }
    }
  }

  if (CGM.getCodeGenOpts().PackedStack) {
    if (getContext().getTargetInfo().getTriple().getArch() !=
        llvm::Triple::systemz)
      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
          << "packed-stack";
    Fn->addFnAttr("packed-stack");
  }

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
      ReturnValuePointer =
          CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
      Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
                              ReturnValue.getPointer(), Int8PtrTy),
                          ReturnValuePointer);
    }
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
    ReturnValuePointer = Address(Addr, getPointerAlign());
    Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, CGM.getNaturalTypeAlignment(RetTy));
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

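  // Editor's note (illustration, not part of the original source): a function
  // like 'struct Big f();' whose return is classified Indirect receives a
  // hidden 'sret' pointer argument; the callee writes the result straight
  // into the caller's slot through that pointer, so 'ReturnValue' above
  // aliases the caller-provided storage rather than a local temporary.
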
  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing object
        // or contains the address of the enclosing object).
        LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its address.
          CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was captured
          // by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType();

      // If this is the call operator of a lambda with no capture-default, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) &&
          MD->getParent()->getLambdaCaptureDefault() == LCD_None)
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                                : TCK_MemberCall,
                    Loc, CXXABIThisValue, ThisTy,
                    getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
                    SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }

  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);

  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();
}

void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}

QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType();
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

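// Editor's note (illustration, not part of the original source): a parameter
// declared as
//   void f(char *buf __attribute__((pass_object_size(0))));
// gets a trailing implicit 'size_t' argument here, which callers populate
// with __builtin_object_size(buf, 0) at each call site.
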
static bool
shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
                                             const ASTContext &Context) {
  QualType T = FD->getReturnType();
  // Avoid the optimization for functions that return a record type with a
  // trivial destructor or another trivially copyable type.
  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return !ClassDecl->hasTrivialDestructor();
  }
  return !T.isTriviallyCopyableType(Context);
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  // Initialize helper which will detect jumps which can cause invalid
  // lifetime markers.
  if (Body && ShouldEmitLifetimeMarkers)
    Bypasses.Init(Body);

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
  PGO.assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Body) {
    EmitFunctionBody(Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior
  //   in a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    bool ShouldEmitUnreachable =
        CGM.getCodeGenOpts().StrictReturn ||
        shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                SanitizerHandler::MissingReturn,
                EmitCheckSourceLocation(FD->getLocation()), None);
    } else if (ShouldEmitUnreachable) {
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
        EmitTrapCall(llvm::Intrinsic::trap);
    }
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();
    }
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

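// Editor's note (illustration, not part of the original source): given
//   int f(bool b) { if (b) return 1; }
// the 'b == false' path reaches the closing brace of a value-returning
// function, so the code above emits a trap at -O0 (or a -fsanitize=return
// diagnostic) followed by 'unreachable' on that path.
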
/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally, it not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (const Stmt *SubStmt : S->children())
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a break!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return true;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (const Stmt *SubStmt : S->children())
    if (containsBreak(SubStmt))
      return true;

  return false;
}

bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
  if (!S) return false;

  // Some statement kinds add a scope and thus never add a decl to the current
  // scope. Note, this list is longer than the list of statements that might
  // have an unscoped decl nested within them, but this way is conservatively
  // correct even if more statement kinds are added.
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
    return false;

  if (isa<DeclStmt>(S))
    return true;

  for (const Stmt *SubStmt : S->children())
    if (mightAddDeclToScope(SubStmt))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the boolean result in Result.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool,
                                                   bool AllowLabels) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   llvm::APSInt &ResultInt,
                                                   bool AllowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->EvaluateAsInt(Result, getContext()))
    return false; // Not foldable, not integer or not fully evaluatable.

  llvm::APSInt Int = Result.Val.getInt();
  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
    return false; // Contains a label.

  ResultInt = Int;
  return true;
}

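// Editor's note (illustration, not part of the original source): this is what
// lets 'if (sizeof(int) == 4) { ... }' emit only the taken arm with no
// branch: the condition folds to a constant and contains no label, so the
// caller can drop the dead arm entirely.
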
/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks. Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
/// TrueCount is the number of times we expect the condition to evaluate to
/// true based on PGO data.
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code. "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional. If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
        EmitBlock(LHSTrue);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code. "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional. If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up the True count between the short
      // circuit and the RHS.
      uint64_t LHSCount =
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
        EmitBlock(LHSFalse);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }
  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
                         getProfileCount(CondOp));

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio =
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    incrementProfileCounter(CondOp);
    {
      ApplyDebugLocation DL(*this, Cond);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                           LHSScaledTrueCount);
    }
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }
  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }
  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
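  // E.g.: if (__builtin_unpredictable(x > 0)) { ... }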
  llvm::MDNode *Unpredictable = nullptr;
  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      Unpredictable = MDHelper.createUnpredictable();
    }
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
  llvm::MDNode *Weights =
      createProfileWeights(TrueCount, CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV;
  {
    ApplyDebugLocation DL(*this, Cond);
    CondV = EvaluateExprAsBool(Cond);
  }
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
}
/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}
/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
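///
/// This is reached from EmitNullInitialization below: e.g. zero-initializing
/// a VLA of data member pointers, whose "null" bit-pattern is not all-zero,
/// prepares a single null element and then splats it over the whole array.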
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               Address dest, Address src,
                               llvm::Value *sizeInChars) {
  CGBuilderTy &Builder = CGF.Builder;

  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());

  Address begin =
    Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
  llvm::Value *end =
    Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
  cur->addIncoming(begin.getPointer(), originBB);

  CharUnits curAlign =
    dest.getAlignment().alignmentOfArrayElement(baseSize);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}
void
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (DestPtr.getElementType() != Int8Ty)
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);

  // Get size and alignment info for this aggregate.
  CharUnits size = getContext().getTypeSizeInChars(Ty);

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
            dyn_cast_or_null<VariableArrayType>(
                getContext().getAsArrayType(Ty))) {
      auto VlaSize = getVLASize(vlaType);
      SizeVal = VlaSize.NumElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(size);
    vla = nullptr;
  }
  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
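  // (In the Itanium C++ ABI a null data member pointer is the offset -1, so
  // e.g. 'int S::*' needs an all-ones bit-pattern rather than all zeros.)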
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    CharUnits NullAlign = DestPtr.getAlignment();
    NullVariable->setAlignment(NullAlign.getAsAlign());
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
                   NullAlign);

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
    return;
  }

  // Otherwise, just memset the whole thing to zero.  This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
}

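// GetAddrOfLabel implements the GNU "labels as values" extension, e.g.:
//   void *p = &&done;  // taken address, lowered to a blockaddress constant
//   goto *p;           // dispatches through the shared indirect-goto block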
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}
/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
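///
/// For example, for 'int a[3][5]' this returns 15 elements, a base type of
/// 'int', and 'addr' rewritten to point at the first 'int'; any leading VLA
/// dimensions are folded into the returned element count.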
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              Address &addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(addr.getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out-of-synch");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    llvm::Type *baseType = ConvertType(eltType);
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
                                             gepIndices, "array.begin"),
                   addr.getAlignment());
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}
CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

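// For a multidimensional VLA such as 'int a[n][m]', the returned NumElts is
// the product n*m (as a size_t) and the reported element type is 'int'.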
CodeGenFunction::VlaSizePair
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return { numElements, elementType };
}
CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLAElements1D(vla);
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
  assert(VlaSize && "no size for VLA!");
  assert(VlaSize->getType() == SizeTy);
  return { VlaSize, Vla->getElementType() };
}

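// Evaluating and caching VLA size expressions here is what gives C its
// sizeof/typedef semantics for VLAs, e.g.:
//   typedef int T[n];  // n is evaluated and captured at this point
//   n = 0;             // later changes to n do not affect sizeof(T)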
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("unexpected dependent type!");
    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
    case Type::ExtInt:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;
    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a value
          //   greater than zero.
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
                EmitCheckSourceLocation(size->getBeginLoc()),
                EmitCheckTypeDescriptor(size->getType())};
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
          }
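          // E.g. with -fsanitize=vla-bound, 'int a[k]' where k evaluates to
          // a value <= 0 fails the check emitted above.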

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }
    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
    case Type::MacroQualified:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      break;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      break;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}
Address CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitPointerWithAlignment(E);
  return EmitLValue(E).getAddress(*this);
}

Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
  return EmitLValue(E).getAddress(*this);
}
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              const APValue &Init) {
  assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().hasReducedDebugInfo())
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

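// A typical use (sketch) brackets a region in which a just-computed scalar
// must not be folded through:
//   PeepholeProtection protection = protectFromPeepholes(rvalue);
//   ... emit code that must not look through the zext ...
//   unprotectFromPeepholes(protection);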
CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}
void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
                                              QualType Ty, SourceLocation Loc,
                                              SourceLocation AssumptionLoc,
                                              llvm::Value *Alignment,
                                              llvm::Value *OffsetValue) {
  llvm::Value *TheCheck;
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
  if (SanOpts.has(SanitizerKind::Alignment)) {
    emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                                 OffsetValue, TheCheck, Assumption);
  }
}

void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
                                              const Expr *E,
                                              SourceLocation AssumptionLoc,
                                              llvm::Value *Alignment,
                                              llvm::Value *OffsetValue) {
  if (auto *CE = dyn_cast<CastExpr>(E))
    E = CE->getSubExprAsWritten();
  QualType Ty = E->getType();
  SourceLocation Loc = E->getExprLoc();

  emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                          OffsetValue);
}
llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}

Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                              Address Addr) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Value *V = Addr.getPointer();
  llvm::Type *VTy = V->getType();
  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                       CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return Address(V, Addr.getAlignment());
}
CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

void CGBuilderInserter::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

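// Required-feature strings may contain '|'-separated alternatives; e.g. a
// requirement such as "sse4.1|sse4a" is satisfied when the caller has
// either one of the listed features.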
static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
                                CodeGenModule &CGM, const FunctionDecl *FD,
                                std::string &FirstMissing) {
  // If there aren't any required features listed then go ahead and return.
  if (ReqFeatures.empty())
    return false;

  // Now build up the set of caller features and verify that all the required
  // features are there.
  llvm::StringMap<bool> CallerFeatureMap;
  CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);

  // If we have at least one of the features in the feature list return
  // true, otherwise return false.
  return std::all_of(
      ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
        SmallVector<StringRef, 1> OrFeatures;
        Feature.split(OrFeatures, '|');
        return llvm::any_of(OrFeatures, [&](StringRef Feature) {
          if (!CallerFeatureMap.lookup(Feature)) {
            FirstMissing = Feature.str();
            return false;
          }
          return true;
        });
      });
}
// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
                                          const FunctionDecl *TargetDecl) {
  return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
}

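// E.g. calling a builtin or an always_inline callee declared with
// __attribute__((target("avx"))) from a caller compiled without AVX is
// rejected below via err_builtin_needs_feature / err_function_needs_feature.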
// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
                                          const FunctionDecl *TargetDecl) {
  // Early exit if this is an indirect call.
  if (!TargetDecl)
    return;

  // Get the current enclosing function if it exists. If it doesn't
  // we can't check the target features anyhow.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
  if (!FD)
    return;

  // Grab the required features for the call. For a builtin this is listed in
  // the td file with the default cpu, for an always_inline function this is
  // any listed cpu and any listed features.
  unsigned BuiltinID = TargetDecl->getBuiltinID();
  std::string MissingFeature;
  if (BuiltinID) {
    SmallVector<StringRef, 1> ReqFeatures;
    const char *FeatureList =
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
    // Return if the builtin doesn't have any required features.
    if (!FeatureList || StringRef(FeatureList) == "")
      return;
    StringRef(FeatureList).split(ReqFeatures, ',');
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
          << TargetDecl->getDeclName()
          << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);

  } else if (!TargetDecl->isMultiVersion() &&
             TargetDecl->hasAttr<TargetAttr>()) {
    // Get the required features for the callee.

    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
    ParsedTargetAttr ParsedAttr =
        CGM.getContext().filterFunctionTargetAttrs(TD);

    SmallVector<StringRef, 1> ReqFeatures;
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);

    for (const auto &F : ParsedAttr.Features) {
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
        ReqFeatures.push_back(StringRef(F).substr(1));
    }

    for (const auto &F : CalleeFeatureMap) {
      // Only positive features are "required".
      if (F.getValue())
        ReqFeatures.push_back(F.getKey());
    }
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
  }
}
void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
  if (!CGM.getCodeGenOpts().SanitizeStats)
    return;

  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
  CGM.getSanStats().create(IRB, SSK);
}
llvm::Value *
CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
  llvm::Value *Condition = nullptr;

  if (!RO.Conditions.Architecture.empty())
    Condition = EmitX86CpuIs(RO.Conditions.Architecture);

  if (!RO.Conditions.Features.empty()) {
    llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
    Condition =
        Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
  }
  return Condition;
}
static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
                                             llvm::Function *Resolver,
                                             CGBuilderTy &Builder,
                                             llvm::Function *FuncToReturn,
                                             bool SupportsIFunc) {
  if (SupportsIFunc) {
    Builder.CreateRet(FuncToReturn);
    return;
  }

  llvm::SmallVector<llvm::Value *, 10> Args;
  llvm::for_each(Resolver->args(),
                 [&](llvm::Argument &Arg) { Args.push_back(&Arg); });

  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);

  if (Resolver->getReturnType()->isVoidTy())
    Builder.CreateRetVoid();
  else
    Builder.CreateRet(Result);
}

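// The emitted resolver is a chain of compare-and-branch blocks, roughly:
//   resolver_entry: br %cond.1, %resolver_return,   %resolver_else
//   resolver_else:  br %cond.2, %resolver_return.1, %resolver_else.1
//   ...
// ending in the default version's return (or a trap if none was given).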
void CodeGenFunction::EmitMultiVersionResolver(
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
  assert(getContext().getTargetInfo().getTriple().isX86() &&
         "Only implemented for x86 targets");

  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();

  // Main function's basic block.
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitX86CpuInit();

  for (const MultiVersionResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *Condition = FormResolverCondition(RO);

    // The 'default' or 'generic' case.
    if (!Condition) {
      assert(&RO == Options.end() - 1 &&
             "Default or Generic case must be last");
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
                                       SupportsIFunc);
      return;
    }

    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(*this, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
                                     SupportsIFunc);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
  }

  // If no generic/default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}
// Loc - where the diagnostic will point, where in the source code this
//  alignment has failed.
// SecondaryLoc - if present (will be present if sufficiently different from
//  Loc), the diagnostic will additionally point a "Note:" to this location.
//  It should be the location where the __attribute__((assume_aligned))
//  attribute was written or the location of the builtin call itself.
void CodeGenFunction::emitAlignmentAssumptionCheck(
    llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
    SourceLocation SecondaryLoc, llvm::Value *Alignment,
    llvm::Value *OffsetValue, llvm::Value *TheCheck,
    llvm::Instruction *Assumption) {
  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
         cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
             llvm::Intrinsic::getDeclaration(
                 Builder.GetInsertBlock()->getParent()->getParent(),
                 llvm::Intrinsic::assume) &&
         "Assumption should be a call to llvm.assume().");
  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
         "Assumption should be the last instruction of the basic block, "
         "since the basic block is still being generated.");

  if (!SanOpts.has(SanitizerKind::Alignment))
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty->getPointeeType().isVolatileQualified())
    return;

  // We need to temporarily remove the assumption so we can insert the
  // sanitizer check before it, else the check will be dropped by optimizations.
  Assumption->removeFromParent();

  {
    SanitizerScope SanScope(this);

    if (!OffsetValue)
      OffsetValue = Builder.getInt1(0); // no offset.

    llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
                                    EmitCheckSourceLocation(SecondaryLoc),
                                    EmitCheckTypeDescriptor(Ty)};
    llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
                                  EmitCheckValue(Alignment),
                                  EmitCheckValue(OffsetValue)};
    EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
              SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
  }

  // We are now in the (new, empty) "cont" basic block.
  // Reintroduce the assumption.
  Builder.Insert(Assumption);
  // FIXME: Assumption still has its original basic block as its Parent.
}
llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
  if (CGDebugInfo *DI = getDebugInfo())
    return DI->SourceLocToDebugLoc(Location);

  return llvm::DebugLoc();
}