//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

13 #include "CodeGenFunction.h"
15 #include "CGCleanup.h"
16 #include "CGCUDARuntime.h"
18 #include "CGDebugInfo.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/ASTContext.h"
24 #include "clang/AST/ASTLambda.h"
25 #include "clang/AST/Decl.h"
26 #include "clang/AST/DeclCXX.h"
27 #include "clang/AST/StmtCXX.h"
28 #include "clang/AST/StmtObjC.h"
29 #include "clang/Basic/Builtins.h"
30 #include "clang/Basic/CodeGenOptions.h"
31 #include "clang/Basic/TargetInfo.h"
32 #include "clang/CodeGen/CGFunctionInfo.h"
33 #include "clang/Frontend/FrontendDiagnostic.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/Dominators.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/MDBuilder.h"
38 #include "llvm/IR/Operator.h"
39 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
40 using namespace clang;
41 using namespace CodeGen;
/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Disable lifetime markers in msan builds.
  // FIXME: Remove this when msan works with lifetime markers.
  if (LangOpts.Sanitize.has(SanitizerKind::Memory))
    return false;

  // Asan uses markers for use-after-scope checks.
  if (CGOpts.SanitizeAddressUseAfterScope)
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
      PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
                    CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setFast();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
    FMF.setNoNaNs();
  }
  if (CGM.getCodeGenOpts().NoSignedZeros) {
    FMF.setNoSignedZeros();
  }
  if (CGM.getCodeGenOpts().ReciprocalMath) {
    FMF.setAllowReciprocal();
  }
  if (CGM.getCodeGenOpts().Reassociate) {
    FMF.setAllowReassoc();
  }
  Builder.setFastMathFlags(FMF);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now.  This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);
}

CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
                                                          LValueBaseInfo *BaseInfo,
                                                          TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}

CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
                                                   LValueBaseInfo *BaseInfo,
                                                   TBAAAccessInfo *TBAAInfo,
                                                   bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = CGM.getTBAAAccessInfo(T);

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(Align);
    }
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  if (T->isIncompleteType()) {
    Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
  } else {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    const CXXRecordDecl *RD;
    if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
      Alignment = CGM.getClassPointerAlignment(RD);
    } else {
      Alignment = getContext().getTypeAlignInChars(T);
      if (T.getQualifiers().hasUnaligned())
        Alignment = CharUnits::One();
    }

    // Cap to the global maximum type alignment unless the alignment
    // was somehow explicit on the type.
    if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
      if (Alignment.getQuantity() > MaxAlign &&
          !getContext().isAlignmentRequired(T))
        Alignment = CharUnits::fromQuantity(MaxAlign);
    }
  }
  return Alignment;
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
                          TBAAInfo);
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                            /* forPointeeType= */ true);
  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

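/// EmitReturnBlock - Emit the unified return block, trying to fold it into
/// the current insertion point when possible. Returns the DebugLoc of a lone
/// 'return' statement, if any, so the final 'ret' can reuse it.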
llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the
  // block unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

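/// EmitIfUsed - Append the given block to the function only if it is actually
/// used; otherwise delete it, since an unused block would survive as spurious
/// unreachable IR.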
static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

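/// FinishFunction - Complete IR generation for the current function: pop any
/// remaining cleanups, emit the epilog, and tidy up unused blocks and allocas.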
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code.  If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code.  To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement.  Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all of the cleanup code will be at the end of the function's lexical scope.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters.  Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;

  // Make sure the line table doesn't jump back into the body for
  // the ret after it's been at EndLoc.
  if (CGDebugInfo *DI = getDebugInfo())
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, EndLoc);

  PopCleanupBlocks(PrologueCleanupDepth);

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no gaps.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }

  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // because the lifetime of the CleanupDestSlot alloca makes correct
  // coroutine frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth = std::max(LargestVectorWidth,
                                    VT->getPrimitiveSizeInBits());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth = std::max(LargestVectorWidth,
                                  VT->getPrimitiveSizeInBits());

  // Add the min-legal-vector-width attribute. This contains the max width
  // from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by
  //    this function.
  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;

  return true;
}

/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

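/// AlwaysEmitXRayTypedEvents - Return true if we should emit IR for calls to
/// the __xray_typedevent(...) builtin calls, when doing XRay instrumentation.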
bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::Constant *
CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
                                            llvm::Constant *Addr) {
  // Addresses stored in prologue data can't require run-time fixups and must
  // be PC-relative. Run-time fixups are undesirable because they necessitate
  // writable text segments, which are unsafe. And absolute addresses are
  // undesirable because they break PIE mode.

  // Add a layer of indirection through a private global. Taking its address
  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
                                      /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Addr);

  // Create a PC-relative address.
  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
  return (IntPtrTy == Int32Ty)
             ? PCRelAsInt
             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
}

llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
                            "decoded_addr");
}

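/// Attach OpenCL kernel metadata to Fn: argument info plus any vec_type_hint,
/// work_group_size_hint, reqd_work_group_size, and intel_reqd_sub_group_size
/// attributes present on the kernel declaration.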
void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenOpenCLArgMetadata(Fn, FD, this);

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

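/// Return true if D looks like an STL allocator's allocate() member function:
/// a method named 'allocate' taking a size_type, optionally followed by a
/// 'const void *' hint parameter.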
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
    if (FD->usesSEHTry())
      CurSEHParent = FD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function has been blacklisted for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (0);

  if (D) {
    // Apply the no_sanitize* attributes to SanOpts.
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
      SanitizerMask mask = Attr->getMask();
      SanOpts.Mask &= ~mask;
      if (mask & SanitizerKind::Address)
        SanOpts.set(SanitizerKind::KernelAddress, false);
      if (mask & SanitizerKind::KernelAddress)
        SanOpts.set(SanitizerKind::Address, false);
      if (mask & SanitizerKind::HWAddress)
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
      if (mask & SanitizerKind::KernelHWAddress)
        SanOpts.set(SanitizerKind::HWAddress, false);
    }
  }

  // Apply sanitizer attributes to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
  if (SanOpts.has(SanitizerKind::MemTag))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
  if (SanOpts.has(SanitizerKind::Thread))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::.
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Apply xray attributes to the function (as a string, for now).
  if (D) {
    if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
      if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
              XRayInstrKind::Function)) {
        if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
          Fn->addFnAttr("function-instrument", "xray-always");
        if (XRayAttr->neverXRayInstrument())
          Fn->addFnAttr("function-instrument", "xray-never");
        if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
          if (ShouldXRayInstrumentFunction())
            Fn->addFnAttr("xray-log-args",
                          llvm::utostr(LogArgs->getArgumentCount()));
      }
    } else {
      if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
        Fn->addFnAttr(
            "xray-instruction-threshold",
            llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
    }
  }

  // Add no-jump-tables value.
  Fn->addFnAttr("no-jump-tables",
                llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
        // Remove any (C++17) exception specifications, to allow calling e.g. a
        // noexcept function through a non-noexcept pointer.
        auto ProtoTy =
            getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
                                                          EST_None);
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
        llvm::Constant *FTRTTIConstEncoded =
            EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
        llvm::Constant *PrologueStructElems[] = {PrologueSig,
                                                 FTRTTIConstEncoded};
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  if (getLangOpts().CPlusPlus)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->isMain())
        Fn->addFnAttr(llvm::Attribute::NoRecurse);

  // If a custom alignment is used, force realigning to this alignment on
  // any main function which certainly will need it.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
        CGM.getCodeGenOpts().StackAlignment)
      Fn->addFnAttr("stackrealign");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later.  Don't create this with the builder, because we don't want it
  // inserted into the body of the function.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    CallingConv CC = CallingConv::CC_C;
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
        CC = SrcFnTy->getCallConv();
    SmallVector<QualType, 16> ArgTypes;
    for (const VarDecl *VD : Args)
      ArgTypes.push_back(VD->getType());
    QualType FnType = getContext().getFunctionType(
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
                          Builder);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as
  // function inlining, we just add an attribute to insert an mcount call in
  // the backend. The attribute "counting-function" is set to the mcount
  // function name, which is architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
    }
  }

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
      ReturnValuePointer =
          CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
      Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
                              ReturnValue.getPointer(), Int8PtrTy),
                          ReturnValuePointer);
    }
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
    ReturnValuePointer = Address(Addr, getPointerAlign());
    Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result.  We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing object
        // or contains the address of the enclosing object).
        LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address.
          CXXThisValue = ThisFieldLValue.getAddress().getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'?  The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType();

      // If this is the call operator of a lambda with no capture-default, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) &&
          MD->getParent()->getLambdaCaptureDefault() == LCD_None)
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                                : TCK_MemberCall,
                    Loc, CXXABIThisValue, ThisTy,
                    getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
                    SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);

  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();
}

void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}

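/// BuildFunctionArgList - Build the argument list for GD: the implicit 'this'
/// parameter when the C++ ABI requires one, the declared parameters together
/// with any pass_object_size implicit size arguments, and any ABI-specific
/// structor parameters. Returns the effective return type.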
QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType();
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}

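/// Heuristic: return true if lowering 'flowing off the end' of FD as
/// 'unreachable' is acceptable even without -fstrict-return. The optimization
/// is avoided for record types with a trivial destructor and for other
/// trivially copyable types, where existing code is more likely to get away
/// with the undefined behavior.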
static bool
shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
                                             const ASTContext &Context) {
  QualType T = FD->getReturnType();
  // Avoid the optimization for functions that return a record type with a
  // trivial destructor or another trivially copyable type.
  if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
    if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
      return !ClassDecl->hasTrivialDestructor();
  }
  return !T.isTriviallyCopyableType(Context);
}

void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  CurGD = GD;

  FunctionArgList Args;
  QualType ResTy = BuildFunctionArgList(GD, Args);

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  // The function might not have a body if we're generating thunks for a
  // function declaration.
  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody())
    BodyRange = Body->getSourceRange();
  else
    BodyRange = FD->getLocation();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  Stmt *Body = FD->getBody();

  // Initialize helper which will detect jumps which can cause invalid
  // lifetime markers.
  if (Body && ShouldEmitLifetimeMarkers)
    Bypasses.Init(Body);

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
  PGO.assignRegionCounters(GD, CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !getLangOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().emitDeviceStub(*this, Args);
  else if (isa<CXXMethodDecl>(FD) &&
           cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Body) {
    EmitFunctionBody(Body);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior
  //   in a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    bool ShouldEmitUnreachable =
        CGM.getCodeGenOpts().StrictReturn ||
        shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                SanitizerHandler::MissingReturn,
                EmitCheckSourceLocation(FD->getLocation()), None);
    } else if (ShouldEmitUnreachable) {
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
        EmitTrapCall(llvm::Intrinsic::trap);
    }
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();
    }
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do a quick
  // pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it.  If
/// this statement is not executed normally, then not containing a label means
/// that we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) {  ...  foo:  bar(); }  goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (const Stmt *SubStmt : S->children())
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a break!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (const Stmt *SubStmt : S->children())
    if (containsBreak(SubStmt))
      return true;

  return false;
}

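/// mightAddDeclToScope - Return true if the statement might introduce a
/// declaration into the current scope; statements that open a scope of their
/// own can never do so.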
bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
  if (!S) return false;

  // Some statement kinds add a scope and thus never add a decl to the current
  // scope. Note, this list is longer than the list of statements that might
  // have an unscoped decl nested within them, but this way is conservatively
  // correct even if more statement kinds are added.
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
    return false;

  if (isa<DeclStmt>(S))
    return true;

  for (const Stmt *SubStmt : S->children())
    if (mightAddDeclToScope(SubStmt))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the boolean result in Result.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool,
                                                   bool AllowLabels) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false.  If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   llvm::APSInt &ResultInt,
                                                   bool AllowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->EvaluateAsInt(Result, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  llvm::APSInt Int = Result.Val.getInt();
  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks.  Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
/// \param TrueCount The expected number of times the condition is true.
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code.  "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
        EmitBlock(LHSTrue);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code.  "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional.  If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up the true count between the short
      // circuit and the RHS.
      uint64_t LHSCount =
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
        EmitBlock(LHSFalse);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);
      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
                         getProfileCount(CondOp));

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio =
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    incrementProfileCounter(CondOp);
    {
      ApplyDebugLocation DL(*this, Cond);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                           LHSScaledTrueCount);
    }
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  llvm::MDNode *Unpredictable = nullptr;
  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      Unpredictable = MDHelper.createUnpredictable();
    }
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
  llvm::MDNode *Weights =
      createProfileWeights(TrueCount, CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV;
  {
    ApplyDebugLocation DL(*this, Cond);
    CondV = EvaluateExprAsBool(Cond);
  }
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               Address dest, Address src,
                               llvm::Value *sizeInChars) {
  CGBuilderTy &Builder = CGF.Builder;

  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());

  Address begin =
      Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
  llvm::Value *end =
      Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA.  C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
  cur->addIncoming(begin.getPointer(), originBB);

  CharUnits curAlign =
      dest.getAlignment().alignmentOfArrayElement(baseSize);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next =
      Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}


void
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (DestPtr.getElementType() != Int8Ty)
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);

  // Get size and alignment info for this aggregate.
  CharUnits size = getContext().getTypeSizeInChars(Ty);

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
            dyn_cast_or_null<VariableArrayType>(
                getContext().getAsArrayType(Ty))) {
      auto VlaSize = getVLASize(vlaType);
      SizeVal = VlaSize.NumElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
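  //
  // For example (illustrative, not from this file), under the Itanium C++
  // ABI the null value of 'int S::*' is -1, so zero-initializing
  //   struct T { int S::*mp; } t = {};
  // must store all-ones into t.mp rather than memset the struct to 0.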
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    CharUnits NullAlign = DestPtr.getAlignment();
    NullVariable->setAlignment(NullAlign.getQuantity());
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
                   NullAlign);

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
    return;
  }

  // Otherwise, just memset the whole thing to zero. This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
}

llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}
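
// GNU C example of what feeds this block (hypothetical, not from this file):
//   void *dest = &&out;   // GetAddrOfLabel registers 'out' above
//   goto *dest;           // branches via the shared indirectgoto block
//   out: ;
// Every 'goto *x' in the function funnels through the single block built
// below, which switches on the PHI'd destination address.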
llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              Address &addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size. Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;

    // Walk into all VLAs. This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
  // down to the first element of addr.
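  //
  // For instance (illustrative, not from this file), for 'int a[2][3]' addr
  // has type [2 x [3 x i32]]*; the GEP indices built below are (0, 0, 0),
  // countFromCLAs becomes 2 * 3 = 6, and the base element type is i32.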
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(addr.getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out of sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    llvm::Type *baseType = ConvertType(eltType);
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
                                             gepIndices, "array.begin"),
                   addr.getAlignment());
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}
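
// For a multi-dimensional VLA like 'int a[n][m]' (an illustrative example,
// not from this file), the loop below folds the recorded dimension sizes
// into a single element count: NumElts = n * m, with Type 'int'.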
CodeGenFunction::VlaSizePair
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return { numElements, elementType };
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLAElements1D(vla);
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
  assert(VlaSize && "no size for VLA!");
  assert(VlaSize->getType() == SizeTy);
  return { VlaSize, Vla->getElementType() };
}
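
// Example of the walk performed below (hypothetical, not from this file):
// for 'int (*p)[n]' we step through Pointer -> VariableArray, evaluating
// 'n' and caching it in VLASizeMap. For a typedef'd VLA type the size
// expression was already evaluated at the typedef, so the cached entry is
// found and reused rather than re-emitted.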
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a value
          //   greater than zero.
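          //
          // E.g. (illustrative, not from this file) for 'int a[n]', reaching
          // the declaration with n <= 0 at runtime is undefined behavior;
          // -fsanitize=vla-bound emits the check below to catch it.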
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
                EmitCheckSourceLocation(size->getBeginLoc()),
                EmitCheckTypeDescriptor(size->getType())};
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
    case Type::MacroQualified:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

Address CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitPointerWithAlignment(E);
  return EmitLValue(E).getAddress();
}

Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              const APValue &Init) {
  assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}
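
// Source-level constructs that funnel into the overloads below include
// (hypothetical examples, not from this file):
//   void *q = __builtin_assume_aligned(p, 64);        // alignment as a Value
//   int *f(void) __attribute__((assume_aligned(32))); // alignment as unsigned
// The first overload takes the alignment as an llvm::Value; the second takes
// a compile-time constant and wraps it before delegating to the check.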
void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
                                              QualType Ty, SourceLocation Loc,
                                              SourceLocation AssumptionLoc,
                                              llvm::Value *Alignment,
                                              llvm::Value *OffsetValue) {
  llvm::Value *TheCheck;
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
  if (SanOpts.has(SanitizerKind::Alignment)) {
    EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                                 OffsetValue, TheCheck, Assumption);
  }
}

void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
                                              QualType Ty, SourceLocation Loc,
                                              SourceLocation AssumptionLoc,
                                              unsigned Alignment,
                                              llvm::Value *OffsetValue) {
  llvm::Value *TheCheck;
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
  if (SanOpts.has(SanitizerKind::Alignment)) {
    llvm::Value *AlignmentVal = llvm::ConstantInt::get(IntPtrTy, Alignment);
    EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, AlignmentVal,
                                 OffsetValue, TheCheck, Assumption);
  }
}

void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
                                              const Expr *E,
                                              SourceLocation AssumptionLoc,
                                              unsigned Alignment,
                                              llvm::Value *OffsetValue) {
  if (auto *CE = dyn_cast<CastExpr>(E))
    E = CE->getSubExprAsWritten();
  QualType Ty = E->getType();
  SourceLocation Loc = E->getExprLoc();

  EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                          OffsetValue);
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}
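
// A variable declared as (hypothetical, not from this file):
//   int x __attribute__((annotate("my_tag")));
// reaches EmitVarAnnotations below, producing one llvm.var.annotation call
// per annotate attribute.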
void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}
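
// Similarly (illustrative, not from this file):
//   struct S { int f __attribute__((annotate("field_tag"))); };
// accesses to S::f are wrapped in llvm.ptr.annotation calls by
// EmitFieldAnnotations below.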
Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                              Address Addr) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Value *V = Addr.getPointer();
  llvm::Type *VTy = V->getType();
  llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                       CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return Address(V, Addr.getAlignment());
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

void CGBuilderInserter::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
                                CodeGenModule &CGM, const FunctionDecl *FD,
                                std::string &FirstMissing) {
  // If there aren't any required features listed then go ahead and return.
  if (ReqFeatures.empty())
    return false;

  // Now build up the set of caller features and verify that all the required
  // features are there.
  llvm::StringMap<bool> CallerFeatureMap;
  CGM.getFunctionFeatureMap(CallerFeatureMap, GlobalDecl().getWithDecl(FD));

  // Every required feature must be present in the caller. An entry may list
  // several '|'-separated alternatives, of which having any one is enough.
  return std::all_of(
      ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
        SmallVector<StringRef, 1> OrFeatures;
        Feature.split(OrFeatures, '|');
        return llvm::any_of(OrFeatures, [&](StringRef Feature) {
          if (!CallerFeatureMap.lookup(Feature)) {
            FirstMissing = Feature.str();
            return false;
          }
          return true;
        });
      });
}
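
// For example (hypothetical, not from this file), calling
// __builtin_ia32_crc32di without -msse4.2, or calling an always_inline
// function marked __attribute__((target("avx2"))) from a caller compiled
// without AVX2, is diagnosed by the checks below.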
// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
                                          const FunctionDecl *TargetDecl) {
  return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
}

// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
                                          const FunctionDecl *TargetDecl) {
  // Early exit if this is an indirect call.
  if (!TargetDecl)
    return;

  // Get the current enclosing function if it exists. If it doesn't
  // we can't check the target features anyhow.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
  if (!FD)
    return;

  // Grab the required features for the call. For a builtin this is listed in
  // the td file with the default cpu; for an always_inline function this is
  // any listed cpu and any listed features.
  unsigned BuiltinID = TargetDecl->getBuiltinID();
  std::string MissingFeature;
  if (BuiltinID) {
    SmallVector<StringRef, 1> ReqFeatures;
    const char *FeatureList =
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
    // Return if the builtin doesn't have any required features.
    if (!FeatureList || StringRef(FeatureList) == "")
      return;
    StringRef(FeatureList).split(ReqFeatures, ',');
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
          << TargetDecl->getDeclName()
          << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);

  } else if (TargetDecl->hasAttr<TargetAttr>() ||
             TargetDecl->hasAttr<CPUSpecificAttr>()) {
    // Get the required features for the callee.
    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
    TargetAttr::ParsedTargetAttr ParsedAttr = CGM.filterFunctionTargetAttrs(TD);

    SmallVector<StringRef, 1> ReqFeatures;
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);

    for (const auto &F : ParsedAttr.Features) {
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
        ReqFeatures.push_back(StringRef(F).substr(1));
    }

    for (const auto &F : CalleeFeatureMap) {
      // Only positive features are "required".
      if (F.getValue())
        ReqFeatures.push_back(F.getKey());
    }
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
  }
}

void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
  if (!CGM.getCodeGenOpts().SanitizeStats)
    return;

  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
  CGM.getSanStats().create(IRB, SSK);
}

llvm::Value *
CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
  llvm::Value *Condition = nullptr;

  if (!RO.Conditions.Architecture.empty())
    Condition = EmitX86CpuIs(RO.Conditions.Architecture);

  if (!RO.Conditions.Features.empty()) {
    llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
    Condition =
        Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
  }
  return Condition;
}

static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
                                             llvm::Function *Resolver,
                                             CGBuilderTy &Builder,
                                             llvm::Function *FuncToReturn,
                                             bool SupportsIFunc) {
  if (SupportsIFunc) {
    Builder.CreateRet(FuncToReturn);
    return;
  }

  llvm::SmallVector<llvm::Value *, 10> Args;
  llvm::for_each(Resolver->args(),
                 [&](llvm::Argument &Arg) { Args.push_back(&Arg); });

  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);

  if (Resolver->getReturnType()->isVoidTy())
    Builder.CreateRetVoid();
  else
    Builder.CreateRet(Result);
}
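
// The resolver emitted below dispatches among function multiversions, e.g.
// (hypothetical, not from this file):
//   __attribute__((target("avx2")))    int f(void);
//   __attribute__((target("default"))) int f(void);
// Each non-default option becomes a resolver_return/resolver_else pair; the
// default option, if present, must come last and terminates the chain.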
void CodeGenFunction::EmitMultiVersionResolver(
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
  assert((getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86 ||
          getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86_64) &&
         "Only implemented for x86 targets");

  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();

  // Main function's basic block.
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitX86CpuInit();

  for (const MultiVersionResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *Condition = FormResolverCondition(RO);

    // The 'default' or 'generic' case.
    if (!Condition) {
      assert(&RO == Options.end() - 1 &&
             "Default or Generic case must be last");
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
                                       SupportsIFunc);
      return;
    }

    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(*this, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
                                     SupportsIFunc);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
  }

  // If no generic/default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}

// Loc - where the diagnostic will point, where in the source code this
// alignment has failed.
// SecondaryLoc - if present (will be present if sufficiently different from
// Loc), the diagnostic will additionally point a "Note:" to this location.
// It should be the location where the __attribute__((assume_aligned))
// was written.
void CodeGenFunction::EmitAlignmentAssumptionCheck(
    llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
    SourceLocation SecondaryLoc, llvm::Value *Alignment,
    llvm::Value *OffsetValue, llvm::Value *TheCheck,
    llvm::Instruction *Assumption) {
  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
         cast<llvm::CallInst>(Assumption)->getCalledValue() ==
             llvm::Intrinsic::getDeclaration(
                 Builder.GetInsertBlock()->getParent()->getParent(),
                 llvm::Intrinsic::assume) &&
         "Assumption should be a call to llvm.assume().");
  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
         "Assumption should be the last instruction of the basic block, "
         "since the basic block is still being generated.");

  if (!SanOpts.has(SanitizerKind::Alignment))
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty->getPointeeType().isVolatileQualified())
    return;

  // We need to temporarily remove the assumption so we can insert the
  // sanitizer check before it, else the check will be dropped by optimizations.
  Assumption->removeFromParent();

  {
    SanitizerScope SanScope(this);

    if (!OffsetValue)
      OffsetValue = Builder.getInt1(0); // no offset.

    llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
                                    EmitCheckSourceLocation(SecondaryLoc),
                                    EmitCheckTypeDescriptor(Ty)};
    llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
                                  EmitCheckValue(Alignment),
                                  EmitCheckValue(OffsetValue)};
    EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
              SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
  }

  // We are now in the (new, empty) "cont" basic block.
  // Reintroduce the assumption.
  Builder.Insert(Assumption);
  // FIXME: Assumption still has its original basic block as its Parent.
}

llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
  if (CGDebugInfo *DI = getDebugInfo())
    return DI->SourceLocToDebugLoc(Location);

  return llvm::DebugLoc();
}