1 //===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This contains code to emit Decl nodes as LLVM code.
11 //===----------------------------------------------------------------------===//
15 #include "CGCleanup.h"
16 #include "CGDebugInfo.h"
17 #include "CGOpenCLRuntime.h"
18 #include "CGOpenMPRuntime.h"
19 #include "CodeGenFunction.h"
20 #include "CodeGenModule.h"
21 #include "ConstantEmitter.h"
22 #include "PatternInit.h"
23 #include "TargetInfo.h"
24 #include "clang/AST/ASTContext.h"
25 #include "clang/AST/CharUnits.h"
26 #include "clang/AST/Decl.h"
27 #include "clang/AST/DeclObjC.h"
28 #include "clang/AST/DeclOpenMP.h"
29 #include "clang/Basic/CodeGenOptions.h"
30 #include "clang/Basic/SourceManager.h"
31 #include "clang/Basic/TargetInfo.h"
32 #include "clang/CodeGen/CGFunctionInfo.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/GlobalVariable.h"
36 #include "llvm/IR/Intrinsics.h"
37 #include "llvm/IR/Type.h"
39 using namespace clang;
40 using namespace CodeGen;
42 void CodeGenFunction::EmitDecl(const Decl &D) {
43 switch (D.getKind()) {
44 case Decl::BuiltinTemplate:
45 case Decl::TranslationUnit:
46 case Decl::ExternCContext:
48 case Decl::UnresolvedUsingTypename:
49 case Decl::ClassTemplateSpecialization:
50 case Decl::ClassTemplatePartialSpecialization:
51 case Decl::VarTemplateSpecialization:
52 case Decl::VarTemplatePartialSpecialization:
53 case Decl::TemplateTypeParm:
54 case Decl::UnresolvedUsingValue:
55 case Decl::NonTypeTemplateParm:
56 case Decl::CXXDeductionGuide:
58 case Decl::CXXConstructor:
59 case Decl::CXXDestructor:
60 case Decl::CXXConversion:
62 case Decl::MSProperty:
63 case Decl::IndirectField:
65 case Decl::ObjCAtDefsField:
67 case Decl::ImplicitParam:
68 case Decl::ClassTemplate:
69 case Decl::VarTemplate:
70 case Decl::FunctionTemplate:
71 case Decl::TypeAliasTemplate:
72 case Decl::TemplateTemplateParm:
73 case Decl::ObjCMethod:
74 case Decl::ObjCCategory:
75 case Decl::ObjCProtocol:
76 case Decl::ObjCInterface:
77 case Decl::ObjCCategoryImpl:
78 case Decl::ObjCImplementation:
79 case Decl::ObjCProperty:
80 case Decl::ObjCCompatibleAlias:
81 case Decl::PragmaComment:
82 case Decl::PragmaDetectMismatch:
83 case Decl::AccessSpec:
84 case Decl::LinkageSpec:
86 case Decl::ObjCPropertyImpl:
87 case Decl::FileScopeAsm:
89 case Decl::FriendTemplate:
92 case Decl::ClassScopeFunctionSpecialization:
93 case Decl::UsingShadow:
94 case Decl::ConstructorUsingShadow:
95 case Decl::ObjCTypeParam:
97 llvm_unreachable("Declaration should not be in declstmts!");
98 case Decl::Function: // void X();
99 case Decl::Record: // struct/union/class X;
100 case Decl::Enum: // enum X;
101 case Decl::EnumConstant: // enum ? { X = ? }
102 case Decl::CXXRecord: // struct/union/class X; [C++]
103 case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
104 case Decl::Label: // __label__ x;
106 case Decl::OMPThreadPrivate:
107 case Decl::OMPAllocate:
108 case Decl::OMPCapturedExpr:
109 case Decl::OMPRequires:
112 // None of these decls require codegen support.
115 case Decl::NamespaceAlias:
116 if (CGDebugInfo *DI = getDebugInfo())
117 DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
119 case Decl::Using: // using X; [C++]
120 if (CGDebugInfo *DI = getDebugInfo())
121 DI->EmitUsingDecl(cast<UsingDecl>(D));
123 case Decl::UsingPack:
124 for (auto *Using : cast<UsingPackDecl>(D).expansions())
127 case Decl::UsingDirective: // using namespace X; [C++]
128 if (CGDebugInfo *DI = getDebugInfo())
129 DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
132 case Decl::Decomposition: {
133 const VarDecl &VD = cast<VarDecl>(D);
134 assert(VD.isLocalVarDecl() &&
135 "Should not see file-scope variables inside a function!");
137 if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
138 for (auto *B : DD->bindings())
139 if (auto *HD = B->getHoldingVar())
144 case Decl::OMPDeclareReduction:
145 return CGM.EmitOMPDeclareReduction(cast<OMPDeclareReductionDecl>(&D), this);
147 case Decl::OMPDeclareMapper:
148 return CGM.EmitOMPDeclareMapper(cast<OMPDeclareMapperDecl>(&D), this);
150 case Decl::Typedef: // typedef int X;
151 case Decl::TypeAlias: { // using X = int; [C++0x]
152 const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
153 QualType Ty = TD.getUnderlyingType();
155 if (Ty->isVariablyModifiedType())
156 EmitVariablyModifiedType(Ty);
163 /// EmitVarDecl - This method handles emission of any variable declaration
164 /// inside a function, including static vars etc.
165 void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
166 if (D.hasExternalStorage())
167 // Don't emit it now, allow it to be emitted lazily on its first use.
170 // Some function-scope variables do not have static storage but still
171 // need to be emitted like static variables, e.g. a function-scope
172 // variable in the constant address space in OpenCL.
173 if (D.getStorageDuration() != SD_Automatic) {
174 // Static sampler variables are translated to function calls.
175 if (D.getType()->isSamplerT())
178 llvm::GlobalValue::LinkageTypes Linkage =
179 CGM.getLLVMLinkageVarDefinition(&D, /*IsConstant=*/false);
181 // FIXME: We need to force the emission/use of a guard variable for
182 // some variables even if we can constant-evaluate them because
183 // we can't guarantee every translation unit will constant-evaluate them.
185 return EmitStaticVarDecl(D, Linkage);
188 if (D.getType().getAddressSpace() == LangAS::opencl_local)
189 return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
191 assert(D.hasLocalStorage());
192 return EmitAutoVarDecl(D);
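// Illustrative examples of the dispatch above (added commentary, not part of
// the original source):
//   void f(void) {
//     extern int e;      // external storage: emitted lazily on first use
//     static int s = 0;  // non-automatic storage: EmitStaticVarDecl
//     int a = 0;         // automatic storage: EmitAutoVarDecl
//   }
// In OpenCL, a __local variable inside a kernel goes through
// EmitWorkGroupLocalVarDecl instead, and a function-scope __constant variable
// is emitted like a static.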
195 static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
196 if (CGM.getLangOpts().CPlusPlus)
197 return CGM.getMangledName(&D).str();
199 // If this isn't C++, we don't need a mangled name, just a pretty one.
200 assert(!D.isExternallyVisible() && "name shouldn't matter");
201 std::string ContextName;
202 const DeclContext *DC = D.getDeclContext();
203 if (auto *CD = dyn_cast<CapturedDecl>(DC))
204 DC = cast<DeclContext>(CD->getNonClosureContext());
205 if (const auto *FD = dyn_cast<FunctionDecl>(DC))
206 ContextName = CGM.getMangledName(FD);
207 else if (const auto *BD = dyn_cast<BlockDecl>(DC))
208 ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
209 else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
210 ContextName = OMD->getSelector().getAsString();
211 else
212 llvm_unreachable("Unknown context for static var decl");
214 ContextName += "." + D.getNameAsString();
215 return ContextName;
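// For illustration (added commentary): in C++ the mangled name is used
// directly, e.g. "_ZZ3foovE1x" for 'static int x;' inside 'void foo()'.
// In C the "pretty" name built above is used instead, e.g. "foo.x".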
218 llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
219 const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
220 // In general, we don't always emit static var decls once before we reference
221 // them. It is possible to reference them before emitting the function that
222 // contains them, and it is possible to emit the containing function multiple times.
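// For example (illustrative): a static local in a C++ constructor body is
// reached twice, once for the complete-object constructor and once for the
// base-object constructor, and both emissions must map to the same global.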
224 if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
227 QualType Ty = D.getType();
228 assert(Ty->isConstantSizeType() && "VLAs can't be static");
230 // Use the label if the variable is renamed with the asm-label extension.
231 std::string Name;
232 if (D.hasAttr<AsmLabelAttr>())
233 Name = getMangledName(&D);
234 else
235 Name = getStaticDeclName(*this, D);
237 llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
238 LangAS AS = GetGlobalVarAddressSpace(&D);
239 unsigned TargetAS = getContext().getTargetAddressSpace(AS);
241 // OpenCL variables in local address space and CUDA shared
242 // variables cannot have an initializer.
243 llvm::Constant *Init = nullptr;
244 if (Ty.getAddressSpace() == LangAS::opencl_local ||
245 D.hasAttr<CUDASharedAttr>())
246 Init = llvm::UndefValue::get(LTy);
247 else
248 Init = EmitNullConstant(Ty);
250 llvm::GlobalVariable *GV = new llvm::GlobalVariable(
251 getModule(), LTy, Ty.isConstant(getContext()), Linkage, Init, Name,
252 nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
253 GV->setAlignment(getContext().getDeclAlign(&D).getAsAlign());
255 if (supportsCOMDAT() && GV->isWeakForLinker())
256 GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
261 setGVProperties(GV, &D);
263 // Make sure the result is of the correct type.
264 LangAS ExpectedAS = Ty.getAddressSpace();
265 llvm::Constant *Addr = GV;
266 if (AS != ExpectedAS) {
267 Addr = getTargetCodeGenInfo().performAddrSpaceCast(
268 *this, GV, AS, ExpectedAS,
269 LTy->getPointerTo(getContext().getTargetAddressSpace(ExpectedAS)));
272 setStaticLocalDeclAddress(&D, Addr);
274 // Ensure that the static local gets initialized by making sure the parent
275 // function gets emitted eventually.
276 const Decl *DC = cast<Decl>(D.getDeclContext());
278 // We can't name blocks or captured statements directly, so try to emit their
280 if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
281 DC = DC->getNonClosureContext();
282 // FIXME: Ensure that global blocks get emitted.
288 if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
289 GD = GlobalDecl(CD, Ctor_Base);
290 else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
291 GD = GlobalDecl(DD, Dtor_Base);
292 else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
295 // Don't do anything for Obj-C method decls or global closures. We should
296 // never defer them.
297 assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
300 // Disable emission of the parent function for the OpenMP device codegen.
301 CGOpenMPRuntime::DisableAutoDeclareTargetRAII NoDeclTarget(*this);
302 (void)GetAddrOfGlobal(GD);
308 /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
309 /// global variable that has already been created for it. If the initializer
310 /// has a different type than GV does, this may free GV and return a different
311 /// one. Otherwise it just returns GV.
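// Illustrative example of the type-rewrite case handled here (added
// commentary): for
//   static union { int i; float f; } u = { .f = 1.0f };
// the global's LLVM type is derived from the union's declared type, while the
// constant initializer is emitted with the layout of the active member, so
// the two LLVM types can differ and the global has to be recreated.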
312 llvm::GlobalVariable *
313 CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
314 llvm::GlobalVariable *GV) {
315 ConstantEmitter emitter(*this);
316 llvm::Constant *Init = emitter.tryEmitForInitializer(D);
318 // If constant emission failed, then this should be a C++ static
319 // initializer.
320 if (!Init) {
321 if (!getLangOpts().CPlusPlus)
322 CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
323 else if (HaveInsertPoint()) {
324 // Since we have a static initializer, this global variable can't
325 // be constant.
326 GV->setConstant(false);
328 EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
333 // The initializer may differ in type from the global. Rewrite
334 // the global to match the initializer. (We have to do this
335 // because some types, like unions, can't be completely represented
336 // in the LLVM type system.)
337 if (GV->getType()->getElementType() != Init->getType()) {
338 llvm::GlobalVariable *OldGV = GV;
340 GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
342 OldGV->getLinkage(), Init, "",
343 /*InsertBefore*/ OldGV,
344 OldGV->getThreadLocalMode(),
345 CGM.getContext().getTargetAddressSpace(D.getType()));
346 GV->setVisibility(OldGV->getVisibility());
347 GV->setDSOLocal(OldGV->isDSOLocal());
348 GV->setComdat(OldGV->getComdat());
350 // Steal the name of the old global.
351 GV->takeName(OldGV);
353 // Replace all uses of the old global with the new global
354 llvm::Constant *NewPtrForOldDecl =
355 llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
356 OldGV->replaceAllUsesWith(NewPtrForOldDecl);
358 // Erase the old global, since it is no longer used.
359 OldGV->eraseFromParent();
362 GV->setConstant(CGM.isTypeConstant(D.getType(), true));
363 GV->setInitializer(Init);
365 emitter.finalize(GV);
367 if (D.needsDestruction(getContext()) && HaveInsertPoint()) {
368 // We have a constant initializer, but a nontrivial destructor. We still
369 // need to perform a guarded "initialization" in order to register the
371 EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
377 void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
378 llvm::GlobalValue::LinkageTypes Linkage) {
379 // Check to see if we already have a global variable for this
380 // declaration. This can happen when double-emitting function
381 // bodies, e.g. with complete and base constructors.
382 llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
383 CharUnits alignment = getContext().getDeclAlign(&D);
385 // Store into LocalDeclMap before generating initializer to handle
386 // circular references.
387 setAddrOfLocalVar(&D, Address(addr, alignment));
389 // We can't have a VLA here, but we can have a pointer to a VLA,
390 // even though that doesn't really make any sense.
391 // Make sure to evaluate VLA bounds now so that we have them for later.
392 if (D.getType()->isVariablyModifiedType())
393 EmitVariablyModifiedType(D.getType());
395 // Save the type in case adding the initializer forces a type change.
396 llvm::Type *expectedType = addr->getType();
398 llvm::GlobalVariable *var =
399 cast<llvm::GlobalVariable>(addr->stripPointerCasts());
401 // CUDA's local and local static __shared__ variables should not
402 // have any non-empty initializers. This is ensured by Sema.
403 // Whatever initializer such variable may have when it gets here is
404 // a no-op and should not be emitted.
405 bool isCudaSharedVar = getLangOpts().CUDA && getLangOpts().CUDAIsDevice &&
406 D.hasAttr<CUDASharedAttr>();
407 // If this value has an initializer, emit it.
408 if (D.getInit() && !isCudaSharedVar)
409 var = AddInitializerToStaticVarDecl(D, var);
411 var->setAlignment(alignment.getAsAlign());
413 if (D.hasAttr<AnnotateAttr>())
414 CGM.AddGlobalAnnotations(&D, var);
416 if (auto *SA = D.getAttr<PragmaClangBSSSectionAttr>())
417 var->addAttribute("bss-section", SA->getName());
418 if (auto *SA = D.getAttr<PragmaClangDataSectionAttr>())
419 var->addAttribute("data-section", SA->getName());
420 if (auto *SA = D.getAttr<PragmaClangRodataSectionAttr>())
421 var->addAttribute("rodata-section", SA->getName());
422 if (auto *SA = D.getAttr<PragmaClangRelroSectionAttr>())
423 var->addAttribute("relro-section", SA->getName());
425 if (const SectionAttr *SA = D.getAttr<SectionAttr>())
426 var->setSection(SA->getName());
428 if (D.hasAttr<UsedAttr>())
429 CGM.addUsedGlobal(var);
431 // We may have to cast the constant because of the initializer
432 // mismatch above.
434 // FIXME: It is really dangerous to store this in the map; if anyone
435 // RAUWs the GV, the uses of this constant will be invalid.
436 llvm::Constant *castedAddr =
437 llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(var, expectedType);
438 if (var != castedAddr)
439 LocalDeclMap.find(&D)->second = Address(castedAddr, alignment);
440 CGM.setStaticLocalDeclAddress(&D, castedAddr);
442 CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
444 // Emit global variable debug descriptor for static vars.
445 CGDebugInfo *DI = getDebugInfo();
446 if (DI &&
447 CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo) {
448 DI->setLocation(D.getLocation());
449 DI->EmitGlobalVariable(var, &D);
454 struct DestroyObject final : EHScopeStack::Cleanup {
455 DestroyObject(Address addr, QualType type,
456 CodeGenFunction::Destroyer *destroyer,
457 bool useEHCleanupForArray)
458 : addr(addr), type(type), destroyer(destroyer),
459 useEHCleanupForArray(useEHCleanupForArray) {}
463 CodeGenFunction::Destroyer *destroyer;
464 bool useEHCleanupForArray;
466 void Emit(CodeGenFunction &CGF, Flags flags) override {
467 // Don't use an EH cleanup recursively from an EH cleanup.
468 bool useEHCleanupForArray =
469 flags.isForNormalCleanup() && this->useEHCleanupForArray;
471 CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
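// Example of the situation the NRVO cleanups below handle (illustrative):
//   X f() {
//     X x;
//     ...
//     return x;   // 'x' may be constructed directly in the return slot (NRVO)
//   }
// When NRVO fires, 'x' must not be destroyed on the normal exit path, so a
// flag records whether it happened; exceptional paths always run the
// destructor, as noted in Emit() below.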
475 template <class Derived>
476 struct DestroyNRVOVariable : EHScopeStack::Cleanup {
477 DestroyNRVOVariable(Address addr, QualType type, llvm::Value *NRVOFlag)
478 : NRVOFlag(NRVOFlag), Loc(addr), Ty(type) {}
480 llvm::Value *NRVOFlag;
484 void Emit(CodeGenFunction &CGF, Flags flags) override {
485 // Along the exceptions path we always execute the dtor.
486 bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
488 llvm::BasicBlock *SkipDtorBB = nullptr;
490 // If we exited via NRVO, we skip the destructor call.
491 llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
492 SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
493 llvm::Value *DidNRVO =
494 CGF.Builder.CreateFlagLoad(NRVOFlag, "nrvo.val");
495 CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
496 CGF.EmitBlock(RunDtorBB);
499 static_cast<Derived *>(this)->emitDestructorCall(CGF);
501 if (NRVO) CGF.EmitBlock(SkipDtorBB);
504 virtual ~DestroyNRVOVariable() = default;
507 struct DestroyNRVOVariableCXX final
508 : DestroyNRVOVariable<DestroyNRVOVariableCXX> {
509 DestroyNRVOVariableCXX(Address addr, QualType type,
510 const CXXDestructorDecl *Dtor, llvm::Value *NRVOFlag)
511 : DestroyNRVOVariable<DestroyNRVOVariableCXX>(addr, type, NRVOFlag),
514 const CXXDestructorDecl *Dtor;
516 void emitDestructorCall(CodeGenFunction &CGF) {
517 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
518 /*ForVirtualBase=*/false,
519 /*Delegating=*/false, Loc, Ty);
523 struct DestroyNRVOVariableC final
524 : DestroyNRVOVariable<DestroyNRVOVariableC> {
525 DestroyNRVOVariableC(Address addr, llvm::Value *NRVOFlag, QualType Ty)
526 : DestroyNRVOVariable<DestroyNRVOVariableC>(addr, Ty, NRVOFlag) {}
528 void emitDestructorCall(CodeGenFunction &CGF) {
529 CGF.destroyNonTrivialCStruct(CGF, Loc, Ty);
533 struct CallStackRestore final : EHScopeStack::Cleanup {
535 CallStackRestore(Address Stack) : Stack(Stack) {}
536 void Emit(CodeGenFunction &CGF, Flags flags) override {
537 llvm::Value *V = CGF.Builder.CreateLoad(Stack);
538 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
539 CGF.Builder.CreateCall(F, V);
543 struct ExtendGCLifetime final : EHScopeStack::Cleanup {
545 ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
547 void Emit(CodeGenFunction &CGF, Flags flags) override {
548 // Compute the address of the local variable, in case it's a
549 // byref or something.
550 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
551 Var.getType(), VK_LValue, SourceLocation());
552 llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
554 CGF.EmitExtendGCLifetime(value);
558 struct CallCleanupFunction final : EHScopeStack::Cleanup {
559 llvm::Constant *CleanupFn;
560 const CGFunctionInfo &FnInfo;
563 CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
565 : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
567 void Emit(CodeGenFunction &CGF, Flags flags) override {
568 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(&Var), false,
569 Var.getType(), VK_LValue, SourceLocation());
570 // Compute the address of the local variable, in case it's a byref
572 llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getPointer();
574 // In some cases, the type of the function argument will be different from
575 // the type of the pointer. An example of this is
576 // void f(void* arg);
577 // __attribute__((cleanup(f))) void *g;
579 // To fix this we insert a bitcast here.
580 QualType ArgTy = FnInfo.arg_begin()->type;
581 llvm::Value *Arg =
582 CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
584 CallArgList Args;
585 Args.add(RValue::get(Arg),
586 CGF.getContext().getPointerType(Var.getType()));
587 auto Callee = CGCallee::forDirect(CleanupFn);
588 CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
591 } // end anonymous namespace
593 /// EmitAutoVarWithLifetime - Does the setup required for an automatic
594 /// variable with lifetime.
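// For illustration (added commentary), under ARC a local such as
//   __strong id obj = [Foo new];
// gets a release cleanup at scope exit, while
//   __weak id w = obj;
// gets an objc_destroyWeak cleanup that is also registered for EH, as the
// OCL_Weak case below notes.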
595 static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
597 Qualifiers::ObjCLifetime lifetime) {
599 case Qualifiers::OCL_None:
600 llvm_unreachable("present but none");
602 case Qualifiers::OCL_ExplicitNone:
606 case Qualifiers::OCL_Strong: {
607 CodeGenFunction::Destroyer *destroyer =
608 (var.hasAttr<ObjCPreciseLifetimeAttr>()
609 ? CodeGenFunction::destroyARCStrongPrecise
610 : CodeGenFunction::destroyARCStrongImprecise);
612 CleanupKind cleanupKind = CGF.getARCCleanupKind();
613 CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
614 cleanupKind & EHCleanup);
617 case Qualifiers::OCL_Autoreleasing:
621 case Qualifiers::OCL_Weak:
622 // __weak objects always get EH cleanups; otherwise, exceptions
623 // could cause really nasty crashes instead of mere leaks.
624 CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
625 CodeGenFunction::destroyARCWeak,
626 /*useEHCleanup*/ true);
631 static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
632 if (const Expr *e = dyn_cast<Expr>(s)) {
633 // Skip the most common kinds of expressions that make
634 // hierarchy-walking expensive.
635 s = e = e->IgnoreParenCasts();
637 if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
638 return (ref->getDecl() == &var);
639 if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
640 const BlockDecl *block = be->getBlockDecl();
641 for (const auto &I : block->captures()) {
642 if (I.getVariable() == &var)
648 for (const Stmt *SubStmt : s->children())
649 // SubStmt might be null; as in missing decl or conditional of an if-stmt.
650 if (SubStmt && isAccessedBy(var, SubStmt))
656 static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
657 if (!decl) return false;
658 if (!isa<VarDecl>(decl)) return false;
659 const VarDecl *var = cast<VarDecl>(decl);
660 return isAccessedBy(*var, e);
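// Illustrative case for tryEmitARCCopyWeakInit below (added commentary):
//   __weak id a = ...;
//   __weak id b = a;   // can be emitted as objc_copyWeak(&b, &a) rather than
//                      // loading 'a' and re-registering the value as weak.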
663 static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
664 const LValue &destLV, const Expr *init) {
665 bool needsCast = false;
667 while (auto castExpr = dyn_cast<CastExpr>(init->IgnoreParens())) {
668 switch (castExpr->getCastKind()) {
669 // Look through casts that don't require representation changes.
672 case CK_BlockPointerToObjCPointerCast:
676 // If we find an l-value to r-value cast from a __weak variable,
677 // emit this operation as a copy or move.
678 case CK_LValueToRValue: {
679 const Expr *srcExpr = castExpr->getSubExpr();
680 if (srcExpr->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
683 // Emit the source l-value.
684 LValue srcLV = CGF.EmitLValue(srcExpr);
686 // Handle a formal type change to avoid asserting.
687 auto srcAddr = srcLV.getAddress();
689 srcAddr = CGF.Builder.CreateElementBitCast(srcAddr,
690 destLV.getAddress().getElementType());
693 // If it was an l-value, use objc_copyWeak.
694 if (srcExpr->getValueKind() == VK_LValue) {
695 CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
697 assert(srcExpr->getValueKind() == VK_XValue);
698 CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
703 // Stop at anything else.
708 init = castExpr->getSubExpr();
713 static void drillIntoBlockVariable(CodeGenFunction &CGF,
715 const VarDecl *var) {
716 lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
719 void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
720 SourceLocation Loc) {
721 if (!SanOpts.has(SanitizerKind::NullabilityAssign))
724 auto Nullability = LHS.getType()->getNullability(getContext());
725 if (!Nullability || *Nullability != NullabilityKind::NonNull)
728 // If the left-hand side is required to be nonnull, check that the
729 // right-hand side of the assignment is nonnull.
730 SanitizerScope SanScope(this);
731 llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
732 llvm::Constant *StaticData[] = {
733 EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
734 llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
735 llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
736 EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
737 SanitizerHandler::TypeMismatch, StaticData, RHS);
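// Illustrative trigger for the check above (added commentary):
//   int *_Nonnull p;
//   void set(int *q) { p = q; }
// With -fsanitize=nullability-assign, a runtime check verifies that 'q' is
// non-null before the store to the _Nonnull l-value.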
740 void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
741 LValue lvalue, bool capturedByInit) {
742 Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
744 llvm::Value *value = EmitScalarExpr(init);
746 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
747 EmitNullabilityCheck(lvalue, value, init->getExprLoc());
748 EmitStoreThroughLValue(RValue::get(value), lvalue, true);
752 if (const CXXDefaultInitExpr *DIE = dyn_cast<CXXDefaultInitExpr>(init))
753 init = DIE->getExpr();
755 // If we're emitting a value with lifetime, we have to do the
756 // initialization *before* we leave the cleanup scopes.
757 if (const FullExpr *fe = dyn_cast<FullExpr>(init)) {
758 enterFullExpression(fe);
759 init = fe->getSubExpr();
761 CodeGenFunction::RunCleanupsScope Scope(*this);
763 // We have to maintain the illusion that the variable is
764 // zero-initialized. If the variable might be accessed in its
765 // initializer, zero-initialize before running the initializer, then
766 // actually perform the initialization with an assign.
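// Illustrative example of such an access (added commentary):
//   __block id x = ^{ use(x); };
// The block in the initializer captures 'x' before the initializer has
// produced a value, so 'x' must observably start out as nil.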
767 bool accessedByInit = false;
768 if (lifetime != Qualifiers::OCL_ExplicitNone)
769 accessedByInit = (capturedByInit || isAccessedBy(D, init));
770 if (accessedByInit) {
771 LValue tempLV = lvalue;
772 // Drill down to the __block object if necessary.
773 if (capturedByInit) {
774 // We can use a simple GEP for this because it can't have been
776 tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
781 auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
782 llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());
784 // If __weak, we want to use a barrier under certain conditions.
785 if (lifetime == Qualifiers::OCL_Weak)
786 EmitARCInitWeak(tempLV.getAddress(), zero);
788 // Otherwise just do a simple store.
790 EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
793 // Emit the initializer.
794 llvm::Value *value = nullptr;
797 case Qualifiers::OCL_None:
798 llvm_unreachable("present but none");
800 case Qualifiers::OCL_Strong: {
801 if (!D || !isa<VarDecl>(D) || !cast<VarDecl>(D)->isARCPseudoStrong()) {
802 value = EmitARCRetainScalarExpr(init);
805 // If D is pseudo-strong, treat it like __unsafe_unretained here. This means
806 // that we omit the retain, which causes non-autoreleased return values to be
807 // immediately released.
811 case Qualifiers::OCL_ExplicitNone:
812 value = EmitARCUnsafeUnretainedScalarExpr(init);
815 case Qualifiers::OCL_Weak: {
816 // If it's not accessed by the initializer, try to emit the
817 // initialization with a copy or move.
818 if (!accessedByInit && tryEmitARCCopyWeakInit(*this, lvalue, init)) {
822 // No way to optimize a producing initializer into this. It's not
823 // worth optimizing for, because the value will immediately
824 // disappear in the common case.
825 value = EmitScalarExpr(init);
827 if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
829 EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
831 EmitARCInitWeak(lvalue.getAddress(), value);
835 case Qualifiers::OCL_Autoreleasing:
836 value = EmitARCRetainAutoreleaseScalarExpr(init);
840 if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
842 EmitNullabilityCheck(lvalue, value, init->getExprLoc());
844 // If the variable might have been accessed by its initializer, we
845 // might have to initialize with a barrier. We have to do this for
846 // both __weak and __strong, but __weak got filtered out above.
847 if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
848 llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
849 EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
850 EmitARCRelease(oldValue, ARCImpreciseLifetime);
854 EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
857 /// Decide whether we can emit the non-zero parts of the specified initializer
858 /// with equal or fewer than NumStores scalar stores.
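// For example (illustrative): 'int a[100] = { 0, 0, 3 };' is mostly zero;
// after a bzero of the whole array a single scalar store of a[2] finishes the
// initialization, so it fits comfortably within the store budget.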
859 static bool canEmitInitWithFewStoresAfterBZero(llvm::Constant *Init,
860 unsigned &NumStores) {
861 // Zero and Undef never requires any extra stores.
862 if (isa<llvm::ConstantAggregateZero>(Init) ||
863 isa<llvm::ConstantPointerNull>(Init) ||
864 isa<llvm::UndefValue>(Init))
865 return true;
866 if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
867 isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
868 isa<llvm::ConstantExpr>(Init))
869 return Init->isNullValue() || NumStores--;
871 // See if we can emit each element.
872 if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
873 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
874 llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
875 if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
881 if (llvm::ConstantDataSequential *CDS =
882 dyn_cast<llvm::ConstantDataSequential>(Init)) {
883 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
884 llvm::Constant *Elt = CDS->getElementAsConstant(i);
885 if (!canEmitInitWithFewStoresAfterBZero(Elt, NumStores))
891 // Anything else is hard and scary.
892 return false;
895 /// For inits that canEmitInitWithFewStoresAfterBZero returned true for, emit
896 /// the scalar stores that would be required.
897 static void emitStoresForInitAfterBZero(CodeGenModule &CGM,
898 llvm::Constant *Init, Address Loc,
899 bool isVolatile, CGBuilderTy &Builder) {
900 assert(!Init->isNullValue() && !isa<llvm::UndefValue>(Init) &&
901 "called emitStoresForInitAfterBZero for zero or undef value.");
903 if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
904 isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
905 isa<llvm::ConstantExpr>(Init)) {
906 Builder.CreateStore(Init, Loc, isVolatile);
910 if (llvm::ConstantDataSequential *CDS =
911 dyn_cast<llvm::ConstantDataSequential>(Init)) {
912 for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
913 llvm::Constant *Elt = CDS->getElementAsConstant(i);
915 // If necessary, get a pointer to the element and emit it.
916 if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
917 emitStoresForInitAfterBZero(
918 CGM, Elt, Builder.CreateConstInBoundsGEP2_32(Loc, 0, i), isVolatile,
924 assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
925 "Unknown value type!");
927 for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
928 llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
930 // If necessary, get a pointer to the element and emit it.
931 if (!Elt->isNullValue() && !isa<llvm::UndefValue>(Elt))
932 emitStoresForInitAfterBZero(CGM, Elt,
933 Builder.CreateConstInBoundsGEP2_32(Loc, 0, i),
934 isVolatile, Builder);
938 /// Decide whether we should use bzero plus some stores to initialize a local
939 /// variable instead of using a memcpy from a constant global. It is beneficial
940 /// to use bzero if the global is all zeros, or mostly zeros and large.
941 static bool shouldUseBZeroPlusStoresToInitialize(llvm::Constant *Init,
942 uint64_t GlobalSize) {
943 // If a global is all zeros, always use a bzero.
944 if (isa<llvm::ConstantAggregateZero>(Init)) return true;
946 // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
947 // do it if it will require 6 or fewer scalar stores.
948 // TODO: Should the budget depend on the size? Avoiding a large global
949 // warrants plopping in more stores.
950 unsigned StoreBudget = 6;
951 uint64_t SizeLimit = 32;
953 return GlobalSize > SizeLimit &&
954 canEmitInitWithFewStoresAfterBZero(Init, StoreBudget);
957 /// Decide whether we should use memset to initialize a local variable instead
958 /// of using a memcpy from a constant global. Assumes we've already decided
959 /// not to use bzero.
960 /// FIXME We could be more clever, as we are for bzero above, and generate
961 /// memset followed by stores. It's unclear that's worth the effort.
962 static llvm::Value *shouldUseMemSetToInitialize(llvm::Constant *Init,
964 const llvm::DataLayout &DL) {
965 uint64_t SizeLimit = 32;
966 if (GlobalSize <= SizeLimit)
968 return llvm::isBytewiseValue(Init, DL);
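// Illustrative example (added commentary): a 64-byte char array whose
// initializer repeats the byte 0x41 is byte-wise uniform, so it can be
// lowered to a single memset of 0x41 instead of a memcpy from a global.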
971 /// Decide whether we want to split a constant structure or array store into a
972 /// sequence of its fields' stores. This may cost us code size and compilation
973 /// speed, but plays better with store optimizations.
974 static bool shouldSplitConstantStore(CodeGenModule &CGM,
975 uint64_t GlobalByteSize) {
976 // Don't break things that occupy more than one cacheline.
977 uint64_t ByteSizeLimit = 64;
978 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
980 if (GlobalByteSize <= ByteSizeLimit)
985 enum class IsPattern { No, Yes };
987 /// Generate a constant filled with either a pattern or zeroes.
988 static llvm::Constant *patternOrZeroFor(CodeGenModule &CGM, IsPattern isPattern,
990 if (isPattern == IsPattern::Yes)
991 return initializationPatternFor(CGM, Ty);
993 return llvm::Constant::getNullValue(Ty);
996 static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
997 llvm::Constant *constant);
999 /// Helper function for constWithPadding() to deal with padding in structures.
1000 static llvm::Constant *constStructWithPadding(CodeGenModule &CGM,
1001 IsPattern isPattern,
1002 llvm::StructType *STy,
1003 llvm::Constant *constant) {
1004 const llvm::DataLayout &DL = CGM.getDataLayout();
1005 const llvm::StructLayout *Layout = DL.getStructLayout(STy);
1006 llvm::Type *Int8Ty = llvm::IntegerType::getInt8Ty(CGM.getLLVMContext());
1007 unsigned SizeSoFar = 0;
1008 SmallVector<llvm::Constant *, 8> Values;
1009 bool NestedIntact = true;
1010 for (unsigned i = 0, e = STy->getNumElements(); i != e; i++) {
1011 unsigned CurOff = Layout->getElementOffset(i);
1012 if (SizeSoFar < CurOff) {
1013 assert(!STy->isPacked());
1014 auto *PadTy = llvm::ArrayType::get(Int8Ty, CurOff - SizeSoFar);
1015 Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1017 llvm::Constant *CurOp;
1018 if (constant->isZeroValue())
1019 CurOp = llvm::Constant::getNullValue(STy->getElementType(i));
1021 CurOp = cast<llvm::Constant>(constant->getAggregateElement(i));
1022 auto *NewOp = constWithPadding(CGM, isPattern, CurOp);
1024 NestedIntact = false;
1025 Values.push_back(NewOp);
1026 SizeSoFar = CurOff + DL.getTypeAllocSize(CurOp->getType());
1028 unsigned TotalSize = Layout->getSizeInBytes();
1029 if (SizeSoFar < TotalSize) {
1030 auto *PadTy = llvm::ArrayType::get(Int8Ty, TotalSize - SizeSoFar);
1031 Values.push_back(patternOrZeroFor(CGM, isPattern, PadTy));
1033 if (NestedIntact && Values.size() == STy->getNumElements())
1035 return llvm::ConstantStruct::getAnon(Values, STy->isPacked());
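// Illustrative example of the padding handling above (added commentary):
//   struct { char c; int i; } s = { 1, 2 };
// typically has three bytes of padding between 'c' and 'i'; those bytes are
// filled with either zeroes or the initialization pattern so the whole
// object, padding included, gets a defined initial value.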
1038 /// Replace all padding bytes in a given constant with either a pattern byte or
1040 static llvm::Constant *constWithPadding(CodeGenModule &CGM, IsPattern isPattern,
1041 llvm::Constant *constant) {
1042 llvm::Type *OrigTy = constant->getType();
1043 if (const auto STy = dyn_cast<llvm::StructType>(OrigTy))
1044 return constStructWithPadding(CGM, isPattern, STy, constant);
1045 if (auto *STy = dyn_cast<llvm::SequentialType>(OrigTy)) {
1046 llvm::SmallVector<llvm::Constant *, 8> Values;
1047 unsigned Size = STy->getNumElements();
1050 llvm::Type *ElemTy = STy->getElementType();
1051 bool ZeroInitializer = constant->isZeroValue();
1052 llvm::Constant *OpValue, *PaddedOp;
1053 if (ZeroInitializer) {
1054 OpValue = llvm::Constant::getNullValue(ElemTy);
1055 PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1057 for (unsigned Op = 0; Op != Size; ++Op) {
1058 if (!ZeroInitializer) {
1059 OpValue = constant->getAggregateElement(Op);
1060 PaddedOp = constWithPadding(CGM, isPattern, OpValue);
1062 Values.push_back(PaddedOp);
1064 auto *NewElemTy = Values[0]->getType();
1065 if (NewElemTy == ElemTy)
1067 if (OrigTy->isArrayTy()) {
1068 auto *ArrayTy = llvm::ArrayType::get(NewElemTy, Size);
1069 return llvm::ConstantArray::get(ArrayTy, Values);
1071 return llvm::ConstantVector::get(Values);
1077 Address CodeGenModule::createUnnamedGlobalFrom(const VarDecl &D,
1078 llvm::Constant *Constant,
1080 auto FunctionName = [&](const DeclContext *DC) -> std::string {
1081 if (const auto *FD = dyn_cast<FunctionDecl>(DC)) {
1082 if (const auto *CC = dyn_cast<CXXConstructorDecl>(FD))
1083 return CC->getNameAsString();
1084 if (const auto *CD = dyn_cast<CXXDestructorDecl>(FD))
1085 return CD->getNameAsString();
1086 return getMangledName(FD);
1087 } else if (const auto *OM = dyn_cast<ObjCMethodDecl>(DC)) {
1088 return OM->getNameAsString();
1089 } else if (isa<BlockDecl>(DC)) {
1091 } else if (isa<CapturedDecl>(DC)) {
1092 return "<captured>";
1094 llvm_unreachable("expected a function or method");
1098 // Form a simple per-variable cache of these values in case we find we
1099 // want to reuse them.
1100 llvm::GlobalVariable *&CacheEntry = InitializerConstants[&D];
1101 if (!CacheEntry || CacheEntry->getInitializer() != Constant) {
1102 auto *Ty = Constant->getType();
1103 bool isConstant = true;
1104 llvm::GlobalVariable *InsertBefore = nullptr;
1105 unsigned AS =
1106 getContext().getTargetAddressSpace(getStringLiteralAddressSpace());
1107 std::string Name;
1108 if (D.hasGlobalStorage())
1109 Name = getMangledName(&D).str() + ".const";
1110 else if (const DeclContext *DC = D.getParentFunctionOrMethod())
1111 Name = ("__const." + FunctionName(DC) + "." + D.getName()).str();
1112 else
1113 llvm_unreachable("local variable has no parent function or method");
1114 llvm::GlobalVariable *GV = new llvm::GlobalVariable(
1115 getModule(), Ty, isConstant, llvm::GlobalValue::PrivateLinkage,
1116 Constant, Name, InsertBefore, llvm::GlobalValue::NotThreadLocal, AS);
1117 GV->setAlignment(Align.getAsAlign());
1118 GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1120 } else if (CacheEntry->getAlignment() < Align.getQuantity()) {
1121 CacheEntry->setAlignment(Align.getAsAlign());
1124 return Address(CacheEntry, Align);
1127 static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
1129 CGBuilderTy &Builder,
1130 llvm::Constant *Constant,
1132 Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
1133 llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(),
1134 SrcPtr.getAddressSpace());
1135 if (SrcPtr.getType() != BP)
1136 SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
1140 static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
1141 Address Loc, bool isVolatile,
1142 CGBuilderTy &Builder,
1143 llvm::Constant *constant) {
1144 auto *Ty = constant->getType();
1145 uint64_t ConstantSize = CGM.getDataLayout().getTypeAllocSize(Ty);
1149 bool canDoSingleStore = Ty->isIntOrIntVectorTy() ||
1150 Ty->isPtrOrPtrVectorTy() || Ty->isFPOrFPVectorTy();
1151 if (canDoSingleStore) {
1152 Builder.CreateStore(constant, Loc, isVolatile);
1156 auto *SizeVal = llvm::ConstantInt::get(CGM.IntPtrTy, ConstantSize);
1158 // If the initializer is all or mostly the same, codegen with bzero / memset
1159 // then do a few stores afterward.
1160 if (shouldUseBZeroPlusStoresToInitialize(constant, ConstantSize)) {
1161 Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, 0), SizeVal,
1164 bool valueAlreadyCorrect =
1165 constant->isNullValue() || isa<llvm::UndefValue>(constant);
1166 if (!valueAlreadyCorrect) {
1167 Loc = Builder.CreateBitCast(Loc, Ty->getPointerTo(Loc.getAddressSpace()));
1168 emitStoresForInitAfterBZero(CGM, constant, Loc, isVolatile, Builder);
1173 // If the initializer is a repeated byte pattern, use memset.
1174 llvm::Value *Pattern =
1175 shouldUseMemSetToInitialize(constant, ConstantSize, CGM.getDataLayout());
1176 if (Pattern) {
1177 uint64_t Value = 0x00;
1178 if (!isa<llvm::UndefValue>(Pattern)) {
1179 const llvm::APInt &AP = cast<llvm::ConstantInt>(Pattern)->getValue();
1180 assert(AP.getBitWidth() <= 8);
1181 Value = AP.getLimitedValue();
1183 Builder.CreateMemSet(Loc, llvm::ConstantInt::get(CGM.Int8Ty, Value), SizeVal,
1188 // If the initializer is small, use a handful of stores.
1189 if (shouldSplitConstantStore(CGM, ConstantSize)) {
1190 if (auto *STy = dyn_cast<llvm::StructType>(Ty)) {
1191 // FIXME: handle the case when STy != Loc.getElementType().
1192 if (STy == Loc.getElementType()) {
1193 for (unsigned i = 0; i != constant->getNumOperands(); i++) {
1194 Address EltPtr = Builder.CreateStructGEP(Loc, i);
1195 emitStoresForConstant(
1196 CGM, D, EltPtr, isVolatile, Builder,
1197 cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)));
1201 } else if (auto *ATy = dyn_cast<llvm::ArrayType>(Ty)) {
1202 // FIXME: handle the case when ATy != Loc.getElementType().
1203 if (ATy == Loc.getElementType()) {
1204 for (unsigned i = 0; i != ATy->getNumElements(); i++) {
1205 Address EltPtr = Builder.CreateConstArrayGEP(Loc, i);
1206 emitStoresForConstant(
1207 CGM, D, EltPtr, isVolatile, Builder,
1208 cast<llvm::Constant>(Builder.CreateExtractValue(constant, i)));
1215 // Copy from a global.
1216 Builder.CreateMemCpy(Loc,
1217 createUnnamedGlobalForMemcpyFrom(
1218 CGM, D, Builder, constant, Loc.getAlignment()),
1219 SizeVal, isVolatile);
1222 static void emitStoresForZeroInit(CodeGenModule &CGM, const VarDecl &D,
1223 Address Loc, bool isVolatile,
1224 CGBuilderTy &Builder) {
1225 llvm::Type *ElTy = Loc.getElementType();
1226 llvm::Constant *constant =
1227 constWithPadding(CGM, IsPattern::No, llvm::Constant::getNullValue(ElTy));
1228 emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
1231 static void emitStoresForPatternInit(CodeGenModule &CGM, const VarDecl &D,
1232 Address Loc, bool isVolatile,
1233 CGBuilderTy &Builder) {
1234 llvm::Type *ElTy = Loc.getElementType();
1235 llvm::Constant *constant = constWithPadding(
1236 CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1237 assert(!isa<llvm::UndefValue>(constant));
1238 emitStoresForConstant(CGM, D, Loc, isVolatile, Builder, constant);
1241 static bool containsUndef(llvm::Constant *constant) {
1242 auto *Ty = constant->getType();
1243 if (isa<llvm::UndefValue>(constant))
1245 if (Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy())
1246 for (llvm::Use &Op : constant->operands())
1247 if (containsUndef(cast<llvm::Constant>(Op)))
1252 static llvm::Constant *replaceUndef(CodeGenModule &CGM, IsPattern isPattern,
1253 llvm::Constant *constant) {
1254 auto *Ty = constant->getType();
1255 if (isa<llvm::UndefValue>(constant))
1256 return patternOrZeroFor(CGM, isPattern, Ty);
1257 if (!(Ty->isStructTy() || Ty->isArrayTy() || Ty->isVectorTy()))
1259 if (!containsUndef(constant))
1261 llvm::SmallVector<llvm::Constant *, 8> Values(constant->getNumOperands());
1262 for (unsigned Op = 0, NumOp = constant->getNumOperands(); Op != NumOp; ++Op) {
1263 auto *OpValue = cast<llvm::Constant>(constant->getOperand(Op));
1264 Values[Op] = replaceUndef(CGM, isPattern, OpValue);
1266 if (Ty->isStructTy())
1267 return llvm::ConstantStruct::get(cast<llvm::StructType>(Ty), Values);
1268 if (Ty->isArrayTy())
1269 return llvm::ConstantArray::get(cast<llvm::ArrayType>(Ty), Values);
1270 assert(Ty->isVectorTy());
1271 return llvm::ConstantVector::get(Values);
1274 /// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
1275 /// variable declaration with auto, register, or no storage class specifier.
1276 /// These turn into simple stack objects, or GlobalValues depending on target.
1277 void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
1278 AutoVarEmission emission = EmitAutoVarAlloca(D);
1279 EmitAutoVarInit(emission);
1280 EmitAutoVarCleanups(emission);
1283 /// Emit a lifetime.begin marker if some criteria are satisfied.
1284 /// \return a pointer to the temporary size Value if a marker was emitted, null
1286 llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
1287 llvm::Value *Addr) {
1288 if (!ShouldEmitLifetimeMarkers)
1291 assert(Addr->getType()->getPointerAddressSpace() ==
1292 CGM.getDataLayout().getAllocaAddrSpace() &&
1293 "Pointer should be in alloca address space");
1294 llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
1295 Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
1297 Builder.CreateCall(CGM.getLLVMLifetimeStartFn(), {SizeV, Addr});
1298 C->setDoesNotThrow();
1302 void CodeGenFunction::EmitLifetimeEnd(llvm::Value *Size, llvm::Value *Addr) {
1303 assert(Addr->getType()->getPointerAddressSpace() ==
1304 CGM.getDataLayout().getAllocaAddrSpace() &&
1305 "Pointer should be in alloca address space");
1306 Addr = Builder.CreateBitCast(Addr, AllocaInt8PtrTy);
1308 Builder.CreateCall(CGM.getLLVMLifetimeEndFn(), {Size, Addr});
1309 C->setDoesNotThrow();
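// For illustration (added commentary), the markers emitted by the two
// functions above look roughly like:
//   call void @llvm.lifetime.start.p0i8(i64 <size>, i8* <alloca>)
//   ...
//   call void @llvm.lifetime.end.p0i8(i64 <size>, i8* <alloca>)
// which lets the optimizer reuse stack slots whose lifetimes do not overlap.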
1312 void CodeGenFunction::EmitAndRegisterVariableArrayDimensions(
1313 CGDebugInfo *DI, const VarDecl &D, bool EmitDebugInfo) {
1314 // For each dimension stores its QualType and corresponding
1315 // size-expression Value.
1316 SmallVector<CodeGenFunction::VlaSizePair, 4> Dimensions;
1317 SmallVector<IdentifierInfo *, 4> VLAExprNames;
1319 // Break down the array into individual dimensions.
1320 QualType Type1D = D.getType();
1321 while (getContext().getAsVariableArrayType(Type1D)) {
1322 auto VlaSize = getVLAElements1D(Type1D);
1323 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1324 Dimensions.emplace_back(C, Type1D.getUnqualifiedType());
1326 // Generate a locally unique name for the size expression.
1327 Twine Name = Twine("__vla_expr") + Twine(VLAExprCounter++);
1328 SmallString<12> Buffer;
1329 StringRef NameRef = Name.toStringRef(Buffer);
1330 auto &Ident = getContext().Idents.getOwn(NameRef);
1331 VLAExprNames.push_back(&Ident);
1332 auto SizeExprAddr =
1333 CreateDefaultAlignTempAlloca(VlaSize.NumElts->getType(), NameRef);
1334 Builder.CreateStore(VlaSize.NumElts, SizeExprAddr);
1335 Dimensions.emplace_back(SizeExprAddr.getPointer(),
1336 Type1D.getUnqualifiedType());
1338 Type1D = VlaSize.Type;
1344 // Register each dimension's size-expression with a DILocalVariable,
1345 // so that it can be used by CGDebugInfo when instantiating a DISubrange
1346 // to describe this array.
1347 unsigned NameIdx = 0;
1348 for (auto &VlaSize : Dimensions) {
1350 if (auto *C = dyn_cast<llvm::ConstantInt>(VlaSize.NumElts))
1351 MD = llvm::ConstantAsMetadata::get(C);
1353 // Create an artificial VarDecl to generate debug info for.
1354 IdentifierInfo *NameIdent = VLAExprNames[NameIdx++];
1355 auto VlaExprTy = VlaSize.NumElts->getType()->getPointerElementType();
1356 auto QT = getContext().getIntTypeForBitwidth(
1357 VlaExprTy->getScalarSizeInBits(), false);
1358 auto *ArtificialDecl = VarDecl::Create(
1359 getContext(), const_cast<DeclContext *>(D.getDeclContext()),
1360 D.getLocation(), D.getLocation(), NameIdent, QT,
1361 getContext().CreateTypeSourceInfo(QT), SC_Auto);
1362 ArtificialDecl->setImplicit();
1364 MD = DI->EmitDeclareOfAutoVariable(ArtificialDecl, VlaSize.NumElts,
1367 assert(MD && "No Size expression debug node created");
1368 DI->registerVLASizeExpression(VlaSize.Type, MD);
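// Illustrative example (added commentary): for
//   void f(int n, int m) { int a[n][m]; /* ... */ }
// each dimension's size is stored into a temporary named __vla_expr0,
// __vla_expr1, ..., and an artificial variable is registered with the debug
// info so a DISubrange can refer to the dynamic bound.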
1372 /// EmitAutoVarAlloca - Emit the alloca and debug information for a
1373 /// local variable. Does not emit initialization or destruction.
1374 CodeGenFunction::AutoVarEmission
1375 CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
1376 QualType Ty = D.getType();
1377 assert(
1378 Ty.getAddressSpace() == LangAS::Default ||
1379 (Ty.getAddressSpace() == LangAS::opencl_private && getLangOpts().OpenCL));
1381 AutoVarEmission emission(D);
1383 bool isEscapingByRef = D.isEscapingByref();
1384 emission.IsEscapingByRef = isEscapingByRef;
1386 CharUnits alignment = getContext().getDeclAlign(&D);
1388 // If the type is variably-modified, emit all the VLA sizes for it.
1389 if (Ty->isVariablyModifiedType())
1390 EmitVariablyModifiedType(Ty);
1392 auto *DI = getDebugInfo();
1393 bool EmitDebugInfo = DI && CGM.getCodeGenOpts().getDebugInfo() >=
1394 codegenoptions::LimitedDebugInfo;
1396 Address address = Address::invalid();
1397 Address AllocaAddr = Address::invalid();
1398 Address OpenMPLocalAddr =
1399 getLangOpts().OpenMP
1400 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
1401 : Address::invalid();
1402 bool NRVO = getLangOpts().ElideConstructors && D.isNRVOVariable();
1404 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
1405 address = OpenMPLocalAddr;
1406 } else if (Ty->isConstantSizeType()) {
1407 // If this value is an array or struct with a statically determinable
1408 // constant initializer, there are optimizations we can do.
1410 // TODO: We should constant-evaluate the initializer of any variable,
1411 // as long as it is initialized by a constant expression. Currently,
1412 // isConstantInitializer produces wrong answers for structs with
1413 // reference or bitfield members, and a few other cases, and checking
1414 // for POD-ness protects us from some of these.
1415 if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
1417 ((Ty.isPODType(getContext()) ||
1418 getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
1419 D.getInit()->isConstantInitializer(getContext(), false)))) {
1421 // If the variable's a const type, and it's neither an NRVO
1422 // candidate nor a __block variable and has no mutable members,
1423 // emit it as a global instead.
1424 // The exception is a variable located in a non-constant address space
1425 // in OpenCL.
1426 if ((!getLangOpts().OpenCL ||
1427 Ty.getAddressSpace() == LangAS::opencl_constant) &&
1428 (CGM.getCodeGenOpts().MergeAllConstants && !NRVO &&
1429 !isEscapingByRef && CGM.isTypeConstant(Ty, true))) {
1430 EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
1432 // Signal this condition to later callbacks.
1433 emission.Addr = Address::invalid();
1434 assert(emission.wasEmittedAsGlobal());
1438 // Otherwise, tell the initialization code that we're in this case.
1439 emission.IsConstantAggregate = true;
1442 // A normal fixed sized variable becomes an alloca in the entry block,
1443 // unless:
1444 // - it's an NRVO variable.
1445 // - we are compiling OpenMP and it's an OpenMP local variable.
1447 // The named return value optimization: allocate this variable in the
1448 // return slot, so that we can elide the copy when returning this
1449 // variable (C++0x [class.copy]p34).
1450 address = ReturnValue;
1452 if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
1453 const auto *RD = RecordTy->getDecl();
1454 const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
1455 if ((CXXRD && !CXXRD->hasTrivialDestructor()) ||
1456 RD->isNonTrivialToPrimitiveDestroy()) {
1457 // Create a flag that is used to indicate when the NRVO was applied
1458 // to this variable. Set it to zero to indicate that NRVO was not
1460 llvm::Value *Zero = Builder.getFalse();
1461 Address NRVOFlag =
1462 CreateTempAlloca(Zero->getType(), CharUnits::One(), "nrvo");
1463 EnsureInsertPoint();
1464 Builder.CreateStore(Zero, NRVOFlag);
1466 // Record the NRVO flag for this variable.
1467 NRVOFlags[&D] = NRVOFlag.getPointer();
1468 emission.NRVOFlag = NRVOFlag.getPointer();
1472 CharUnits allocaAlignment;
1473 llvm::Type *allocaTy;
1474 if (isEscapingByRef) {
1475 auto &byrefInfo = getBlockByrefInfo(&D);
1476 allocaTy = byrefInfo.Type;
1477 allocaAlignment = byrefInfo.ByrefAlignment;
1479 allocaTy = ConvertTypeForMem(Ty);
1480 allocaAlignment = alignment;
1483 // Create the alloca. Note that we set the name separately from
1484 // building the instruction so that it's there even in no-asserts
1486 address = CreateTempAlloca(allocaTy, allocaAlignment, D.getName(),
1487 /*ArraySize=*/nullptr, &AllocaAddr);
1489 // Don't emit lifetime markers for MSVC catch parameters. The lifetime of
1490 // the catch parameter starts in the catchpad instruction, and we can't
1491 // insert code in those basic blocks.
1492 bool IsMSCatchParam =
1493 D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
1495 // Emit a lifetime intrinsic if meaningful. There's no point in doing this
1496 // if we don't have a valid insertion point (?).
1497 if (HaveInsertPoint() && !IsMSCatchParam) {
1498 // If there's a jump into the lifetime of this variable, its lifetime
1499 // gets broken up into several regions in IR, which requires more work
1500 // to handle correctly. For now, just omit the intrinsics; this is a
1501 // rare case, and it's better to just be conservatively correct.
1504 // We have to do this in all language modes if there's a jump past the
1505 // declaration. We also have to do it in C if there's a jump to an
1506 // earlier point in the current block because non-VLA lifetimes begin as
1507 // soon as the containing block is entered, not when its variables
1508 // actually come into scope; suppressing the lifetime annotations
1509 // completely in this case is unnecessarily pessimistic, but again, this
1510 // is rare.
1511 if (!Bypasses.IsBypassed(&D) &&
1512 !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
1513 uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
1514 emission.SizeForLifetimeMarkers =
1515 EmitLifetimeStart(size, AllocaAddr.getPointer());
1518 assert(!emission.useLifetimeMarkers());
1522 EnsureInsertPoint();
1524 if (!DidCallStackSave) {
1526 Address Stack =
1527 CreateTempAlloca(Int8PtrTy, getPointerAlign(), "saved_stack");
1529 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
1530 llvm::Value *V = Builder.CreateCall(F);
1531 Builder.CreateStore(V, Stack);
1533 DidCallStackSave = true;
1535 // Push a cleanup block and restore the stack there.
1536 // FIXME: in general circumstances, this should be an EH cleanup.
1537 pushStackRestore(NormalCleanup, Stack);
1540 auto VlaSize = getVLASize(Ty);
1541 llvm::Type *llvmTy = ConvertTypeForMem(VlaSize.Type);
1543 // Allocate memory for the array.
1544 address = CreateTempAlloca(llvmTy, alignment, "vla", VlaSize.NumElts,
1547 // If we have debug info enabled, properly describe the VLA dimensions for
1548 // this type by registering the vla size expression for each of the
1549 // dimensions.
1550 EmitAndRegisterVariableArrayDimensions(DI, D, EmitDebugInfo);
1553 setAddrOfLocalVar(&D, address);
1554 emission.Addr = address;
1555 emission.AllocaAddr = AllocaAddr;
1557 // Emit debug info for local var declaration.
1558 if (EmitDebugInfo && HaveInsertPoint()) {
1559 Address DebugAddr = address;
1560 bool UsePointerValue = NRVO && ReturnValuePointer.isValid();
1561 DI->setLocation(D.getLocation());
1563 // If NRVO, use a pointer to the return address.
1564 if (UsePointerValue)
1565 DebugAddr = ReturnValuePointer;
1567 (void)DI->EmitDeclareOfAutoVariable(&D, DebugAddr.getPointer(), Builder,
1571 if (D.hasAttr<AnnotateAttr>() && HaveInsertPoint())
1572 EmitVarAnnotations(&D, address.getPointer());
1574 // Make sure we call @llvm.lifetime.end.
1575 if (emission.useLifetimeMarkers())
1576 EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
1577 emission.getOriginalAllocatedAddress(),
1578 emission.getSizeForLifetimeMarkers());
1583 static bool isCapturedBy(const VarDecl &, const Expr *);
1585 /// Determines whether the given __block variable is potentially
1586 /// captured by the given statement.
1587 static bool isCapturedBy(const VarDecl &Var, const Stmt *S) {
1588 if (const Expr *E = dyn_cast<Expr>(S))
1589 return isCapturedBy(Var, E);
1590 for (const Stmt *SubStmt : S->children())
1591 if (isCapturedBy(Var, SubStmt))
1596 /// Determines whether the given __block variable is potentially
1597 /// captured by the given expression.
1598 static bool isCapturedBy(const VarDecl &Var, const Expr *E) {
1599 // Skip the most common kinds of expressions that make
1600 // hierarchy-walking expensive.
1601 E = E->IgnoreParenCasts();
1603 if (const BlockExpr *BE = dyn_cast<BlockExpr>(E)) {
1604 const BlockDecl *Block = BE->getBlockDecl();
1605 for (const auto &I : Block->captures()) {
1606 if (I.getVariable() == &Var)
1610 // No need to walk into the subexpressions.
1614 if (const StmtExpr *SE = dyn_cast<StmtExpr>(E)) {
1615 const CompoundStmt *CS = SE->getSubStmt();
1616 for (const auto *BI : CS->body())
1617 if (const auto *BIE = dyn_cast<Expr>(BI)) {
1618 if (isCapturedBy(Var, BIE))
1619 return true;
1620 }
1621 else if (const auto *DS = dyn_cast<DeclStmt>(BI)) {
1622 // special case declarations
1623 for (const auto *I : DS->decls()) {
1624 if (const auto *VD = dyn_cast<VarDecl>((I))) {
1625 const Expr *Init = VD->getInit();
1626 if (Init && isCapturedBy(Var, Init))
1632 // FIXME: Conservatively assume that arbitrary statements cause capturing.
1633 // Later, provide code to poke into statements for capture analysis.
1638 for (const Stmt *SubStmt : E->children())
1639 if (isCapturedBy(Var, SubStmt))
1645 /// Determine whether the given initializer is trivial in the sense
1646 /// that it requires no code to be generated.
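// Illustrative examples (added commentary): 'int x;' with no initializer, or
// 'S s;' where S has a trivial default constructor, need no initialization
// code; an initializer that requires zero-initialization or a non-trivial
// constructor does.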
1647 bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
1648 if (!Init)
1649 return true;
1651 if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
1652 if (CXXConstructorDecl *Constructor = Construct->getConstructor())
1653 if (Constructor->isTrivial() &&
1654 Constructor->isDefaultConstructor() &&
1655 !Construct->requiresZeroInitialization())
1656 return true;
1658 return false;
1661 void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
1664 auto trivialAutoVarInit = getContext().getLangOpts().getTrivialAutoVarInit();
1665 CharUnits Size = getContext().getTypeSizeInChars(type);
1666 bool isVolatile = type.isVolatileQualified();
1667 if (!Size.isZero()) {
1668 switch (trivialAutoVarInit) {
1669 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1670 llvm_unreachable("Uninitialized handled by caller");
1671 case LangOptions::TrivialAutoVarInitKind::Zero:
1672 emitStoresForZeroInit(CGM, D, Loc, isVolatile, Builder);
1673 break;
1674 case LangOptions::TrivialAutoVarInitKind::Pattern:
1675 emitStoresForPatternInit(CGM, D, Loc, isVolatile, Builder);
1676 break;
1677 }
1678 return;
1679 }
1681 // VLAs look zero-sized to getTypeInfo. We can't emit constant stores to
1682 // them, so emit a memcpy with the VLA size to initialize each element.
1683 // Technically zero-sized or negative-sized VLAs are undefined, and UBSan
1684 // will catch that code, but there exists code which generates zero-sized
1685 // VLAs. Be nice and initialize whatever they requested.
1686 const auto *VlaType = getContext().getAsVariableArrayType(type);
1687 if (!VlaType)
1688 return;
1689 auto VlaSize = getVLASize(VlaType);
1690 auto SizeVal = VlaSize.NumElts;
1691 CharUnits EltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1692 switch (trivialAutoVarInit) {
1693 case LangOptions::TrivialAutoVarInitKind::Uninitialized:
1694 llvm_unreachable("Uninitialized handled by caller");
1696 case LangOptions::TrivialAutoVarInitKind::Zero:
1697 if (!EltSize.isOne())
1698 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1699 Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
1700 isVolatile);
1701 break;
1703 case LangOptions::TrivialAutoVarInitKind::Pattern: {
1704 llvm::Type *ElTy = Loc.getElementType();
1705 llvm::Constant *Constant = constWithPadding(
1706 CGM, IsPattern::Yes, initializationPatternFor(CGM, ElTy));
1707 CharUnits ConstantAlign = getContext().getTypeAlignInChars(VlaSize.Type);
1708 llvm::BasicBlock *SetupBB = createBasicBlock("vla-setup.loop");
1709 llvm::BasicBlock *LoopBB = createBasicBlock("vla-init.loop");
1710 llvm::BasicBlock *ContBB = createBasicBlock("vla-init.cont");
1711 llvm::Value *IsZeroSizedVLA = Builder.CreateICmpEQ(
1712 SizeVal, llvm::ConstantInt::get(SizeVal->getType(), 0),
1713 "vla-is-zero-sized");
1714 Builder.CreateCondBr(IsZeroSizedVLA, ContBB, SetupBB);
1715 EmitBlock(SetupBB);
1716 if (!EltSize.isOne())
1717 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(EltSize));
1718 llvm::Value *BaseSizeInChars =
1719 llvm::ConstantInt::get(IntPtrTy, EltSize.getQuantity());
1720 Address Begin = Builder.CreateElementBitCast(Loc, Int8Ty, "vla.begin");
1721 llvm::Value *End =
1722 Builder.CreateInBoundsGEP(Begin.getPointer(), SizeVal, "vla.end");
1723 llvm::BasicBlock *OriginBB = Builder.GetInsertBlock();
1724 EmitBlock(LoopBB);
1725 llvm::PHINode *Cur = Builder.CreatePHI(Begin.getType(), 2, "vla.cur");
1726 Cur->addIncoming(Begin.getPointer(), OriginBB);
1727 CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
1728 Builder.CreateMemCpy(Address(Cur, CurAlign),
1729 createUnnamedGlobalForMemcpyFrom(
1730 CGM, D, Builder, Constant, ConstantAlign),
1731 BaseSizeInChars, isVolatile);
1732 llvm::Value *Next =
1733 Builder.CreateInBoundsGEP(Int8Ty, Cur, BaseSizeInChars, "vla.next");
1734 llvm::Value *Done = Builder.CreateICmpEQ(Next, End, "vla-init.isdone");
1735 Builder.CreateCondBr(Done, ContBB, LoopBB);
1736 Cur->addIncoming(Next, LoopBB);
1737 EmitBlock(ContBB);
1738 } break;
1739 }
1740 }
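// For illustration (not from the original source): with
// -ftrivial-auto-var-init=pattern, a declaration such as
//   void f(unsigned n) { int vla[n]; ... }
// takes the Pattern case above: the VLA is filled by the vla-init.loop, one
// element-sized memcpy of the padded pattern constant per element, and the
// vla-is-zero-sized check skips the loop entirely when n is zero.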
1742 void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
1743 assert(emission.Variable && "emission was not valid!");
1745 // If this was emitted as a global constant, we're done.
1746 if (emission.wasEmittedAsGlobal()) return;
1748 const VarDecl &D = *emission.Variable;
1749 auto DL = ApplyDebugLocation::CreateDefaultArtificial(*this, D.getLocation());
1750 QualType type = D.getType();
1752 // If this local has an initializer, emit it now.
1753 const Expr *Init = D.getInit();
1755 // If we are at an unreachable point, we don't need to emit the initializer
1756 // unless it contains a label.
1757 if (!HaveInsertPoint()) {
1758 if (!Init || !ContainsLabel(Init)) return;
1759 EnsureInsertPoint();
1760 }
1762 // Initialize the structure of a __block variable.
1763 if (emission.IsEscapingByRef)
1764 emitByrefStructureInit(emission);
1766 // Initialize the variable here if it doesn't have an initializer and it is a
1767 // C struct that is non-trivial to initialize or an array containing such a
1768 // struct.
1769 if (!Init &&
1770 type.isNonTrivialToPrimitiveDefaultInitialize() ==
1771 QualType::PDIK_Struct) {
1772 LValue Dst = MakeAddrLValue(emission.getAllocatedAddress(), type);
1773 if (emission.IsEscapingByRef)
1774 drillIntoBlockVariable(*this, Dst, &D);
1775 defaultInitNonTrivialCStructVar(Dst);
1776 return;
1777 }
1779 // Check whether this is a byref variable that's potentially
1780 // captured and moved by its own initializer. If so, we'll need to
1781 // emit the initializer first, then copy into the variable.
1782 bool capturedByInit =
1783 Init && emission.IsEscapingByRef && isCapturedBy(D, Init);
1785 bool locIsByrefHeader = !capturedByInit;
1786 Address Loc =
1787 locIsByrefHeader ? emission.getObjectAddress(*this) : emission.Addr;
1789 // Note: constexpr already initializes everything correctly.
1790 LangOptions::TrivialAutoVarInitKind trivialAutoVarInit =
1791 (D.isConstexpr()
1792 ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1793 : (D.getAttr<UninitializedAttr>()
1794 ? LangOptions::TrivialAutoVarInitKind::Uninitialized
1795 : getContext().getLangOpts().getTrivialAutoVarInit()));
1797 auto initializeWhatIsTechnicallyUninitialized = [&](Address Loc) {
1798 if (trivialAutoVarInit ==
1799 LangOptions::TrivialAutoVarInitKind::Uninitialized)
1800 return;
1802 // Only initialize a __block's storage: we always initialize the header.
1803 if (emission.IsEscapingByRef && !locIsByrefHeader)
1804 Loc = emitBlockByrefAddress(Loc, &D, /*follow=*/false);
1806 return emitZeroOrPatternForAutoVarInit(type, D, Loc);
1807 };
1809 if (isTrivialInitializer(Init))
1810 return initializeWhatIsTechnicallyUninitialized(Loc);
1812 llvm::Constant *constant = nullptr;
1813 if (emission.IsConstantAggregate ||
1814 D.mightBeUsableInConstantExpressions(getContext())) {
1815 assert(!capturedByInit && "constant init contains a capturing block?");
1816 constant = ConstantEmitter(*this).tryEmitAbstractForInitializer(D);
1817 if (constant && !constant->isZeroValue() &&
1818 (trivialAutoVarInit !=
1819 LangOptions::TrivialAutoVarInitKind::Uninitialized)) {
1820 IsPattern isPattern =
1821 (trivialAutoVarInit == LangOptions::TrivialAutoVarInitKind::Pattern)
1822 ? IsPattern::Yes
1823 : IsPattern::No;
1824 // C guarantees that brace-init with fewer initializers than members in
1825 // the aggregate will initialize the rest of the aggregate as-if it were
1826 // static initialization. In turn static initialization guarantees that
1827 // padding is initialized to zero bits. We could instead pattern-init if D
1828 // has any ImplicitValueInitExpr, but that seems to be unintuitive
1829 // behavior.
1830 constant = constWithPadding(CGM, IsPattern::No,
1831 replaceUndef(CGM, isPattern, constant));
1832 }
1833 }
1835 if (!constant) {
1836 initializeWhatIsTechnicallyUninitialized(Loc);
1837 LValue lv = MakeAddrLValue(Loc, type);
1839 return EmitExprAsInit(Init, &D, lv, capturedByInit);
1840 }
1842 if (!emission.IsConstantAggregate) {
1843 // For simple scalar/complex initialization, store the value directly.
1844 LValue lv = MakeAddrLValue(Loc, type);
1846 return EmitStoreThroughLValue(RValue::get(constant), lv, true);
1847 }
1849 llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
1850 emitStoresForConstant(
1851 CGM, D, (Loc.getType() == BP) ? Loc : Builder.CreateBitCast(Loc, BP),
1852 type.isVolatileQualified(), Builder, constant);
1853 }
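// The constant path above ends here: simple scalar/complex constants are stored
// through an LValue, while constant aggregates are handed to emitStoresForConstant
// through an i8* view of the allocation.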
1855 /// Emit an expression as an initializer for an object (variable, field, etc.)
1856 /// at the given location. The expression is not necessarily the normal
1857 /// initializer for the object, and the address is not necessarily
1858 /// its normal location.
1860 /// \param init the initializing expression
1861 /// \param D the object to act as if we're initializing
1862 /// \param loc the address to initialize; its type is a pointer
1863 /// to the LLVM mapping of the object's type
1864 /// \param alignment the alignment of the address
1865 /// \param capturedByInit true if \p D is a __block variable
1866 /// whose address is potentially changed by the initializer
1867 void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
1868 LValue lvalue, bool capturedByInit) {
1869 QualType type = D->getType();
1871 if (type->isReferenceType()) {
1872 RValue rvalue = EmitReferenceBindingToExpr(init);
1873 if (capturedByInit)
1874 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
1875 EmitStoreThroughLValue(rvalue, lvalue, true);
1876 return;
1877 }
1878 switch (getEvaluationKind(type)) {
1879 case TEK_Scalar:
1880 EmitScalarInit(init, D, lvalue, capturedByInit);
1881 return;
1882 case TEK_Complex: {
1883 ComplexPairTy complex = EmitComplexExpr(init);
1884 if (capturedByInit)
1885 drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
1886 EmitStoreOfComplex(complex, lvalue, /*init*/ true);
1887 return;
1888 }
1889 case TEK_Aggregate:
1890 if (type->isAtomicType()) {
1891 EmitAtomicInit(const_cast<Expr*>(init), lvalue);
1892 } else {
1893 AggValueSlot::Overlap_t Overlap = AggValueSlot::MayOverlap;
1894 if (isa<VarDecl>(D))
1895 Overlap = AggValueSlot::DoesNotOverlap;
1896 else if (auto *FD = dyn_cast<FieldDecl>(D))
1897 Overlap = getOverlapForFieldInit(FD);
1898 // TODO: how can we delay here if D is captured by its initializer?
1899 EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
1900 AggValueSlot::IsDestructed,
1901 AggValueSlot::DoesNotNeedGCBarriers,
1902 AggValueSlot::IsNotAliased,
1903 Overlap));
1904 }
1905 return;
1906 }
1907 llvm_unreachable("bad evaluation kind");
1908 }
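// EmitExprAsInit dispatches on the expression's evaluation kind: scalars and
// complex values are stored directly (re-drilling into a __block variable if the
// initializer may have captured it), while aggregates are emitted in place into
// the slot, with atomic types routed through EmitAtomicInit.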
1910 /// Enter a destroy cleanup for the given local variable.
1911 void CodeGenFunction::emitAutoVarTypeCleanup(
1912 const CodeGenFunction::AutoVarEmission &emission,
1913 QualType::DestructionKind dtorKind) {
1914 assert(dtorKind != QualType::DK_none);
1916 // Note that for __block variables, we want to destroy the
1917 // original stack object, not the possibly forwarded object.
1918 Address addr = emission.getObjectAddress(*this);
1920 const VarDecl *var = emission.Variable;
1921 QualType type = var->getType();
1923 CleanupKind cleanupKind = NormalAndEHCleanup;
1924 CodeGenFunction::Destroyer *destroyer = nullptr;
1926 switch (dtorKind) {
1927 case QualType::DK_none:
1928 llvm_unreachable("no cleanup for trivially-destructible variable");
1930 case QualType::DK_cxx_destructor:
1931 // If there's an NRVO flag on the emission, we need a different
1932 // cleanup.
1933 if (emission.NRVOFlag) {
1934 assert(!type->isArrayType());
1935 CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
1936 EHStack.pushCleanup<DestroyNRVOVariableCXX>(cleanupKind, addr, type, dtor,
1937 emission.NRVOFlag);
1938 return;
1939 }
1940 break;
1942 case QualType::DK_objc_strong_lifetime:
1943 // Suppress cleanups for pseudo-strong variables.
1944 if (var->isARCPseudoStrong()) return;
1946 // Otherwise, consider whether to use an EH cleanup or not.
1947 cleanupKind = getARCCleanupKind();
1949 // Use the imprecise destroyer by default.
1950 if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
1951 destroyer = CodeGenFunction::destroyARCStrongImprecise;
1952 break;
1954 case QualType::DK_objc_weak_lifetime:
1955 break;
1957 case QualType::DK_nontrivial_c_struct:
1958 destroyer = CodeGenFunction::destroyNonTrivialCStruct;
1959 if (emission.NRVOFlag) {
1960 assert(!type->isArrayType());
1961 EHStack.pushCleanup<DestroyNRVOVariableC>(cleanupKind, addr,
1962 emission.NRVOFlag, type);
1963 return;
1964 }
1965 break;
1966 }
1968 // If we haven't chosen a more specific destroyer, use the default.
1969 if (!destroyer) destroyer = getDestroyer(dtorKind);
1971 // Use an EH cleanup in array destructors iff the destructor itself
1972 // is being pushed as an EH cleanup.
1973 bool useEHCleanup = (cleanupKind & EHCleanup);
1974 EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
1975 useEHCleanup);
1976 }
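// emitAutoVarTypeCleanup picks a destroyer per destruction kind; NRVO'd
// variables instead get the flag-guarded cleanups above so the object is not
// destroyed when it has already been returned via the named return value
// optimization.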
1978 void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
1979 assert(emission.Variable && "emission was not valid!");
1981 // If this was emitted as a global constant, we're done.
1982 if (emission.wasEmittedAsGlobal()) return;
1984 // If we don't have an insertion point, we're done. Sema prevents
1985 // us from jumping into any of these scopes anyway.
1986 if (!HaveInsertPoint()) return;
1988 const VarDecl &D = *emission.Variable;
1990 // Check the type for a cleanup.
1991 if (QualType::DestructionKind dtorKind = D.needsDestruction(getContext()))
1992 emitAutoVarTypeCleanup(emission, dtorKind);
1994 // In GC mode, honor objc_precise_lifetime.
1995 if (getLangOpts().getGC() != LangOptions::NonGC &&
1996 D.hasAttr<ObjCPreciseLifetimeAttr>()) {
1997 EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
1998 }
2000 // Handle the cleanup attribute.
2001 if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
2002 const FunctionDecl *FD = CA->getFunctionDecl();
2004 llvm::Constant *F = CGM.GetAddrOfFunction(FD);
2005 assert(F && "Could not find function!");
2007 const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
2008 EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
2009 }
2011 // If this is a block variable, call _Block_object_destroy
2012 // (on the unforwarded address). Don't enter this cleanup if we're in pure-GC
2013 // mode.
2014 if (emission.IsEscapingByRef &&
2015 CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
2016 BlockFieldFlags Flags = BLOCK_FIELD_IS_BYREF;
2017 if (emission.Variable->getType().isObjCGCWeak())
2018 Flags |= BLOCK_FIELD_IS_WEAK;
2019 enterByrefCleanup(NormalAndEHCleanup, emission.Addr, Flags,
2020 /*LoadBlockVarAddr*/ false,
2021 cxxDestructorCanThrow(emission.Variable->getType()));
2022 }
2023 }
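// EmitAutoVarCleanups thus stacks up to four cleanups per variable: the type's
// own destruction, an objc_precise_lifetime extension under GC, a call requested
// by __attribute__((cleanup)), and the byref cleanup for escaping __block
// variables.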
2025 CodeGenFunction::Destroyer *
2026 CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
2027 switch (kind) {
2028 case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
2029 case QualType::DK_cxx_destructor:
2030 return destroyCXXObject;
2031 case QualType::DK_objc_strong_lifetime:
2032 return destroyARCStrongPrecise;
2033 case QualType::DK_objc_weak_lifetime:
2034 return destroyARCWeak;
2035 case QualType::DK_nontrivial_c_struct:
2036 return destroyNonTrivialCStruct;
2037 }
2038 llvm_unreachable("Unknown DestructionKind");
2039 }
2041 /// pushEHDestroy - Push the standard destructor for the given type as
2042 /// an EH-only cleanup.
2043 void CodeGenFunction::pushEHDestroy(QualType::DestructionKind dtorKind,
2044 Address addr, QualType type) {
2045 assert(dtorKind && "cannot push destructor for trivial type");
2046 assert(needsEHCleanup(dtorKind));
2048 pushDestroy(EHCleanup, addr, type, getDestroyer(dtorKind), true);
2049 }
2051 /// pushDestroy - Push the standard destructor for the given type as
2052 /// at least a normal cleanup.
2053 void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
2054 Address addr, QualType type) {
2055 assert(dtorKind && "cannot push destructor for trivial type");
2057 CleanupKind cleanupKind = getCleanupKind(dtorKind);
2058 pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
2059 cleanupKind & EHCleanup);
2060 }
2062 void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, Address addr,
2063 QualType type, Destroyer *destroyer,
2064 bool useEHCleanupForArray) {
2065 pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
2066 destroyer, useEHCleanupForArray);
2067 }
2069 void CodeGenFunction::pushStackRestore(CleanupKind Kind, Address SPMem) {
2070 EHStack.pushCleanup<CallStackRestore>(Kind, SPMem);
2071 }
2073 void CodeGenFunction::pushLifetimeExtendedDestroy(
2074 CleanupKind cleanupKind, Address addr, QualType type,
2075 Destroyer *destroyer, bool useEHCleanupForArray) {
2076 // Push an EH-only cleanup for the object now.
2077 // FIXME: When popping normal cleanups, we need to keep this EH cleanup
2078 // around in case a temporary's destructor throws an exception.
2079 if (cleanupKind & EHCleanup)
2080 EHStack.pushCleanup<DestroyObject>(
2081 static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
2082 destroyer, useEHCleanupForArray);
2084 // Remember that we need to push a full cleanup for the object at the
2085 // end of the full-expression.
2086 pushCleanupAfterFullExpr<DestroyObject>(
2087 cleanupKind, addr, type, destroyer, useEHCleanupForArray);
2088 }
2090 /// emitDestroy - Immediately perform the destruction of the given
2091 /// object.
2092 ///
2093 /// \param addr - the address of the object; a type*
2094 /// \param type - the type of the object; if an array type, all
2095 /// objects are destroyed in reverse order
2096 /// \param destroyer - the function to call to destroy individual
2097 /// elements
2098 /// \param useEHCleanupForArray - whether an EH cleanup should be
2099 /// used when destroying array elements, in case one of the
2100 /// destructions throws an exception
2101 void CodeGenFunction::emitDestroy(Address addr, QualType type,
2102 Destroyer *destroyer,
2103 bool useEHCleanupForArray) {
2104 const ArrayType *arrayType = getContext().getAsArrayType(type);
2105 if (!arrayType)
2106 return destroyer(*this, addr, type);
2108 llvm::Value *length = emitArrayLength(arrayType, type, addr);
2110 CharUnits elementAlign =
2111 addr.getAlignment()
2112 .alignmentOfArrayElement(getContext().getTypeSizeInChars(type));
2114 // Normally we have to check whether the array is zero-length.
2115 bool checkZeroLength = true;
2117 // But if the array length is constant, we can suppress that.
2118 if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
2119 // ...and if it's constant zero, we can just skip the entire thing.
2120 if (constLength->isZero()) return;
2121 checkZeroLength = false;
2122 }
2124 llvm::Value *begin = addr.getPointer();
2125 llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
2126 emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2127 checkZeroLength, useEHCleanupForArray);
2128 }
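// When the array length folds to a constant the zero-length check is dropped,
// and a constant zero length skips emitting the destruction loop altogether.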
2130 /// emitArrayDestroy - Destroys all the elements of the given array,
2131 /// beginning from last to first. The array cannot be zero-length.
2133 /// \param begin - a type* denoting the first element of the array
2134 /// \param end - a type* denoting one past the end of the array
2135 /// \param elementType - the element type of the array
2136 /// \param destroyer - the function to call to destroy elements
2137 /// \param useEHCleanup - whether to push an EH cleanup to destroy
2138 /// the remaining elements in case the destruction of a single
2139 /// element throws
2140 void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
2141 llvm::Value *end,
2142 QualType elementType,
2143 CharUnits elementAlign,
2144 Destroyer *destroyer,
2145 bool checkZeroLength,
2146 bool useEHCleanup) {
2147 assert(!elementType->isArrayType());
2149 // The basic structure here is a do-while loop, because we don't
2150 // need to check for the zero-element case.
2151 llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
2152 llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
2154 if (checkZeroLength) {
2155 llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
2156 "arraydestroy.isempty");
2157 Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
2158 }
2160 // Enter the loop body, making that address the current address.
2161 llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
2162 EmitBlock(bodyBB);
2163 llvm::PHINode *elementPast =
2164 Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
2165 elementPast->addIncoming(end, entryBB);
2167 // Shift the address back by one element.
2168 llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
2169 llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
2170 "arraydestroy.element");
2172 if (useEHCleanup)
2173 pushRegularPartialArrayCleanup(begin, element, elementType, elementAlign,
2174 destroyer);
2176 // Perform the actual destruction there.
2177 destroyer(*this, Address(element, elementAlign), elementType);
2179 if (useEHCleanup)
2180 popCleanupBlock();
2182 // Check whether we've reached the end.
2183 llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
2184 Builder.CreateCondBr(done, doneBB, bodyBB);
2185 elementPast->addIncoming(element, Builder.GetInsertBlock());
2187 // Done.
2188 EmitBlock(doneBB);
2189 }
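// The loop above walks the array backwards: the PHI starts at 'end', each
// iteration destroys the element just below the current pointer, and the loop
// exits once the element at 'begin' has been destroyed.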
2191 /// Perform partial array destruction as if in an EH cleanup. Unlike
2192 /// emitArrayDestroy, the element type here may still be an array type.
2193 static void emitPartialArrayDestroy(CodeGenFunction &CGF,
2194 llvm::Value *begin, llvm::Value *end,
2195 QualType type, CharUnits elementAlign,
2196 CodeGenFunction::Destroyer *destroyer) {
2197 // If the element type is itself an array, drill down.
2198 unsigned arrayDepth = 0;
2199 while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
2200 // VLAs don't require a GEP index to walk into.
2201 if (!isa<VariableArrayType>(arrayType))
2202 arrayDepth++;
2203 type = arrayType->getElementType();
2204 }
2206 if (arrayDepth) {
2207 llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
2209 SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
2210 begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
2211 end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
2212 }
2214 // Destroy the array. We don't ever need an EH cleanup because we
2215 // assume that we're in an EH cleanup ourselves, so a throwing
2216 // destructor causes an immediate terminate.
2217 CGF.emitArrayDestroy(begin, end, type, elementAlign, destroyer,
2218 /*checkZeroLength*/ true, /*useEHCleanup*/ false);
2219 }
2221 namespace {
2222 /// RegularPartialArrayDestroy - a cleanup which performs a partial
2223 /// array destroy where the end pointer is regularly determined and
2224 /// does not need to be loaded from a local.
2225 class RegularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2226 llvm::Value *ArrayBegin;
2227 llvm::Value *ArrayEnd;
2228 QualType ElementType;
2229 CodeGenFunction::Destroyer *Destroyer;
2230 CharUnits ElementAlign;
2231 public:
2232 RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
2233 QualType elementType, CharUnits elementAlign,
2234 CodeGenFunction::Destroyer *destroyer)
2235 : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
2236 ElementType(elementType), Destroyer(destroyer),
2237 ElementAlign(elementAlign) {}
2239 void Emit(CodeGenFunction &CGF, Flags flags) override {
2240 emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
2241 ElementType, ElementAlign, Destroyer);
2242 }
2243 };
2245 /// IrregularPartialArrayDestroy - a cleanup which performs a
2246 /// partial array destroy where the end pointer is irregularly
2247 /// determined and must be loaded from a local.
2248 class IrregularPartialArrayDestroy final : public EHScopeStack::Cleanup {
2249 llvm::Value *ArrayBegin;
2250 Address ArrayEndPointer;
2251 QualType ElementType;
2252 CodeGenFunction::Destroyer *Destroyer;
2253 CharUnits ElementAlign;
2254 public:
2255 IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
2256 Address arrayEndPointer,
2257 QualType elementType,
2258 CharUnits elementAlign,
2259 CodeGenFunction::Destroyer *destroyer)
2260 : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
2261 ElementType(elementType), Destroyer(destroyer),
2262 ElementAlign(elementAlign) {}
2264 void Emit(CodeGenFunction &CGF, Flags flags) override {
2265 llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
2266 emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
2267 ElementType, ElementAlign, Destroyer);
2268 }
2269 };
2270 } // end anonymous namespace
2272 /// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
2273 /// already-constructed elements of the given array. The cleanup
2274 /// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2276 /// \param elementType - the immediate element type of the array;
2277 /// possibly still an array type
2278 void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
2279 Address arrayEndPointer,
2280 QualType elementType,
2281 CharUnits elementAlign,
2282 Destroyer *destroyer) {
2283 pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
2284 arrayBegin, arrayEndPointer,
2285 elementType, elementAlign,
2286 destroyer);
2287 }
2289 /// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
2290 /// already-constructed elements of the given array. The cleanup
2291 /// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
2293 /// \param elementType - the immediate element type of the array;
2294 /// possibly still an array type
2295 void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
2296 llvm::Value *arrayEnd,
2297 QualType elementType,
2298 CharUnits elementAlign,
2299 Destroyer *destroyer) {
2300 pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
2301 arrayBegin, arrayEnd,
2302 elementType, elementAlign,
2303 destroyer);
2304 }
2306 /// Lazily declare the @llvm.lifetime.start intrinsic.
2307 llvm::Function *CodeGenModule::getLLVMLifetimeStartFn() {
2308 if (LifetimeStartFn)
2309 return LifetimeStartFn;
2310 LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
2311 llvm::Intrinsic::lifetime_start, AllocaInt8PtrTy);
2312 return LifetimeStartFn;
2313 }
2315 /// Lazily declare the @llvm.lifetime.end intrinsic.
2316 llvm::Function *CodeGenModule::getLLVMLifetimeEndFn() {
2317 if (LifetimeEndFn)
2318 return LifetimeEndFn;
2319 LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
2320 llvm::Intrinsic::lifetime_end, AllocaInt8PtrTy);
2321 return LifetimeEndFn;
2322 }
2324 namespace {
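// These lazily-declared intrinsics back the lifetime markers used earlier in
// this file: the CallLifetimeEnd cleanup pushed for emissions with lifetime
// markers is what ultimately emits the call to @llvm.lifetime.end.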
2325 /// A cleanup to perform a release of an object at the end of a
2326 /// function. This is used to balance out the incoming +1 of a
2327 /// ns_consumed argument when we can't reasonably do that just by
2328 /// not doing the initial retain for a __block argument.
2329 struct ConsumeARCParameter final : EHScopeStack::Cleanup {
2330 ConsumeARCParameter(llvm::Value *param,
2331 ARCPreciseLifetime_t precise)
2332 : Param(param), Precise(precise) {}
2334 llvm::Value *Param;
2335 ARCPreciseLifetime_t Precise;
2337 void Emit(CodeGenFunction &CGF, Flags flags) override {
2338 CGF.EmitARCRelease(Param, Precise);
2339 }
2340 };
2341 } // end anonymous namespace
2343 /// Emit an alloca (or GlobalValue depending on target)
2344 /// for the specified parameter and set up LocalDeclMap.
2345 void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
2346 unsigned ArgNo) {
2347 // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
2348 assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
2349 "Invalid argument to EmitParmDecl");
2351 Arg.getAnyValue()->setName(D.getName());
2353 QualType Ty = D.getType();
2355 // Use better IR generation for certain implicit parameters.
2356 if (auto IPD = dyn_cast<ImplicitParamDecl>(&D)) {
2357 // The only implicit argument a block has is its literal.
2358 // This may be passed as an inalloca'ed value on Windows x86.
2359 if (BlockInfo) {
2360 llvm::Value *V = Arg.isIndirect()
2361 ? Builder.CreateLoad(Arg.getIndirectAddress())
2362 : Arg.getDirectValue();
2363 setBlockContextParameter(IPD, ArgNo, V);
2364 return;
2365 }
2366 }
2368 Address DeclPtr = Address::invalid();
2369 bool DoStore = false;
2370 bool IsScalar = hasScalarEvaluationKind(Ty);
2371 // If we already have a pointer to the argument, reuse the input pointer.
2372 if (Arg.isIndirect()) {
2373 DeclPtr = Arg.getIndirectAddress();
2374 // If we have a prettier pointer type at this point, bitcast to that.
2375 unsigned AS = DeclPtr.getType()->getAddressSpace();
2376 llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
2377 if (DeclPtr.getType() != IRTy)
2378 DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
2379 // Indirect argument is in alloca address space, which may be different
2380 // from the default address space.
2381 auto AllocaAS = CGM.getASTAllocaAddressSpace();
2382 auto *V = DeclPtr.getPointer();
2383 auto SrcLangAS = getLangOpts().OpenCL ? LangAS::opencl_private : AllocaAS;
2384 auto DestLangAS =
2385 getLangOpts().OpenCL ? LangAS::opencl_private : LangAS::Default;
2386 if (SrcLangAS != DestLangAS) {
2387 assert(getContext().getTargetAddressSpace(SrcLangAS) ==
2388 CGM.getDataLayout().getAllocaAddrSpace());
2389 auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
2390 auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
2391 DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
2392 *this, V, SrcLangAS, DestLangAS, T, true),
2393 DeclPtr.getAlignment());
2394 }
2396 // Push a destructor cleanup for this parameter if the ABI requires it.
2397 // Don't push a cleanup in a thunk for a method that will also emit a
2398 // cleanup.
2399 if (hasAggregateEvaluationKind(Ty) && !CurFuncIsThunk &&
2400 Ty->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
2401 if (QualType::DestructionKind DtorKind =
2402 D.needsDestruction(getContext())) {
2403 assert((DtorKind == QualType::DK_cxx_destructor ||
2404 DtorKind == QualType::DK_nontrivial_c_struct) &&
2405 "unexpected destructor type");
2406 pushDestroy(DtorKind, DeclPtr, Ty);
2407 CalleeDestructedParamCleanups[cast<ParmVarDecl>(&D)] =
2408 EHStack.stable_begin();
2409 }
2410 }
2411 } else {
2412 // Check if the parameter address is controlled by OpenMP runtime.
2413 Address OpenMPLocalAddr =
2414 getLangOpts().OpenMP
2415 ? CGM.getOpenMPRuntime().getAddressOfLocalVariable(*this, &D)
2416 : Address::invalid();
2417 if (getLangOpts().OpenMP && OpenMPLocalAddr.isValid()) {
2418 DeclPtr = OpenMPLocalAddr;
2419 } else {
2420 // Otherwise, create a temporary to hold the value.
2421 DeclPtr = CreateMemTemp(Ty, getContext().getDeclAlign(&D),
2422 D.getName() + ".addr");
2423 }
2424 DoStore = true;
2425 }
2427 llvm::Value *ArgVal = (DoStore ? Arg.getDirectValue() : nullptr);
2429 LValue lv = MakeAddrLValue(DeclPtr, Ty);
2430 if (IsScalar) {
2431 Qualifiers qs = Ty.getQualifiers();
2432 if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
2433 // We honor __attribute__((ns_consumed)) for types with lifetime.
2434 // For __strong, it's handled by just skipping the initial retain;
2435 // otherwise we have to balance out the initial +1 with an extra
2436 // cleanup to do the release at the end of the function.
2437 bool isConsumed = D.hasAttr<NSConsumedAttr>();
2439 // If a parameter is pseudo-strong then we can omit the implicit retain.
2440 if (D.isARCPseudoStrong()) {
2441 assert(lt == Qualifiers::OCL_Strong &&
2442 "pseudo-strong variable isn't strong?");
2443 assert(qs.hasConst() && "pseudo-strong variable should be const!");
2444 lt = Qualifiers::OCL_ExplicitNone;
2445 }
2447 // Load objects passed indirectly.
2448 if (Arg.isIndirect() && !ArgVal)
2449 ArgVal = Builder.CreateLoad(DeclPtr);
2451 if (lt == Qualifiers::OCL_Strong) {
2452 if (!isConsumed) {
2453 if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
2454 // use objc_storeStrong(&dest, value) for retaining the
2455 // object. But first, store a null into 'dest' because
2456 // objc_storeStrong attempts to release its old value.
2457 llvm::Value *Null = CGM.EmitNullConstant(D.getType());
2458 EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
2459 EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
2460 DoStore = false;
2461 }
2462 else
2463 // Don't use objc_retainBlock for block pointers, because we
2464 // don't want to Block_copy something just because we got it
2465 // as a parameter.
2466 ArgVal = EmitARCRetainNonBlock(ArgVal);
2467 }
2468 } else {
2469 // Push the cleanup for a consumed parameter.
2470 if (isConsumed) {
2471 ARCPreciseLifetime_t precise = (D.hasAttr<ObjCPreciseLifetimeAttr>()
2472 ? ARCPreciseLifetime : ARCImpreciseLifetime);
2473 EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), ArgVal,
2474 precise);
2475 }
2477 if (lt == Qualifiers::OCL_Weak) {
2478 EmitARCInitWeak(DeclPtr, ArgVal);
2479 DoStore = false; // The weak init is a store, no need to do two.
2480 }
2481 }
2483 // Enter the cleanup scope.
2484 EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
2485 }
2486 }
2488 // Store the initial value into the alloca.
2489 if (DoStore)
2490 EmitStoreOfScalar(ArgVal, lv, /* isInitialization */ true);
2492 setAddrOfLocalVar(&D, DeclPtr);
2494 // Emit debug info for param declarations in non-thunk functions.
2495 if (CGDebugInfo *DI = getDebugInfo()) {
2496 if (CGM.getCodeGenOpts().getDebugInfo() >=
2497 codegenoptions::LimitedDebugInfo &&
2498 !CurFuncIsThunk) {
2499 DI->EmitDeclareOfArgVariable(&D, DeclPtr.getPointer(), ArgNo, Builder);
2500 }
2501 }
2503 if (D.hasAttr<AnnotateAttr>())
2504 EmitVarAnnotations(&D, DeclPtr.getPointer());
2506 // We can only check return value nullability if all arguments to the
2507 // function satisfy their nullability preconditions. This makes it necessary
2508 // to emit null checks for args in the function body itself.
2509 if (requiresReturnValueNullabilityCheck()) {
2510 auto Nullability = Ty->getNullability(getContext());
2511 if (Nullability && *Nullability == NullabilityKind::NonNull) {
2512 SanitizerScope SanScope(this);
2513 RetValNullabilityPrecondition =
2514 Builder.CreateAnd(RetValNullabilityPrecondition,
2515 Builder.CreateIsNotNull(Arg.getAnyValue()));
2516 }
2517 }
2518 }
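// Each nonnull-annotated argument is folded into RetValNullabilityPrecondition
// here; the return-value nullability check is only performed when every such
// argument precondition held, as the comment above explains.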
2520 void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
2521 CodeGenFunction *CGF) {
2522 if (!LangOpts.OpenMP || (!LangOpts.EmitAllDecls && !D->isUsed()))
2523 return;
2524 getOpenMPRuntime().emitUserDefinedReduction(CGF, D);
2525 }
2527 void CodeGenModule::EmitOMPDeclareMapper(const OMPDeclareMapperDecl *D,
2528 CodeGenFunction *CGF) {
2529 if (!LangOpts.OpenMP || LangOpts.OpenMPSimd ||
2530 (!LangOpts.EmitAllDecls && !D->isUsed()))
2531 return;
2532 getOpenMPRuntime().emitUserDefinedMapper(D, CGF);
2533 }
2535 void CodeGenModule::EmitOMPRequiresDecl(const OMPRequiresDecl *D) {
2536 getOpenMPRuntime().checkArchForUnifiedAddressing(D);