1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // These classes wrap the information about a call or function
10 // definition used to handle ABI compliancy.
12 //===----------------------------------------------------------------------===//
14 #include "CGCall.h"
15 #include "ABIInfo.h"
16 #include "CGBlocks.h"
17 #include "CGCXXABI.h"
18 #include "CGCleanup.h"
19 #include "CodeGenFunction.h"
20 #include "CodeGenModule.h"
21 #include "TargetInfo.h"
22 #include "clang/AST/Attr.h"
23 #include "clang/AST/Decl.h"
24 #include "clang/AST/DeclCXX.h"
25 #include "clang/AST/DeclObjC.h"
26 #include "clang/Basic/CodeGenOptions.h"
27 #include "clang/Basic/TargetBuiltins.h"
28 #include "clang/Basic/TargetInfo.h"
29 #include "clang/CodeGen/CGFunctionInfo.h"
30 #include "clang/CodeGen/SwiftCallingConv.h"
31 #include "llvm/ADT/StringExtras.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/Attributes.h"
34 #include "llvm/IR/CallingConv.h"
35 #include "llvm/IR/DataLayout.h"
36 #include "llvm/IR/InlineAsm.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/Intrinsics.h"
39 #include "llvm/Transforms/Utils/Local.h"
40 using namespace clang;
41 using namespace CodeGen;
45 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
46 switch (CC) {
47 default: return llvm::CallingConv::C;
48 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
49 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
50 case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
51 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
52 case CC_Win64: return llvm::CallingConv::Win64;
53 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
54 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
55 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
56 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
57 // TODO: Add support for __pascal to LLVM.
58 case CC_X86Pascal: return llvm::CallingConv::C;
59 // TODO: Add support for __vectorcall to LLVM.
60 case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
61 case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
62 case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
63 case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
64 case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
65 case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
66 case CC_Swift: return llvm::CallingConv::Swift;
67 }
68 }
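// Illustrative mapping (hypothetical declaration; assumes an x86 target that
// honors the attribute): a function declared as
//   void f(int) __attribute__((stdcall));
// carries CC_X86StdCall and is emitted with llvm::CallingConv::X86_StdCall,
// while an unattributed C function keeps the default llvm::CallingConv::C.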
70 /// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
71 /// qualification. Either or both of RD and MD may be null. A null RD indicates
72 /// that there is no meaningful 'this' type, and a null MD can occur when
73 /// calling a method pointer.
74 CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
75 const CXXMethodDecl *MD) {
76 QualType RecTy;
77 if (RD)
78 RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
79 else
80 RecTy = Context.VoidTy;
82 if (MD)
83 RecTy = Context.getAddrSpaceQualType(RecTy, MD->getMethodQualifiers().getAddressSpace());
84 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
85 }
87 /// Returns the canonical formal type of the given C++ method.
88 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
89 return MD->getType()->getCanonicalTypeUnqualified()
90 .getAs<FunctionProtoType>();
91 }
93 /// Returns the "extra-canonicalized" return type, which discards
94 /// qualifiers on the return type. Codegen doesn't care about them,
95 /// and it makes ABI code a little easier to be able to assume that
96 /// all parameter and return types are top-level unqualified.
97 static CanQualType GetReturnType(QualType RetTy) {
98 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
99 }
101 /// Arrange the argument and result information for a value of the given
102 /// unprototyped freestanding function type.
103 const CGFunctionInfo &
104 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
105 // When translating an unprototyped function type, always use a
106 // variadic type.
107 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
108 /*instanceMethod=*/false,
109 /*chainCall=*/false, None,
110 FTNP->getExtInfo(), {}, RequiredArgs(0));
111 }
113 static void addExtParameterInfosForCall(
114 llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
115 const FunctionProtoType *proto,
116 unsigned prefixArgs,
117 unsigned totalArgs) {
118 assert(proto->hasExtParameterInfos());
119 assert(paramInfos.size() <= prefixArgs);
120 assert(proto->getNumParams() + prefixArgs <= totalArgs);
122 paramInfos.reserve(totalArgs);
124 // Add default infos for any prefix args that don't already have infos.
125 paramInfos.resize(prefixArgs);
127 // Add infos for the prototype.
128 for (const auto &ParamInfo : proto->getExtParameterInfos()) {
129 paramInfos.push_back(ParamInfo);
130 // pass_object_size params have no parameter info.
131 if (ParamInfo.hasPassObjectSize())
132 paramInfos.emplace_back();
133 }
135 assert(paramInfos.size() <= totalArgs &&
136 "Did we forget to insert pass_object_size args?");
137 // Add default infos for the variadic and/or suffix arguments.
138 paramInfos.resize(totalArgs);
139 }
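// Worked example (hypothetical prototype): for
//   void g(void *p __attribute__((pass_object_size(0))), int x);
// called with one ABI-specific prefix argument, proto->getNumParams() is 2 but
// the call carries four arguments (prefix, p, the implicit size for p, x).
// The loop above emits an extra default ExtParameterInfo right after p's
// info, and the final resize(totalArgs) pads any variadic/suffix slots.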
141 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
142 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
143 static void appendParameterTypes(const CodeGenTypes &CGT,
144 SmallVectorImpl<CanQualType> &prefix,
145 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
146 CanQual<FunctionProtoType> FPT) {
147 // Fast path: don't touch param info if we don't need to.
148 if (!FPT->hasExtParameterInfos()) {
149 assert(paramInfos.empty() &&
150 "We have paramInfos, but the prototype doesn't?");
151 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
152 return;
153 }
155 unsigned PrefixSize = prefix.size();
156 // In the vast majority of cases, we'll have precisely FPT->getNumParams()
157 // parameters; the only thing that can change this is the presence of
158 // pass_object_size. So, we preallocate for the common case.
159 prefix.reserve(prefix.size() + FPT->getNumParams());
161 auto ExtInfos = FPT->getExtParameterInfos();
162 assert(ExtInfos.size() == FPT->getNumParams());
163 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
164 prefix.push_back(FPT->getParamType(I));
165 if (ExtInfos[I].hasPassObjectSize())
166 prefix.push_back(CGT.getContext().getSizeType());
167 }
169 addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
170 prefix.size());
171 }
173 /// Arrange the LLVM function layout for a value of the given function
174 /// type, on top of any implicit parameters already stored.
175 static const CGFunctionInfo &
176 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
177 SmallVectorImpl<CanQualType> &prefix,
178 CanQual<FunctionProtoType> FTP) {
179 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
180 RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
182 appendParameterTypes(CGT, prefix, paramInfos, FTP);
183 CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
185 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
186 /*chainCall=*/false, prefix,
187 FTP->getExtInfo(), paramInfos,
188 Required);
189 }
191 /// Arrange the argument and result information for a value of the
192 /// given freestanding function type.
193 const CGFunctionInfo &
194 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
195 SmallVector<CanQualType, 16> argTypes;
196 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
197 FTP);
198 }
200 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
201 // Set the appropriate calling convention for the Function.
202 if (D->hasAttr<StdCallAttr>())
203 return CC_X86StdCall;
205 if (D->hasAttr<FastCallAttr>())
206 return CC_X86FastCall;
208 if (D->hasAttr<RegCallAttr>())
209 return CC_X86RegCall;
211 if (D->hasAttr<ThisCallAttr>())
212 return CC_X86ThisCall;
214 if (D->hasAttr<VectorCallAttr>())
215 return CC_X86VectorCall;
217 if (D->hasAttr<PascalAttr>())
218 return CC_X86Pascal;
220 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
221 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
223 if (D->hasAttr<AArch64VectorPcsAttr>())
224 return CC_AArch64VectorCall;
226 if (D->hasAttr<IntelOclBiccAttr>())
227 return CC_IntelOclBicc;
229 if (D->hasAttr<MSABIAttr>())
230 return IsWindows ? CC_C : CC_Win64;
232 if (D->hasAttr<SysVABIAttr>())
233 return IsWindows ? CC_X86_64SysV : CC_C;
235 if (D->hasAttr<PreserveMostAttr>())
236 return CC_PreserveMost;
238 if (D->hasAttr<PreserveAllAttr>())
239 return CC_PreserveAll;
241 return CC_C;
242 }
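// For illustration (hypothetical declaration): on a non-Windows x86-64 target,
//   void h(void) __attribute__((ms_abi));
// takes the MSABIAttr branch above and yields CC_Win64, whereas the same
// declaration on a Windows target yields CC_C. Declarations without any
// calling-convention attribute fall through to the final CC_C return.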
244 /// Arrange the argument and result information for a call to an
245 /// unknown C++ non-static member function of the given abstract type.
246 /// (A null RD means we don't have any meaningful "this" argument type,
247 /// so fall back to a generic pointer type).
248 /// The member function must be an ordinary function, i.e. not a
249 /// constructor or destructor.
250 const CGFunctionInfo &
251 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
252 const FunctionProtoType *FTP,
253 const CXXMethodDecl *MD) {
254 SmallVector<CanQualType, 16> argTypes;
256 // Add the 'this' pointer.
257 argTypes.push_back(DeriveThisType(RD, MD));
259 return ::arrangeLLVMFunctionInfo(
260 *this, true, argTypes,
261 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
262 }
264 /// Set calling convention for CUDA/HIP kernel.
265 static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
266 const FunctionDecl *FD) {
267 if (FD->hasAttr<CUDAGlobalAttr>()) {
268 const FunctionType *FT = FTy->getAs<FunctionType>();
269 CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
270 FTy = FT->getCanonicalTypeUnqualified();
271 }
272 }
274 /// Arrange the argument and result information for a declaration or
275 /// definition of the given C++ non-static member function. The
276 /// member function must be an ordinary function, i.e. not a
277 /// constructor or destructor.
278 const CGFunctionInfo &
279 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
280 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
281 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
283 CanQualType FT = GetFormalType(MD).getAs<Type>();
284 setCUDAKernelCallingConvention(FT, CGM, MD);
285 auto prototype = FT.getAs<FunctionProtoType>();
287 if (MD->isInstance()) {
288 // The abstract case is perfectly fine.
289 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
290 return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
291 }
293 return arrangeFreeFunctionType(prototype);
294 }
296 bool CodeGenTypes::inheritingCtorHasParams(
297 const InheritedConstructor &Inherited, CXXCtorType Type) {
298 // Parameters are unnecessary if we're constructing a base class subobject
299 // and the inherited constructor lives in a virtual base.
300 return Type == Ctor_Complete ||
301 !Inherited.getShadowDecl()->constructsVirtualBase() ||
302 !Target.getCXXABI().hasConstructorVariants();
303 }
305 const CGFunctionInfo &
306 CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
307 auto *MD = cast<CXXMethodDecl>(GD.getDecl());
309 SmallVector<CanQualType, 16> argTypes;
310 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
311 argTypes.push_back(DeriveThisType(MD->getParent(), MD));
313 bool PassParams = true;
315 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
316 // A base class inheriting constructor doesn't get forwarded arguments
317 // needed to construct a virtual base (or base class thereof).
318 if (auto Inherited = CD->getInheritedConstructor())
319 PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
320 }
322 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
324 // Add the formal parameters.
325 if (PassParams)
326 appendParameterTypes(*this, argTypes, paramInfos, FTP);
328 CGCXXABI::AddedStructorArgs AddedArgs =
329 TheCXXABI.buildStructorSignature(GD, argTypes);
330 if (!paramInfos.empty()) {
331 // Note: prefix implies after the first param.
332 if (AddedArgs.Prefix)
333 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
334 FunctionProtoType::ExtParameterInfo{});
335 if (AddedArgs.Suffix)
336 paramInfos.append(AddedArgs.Suffix,
337 FunctionProtoType::ExtParameterInfo{});
338 }
340 RequiredArgs required =
341 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
342 : RequiredArgs::All);
344 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
345 CanQualType resultType = TheCXXABI.HasThisReturn(GD)
346 ? argTypes.front()
347 : TheCXXABI.hasMostDerivedReturn(GD)
348 ? CGM.getContext().VoidPtrTy
349 : Context.VoidTy;
350 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
351 /*chainCall=*/false, argTypes, extInfo,
352 paramInfos, required);
353 }
355 static SmallVector<CanQualType, 16>
356 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
357 SmallVector<CanQualType, 16> argTypes;
358 for (auto &arg : args)
359 argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
360 return argTypes;
361 }
363 static SmallVector<CanQualType, 16>
364 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
365 SmallVector<CanQualType, 16> argTypes;
366 for (auto &arg : args)
367 argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
368 return argTypes;
369 }
371 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
372 getExtParameterInfosForCall(const FunctionProtoType *proto,
373 unsigned prefixArgs, unsigned totalArgs) {
374 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
375 if (proto->hasExtParameterInfos()) {
376 addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
377 }
378 return result;
379 }
381 /// Arrange a call to a C++ method, passing the given arguments.
383 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
384 /// parameter.
385 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
386 /// call arg list.
387 /// PassProtoArgs indicates whether `args` has args for the parameters in the
388 /// given CXXConstructorDecl.
389 const CGFunctionInfo &
390 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
391 const CXXConstructorDecl *D,
392 CXXCtorType CtorKind,
393 unsigned ExtraPrefixArgs,
394 unsigned ExtraSuffixArgs,
395 bool PassProtoArgs) {
397 SmallVector<CanQualType, 16> ArgTypes;
398 for (const auto &Arg : args)
399 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
401 // +1 for implicit this, which should always be args[0].
402 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
404 CanQual<FunctionProtoType> FPT = GetFormalType(D);
405 RequiredArgs Required = PassProtoArgs
406 ? RequiredArgs::forPrototypePlus(
407 FPT, TotalPrefixArgs + ExtraSuffixArgs)
408 : RequiredArgs::All;
410 GlobalDecl GD(D, CtorKind);
411 CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
412 ? ArgTypes.front()
413 : TheCXXABI.hasMostDerivedReturn(GD)
414 ? CGM.getContext().VoidPtrTy
415 : Context.VoidTy;
417 FunctionType::ExtInfo Info = FPT->getExtInfo();
418 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
419 // If the prototype args are elided, we should only have ABI-specific args,
420 // which never have param info.
421 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
422 // ABI-specific suffix arguments are treated the same as variadic arguments.
423 addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
424 ArgTypes.size());
425 }
426 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
427 /*chainCall=*/false, ArgTypes, Info,
428 ParamInfos, Required);
429 }
431 /// Arrange the argument and result information for the declaration or
432 /// definition of the given function.
433 const CGFunctionInfo &
434 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
435 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
436 if (MD->isInstance())
437 return arrangeCXXMethodDeclaration(MD);
439 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
441 assert(isa<FunctionType>(FTy));
442 setCUDAKernelCallingConvention(FTy, CGM, FD);
444 // When declaring a function without a prototype, always use a
445 // non-variadic type.
446 if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
447 return arrangeLLVMFunctionInfo(
448 noProto->getReturnType(), /*instanceMethod=*/false,
449 /*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
450 }
452 return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
453 }
455 /// Arrange the argument and result information for the declaration or
456 /// definition of an Objective-C method.
457 const CGFunctionInfo &
458 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
459 // It happens that this is the same as a call with no optional
460 // arguments, except also using the formal 'self' type.
461 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
462 }
464 /// Arrange the argument and result information for the function type
465 /// through which to perform a send to the given Objective-C method,
466 /// using the given receiver type. The receiver type is not always
467 /// the 'self' type of the method or even an Objective-C pointer type.
468 /// This is *not* the right method for actually performing such a
469 /// message send, due to the possibility of optional arguments.
470 const CGFunctionInfo &
471 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
472 QualType receiverType) {
473 SmallVector<CanQualType, 16> argTys;
474 SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
475 argTys.push_back(Context.getCanonicalParamType(receiverType));
476 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
478 for (const auto *I : MD->parameters()) {
479 argTys.push_back(Context.getCanonicalParamType(I->getType()));
480 auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
481 I->hasAttr<NoEscapeAttr>());
482 extParamInfos.push_back(extParamInfo);
483 }
485 FunctionType::ExtInfo einfo;
486 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
487 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
489 if (getContext().getLangOpts().ObjCAutoRefCount &&
490 MD->hasAttr<NSReturnsRetainedAttr>())
491 einfo = einfo.withProducesResult(true);
493 RequiredArgs required =
494 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
496 return arrangeLLVMFunctionInfo(
497 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
498 /*chainCall=*/false, argTys, einfo, extParamInfos, required);
499 }
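// Sketch of the resulting signature (hypothetical method): for
//   - (void)setObject:(id)obj;
// argTys ends up as { receiver type, SEL, id } -- the two implicit slots
// pushed above plus the one formal parameter -- and 'required' is All because
// the method is not variadic.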
501 const CGFunctionInfo &
502 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
503 const CallArgList &args) {
504 auto argTypes = getArgTypesForCall(Context, args);
505 FunctionType::ExtInfo einfo;
507 return arrangeLLVMFunctionInfo(
508 GetReturnType(returnType), /*instanceMethod=*/false,
509 /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
510 }
512 const CGFunctionInfo &
513 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
514 // FIXME: Do we need to handle ObjCMethodDecl?
515 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
517 if (isa<CXXConstructorDecl>(GD.getDecl()) ||
518 isa<CXXDestructorDecl>(GD.getDecl()))
519 return arrangeCXXStructorDeclaration(GD);
521 return arrangeFunctionDeclaration(FD);
522 }
524 /// Arrange a thunk that takes 'this' as the first parameter followed by
525 /// varargs. Return a void pointer, regardless of the actual return type.
526 /// The body of the thunk will end in a musttail call to a function of the
527 /// correct type, and the caller will bitcast the function to the correct
528 /// prototype.
529 const CGFunctionInfo &
530 CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
531 assert(MD->isVirtual() && "only methods have thunks");
532 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
533 CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
534 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
535 /*chainCall=*/false, ArgTys,
536 FTP->getExtInfo(), {}, RequiredArgs(1));
537 }
539 const CGFunctionInfo &
540 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
541 CXXCtorType CT) {
542 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
544 CanQual<FunctionProtoType> FTP = GetFormalType(CD);
545 SmallVector<CanQualType, 2> ArgTys;
546 const CXXRecordDecl *RD = CD->getParent();
547 ArgTys.push_back(DeriveThisType(RD, CD));
548 if (CT == Ctor_CopyingClosure)
549 ArgTys.push_back(*FTP->param_type_begin());
550 if (RD->getNumVBases() > 0)
551 ArgTys.push_back(Context.IntTy);
552 CallingConv CC = Context.getDefaultCallingConvention(
553 /*IsVariadic=*/false, /*IsCXXMethod=*/true);
554 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
555 /*chainCall=*/false, ArgTys,
556 FunctionType::ExtInfo(CC), {},
557 RequiredArgs::All);
558 }
560 /// Arrange a call as unto a free function, except possibly with an
561 /// additional number of formal parameters considered required.
562 static const CGFunctionInfo &
563 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
564 CodeGenModule &CGM,
565 const CallArgList &args,
566 const FunctionType *fnType,
567 unsigned numExtraRequiredArgs,
568 bool chainCall) {
569 assert(args.size() >= numExtraRequiredArgs);
571 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
573 // In most cases, there are no optional arguments.
574 RequiredArgs required = RequiredArgs::All;
576 // If we have a variadic prototype, the required arguments are the
577 // extra prefix plus the arguments in the prototype.
578 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
579 if (proto->isVariadic())
580 required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);
582 if (proto->hasExtParameterInfos())
583 addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
584 args.size());
586 // If we don't have a prototype at all, but we're supposed to
587 // explicitly use the variadic convention for unprototyped calls,
588 // treat all of the arguments as required but preserve the nominal
589 // possibility of variadics.
590 } else if (CGM.getTargetCodeGenInfo()
591 .isNoProtoCallVariadic(args,
592 cast<FunctionNoProtoType>(fnType))) {
593 required = RequiredArgs(args.size());
594 }
597 SmallVector<CanQualType, 16> argTypes;
598 for (const auto &arg : args)
599 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
600 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
601 /*instanceMethod=*/false, chainCall,
602 argTypes, fnType->getExtInfo(), paramInfos,
603 required);
604 }
606 /// Figure out the rules for calling a function with the given formal
607 /// type using the given arguments. The arguments are necessary
608 /// because the function might be unprototyped, in which case it's
609 /// target-dependent in crazy ways.
610 const CGFunctionInfo &
611 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
612 const FunctionType *fnType,
613 bool chainCall) {
614 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
615 chainCall ? 1 : 0, chainCall);
616 }
618 /// A block function is essentially a free function with an
619 /// extra implicit argument.
620 const CGFunctionInfo &
621 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
622 const FunctionType *fnType) {
623 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
624 /*chainCall=*/false);
625 }
627 const CGFunctionInfo &
628 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
629 const FunctionArgList &params) {
630 auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
631 auto argTypes = getArgTypesForDeclaration(Context, params);
633 return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
634 /*instanceMethod*/ false, /*chainCall*/ false,
635 argTypes, proto->getExtInfo(), paramInfos,
636 RequiredArgs::forPrototypePlus(proto, 1));
637 }
639 const CGFunctionInfo &
640 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
641 const CallArgList &args) {
643 SmallVector<CanQualType, 16> argTypes;
644 for (const auto &Arg : args)
645 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
646 return arrangeLLVMFunctionInfo(
647 GetReturnType(resultType), /*instanceMethod=*/false,
648 /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
649 /*paramInfos=*/ {}, RequiredArgs::All);
650 }
652 const CGFunctionInfo &
653 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
654 const FunctionArgList &args) {
655 auto argTypes = getArgTypesForDeclaration(Context, args);
657 return arrangeLLVMFunctionInfo(
658 GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
659 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
660 }
662 const CGFunctionInfo &
663 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
664 ArrayRef<CanQualType> argTypes) {
665 return arrangeLLVMFunctionInfo(
666 resultType, /*instanceMethod=*/false, /*chainCall=*/false,
667 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
668 }
670 /// Arrange a call to a C++ method, passing the given arguments.
672 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
673 /// does not count `this`.
674 const CGFunctionInfo &
675 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
676 const FunctionProtoType *proto,
677 RequiredArgs required,
678 unsigned numPrefixArgs) {
679 assert(numPrefixArgs + 1 <= args.size() &&
680 "Emitting a call with less args than the required prefix?");
681 // Add one to account for `this`. It's a bit awkward here, but we don't count
682 // `this` in similar places elsewhere.
683 auto paramInfos =
684 getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
687 auto argTypes = getArgTypesForCall(Context, args);
689 FunctionType::ExtInfo info = proto->getExtInfo();
690 return arrangeLLVMFunctionInfo(
691 GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
692 /*chainCall=*/false, argTypes, info, paramInfos, required);
693 }
695 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
696 return arrangeLLVMFunctionInfo(
697 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
698 None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
699 }
701 const CGFunctionInfo &
702 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
703 const CallArgList &args) {
704 assert(signature.arg_size() <= args.size());
705 if (signature.arg_size() == args.size())
706 return signature;
708 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
709 auto sigParamInfos = signature.getExtParameterInfos();
710 if (!sigParamInfos.empty()) {
711 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
712 paramInfos.resize(args.size());
713 }
715 auto argTypes = getArgTypesForCall(Context, args);
717 assert(signature.getRequiredArgs().allowsOptionalArgs());
718 return arrangeLLVMFunctionInfo(signature.getReturnType(),
719 signature.isInstanceMethod(),
720 signature.isChainCall(),
721 argTypes,
722 signature.getExtInfo(),
723 paramInfos,
724 signature.getRequiredArgs());
725 }
727 namespace clang {
728 namespace CodeGen {
729 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
730 }
731 }
733 /// Arrange the argument and result information for an abstract value
734 /// of a given function type. This is the method which all of the
735 /// above functions ultimately defer to.
736 const CGFunctionInfo &
737 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
738 bool instanceMethod,
739 bool chainCall,
740 ArrayRef<CanQualType> argTypes,
741 FunctionType::ExtInfo info,
742 ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
743 RequiredArgs required) {
744 assert(llvm::all_of(argTypes,
745 [](CanQualType T) { return T.isCanonicalAsParam(); }));
747 // Lookup or create unique function info.
748 llvm::FoldingSetNodeID ID;
749 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
750 required, resultType, argTypes);
752 void *insertPos = nullptr;
753 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
754 if (FI)
755 return *FI;
757 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
759 // Construct the function info. We co-allocate the ArgInfos.
760 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
761 paramInfos, resultType, argTypes, required);
762 FunctionInfos.InsertNode(FI, insertPos);
764 bool inserted = FunctionsBeingProcessed.insert(FI).second;
765 (void)inserted;
766 assert(inserted && "Recursively being processed?");
768 // Compute ABI information.
769 if (CC == llvm::CallingConv::SPIR_KERNEL) {
770 // Force target independent argument handling for the host visible
771 // kernel functions.
772 computeSPIRKernelABIInfo(CGM, *FI);
773 } else if (info.getCC() == CC_Swift) {
774 swiftcall::computeABIInfo(CGM, *FI);
775 } else {
776 getABIInfo().computeInfo(*FI);
777 }
779 // Loop over all of the computed argument and return value info. If any of
780 // them are direct or extend without a specified coerce type, specify the
781 // default now.
782 ABIArgInfo &retInfo = FI->getReturnInfo();
783 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
784 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
786 for (auto &I : FI->arguments())
787 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
788 I.info.setCoerceToType(ConvertType(I.type));
790 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
791 assert(erased && "Not in set?");
793 return *FI;
794 }
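// Usage note (illustrative): results are uniqued in the FunctionInfos folding
// set, so arranging the same signature twice yields the same CGFunctionInfo.
// A second request with identical result type, argument types, and ExtInfo
// hits the FindNodeOrInsertPos fast path above and never recomputes the ABI
// information.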
796 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
797 bool instanceMethod,
798 bool chainCall,
799 const FunctionType::ExtInfo &info,
800 ArrayRef<ExtParameterInfo> paramInfos,
801 CanQualType resultType,
802 ArrayRef<CanQualType> argTypes,
803 RequiredArgs required) {
804 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
805 assert(!required.allowsOptionalArgs() ||
806 required.getNumRequiredArgs() <= argTypes.size());
808 void *buffer =
809 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
810 argTypes.size() + 1, paramInfos.size()));
812 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
813 FI->CallingConvention = llvmCC;
814 FI->EffectiveCallingConvention = llvmCC;
815 FI->ASTCallingConvention = info.getCC();
816 FI->InstanceMethod = instanceMethod;
817 FI->ChainCall = chainCall;
818 FI->NoReturn = info.getNoReturn();
819 FI->ReturnsRetained = info.getProducesResult();
820 FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
821 FI->NoCfCheck = info.getNoCfCheck();
822 FI->Required = required;
823 FI->HasRegParm = info.getHasRegParm();
824 FI->RegParm = info.getRegParm();
825 FI->ArgStruct = nullptr;
826 FI->ArgStructAlign = 0;
827 FI->NumArgs = argTypes.size();
828 FI->HasExtParameterInfos = !paramInfos.empty();
829 FI->getArgsBuffer()[0].type = resultType;
830 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
831 FI->getArgsBuffer()[i + 1].type = argTypes[i];
832 for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
833 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
834 return FI;
835 }
839 namespace {
840 // ABIArgInfo::Expand implementation.
842 // Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
843 struct TypeExpansion {
844 enum TypeExpansionKind {
845 // Elements of constant arrays are expanded recursively.
846 TEK_ConstantArray,
847 // Record fields are expanded recursively (but if record is a union, only
848 // the field with the largest size is expanded).
849 TEK_Record,
850 // For complex types, real and imaginary parts are expanded recursively.
851 TEK_Complex,
852 // All other types are not expandable.
853 TEK_None
854 };
856 const TypeExpansionKind Kind;
858 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
859 virtual ~TypeExpansion() {}
860 };
862 struct ConstantArrayExpansion : TypeExpansion {
863 QualType EltTy;
864 uint64_t NumElts;
866 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
867 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
868 static bool classof(const TypeExpansion *TE) {
869 return TE->Kind == TEK_ConstantArray;
870 }
871 };
873 struct RecordExpansion : TypeExpansion {
874 SmallVector<const CXXBaseSpecifier *, 1> Bases;
876 SmallVector<const FieldDecl *, 1> Fields;
878 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
879 SmallVector<const FieldDecl *, 1> &&Fields)
880 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
881 Fields(std::move(Fields)) {}
882 static bool classof(const TypeExpansion *TE) {
883 return TE->Kind == TEK_Record;
884 }
885 };
887 struct ComplexExpansion : TypeExpansion {
888 QualType EltTy;
890 ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
891 static bool classof(const TypeExpansion *TE) {
892 return TE->Kind == TEK_Complex;
893 }
894 };
896 struct NoExpansion : TypeExpansion {
897 NoExpansion() : TypeExpansion(TEK_None) {}
898 static bool classof(const TypeExpansion *TE) {
899 return TE->Kind == TEK_None;
900 }
901 };
902 } // namespace
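// How the kinds map (illustrative; see getTypeExpansion below):
//   int a[4]                   -> ConstantArrayExpansion (EltTy = int, NumElts = 4)
//   struct { int i; float f; } -> RecordExpansion over its bases and fields
//   _Complex double            -> ComplexExpansion (EltTy = double)
//   int, pointers, ...         -> NoExpansion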
904 static std::unique_ptr<TypeExpansion>
905 getTypeExpansion(QualType Ty, const ASTContext &Context) {
906 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
907 return std::make_unique<ConstantArrayExpansion>(
908 AT->getElementType(), AT->getSize().getZExtValue());
909 }
910 if (const RecordType *RT = Ty->getAs<RecordType>()) {
911 SmallVector<const CXXBaseSpecifier *, 1> Bases;
912 SmallVector<const FieldDecl *, 1> Fields;
913 const RecordDecl *RD = RT->getDecl();
914 assert(!RD->hasFlexibleArrayMember() &&
915 "Cannot expand structure with flexible array.");
916 if (RD->isUnion()) {
917 // Unions can be here only in degenerative cases - all the fields are same
918 // after flattening. Thus we have to use the "largest" field.
919 const FieldDecl *LargestFD = nullptr;
920 CharUnits UnionSize = CharUnits::Zero();
922 for (const auto *FD : RD->fields()) {
923 if (FD->isZeroLengthBitField(Context))
924 continue;
925 assert(!FD->isBitField() &&
926 "Cannot expand structure with bit-field members.");
927 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
928 if (UnionSize < FieldSize) {
929 UnionSize = FieldSize;
930 LargestFD = FD;
931 }
932 }
933 if (LargestFD)
934 Fields.push_back(LargestFD);
935 } else {
936 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
937 assert(!CXXRD->isDynamicClass() &&
938 "cannot expand vtable pointers in dynamic classes");
939 for (const CXXBaseSpecifier &BS : CXXRD->bases())
940 Bases.push_back(&BS);
941 }
943 for (const auto *FD : RD->fields()) {
944 if (FD->isZeroLengthBitField(Context))
945 continue;
946 assert(!FD->isBitField() &&
947 "Cannot expand structure with bit-field members.");
948 Fields.push_back(FD);
949 }
950 }
951 return std::make_unique<RecordExpansion>(std::move(Bases),
952 std::move(Fields));
953 }
954 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
955 return std::make_unique<ComplexExpansion>(CT->getElementType());
956 }
957 return std::make_unique<NoExpansion>();
958 }
960 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
961 auto Exp = getTypeExpansion(Ty, Context);
962 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
963 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
964 }
965 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
966 int Res = 0;
967 for (auto BS : RExp->Bases)
968 Res += getExpansionSize(BS->getType(), Context);
969 for (auto FD : RExp->Fields)
970 Res += getExpansionSize(FD->getType(), Context);
971 return Res;
972 }
973 if (isa<ComplexExpansion>(Exp.get()))
974 return 2;
975 assert(isa<NoExpansion>(Exp.get()));
976 return 1;
977 }
979 void
980 CodeGenTypes::getExpandedTypes(QualType Ty,
981 SmallVectorImpl<llvm::Type *>::iterator &TI) {
982 auto Exp = getTypeExpansion(Ty, Context);
983 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
984 for (int i = 0, n = CAExp->NumElts; i < n; i++) {
985 getExpandedTypes(CAExp->EltTy, TI);
986 }
987 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
988 for (auto BS : RExp->Bases)
989 getExpandedTypes(BS->getType(), TI);
990 for (auto FD : RExp->Fields)
991 getExpandedTypes(FD->getType(), TI);
992 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
993 llvm::Type *EltTy = ConvertType(CExp->EltTy);
994 *TI++ = EltTy;
995 *TI++ = EltTy;
996 } else {
997 assert(isa<NoExpansion>(Exp.get()));
998 *TI++ = ConvertType(Ty);
999 }
1000 }
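// Worked example (hypothetical type, typical 32-bit-int target): for
//   struct S { int a; _Complex double c; };
// getExpansionSize returns 3 and getExpandedTypes yields { i32, double,
// double } -- one IR value for the scalar field plus two for the real and
// imaginary parts of the complex field.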
1002 static void forConstantArrayExpansion(CodeGenFunction &CGF,
1003 ConstantArrayExpansion *CAE,
1004 Address BaseAddr,
1005 llvm::function_ref<void(Address)> Fn) {
1006 CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
1007 CharUnits EltAlign =
1008 BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
1010 for (int i = 0, n = CAE->NumElts; i < n; i++) {
1011 llvm::Value *EltAddr =
1012 CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
1013 Fn(Address(EltAddr, EltAlign));
1014 }
1015 }
1017 void CodeGenFunction::ExpandTypeFromArgs(
1018 QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
1019 assert(LV.isSimple() &&
1020 "Unexpected non-simple lvalue during struct expansion.");
1022 auto Exp = getTypeExpansion(Ty, getContext());
1023 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1024 forConstantArrayExpansion(
1025 *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
1026 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
1027 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1028 });
1029 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1030 Address This = LV.getAddress(*this);
1031 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1032 // Perform a single step derived-to-base conversion.
1033 Address Base =
1034 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1035 /*NullCheckValue=*/false, SourceLocation());
1036 LValue SubLV = MakeAddrLValue(Base, BS->getType());
1038 // Recurse onto bases.
1039 ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1040 }
1041 for (auto FD : RExp->Fields) {
1042 // FIXME: What are the right qualifiers here?
1043 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1044 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1045 }
1046 } else if (isa<ComplexExpansion>(Exp.get())) {
1047 auto realValue = *AI++;
1048 auto imagValue = *AI++;
1049 EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1050 } else {
1051 // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
1052 // primitive store.
1053 assert(isa<NoExpansion>(Exp.get()));
1054 if (LV.isBitField())
1055 EmitStoreThroughLValue(RValue::get(*AI++), LV);
1056 else
1057 EmitStoreOfScalar(*AI++, LV);
1058 }
1059 }
1061 void CodeGenFunction::ExpandTypeToArgs(
1062 QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
1063 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1064 auto Exp = getTypeExpansion(Ty, getContext());
1065 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1066 Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1067 : Arg.getKnownRValue().getAggregateAddress();
1068 forConstantArrayExpansion(
1069 *this, CAExp, Addr, [&](Address EltAddr) {
1070 CallArg EltArg = CallArg(
1071 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
1072 CAExp->EltTy);
1073 ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
1074 IRCallArgPos);
1075 });
1076 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1077 Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
1078 : Arg.getKnownRValue().getAggregateAddress();
1079 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1080 // Perform a single step derived-to-base conversion.
1081 Address Base =
1082 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1083 /*NullCheckValue=*/false, SourceLocation());
1084 CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());
1086 // Recurse onto bases.
1087 ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
1088 IRCallArgPos);
1089 }
1091 LValue LV = MakeAddrLValue(This, Ty);
1092 for (auto FD : RExp->Fields) {
1093 CallArg FldArg =
1094 CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
1095 ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
1096 IRCallArgPos);
1097 }
1098 } else if (isa<ComplexExpansion>(Exp.get())) {
1099 ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
1100 IRCallArgs[IRCallArgPos++] = CV.first;
1101 IRCallArgs[IRCallArgPos++] = CV.second;
1102 } else {
1103 assert(isa<NoExpansion>(Exp.get()));
1104 auto RV = Arg.getKnownRValue();
1105 assert(RV.isScalar() &&
1106 "Unexpected non-scalar rvalue during struct expansion.");
1108 // Insert a bitcast as needed.
1109 llvm::Value *V = RV.getScalarVal();
1110 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1111 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1112 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1114 IRCallArgs[IRCallArgPos++] = V;
1115 }
1116 }
1118 /// Create a temporary allocation for the purposes of coercion.
1119 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1120 CharUnits MinAlign) {
1121 // Don't use an alignment that's worse than what LLVM would prefer.
1122 auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1123 CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1125 return CGF.CreateTempAlloca(Ty, Align);
1126 }
1128 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1129 /// accessing some number of bytes out of it, try to gep into the struct to get
1130 /// at its inner goodness. Dive as deep as possible without entering an element
1131 /// with an in-memory size smaller than DstSize.
1132 static Address
1133 EnterStructPointerForCoercedAccess(Address SrcPtr,
1134 llvm::StructType *SrcSTy,
1135 uint64_t DstSize, CodeGenFunction &CGF) {
1136 // We can't dive into a zero-element struct.
1137 if (SrcSTy->getNumElements() == 0) return SrcPtr;
1139 llvm::Type *FirstElt = SrcSTy->getElementType(0);
1141 // If the first elt is at least as large as what we're looking for, or if the
1142 // first element is the same size as the whole struct, we can enter it. The
1143 // comparison must be made on the store size and not the alloca size. Using
1144 // the alloca size may overstate the size of the load.
1145 uint64_t FirstEltSize =
1146 CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1147 if (FirstEltSize < DstSize &&
1148 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1149 return SrcPtr;
1151 // GEP into the first element.
1152 SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");
1154 // If the first element is a struct, recurse.
1155 llvm::Type *SrcTy = SrcPtr.getElementType();
1156 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1157 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1159 return SrcPtr;
1160 }
1162 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1163 /// are either integers or pointers. This does a truncation of the value if it
1164 /// is too large or a zero extension if it is too small.
1166 /// This behaves as if the value were coerced through memory, so on big-endian
1167 /// targets the high bits are preserved in a truncation, while little-endian
1168 /// targets preserve the low bits.
1169 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1170 llvm::Type *Ty,
1171 CodeGenFunction &CGF) {
1172 if (Val->getType() == Ty)
1173 return Val;
1175 if (isa<llvm::PointerType>(Val->getType())) {
1176 // If this is Pointer->Pointer avoid conversion to and from int.
1177 if (isa<llvm::PointerType>(Ty))
1178 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1180 // Convert the pointer to an integer so we can play with its width.
1181 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1182 }
1184 llvm::Type *DestIntTy = Ty;
1185 if (isa<llvm::PointerType>(DestIntTy))
1186 DestIntTy = CGF.IntPtrTy;
1188 if (Val->getType() != DestIntTy) {
1189 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1190 if (DL.isBigEndian()) {
1191 // Preserve the high bits on big-endian targets.
1192 // That is what memory coercion does.
1193 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1194 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1196 if (SrcSize > DstSize) {
1197 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1198 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1199 } else {
1200 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1201 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1202 }
1203 } else {
1204 // Little-endian targets preserve the low bits. No shifts required.
1205 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1206 }
1207 }
1209 if (isa<llvm::PointerType>(Ty))
1210 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1212 return Val;
1213 }
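// Numeric example (illustrative): coercing an i64 holding 0xAABBCCDD11223344
// down to i32 keeps 0xAABBCCDD on a big-endian target (lshr by 32, then
// trunc) but keeps 0x11223344 on a little-endian target (plain trunc), which
// matches what a store followed by a narrower load through memory would give.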
1216 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1217 /// a pointer to an object of type \arg Ty, known to be aligned to
1218 /// \arg SrcAlign bytes.
1220 /// This safely handles the case when the src type is smaller than the
1221 /// destination type; in this situation the values of bits which not
1222 /// present in the src are undefined.
1223 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1224 CodeGenFunction &CGF) {
1225 llvm::Type *SrcTy = Src.getElementType();
1227 // If SrcTy and Ty are the same, just do a load.
1228 if (SrcTy == Ty)
1229 return CGF.Builder.CreateLoad(Src);
1231 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1233 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1234 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1235 SrcTy = Src.getType()->getElementType();
1236 }
1238 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1240 // If the source and destination are integer or pointer types, just do an
1241 // extension or truncation to the desired type.
1242 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1243 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1244 llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1245 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1246 }
1248 // If load is legal, just bitcast the src pointer.
1249 if (SrcSize >= DstSize) {
1250 // Generally SrcSize is never greater than DstSize, since this means we are
1251 // losing bits. However, this can happen in cases where the structure has
1252 // additional padding, for example due to a user specified alignment.
1254 // FIXME: Assert that we aren't truncating non-padding bits when have access
1255 // to that information.
1256 Src = CGF.Builder.CreateBitCast(Src,
1257 Ty->getPointerTo(Src.getAddressSpace()));
1258 return CGF.Builder.CreateLoad(Src);
1259 }
1261 // Otherwise do coercion through memory. This is stupid, but simple.
1262 Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1263 Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1264 Address SrcCasted = CGF.Builder.CreateElementBitCast(Src,CGF.Int8Ty);
1265 CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1266 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1267 false);
1268 return CGF.Builder.CreateLoad(Tmp);
1269 }
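// Illustrative cases (hypothetical IR types): loading a { i32, i32 } slot as
// i64 takes the SrcSize >= DstSize path and simply bitcasts the pointer,
// while loading a lone float slot as i64 falls through to the memcpy into a
// temporary alloca, so the bits not present in the source stay undefined.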
1271 // Function to store a first-class aggregate into memory. We prefer to
1272 // store the elements rather than the aggregate to be more friendly to
1273 // fast-isel.
1274 // FIXME: Do we need to recurse here?
1275 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1276 Address Dest, bool DestIsVolatile) {
1277 // Prefer scalar stores to first-class aggregate stores.
1278 if (llvm::StructType *STy =
1279 dyn_cast<llvm::StructType>(Val->getType())) {
1280 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1281 Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i);
1282 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1283 CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1284 }
1285 } else {
1286 CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1287 }
1288 }
1290 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1291 /// where the source and destination may have different types. The
1292 /// destination is known to be aligned to \arg DstAlign bytes.
1294 /// This safely handles the case when the src type is larger than the
1295 /// destination type; the upper bits of the src will be lost.
1296 static void CreateCoercedStore(llvm::Value *Src,
1297 Address Dst,
1298 bool DstIsVolatile,
1299 CodeGenFunction &CGF) {
1300 llvm::Type *SrcTy = Src->getType();
1301 llvm::Type *DstTy = Dst.getType()->getElementType();
1302 if (SrcTy == DstTy) {
1303 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1304 return;
1305 }
1307 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1309 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1310 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1311 DstTy = Dst.getType()->getElementType();
1312 }
1314 llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1315 llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1316 if (SrcPtrTy && DstPtrTy &&
1317 SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1318 Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1319 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1320 return;
1321 }
1323 // If the source and destination are integer or pointer types, just do an
1324 // extension or truncation to the desired type.
1325 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1326 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1327 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1328 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1329 return;
1330 }
1332 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1334 // If store is legal, just bitcast the src pointer.
1335 if (SrcSize <= DstSize) {
1336 Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1337 BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1338 } else {
1339 // Otherwise do coercion through memory. This is stupid, but
1340 // simple.
1342 // Generally SrcSize is never greater than DstSize, since this means we are
1343 // losing bits. However, this can happen in cases where the structure has
1344 // additional padding, for example due to a user specified alignment.
1346 // FIXME: Assert that we aren't truncating non-padding bits when have access
1347 // to that information.
1348 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1349 CGF.Builder.CreateStore(Src, Tmp);
1350 Address Casted = CGF.Builder.CreateElementBitCast(Tmp,CGF.Int8Ty);
1351 Address DstCasted = CGF.Builder.CreateElementBitCast(Dst,CGF.Int8Ty);
1352 CGF.Builder.CreateMemCpy(DstCasted, Casted,
1353 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
1354 false);
1355 }
1356 }
1358 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1359 const ABIArgInfo &info) {
1360 if (unsigned offset = info.getDirectOffset()) {
1361 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1362 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1363 CharUnits::fromQuantity(offset));
1364 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1365 }
1366 return addr;
1367 }
1369 namespace {
1371 /// Encapsulates information about the way function arguments from
1372 /// CGFunctionInfo should be passed to actual LLVM IR function.
1373 class ClangToLLVMArgMapping {
1374 static const unsigned InvalidIndex = ~0U;
1375 unsigned InallocaArgNo;
1376 unsigned SRetArgNo;
1377 unsigned TotalIRArgs;
1379 /// Arguments of LLVM IR function corresponding to single Clang argument.
1380 struct IRArgs {
1381 unsigned PaddingArgIndex;
1382 // Argument is expanded to IR arguments at positions
1383 // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1384 unsigned FirstArgIndex;
1385 unsigned NumberOfArgs;
1387 IRArgs()
1388 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1389 NumberOfArgs(0) {}
1390 };
1392 SmallVector<IRArgs, 8> ArgInfo;
1394 public:
1395 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1396 bool OnlyRequiredArgs = false)
1397 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1398 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1399 construct(Context, FI, OnlyRequiredArgs);
1400 }
1402 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1403 unsigned getInallocaArgNo() const {
1404 assert(hasInallocaArg());
1405 return InallocaArgNo;
1406 }
1408 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1409 unsigned getSRetArgNo() const {
1410 assert(hasSRetArg());
1411 return SRetArgNo;
1412 }
1414 unsigned totalIRArgs() const { return TotalIRArgs; }
1416 bool hasPaddingArg(unsigned ArgNo) const {
1417 assert(ArgNo < ArgInfo.size());
1418 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1419 }
1420 unsigned getPaddingArgNo(unsigned ArgNo) const {
1421 assert(hasPaddingArg(ArgNo));
1422 return ArgInfo[ArgNo].PaddingArgIndex;
1423 }
1425 /// Returns index of first IR argument corresponding to ArgNo, and their
1426 /// quantity.
1427 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1428 assert(ArgNo < ArgInfo.size());
1429 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1430 ArgInfo[ArgNo].NumberOfArgs);
1431 }
1433 private:
1434 void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1435 bool OnlyRequiredArgs);
1436 };
1437 } // namespace
1438 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1439 const CGFunctionInfo &FI,
1440 bool OnlyRequiredArgs) {
1441 unsigned IRArgNo = 0;
1442 bool SwapThisWithSRet = false;
1443 const ABIArgInfo &RetAI = FI.getReturnInfo();
1445 if (RetAI.getKind() == ABIArgInfo::Indirect) {
1446 SwapThisWithSRet = RetAI.isSRetAfterThis();
1447 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1448 }
1450 unsigned ArgNo = 0;
1451 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1452 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1453 ++I, ++ArgNo) {
1454 assert(I != FI.arg_end());
1455 QualType ArgType = I->type;
1456 const ABIArgInfo &AI = I->info;
1457 // Collect data about IR arguments corresponding to Clang argument ArgNo.
1458 auto &IRArgs = ArgInfo[ArgNo];
1460 if (AI.getPaddingType())
1461 IRArgs.PaddingArgIndex = IRArgNo++;
1463 switch (AI.getKind()) {
1464 case ABIArgInfo::Extend:
1465 case ABIArgInfo::Direct: {
1466 // FIXME: handle sseregparm someday...
1467 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1468 if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1469 IRArgs.NumberOfArgs = STy->getNumElements();
1470 } else {
1471 IRArgs.NumberOfArgs = 1;
1472 }
1473 break;
1474 }
1475 case ABIArgInfo::Indirect:
1476 IRArgs.NumberOfArgs = 1;
1477 break;
1478 case ABIArgInfo::Ignore:
1479 case ABIArgInfo::InAlloca:
1480 // ignore and inalloca doesn't have matching LLVM parameters.
1481 IRArgs.NumberOfArgs = 0;
1482 break;
1483 case ABIArgInfo::CoerceAndExpand:
1484 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1485 break;
1486 case ABIArgInfo::Expand:
1487 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1488 break;
1489 }
1491 if (IRArgs.NumberOfArgs > 0) {
1492 IRArgs.FirstArgIndex = IRArgNo;
1493 IRArgNo += IRArgs.NumberOfArgs;
1494 }
1496 // Skip over the sret parameter when it comes second. We already handled it
1497 // above.
1498 if (IRArgNo == 1 && SwapThisWithSRet)
1499 IRArgNo++;
1500 }
1501 assert(ArgNo == ArgInfo.size());
1503 if (FI.usesInAlloca())
1504 InallocaArgNo = IRArgNo++;
1506 TotalIRArgs = IRArgNo;
1507 }
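// Illustrative mapping (hypothetical signature, assuming the return value is
// lowered indirectly via sret): if the two Clang arguments are an Ignore'd
// empty struct and a Direct i32, the IR argument list is { sret pointer, i32 };
// getSRetArgNo() == 0, the first Clang argument spans zero IR arguments, and
// the second gets FirstArgIndex == 1 with NumberOfArgs == 1.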
1512 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1513 const auto &RI = FI.getReturnInfo();
1514 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1515 }
1517 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1518 return ReturnTypeUsesSRet(FI) &&
1519 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1520 }
1522 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1523 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1524 switch (BT->getKind()) {
1525 default:
1526 return false;
1527 case BuiltinType::Float:
1528 return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1529 case BuiltinType::Double:
1530 return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1531 case BuiltinType::LongDouble:
1532 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1533 }
1534 }
1536 return false;
1537 }
1539 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1540 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1541 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1542 if (BT->getKind() == BuiltinType::LongDouble)
1543 return getTarget().useObjCFP2RetForComplexLongDouble();
1544 }
1545 }
1547 return false;
1548 }
1550 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1551 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1552 return GetFunctionType(FI);
1553 }
1555 llvm::FunctionType *
1556 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1558 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1559 (void)Inserted;
1560 assert(Inserted && "Recursively being processed?");
1562 llvm::Type *resultType = nullptr;
1563 const ABIArgInfo &retAI = FI.getReturnInfo();
1564 switch (retAI.getKind()) {
1565 case ABIArgInfo::Expand:
1566 llvm_unreachable("Invalid ABI kind for return argument");
1568 case ABIArgInfo::Extend:
1569 case ABIArgInfo::Direct:
1570 resultType = retAI.getCoerceToType();
1571 break;
1573 case ABIArgInfo::InAlloca:
1574 if (retAI.getInAllocaSRet()) {
1575 // sret things on win32 aren't void, they return the sret pointer.
1576 QualType ret = FI.getReturnType();
1577 llvm::Type *ty = ConvertType(ret);
1578 unsigned addressSpace = Context.getTargetAddressSpace(ret);
1579 resultType = llvm::PointerType::get(ty, addressSpace);
1580 } else {
1581 resultType = llvm::Type::getVoidTy(getLLVMContext());
1582 }
1583 break;
1585 case ABIArgInfo::Indirect:
1586 case ABIArgInfo::Ignore:
1587 resultType = llvm::Type::getVoidTy(getLLVMContext());
1588 break;
1590 case ABIArgInfo::CoerceAndExpand:
1591 resultType = retAI.getUnpaddedCoerceAndExpandType();
1592 break;
1593 }
1595 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1596 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1598 // Add type for sret argument.
1599 if (IRFunctionArgs.hasSRetArg()) {
1600 QualType Ret = FI.getReturnType();
1601 llvm::Type *Ty = ConvertType(Ret);
1602 unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1603 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1604 llvm::PointerType::get(Ty, AddressSpace);
1605 }
1607 // Add type for inalloca argument.
1608 if (IRFunctionArgs.hasInallocaArg()) {
1609 auto ArgStruct = FI.getArgStruct();
1611 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
1618 for (; it != ie; ++it, ++ArgNo) {
1619 const ABIArgInfo &ArgInfo = it->info;
1621 // Insert a padding type to ensure proper alignment.
1622 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1623 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1624 ArgInfo.getPaddingType();
1626 unsigned FirstIRArg, NumIRArgs;
1627 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1629 switch (ArgInfo.getKind()) {
1630 case ABIArgInfo::Ignore:
1631 case ABIArgInfo::InAlloca:
1632 assert(NumIRArgs == 0);
1635 case ABIArgInfo::Indirect: {
1636 assert(NumIRArgs == 1);
1637 // indirect arguments are always on the stack, which is alloca addr space.
1638 llvm::Type *LTy = ConvertTypeForMem(it->type);
1639 ArgTypes[FirstIRArg] = LTy->getPointerTo(
1640 CGM.getDataLayout().getAllocaAddrSpace());
1644 case ABIArgInfo::Extend:
1645 case ABIArgInfo::Direct: {
1646 // Fast-isel and the optimizer generally like scalar values better than
1647 // FCAs, so we flatten them if this is safe to do for this argument.
1648 llvm::Type *argType = ArgInfo.getCoerceToType();
1649 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }
    case ABIArgInfo::CoerceAndExpand: {
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
        *ArgTypesIter++ = EltTy;
      }
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }
1678 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1679 assert(Erased && "Not in set?");
  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}
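
/// Return the LLVM function type to use in a vtable slot. If the method's
/// prototype cannot be converted yet (e.g. because of incomplete types), an
/// empty struct type is returned as a placeholder.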
1684 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1685 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1686 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1688 if (!isFuncTypeConvertible(FPT))
1689 return llvm::StructType::get(getLLVMContext());
  return GetFunctionType(GD);
}
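
/// Add IR attributes implied by a function prototype's exception
/// specification (currently just 'nounwind' for non-throwing prototypes).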
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
                                               llvm::AttrBuilder &FuncAttrs,
                                               const FunctionProtoType *FPT) {
  if (!FPT)
    return;

  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
      FPT->isNothrow())
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
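
/// Populate FuncAttrs with the default function attributes derived from the
/// code-generation and language options (size optimization hints, frame
/// pointer policy, floating-point math strings, convergent, and so on),
/// independent of any particular declaration.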
1705 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1706 bool AttrOnCallSite,
1707 llvm::AttrBuilder &FuncAttrs) {
  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }
1716 if (CodeGenOpts.DisableRedZone)
1717 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1718 if (CodeGenOpts.IndirectTlsSegRefs)
1719 FuncAttrs.addAttribute("indirect-tls-seg-refs");
1720 if (CodeGenOpts.NoImplicitFloat)
1721 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls ||
        CodeGenOpts.isNoBuiltinFunc(Name.data()))
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    StringRef FpKind;
    switch (CodeGenOpts.getFramePointer()) {
    case CodeGenOptions::FramePointerKind::None:
      FpKind = "none";
      break;
    case CodeGenOptions::FramePointerKind::NonLeaf:
      FpKind = "non-leaf";
      break;
    case CodeGenOptions::FramePointerKind::All:
      FpKind = "all";
      break;
    }
    FuncAttrs.addAttribute("frame-pointer", FpKind);
1745 FuncAttrs.addAttribute("less-precise-fpmad",
1746 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1748 if (CodeGenOpts.NullPointerIsValid)
1749 FuncAttrs.addAttribute("null-pointer-is-valid", "true");
1750 if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::Invalid)
1751 FuncAttrs.addAttribute("denormal-fp-math",
1752 llvm::denormalModeName(CodeGenOpts.FPDenormalMode));
1754 FuncAttrs.addAttribute("no-trapping-math",
1755 llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1757 // Strict (compliant) code is the default, so only add this attribute to
1758 // indicate that we are trying to workaround a problem case.
1759 if (!CodeGenOpts.StrictFloatCastOverflow)
1760 FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1762 // TODO: Are these all needed?
1763 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1764 FuncAttrs.addAttribute("no-infs-fp-math",
1765 llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1766 FuncAttrs.addAttribute("no-nans-fp-math",
1767 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1768 FuncAttrs.addAttribute("unsafe-fp-math",
1769 llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1770 FuncAttrs.addAttribute("use-soft-float",
1771 llvm::toStringRef(CodeGenOpts.SoftFloat));
1772 FuncAttrs.addAttribute("stack-protector-buffer-size",
1773 llvm::utostr(CodeGenOpts.SSPBufferSize));
1774 FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1775 llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1776 FuncAttrs.addAttribute(
1777 "correctly-rounded-divide-sqrt-fp-math",
1778 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1780 if (getLangOpts().OpenCL)
1781 FuncAttrs.addAttribute("denorms-are-zero",
1782 llvm::toStringRef(CodeGenOpts.FlushDenorm));
1784 // TODO: Reciprocal estimate codegen options should apply to instructions?
1785 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1786 if (!Recips.empty())
1787 FuncAttrs.addAttribute("reciprocal-estimates",
1788 llvm::join(Recips, ","));
1790 if (!CodeGenOpts.PreferVectorWidth.empty() &&
1791 CodeGenOpts.PreferVectorWidth != "none")
1792 FuncAttrs.addAttribute("prefer-vector-width",
1793 CodeGenOpts.PreferVectorWidth);
1795 if (CodeGenOpts.StackRealignment)
1796 FuncAttrs.addAttribute("stackrealign");
1797 if (CodeGenOpts.Backchain)
1798 FuncAttrs.addAttribute("backchain");
1800 if (CodeGenOpts.SpeculativeLoadHardening)
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
  }
1804 if (getLangOpts().assumeFunctionsAreConvergent()) {
1805 // Conservatively, mark all functions and calls in CUDA and OpenCL as
1806 // convergent (meaning, they may call an intrinsically convergent op, such
1807 // as __syncthreads() / barrier(), and so can't have certain optimizations
1808 // applied around them). LLVM will remove this attribute where it safely
1810 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1813 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1814 // Exceptions aren't supported in CUDA device code.
1815 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1817 // Respect -fcuda-flush-denormals-to-zero.
1818 if (CodeGenOpts.FlushDenorm)
1819 FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1822 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1823 StringRef Var, Value;
1824 std::tie(Var, Value) = Attr.split('=');
1825 FuncAttrs.addAttribute(Var, Value);
1829 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1830 llvm::AttrBuilder FuncAttrs;
1831 ConstructDefaultFnAttrList(F.getName(), F.hasOptNone(),
1832 /* AttrOnCallSite = */ false, FuncAttrs);
  F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
}
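
/// Build the full attribute list for a function or call site: attributes
/// implied by the callee declaration, the default option-derived attributes,
/// return-value attributes, and one attribute set per IR argument.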
1836 void CodeGenModule::ConstructAttributeList(
1837 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1838 llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
1839 llvm::AttrBuilder FuncAttrs;
1840 llvm::AttrBuilder RetAttrs;
1842 CallingConv = FI.getEffectiveCallingConvention();
1843 if (FI.isNoReturn())
1844 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1846 // If we have information about the function prototype, we can learn
1847 // attributes from there.
1848 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1849 CalleeInfo.getCalleeFunctionProtoType());
1851 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
  bool HasOptnone = false;
  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1857 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1858 if (TargetDecl->hasAttr<NoThrowAttr>())
1859 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1860 if (TargetDecl->hasAttr<NoReturnAttr>())
1861 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1862 if (TargetDecl->hasAttr<ColdAttr>())
1863 FuncAttrs.addAttribute(llvm::Attribute::Cold);
1864 if (TargetDecl->hasAttr<NoDuplicateAttr>())
1865 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1866 if (TargetDecl->hasAttr<ConvergentAttr>())
1867 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1869 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1870 AddAttributesFromFunctionProtoType(
1871 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1872 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1873 const bool IsVirtualCall = MD && MD->isVirtual();
1874 // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
1875 // virtual function. These attributes are not inherited by overloads.
1876 if (!(AttrOnCallSite && IsVirtualCall)) {
1877 if (Fn->isNoReturn())
1878 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1880 const auto *NBA = Fn->getAttr<NoBuiltinAttr>();
1881 bool HasWildcard = NBA && llvm::is_contained(NBA->builtinNames(), "*");
1882 if (getLangOpts().NoBuiltin || HasWildcard)
1883 FuncAttrs.addAttribute("no-builtins");
      auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
        SmallString<32> AttributeName;
        AttributeName += "no-builtin-";
        AttributeName += BuiltinName;
        FuncAttrs.addAttribute(AttributeName);
      };
      llvm::for_each(getLangOpts().NoBuiltinFuncs, AddNoBuiltinAttr);
      if (NBA)
        llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
    }
  }
1898 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1899 if (TargetDecl->hasAttr<ConstAttr>()) {
1900 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1901 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1902 } else if (TargetDecl->hasAttr<PureAttr>()) {
1903 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1904 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1905 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1906 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1907 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1909 if (TargetDecl->hasAttr<RestrictAttr>())
1910 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1911 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
1912 !CodeGenOpts.NullPointerIsValid)
1913 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1914 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
1915 FuncAttrs.addAttribute("no_caller_saved_registers");
1916 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
1917 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
1919 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
      Optional<unsigned> NumElemsParam;
      if (AllocSize->getNumElemsParam().isValid())
        NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
                                 NumElemsParam);
    }
  }

  ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1931 // This must run after constructing the default function attribute list
1932 // to ensure that the speculative load hardening attribute is removed
1933 // in the case where the -mspeculative-load-hardening flag was passed.
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
  }
1941 if (CodeGenOpts.EnableSegmentedStacks &&
1942 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1943 FuncAttrs.addAttribute("split-stack");
1945 // Add NonLazyBind attribute to function declarations when -fno-plt
1947 if (TargetDecl && CodeGenOpts.NoPLT) {
1948 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1949 if (!Fn->isDefined() && !AttrOnCallSite) {
1950 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
1955 if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>()) {
1956 if (getLangOpts().OpenCLVersion <= 120) {
1957 // OpenCL v1.2 Work groups are always uniform
      FuncAttrs.addAttribute("uniform-work-group-size", "true");
    } else {
      // OpenCL v2.0 Work groups may or may not be uniform.
      // The '-cl-uniform-work-group-size' compile option is a hint to the
      // compiler that the global work-size is a multiple of the work-group
      // size specified to clEnqueueNDRangeKernel (i.e. work groups are
      // uniform).
      FuncAttrs.addAttribute("uniform-work-group-size",
                             llvm::toStringRef(CodeGenOpts.UniformWGSize));
    }
  }
1970 if (!AttrOnCallSite) {
1971 bool DisableTailCalls = false;
1973 if (CodeGenOpts.DisableTailCalls)
1974 DisableTailCalls = true;
1975 else if (TargetDecl) {
1976 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1977 TargetDecl->hasAttr<AnyX86InterruptAttr>())
1978 DisableTailCalls = true;
1979 else if (CodeGenOpts.NoEscapingBlockTailCalls) {
1980 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
1981 if (!BD->doesNotEscape())
1982 DisableTailCalls = true;
1986 FuncAttrs.addAttribute("disable-tail-calls",
1987 llvm::toStringRef(DisableTailCalls));
1988 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
1991 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1993 QualType RetTy = FI.getReturnType();
1994 const ABIArgInfo &RetAI = FI.getReturnInfo();
1995 switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetAI.isSignExt())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    LLVM_FALLTHROUGH;
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect: {
    // inalloca and sret disable readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::CoerceAndExpand:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }
2024 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2025 QualType PTy = RefTy->getPointeeType();
2026 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
      RetAttrs.addDereferenceableAttr(
          getContext().getTypeSizeInChars(PTy).getQuantity());
2029 else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2030 !CodeGenOpts.NullPointerIsValid)
2031 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2034 bool hasUsedSRet = false;
2035 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2037 // Attach attributes to sret.
2038 if (IRFunctionArgs.hasSRetArg()) {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    hasUsedSRet = true;
    if (RetAI.getInReg())
2043 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2044 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2045 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2048 // Attach attributes to inalloca argument.
2049 if (IRFunctionArgs.hasInallocaArg()) {
2050 llvm::AttrBuilder Attrs;
2051 Attrs.addAttribute(llvm::Attribute::InAlloca);
2052 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2053 llvm::AttributeSet::get(getLLVMContext(), Attrs);
  unsigned ArgNo = 0;
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
                                          E = FI.arg_end();
       I != E; ++I, ++ArgNo) {
2060 QualType ParamType = I->type;
2061 const ABIArgInfo &AI = I->info;
2062 llvm::AttrBuilder Attrs;
2064 // Add attribute for padding argument, if necessary.
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      if (AI.getPaddingInReg()) {
        ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
            llvm::AttributeSet::get(
                getLLVMContext(),
                llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
      }
    }
2074 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2075 // have the corresponding parameter variable. It doesn't make
2076 // sense to do it here because parameters are so messed up.
2077 switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (AI.isSignExt())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else
        Attrs.addAttribute(llvm::Attribute::ZExt);
      LLVM_FALLTHROUGH;
    case ABIArgInfo::Direct:
      if (ArgNo == 0 && FI.isChainCall())
        Attrs.addAttribute(llvm::Attribute::Nest);
      else if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);
      break;
    case ABIArgInfo::Indirect: {
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
2096 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2098 CharUnits Align = AI.getIndirectAlign();
2100 // In a byval argument, it is important that the required
2101 // alignment of the type is honored, as LLVM might be creating a
2102 // *new* stack object, and needs to know what alignment to give
2103 // it. (Sometimes it can deduce a sensible alignment on its own,
2104 // but not if clang decides it must emit a packed struct, or the
2105 // user specifies increased alignment requirements.)
2107 // This is different from indirect *not* byval, where the object
2108 // exists already, and the align attribute is purely
2110 assert(!Align.isZero());
2112 // For now, only add this when we have a byval argument.
2113 // TODO: be less lazy about updating test cases.
2114 if (AI.getIndirectByVal())
2115 Attrs.addAlignmentAttr(Align.getQuantity());
2117 // byval disables readnone and readonly.
2118 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      break;
    }
    case ABIArgInfo::Ignore:
    case ABIArgInfo::Expand:
    case ABIArgInfo::CoerceAndExpand:
      break;
2127 case ABIArgInfo::InAlloca:
2128 // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      continue;
    }

    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2135 QualType PTy = RefTy->getPointeeType();
2136 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(
            getContext().getTypeSizeInChars(PTy).getQuantity());
2139 else if (getContext().getTargetAddressSpace(PTy) == 0 &&
2140 !CodeGenOpts.NullPointerIsValid)
2141 Attrs.addAttribute(llvm::Attribute::NonNull);
2144 switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2145 case ParameterABI::Ordinary:
2148 case ParameterABI::SwiftIndirectResult: {
2149 // Add 'sret' if we haven't already used it for something, but
2150 // only if the result is void.
      if (!hasUsedSRet && RetTy->isVoidType()) {
        Attrs.addAttribute(llvm::Attribute::StructRet);
        hasUsedSRet = true;
      }

      // Add 'noalias' in either case.
      Attrs.addAttribute(llvm::Attribute::NoAlias);
2159 // Add 'dereferenceable' and 'alignment'.
2160 auto PTy = ParamType->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
        auto info = getContext().getTypeInfoInChars(PTy);
        Attrs.addDereferenceableAttr(info.first.getQuantity());
        Attrs.addAttribute(llvm::Attribute::getWithAlignment(
            getLLVMContext(), info.second.getAsAlign()));
      }
      break;
    }

    case ParameterABI::SwiftErrorResult:
      Attrs.addAttribute(llvm::Attribute::SwiftError);
      break;

    case ParameterABI::SwiftContext:
      Attrs.addAttribute(llvm::Attribute::SwiftSelf);
      break;
    }
2179 if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2180 Attrs.addAttribute(llvm::Attribute::NoCapture);
2182 if (Attrs.hasAttributes()) {
2183 unsigned FirstIRArg, NumIRArgs;
2184 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2185 for (unsigned i = 0; i < NumIRArgs; i++)
2186 ArgAttrs[FirstIRArg + i] =
2187 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2190 assert(ArgNo == FI.arg_size());
2192 AttrList = llvm::AttributeList::get(
2193 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2194 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2197 /// An argument came in as a promoted argument; demote it back to its
2199 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2201 llvm::Value *value) {
2202 llvm::Type *varType = CGF.ConvertType(var->getType());
2204 // This can happen with promotions that actually don't change the
2205 // underlying type, like the enum promotions.
2206 if (value->getType() == varType) return value;
2208 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2209 && "unexpected promotion type");
2211 if (isa<llvm::IntegerType>(varType))
2212 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2214 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2217 /// Returns the attribute (either parameter attribute, or function
2218 /// attribute), which declares argument ArgNo to be non-null.
2219 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2220 QualType ArgType, unsigned ArgNo) {
2221 // FIXME: __attribute__((nonnull)) can also be applied to:
2222 // - references to pointers, where the pointee is known to be
2223 // nonnull (apparently a Clang extension)
  //  - transparent unions containing pointers
  // In the former case, LLVM IR cannot represent the constraint. In
  // the latter case, we have no guarantee that the transparent union
  // is in fact passed as a pointer.
  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
    return nullptr;
  // First, check attribute on parameter itself.
  if (PVD) {
    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
      return ParmNNAttr;
  }
  // Check function attributes.
  if (!FD)
    return nullptr;
  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
    if (NNAttr->isNonNull(ArgNo))
      return NNAttr;
  }
  return nullptr;
}
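
/// A cleanup that copies a 'swifterror' value held in a function-local
/// temporary back into the caller-provided swifterror slot when the scope
/// exits.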
struct CopyBackSwiftError final : EHScopeStack::Cleanup {
  Address Temp;
  Address Arg;
  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
    CGF.Builder.CreateStore(errorValue, Arg);
  }
};
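
/// Emit the standard function prologue: name the IR arguments and reconstruct
/// each Clang parameter from its ABI-lowered form (direct, indirect,
/// inalloca, expanded, or coerced), then emit the parameter declarations.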
2257 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2259 const FunctionArgList &Args) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
    // Naked functions don't have prologues.
    return;
2264 // If this is an implicit-return-zero function, go ahead and
2265 // initialize the return value. TODO: it might be nice to have
2266 // a more general mechanism for this that didn't require synthesized
2267 // return statements.
2268 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2269 if (FD->hasImplicitReturnZero()) {
2270 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2271 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2272 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2273 Builder.CreateStore(Zero, ReturnValue);
2277 // FIXME: We no longer need the types from FunctionArgList; lift up and
2280 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2281 // Flattened function arguments.
2282 SmallVector<llvm::Value *, 16> FnArgs;
2283 FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2284 for (auto &Arg : Fn->args()) {
2285 FnArgs.push_back(&Arg);
2287 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2289 // If we're using inalloca, all the memory arguments are GEPs off of the last
2290 // parameter, which is a pointer to the complete memory area.
2291 Address ArgStruct = Address::invalid();
2292 if (IRFunctionArgs.hasInallocaArg()) {
2293 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2294 FI.getArgStructAlignment());
2296 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2299 // Name the struct return parameter.
2300 if (IRFunctionArgs.hasSRetArg()) {
2301 auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2302 AI->setName("agg.result");
2303 AI->addAttr(llvm::Attribute::NoAlias);
2306 // Track if we received the parameter as a pointer (indirect, byval, or
2307 // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it
2308 // into a local alloca for us.
2309 SmallVector<ParamValue, 16> ArgVals;
2310 ArgVals.reserve(Args.size());
2312 // Create a pointer value for every parameter declaration. This usually
2313 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2314 // any cleanups or do anything that might unwind. We do that separately, so
2315 // we can push the cleanups in the correct order for the ABI.
2316 assert(FI.arg_size() == Args.size() &&
2317 "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2327 // We are converting from ABIArgInfo type to VarDecl type directly, unless
2328 // the parameter is promoted. In this case we convert to
2329 // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2330 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2331 assert(hasScalarEvaluationKind(Ty) ==
2332 hasScalarEvaluationKind(Arg->getType()));
2334 unsigned FirstIRArg, NumIRArgs;
2335 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2337 switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      auto FieldIndex = ArgI.getInAllocaFieldIndex();
      Address V =
          Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
      ArgVals.push_back(ParamValue::forIndirect(V));
      break;
    }
2347 case ABIArgInfo::Indirect: {
2348 assert(NumIRArgs == 1);
2349 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2351 if (!hasScalarEvaluationKind(Ty)) {
2352 // Aggregates and complex variables are accessed by reference. All we
2353 // need to do is realign the value, if requested.
2354 Address V = ParamAddr;
2355 if (ArgI.getIndirectRealign()) {
2356 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2358 // Copy from the incoming argument pointer to the temporary with the
2359 // appropriate alignment.
        // FIXME: We should have a common utility for generating an aggregate
        // copy.
        CharUnits Size = getContext().getTypeSizeInChars(Ty);
        auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
        Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
        Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
        Builder.CreateMemCpy(Dst, Src, SizeVal, false);
        V = AlignedTemp;
        }
        ArgVals.push_back(ParamValue::forIndirect(V));
      } else {
        // Load scalar value from indirect argument.
        llvm::Value *V =
            EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      }
      break;
    }
2383 case ABIArgInfo::Extend:
2384 case ABIArgInfo::Direct: {
2386 // If we have the trivial case, handle it with no muss and fuss.
2387 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2388 ArgI.getCoerceToType() == ConvertType(Ty) &&
2389 ArgI.getDirectOffset() == 0) {
2390 assert(NumIRArgs == 1);
2391 llvm::Value *V = FnArgs[FirstIRArg];
2392 auto AI = cast<llvm::Argument>(V);
2394 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2395 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2396 PVD->getFunctionScopeIndex()) &&
2397 !CGM.getCodeGenOpts().NullPointerIsValid)
2398 AI->addAttr(llvm::Attribute::NonNull);
2400 QualType OTy = PVD->getOriginalType();
2401 if (const auto *ArrTy =
2402 getContext().getAsConstantArrayType(OTy)) {
2403 // A C99 array parameter declaration with the static keyword also
2404 // indicates dereferenceability, and if the size is constant we can
2405 // use the dereferenceable attribute (which requires the size in
2407 if (ArrTy->getSizeModifier() == ArrayType::Static) {
2408 QualType ETy = ArrTy->getElementType();
2409 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
              if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                  ArrSize) {
                llvm::AttrBuilder Attrs;
                Attrs.addDereferenceableAttr(
                    getContext().getTypeSizeInChars(ETy).getQuantity() *
                    ArrSize);
2415 AI->addAttrs(Attrs);
2416 } else if (getContext().getTargetAddressSpace(ETy) == 0 &&
2417 !CGM.getCodeGenOpts().NullPointerIsValid) {
2418 AI->addAttr(llvm::Attribute::NonNull);
2421 } else if (const auto *ArrTy =
2422 getContext().getAsVariableArrayType(OTy)) {
2423 // For C99 VLAs with the static keyword, we don't know the size so
2424 // we can't use the dereferenceable attribute, but in addrspace(0)
2425 // we know that it must be nonnull.
2426 if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2427 !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
2428 !CGM.getCodeGenOpts().NullPointerIsValid)
2429 AI->addAttr(llvm::Attribute::NonNull);
        const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
        if (!AVAttr)
          if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
            AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2436 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2437 // If alignment-assumption sanitizer is enabled, we do *not* add
2438 // alignment attribute here, but emit normal alignment assumption,
2439 // so the UBSAN check could function.
2440 llvm::Value *AlignmentValue =
2441 EmitScalarExpr(AVAttr->getAlignment());
2442 llvm::ConstantInt *AlignmentCI =
2443 cast<llvm::ConstantInt>(AlignmentValue);
2444 unsigned Alignment = std::min((unsigned)AlignmentCI->getZExtValue(),
2445 +llvm::Value::MaximumAlignment);
2446 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2450 if (Arg->getType().isRestrictQualified())
2451 AI->addAttr(llvm::Attribute::NoAlias);
2453 // LLVM expects swifterror parameters to be used in very restricted
2454 // ways. Copy the value into a less-restricted temporary.
2455 if (FI.getExtParameterInfo(ArgNo).getABI()
2456 == ParameterABI::SwiftErrorResult) {
2457 QualType pointeeTy = Ty->getPointeeType();
        assert(pointeeTy->isPointerType());
        Address temp =
            CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2461 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2462 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2463 Builder.CreateStore(incomingErrorValue, temp);
2464 V = temp.getPointer();
2466 // Push a cleanup to copy the value back at the end of the function.
2467 // The convention does not guarantee that the value will be written
2468 // back if the function exits with an unwind exception.
2469 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2472 // Ensure the argument is the correct type.
2473 if (V->getType() != ArgI.getCoerceToType())
        V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

      if (isPromoted)
        V = emitArgumentDemotion(*this, Arg, V);
2479 // Because of merging of function types from multiple decls it is
2480 // possible for the type of an argument to not match the corresponding
2481 // type in the function type. Since we are codegening the callee
2482 // in here, add a cast to the argument type.
2483 llvm::Type *LTy = ConvertType(Arg->getType());
      if (V->getType() != LTy)
        V = Builder.CreateBitCast(V, LTy);

      ArgVals.push_back(ParamValue::forDirect(V));
      break;
      }
      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
                                     Arg->getName());
2494 // Pointer to store into.
2495 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2497 // Fast-isel and the optimizer generally like scalar values better than
2498 // FCAs, so we flatten them if this is safe to do for this argument.
2499 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2500 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2501 STy->getNumElements() > 1) {
2502 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2503 llvm::Type *DstTy = Ptr.getElementType();
2504 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
        Address AddrToStoreInto = Address::invalid();
        if (SrcSize <= DstSize) {
          AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
        } else {
          AddrToStoreInto =
              CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
        }
2514 assert(STy->getNumElements() == NumIRArgs);
2515 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2516 auto AI = FnArgs[FirstIRArg + i];
2517 AI->setName(Arg->getName() + ".coerce" + Twine(i));
2518 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2519 Builder.CreateStore(AI, EltPtr);
        if (SrcSize > DstSize) {
          Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
2528 assert(NumIRArgs == 1);
2529 auto AI = FnArgs[FirstIRArg];
2530 AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
      }
      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        llvm::Value *V =
            EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      } else {
        ArgVals.push_back(ParamValue::forIndirect(Alloca));
      }
      break;
    }
2547 case ABIArgInfo::CoerceAndExpand: {
2548 // Reconstruct into a temporary.
2549 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2550 ArgVals.push_back(ParamValue::forIndirect(alloca));
2552 auto coercionType = ArgI.getCoerceAndExpandType();
2553 alloca = Builder.CreateElementBitCast(alloca, coercionType);
2555 unsigned argIndex = FirstIRArg;
2556 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2557 llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
          continue;

        auto eltAddr = Builder.CreateStructGEP(alloca, i);
2562 auto elt = FnArgs[argIndex++];
2563 Builder.CreateStore(elt, eltAddr);
2565 assert(argIndex == FirstIRArg + NumIRArgs);
2569 case ABIArgInfo::Expand: {
2570 // If this structure was expanded into multiple arguments then
2571 // we need to create a temporary and reconstruct it from the
2573 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2574 LValue LV = MakeAddrLValue(Alloca, Ty);
2575 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2577 auto FnArgIter = FnArgs.begin() + FirstIRArg;
2578 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2579 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2580 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2581 auto AI = FnArgs[FirstIRArg + i];
2582 AI->setName(Arg->getName() + "." + Twine(i));
2587 case ABIArgInfo::Ignore:
2588 assert(NumIRArgs == 0);
2589 // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ParamValue::forDirect(U));
      }
      break;
    }
  }
  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  }
}
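
/// Erase a chain of otherwise-unused bitcasts, walking from the given
/// instruction up through its operands.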
2609 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2610 while (insn->use_empty()) {
2611 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2612 if (!bitcast) return;
2614 // This is "safe" because we would have used a ConstantExpr otherwise.
2615 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2616 bitcast->eraseFromParent();
2620 /// Try to emit a fused autorelease of a return result.
2621 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2622 llvm::Value *result) {
2623 // We must be immediately followed the cast.
2624 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2625 if (BB->empty()) return nullptr;
2626 if (&BB->back() != result) return nullptr;
2628 llvm::Type *resultType = result->getType();
2630 // result is in a BasicBlock and is therefore an Instruction.
2631 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2633 SmallVector<llvm::Instruction *, 4> InstsToKill;
2636 // %generator = bitcast %type1* %generator2 to %type2*
2637 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2638 // We would have emitted this as a constant if the operand weren't
2640 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return nullptr;

    InstsToKill.push_back(bitcast);
  }
2650 // %generator = call i8* @objc_retain(i8* %originalResult)
2652 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2653 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2654 if (!call) return nullptr;
2656 bool doRetainAutorelease;
2658 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2659 doRetainAutorelease = true;
2660 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2661 .objc_retainAutoreleasedReturnValue) {
2662 doRetainAutorelease = false;
2664 // If we emitted an assembly marker for this call (and the
2665 // ARCEntrypoints field should have been set if so), go looking
2666 // for that call. If we can't find it, we can't do this
2667 // optimization. But it should always be the immediately previous
2668 // instruction, unless we needed bitcasts around the call.
2669 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2670 llvm::Instruction *prev = call->getPrevNode();
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
             CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
      InstsToKill.push_back(prev);
    }
  } else {
    return nullptr;
  }

  result = call->getArgOperand(0);
2686 InstsToKill.push_back(call);
2688 // Keep killing bitcasts, for sanity. Note that we no longer care
2689 // about precise ordering as long as there's exactly one use.
2690 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2691 if (!bitcast->hasOneUse()) break;
2692 InstsToKill.push_back(bitcast);
2693 result = bitcast->getOperand(0);
2696 // Delete all the unnecessary instructions, from latest to earliest.
2697 for (auto *I : InstsToKill)
2698 I->eraseFromParent();
2700 // Do the fused retain/autorelease if we were asked to.
2701 if (doRetainAutorelease)
2702 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2704 // Cast back to the result type.
2705 return CGF.Builder.CreateBitCast(result, resultType);
2708 /// If this is a +1 of the value of an immutable 'self', remove it.
2709 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2710 llvm::Value *result) {
2711 // This is only applicable to a method with an immutable 'self'.
2712 const ObjCMethodDecl *method =
2713 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2714 if (!method) return nullptr;
2715 const VarDecl *self = method->getSelfDecl();
2716 if (!self->getType().isConstQualified()) return nullptr;
2718 // Look for a retain call.
2719 llvm::CallInst *retainCall =
      dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
    return nullptr;

  // Look for an ordinary load of 'self'.
2726 llvm::Value *retainedValue = retainCall->getArgOperand(0);
2727 llvm::LoadInst *load =
2728 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
    return nullptr;

  // Okay! Burn it all down. This relies for correctness on the
2734 // assumption that the retain is emitted as part of the return and
2735 // that thereafter everything is used "linearly".
2736 llvm::Type *resultType = result->getType();
2737 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2738 assert(retainCall->use_empty());
2739 retainCall->eraseFromParent();
2740 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2742 return CGF.Builder.CreateBitCast(load, resultType);
2745 /// Emit an ARC autorelease of the result of a function.
2747 /// \return the value to actually return from the function
2748 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2749 llvm::Value *result) {
2750 // If we're returning 'self', kill the initial retain. This is a
2751 // heuristic attempt to "encourage correctness" in the really unfortunate
2752 // case where we have a return of self during a dealloc and we desperately
2753 // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}
2765 /// Heuristically search for a dominating store to the return-value slot.
2766 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2767 // Check if a User is a store which pointerOperand is the ReturnValue.
2768 // We are looking for stores to the ReturnValue, not for stores of the
2769 // ReturnValue to some other location.
2770 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2771 auto *SI = dyn_cast<llvm::StoreInst>(U);
    if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
      return nullptr;
    // These aren't actually possible for non-coerced returns, and we
    // only care about non-coerced returns on this code path.
    assert(!SI->isAtomic() && !SI->isVolatile());
    return SI;
  };
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP. Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;
    llvm::Instruction *I = &IP->back();

    // Skip lifetime markers
    for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
                                            IE = IP->rend();
         II != IE; ++II) {
      if (llvm::IntrinsicInst *Intrinsic =
              dyn_cast<llvm::IntrinsicInst>(&*II)) {
        if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
          const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
          ++II;
          if (II == IE)
            break;
          if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
            continue;
        }
      }
      I = &*II;
      break;
    }
    return GetStoreIfValid(I);
  }
2810 llvm::StoreInst *store =
2811 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2812 if (!store) return nullptr;
2814 // Now do a first-and-dirty dominance check: just walk up the
2815 // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return nullptr;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}
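
/// Emit the function epilogue: reload or reconstruct the return value
/// according to its ABI classification (including ARC autorelease handling)
/// and emit the return instruction.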
2828 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2830 SourceLocation EndLoc) {
  if (FI.isNoReturn()) {
    // Noreturn functions don't return.
    EmitUnreachable(EndLoc);
    return;
  }

  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
    // Naked functions don't have epilogues.
    Builder.CreateUnreachable();
    return;
  }

  // Functions with no result always return void.
  if (!ReturnValue.isValid()) {
    Builder.CreateRetVoid();
    return;
  }
2849 llvm::DebugLoc RetDbgLoc;
2850 llvm::Value *RV = nullptr;
2851 QualType RetTy = FI.getReturnType();
2852 const ABIArgInfo &RetAI = FI.getReturnInfo();
2854 switch (RetAI.getKind()) {
2855 case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination. Sometimes we
2857 // need to return the sret value in a register, though.
2858 assert(hasAggregateEvaluationKind(RetTy));
2859 if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = &*EI;
      llvm::Value *SRet = Builder.CreateStructGEP(
          nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
      RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
    }
    break;
  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
          EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar:
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
                        MakeNaturalAlignAddrLValue(&*AI, RetTy),
                        /*isInit*/ true);
      break;
    }
    break;
  }
2893 case ABIArgInfo::Extend:
2894 case ABIArgInfo::Direct:
2895 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2896 RetAI.getDirectOffset() == 0) {
2897 // The internal return value temp always will have pointer-to-return-type
2898 // type, just do a load.
2900 // If there is a dominating store to ReturnValue, we can elide
2901 // the load, zap the store, and usually zap the alloca.
2902 if (llvm::StoreInst *SI =
2903 findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless there is
        // cleanup code to be emitted between the store and return
        // statement.
        if (EmitRetDbgLoc && !AutoreleaseResult)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      // If the value is offset in memory, apply the offset now.
      Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }
2924 // In ARC, end functions that return a retainable type with a call
2925 // to objc_autoreleaseReturnValue.
2926 if (AutoreleaseResult) {
2928 // Type::isObjCRetainabletype has to be called on a QualType that hasn't
2929 // been stripped of the typedefs, so we cannot use RetTy here. Get the
2930 // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2931 // CurCodeDecl or BlockInfo.
      QualType RT;

      if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
        RT = FD->getReturnType();
      else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
        RT = MD->getReturnType();
      else if (isa<BlockDecl>(CurCodeDecl))
        RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
      else
        llvm_unreachable("Unexpected function/method type");
2943 assert(getLangOpts().ObjCAutoRefCount &&
2944 !FI.isReturnsRetained() &&
2945 RT->isObjCRetainableType());
2947 RV = emitAutoreleaseOfResult(*this, RV);
2952 case ABIArgInfo::Ignore:
2955 case ABIArgInfo::CoerceAndExpand: {
2956 auto coercionType = RetAI.getCoerceAndExpandType();
2958 // Load all of the coerced elements out into results.
2959 llvm::SmallVector<llvm::Value*, 4> results;
2960 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2961 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2962 auto coercedEltType = coercionType->getElementType(i);
      if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
        continue;

      auto eltAddr = Builder.CreateStructGEP(addr, i);
2967 auto elt = Builder.CreateLoad(eltAddr);
2968 results.push_back(elt);
    // If we have one result, it's the single direct result type.
    if (results.size() == 1) {
      RV = results[0];

    // Otherwise, we need to make a first-class aggregate.
    } else {
      // Construct a return type that lacks padding elements.
2978 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
      RV = llvm::UndefValue::get(returnType);
      for (unsigned i = 0, e = results.size(); i != e; ++i) {
        RV = Builder.CreateInsertValue(RV, results[i], i);
      }
    }
    break;
  }
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret;
  if (RV) {
    EmitReturnValueCheck(RV);
    Ret = Builder.CreateRet(RV);
  } else {
    Ret = Builder.CreateRetVoid();
  }
  if (RetDbgLoc)
    Ret->setDebugLoc(std::move(RetDbgLoc));
}
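
/// Emit a sanitizer check that the returned value is non-null when the
/// function is annotated with returns_nonnull or has a _Nonnull return type.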
3004 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
  // A current decl may not be available when emitting vtable thunks.
  if (!CurCodeDecl)
    return;

  ReturnsNonNullAttr *RetNNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
    RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();

  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
    return;
3016 // Prefer the returns_nonnull attribute if it's present.
3017 SourceLocation AttrLoc;
3018 SanitizerMask CheckKind;
3019 SanitizerHandler Handler;
  if (RetNNAttr) {
    assert(!requiresReturnValueNullabilityCheck() &&
           "Cannot check nullability and the nonnull attribute");
    AttrLoc = RetNNAttr->getLocation();
    CheckKind = SanitizerKind::ReturnsNonnullAttribute;
    Handler = SanitizerHandler::NonnullReturn;
  } else {
    if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3028 if (auto *TSI = DD->getTypeSourceInfo())
3029 if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
3030 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityReturn;
    Handler = SanitizerHandler::NullabilityReturn;
  }

  SanitizerScope SanScope(this);
3037 // Make sure the "return" source location is valid. If we're checking a
3038 // nullability annotation, make sure the preconditions for the check are met.
3039 llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3040 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3041 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3042 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
  if (requiresReturnValueNullabilityCheck())
    CanNullCheck =
        Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
  EmitBlock(Check);

  // Now do the null check.
  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
  llvm::Value *DynamicData[] = {SLocPtr};
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
  EmitBlock(NoCheck);

  // The return location should not be used after the check has been emitted.
  ReturnLocation = Address::invalid();
}
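
/// Return true if an argument of the given record type must be passed in the
/// inalloca argument area on this C++ ABI.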
3063 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3064 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3065 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
                                          QualType Ty) {
  // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
3072 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3073 llvm::Type *IRPtrTy = IRTy->getPointerTo();
3074 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3076 // FIXME: When we generate this IR in one pass, we shouldn't need
3077 // this win32-specific alignment hack.
3078 CharUnits Align = CharUnits::fromQuantity(4);
3079 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
  return AggValueSlot::forAddr(Address(Placeholder, Align),
                               Ty.getQualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased,
                               AggValueSlot::DoesNotOverlap);
}
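
/// Forward a parameter of the current function as an argument to a delegated
/// call, turning the local alloca created by StartFunction back into an
/// r-value of the appropriate kind.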
3089 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3090 const VarDecl *param,
3091 SourceLocation loc) {
3092 // StartFunction converted the ABI-lowered parameter(s) into a
3093 // local alloca. We need to turn that into an r-value suitable
3095 Address local = GetAddrOfLocalVar(param);
3097 QualType type = param->getType();
3099 if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3100 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3103 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3104 // but the argument needs to be the original pointer.
3105 if (type->isReferenceType()) {
3106 args.add(RValue::get(Builder.CreateLoad(local)), type);
3108 // In ARC, move out of consumed arguments so that the release cleanup
3109 // entered by StartFunction doesn't cause an over-release. This isn't
3110 // optimal -O0 code generation, but it should get cleaned up when
3111 // optimization is enabled. This also assumes that delegate calls are
3112 // performed exactly once for a set of arguments, but that should be safe.
3113 } else if (getLangOpts().ObjCAutoRefCount &&
3114 param->hasAttr<NSConsumedAttr>() &&
3115 type->isObjCRetainableType()) {
    llvm::Value *ptr = Builder.CreateLoad(local);
    auto null =
        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3119 Builder.CreateStore(null, local);
3120 args.add(RValue::get(ptr), type);
3122 // For the most part, we just need to load the alloca, except that
3123 // aggregate r-values are actually pointers to temporaries.
3125 args.add(convertTempToRValue(local, type, loc), type);
3128 // Deactivate the cleanup for the callee-destructed param that was pushed.
3129 if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3130 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3131 param->needsDestruction(getContext())) {
3132 EHScopeStack::stable_iterator cleanup =
3133 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3134 assert(cleanup.isValid() &&
3135 "cleanup for callee-destructed param not recorded");
3136 // This unreachable is a temporary marker which will be removed later.
3137 llvm::Instruction *isActive = Builder.CreateUnreachable();
3138 args.addArgCleanupDeactivation(cleanup, isActive);
3142 static bool isProvablyNull(llvm::Value *addr) {
3143 return isa<llvm::ConstantPointerNull>(addr);
3146 /// Emit the actual writing-back of a writeback.
3147 static void emitWriteback(CodeGenFunction &CGF,
3148 const CallArgList::Writeback &writeback) {
3149 const LValue &srcLV = writeback.Source;
3150 Address srcAddr = srcLV.getAddress(CGF);
3151 assert(!isProvablyNull(srcAddr.getPointer()) &&
3152 "shouldn't have writeback for provably null argument");
3154 llvm::BasicBlock *contBB = nullptr;
3156 // If the argument wasn't provably non-null, we need to null check
3157 // before doing the store.
3158 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3159 CGF.CGM.getDataLayout());
3160 if (!provablyNonNull) {
3161 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3162 contBB = CGF.createBasicBlock("icr.done");
3164 llvm::Value *isNull =
3165 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3166 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3167 CGF.EmitBlock(writebackBB);
3170 // Load the value to writeback.
3171 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3173 // Cast it back, in case we're writing an id to a Foo* or something.
3174 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3175 "icr.writeback-cast");
3177 // Perform the writeback.
3179 // If we have a "to use" value, it's something we need to emit a use
3180 // of. This has to be carefully threaded in: if it's done after the
3181 // release it's potentially undefined behavior (and the optimizer
3182 // will ignore it), and if it happens before the retain then the
3183 // optimizer could move the release there.
3184 if (writeback.ToUse) {
3185 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3187 // Retain the new value. No need to block-copy here: the block's
3188 // being passed up the stack.
3189 value = CGF.EmitARCRetainNonBlock(value);
3191 // Emit the intrinsic use here.
3192 CGF.EmitARCIntrinsicUse(writeback.ToUse);
3194 // Load the old value (primitively).
3195 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3197 // Put the new value in place (primitively).
3198 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3200 // Release the old value.
3201 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3203 // Otherwise, we can just do a normal lvalue store.
3205 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3208 // Jump to the continuation block.
3209 if (!provablyNonNull)
3210 CGF.EmitBlock(contBB);
3213 static void emitWritebacks(CodeGenFunction &CGF,
3214 const CallArgList &args) {
3215 for (const auto &I : args.writebacks())
3216 emitWriteback(CGF, I);
3219 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3220 const CallArgList &CallArgs) {
3221 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3222 CallArgs.getCleanupsToDeactivate();
3223 // Iterate in reverse to increase the likelihood of popping the cleanup.
3224 for (const auto &I : llvm::reverse(Cleanups)) {
3225 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3226 I.IsActiveIP->eraseFromParent();
3230 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3231 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3232 if (uop->getOpcode() == UO_AddrOf)
3233       return uop->getSubExpr();
3234   return nullptr;
3235 }
3237 /// Emit an argument that's being passed call-by-writeback. That is,
3238 /// we are passing the address of an __autoreleased temporary; it
3239 /// might be copy-initialized with the current value of the given
3240 /// address, but it will definitely be copied out of after the call.
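/// For example (illustrative), under ARC a call 'fetch(&err)', where 'err' is
/// a __strong NSError* and the parameter is 'NSError * __autoreleasing *', is
/// modeled with an ObjCIndirectCopyRestoreExpr and lowered here.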
3241 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3242                              const ObjCIndirectCopyRestoreExpr *CRE) {
3243   LValue srcLV;
3245 // Make an optimistic effort to emit the address as an l-value.
3246 // This can fail if the argument expression is more complicated.
3247 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3248     srcLV = CGF.EmitLValue(lvExpr);
3250   // Otherwise, just emit it as a scalar.
3251   } else {
3252     Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3254     QualType srcAddrType =
3255       CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3256     srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3257   }
3258   Address srcAddr = srcLV.getAddress(CGF);
3260 // The dest and src types don't necessarily match in LLVM terms
3261 // because of the crazy ObjC compatibility rules.
3263 llvm::PointerType *destType =
3264 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3266 // If the address is a constant null, just pass the appropriate null.
3267 if (isProvablyNull(srcAddr.getPointer())) {
3268     args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3269              CRE->getType());
3270     return;
3271   }
3273 // Create the temporary.
3274 Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3275                                       CGF.getPointerAlign(),
3276                                       "icr.temp");
3277 // Loading an l-value can introduce a cleanup if the l-value is __weak,
3278 // and that cleanup will be conditional if we can't prove that the l-value
3279 // isn't null, so we need to register a dominating point so that the cleanups
3280 // system will make valid IR.
3281 CodeGenFunction::ConditionalEvaluation condEval(CGF);
3283 // Zero-initialize it if we're not doing a copy-initialization.
3284   bool shouldCopy = CRE->shouldCopy();
3285   if (!shouldCopy) {
3286     llvm::Value *null =
3287       llvm::ConstantPointerNull::get(
3288         cast<llvm::PointerType>(destType->getElementType()));
3289     CGF.Builder.CreateStore(null, temp);
3290   }
3292 llvm::BasicBlock *contBB = nullptr;
3293 llvm::BasicBlock *originBB = nullptr;
3295 // If the address is *not* known to be non-null, we need to switch.
3296 llvm::Value *finalArgument;
3298 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3299 CGF.CGM.getDataLayout());
3300 if (provablyNonNull) {
3301     finalArgument = temp.getPointer();
3302   } else {
3303     llvm::Value *isNull =
3304 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3306 finalArgument = CGF.Builder.CreateSelect(isNull,
3307 llvm::ConstantPointerNull::get(destType),
3308 temp.getPointer(), "icr.argument");
3310 // If we need to copy, then the load has to be conditional, which
3311     // means we need control flow.
3312     if (shouldCopy) {
3313       originBB = CGF.Builder.GetInsertBlock();
3314 contBB = CGF.createBasicBlock("icr.cont");
3315 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3316 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3317 CGF.EmitBlock(copyBB);
3318       condEval.begin(CGF);
3319     }
3320   }
3322 llvm::Value *valueToUse = nullptr;
3324 // Perform a copy if necessary.
3325   if (shouldCopy) {
3326     RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3327 assert(srcRV.isScalar());
3329 llvm::Value *src = srcRV.getScalarVal();
3330     src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3331                                     "icr.cast");
3333 // Use an ordinary store, not a store-to-lvalue.
3334 CGF.Builder.CreateStore(src, temp);
3336 // If optimization is enabled, and the value was held in a
3337 // __strong variable, we need to tell the optimizer that this
3338 // value has to stay alive until we're doing the store back.
3339 // This is because the temporary is effectively unretained,
3340 // and so otherwise we can violate the high-level semantics.
3341 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3342         srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3343       valueToUse = src;
3344     }
3345   }
3347 // Finish the control flow if we needed it.
3348 if (shouldCopy && !provablyNonNull) {
3349 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3350 CGF.EmitBlock(contBB);
3352     // Make a phi for the value to intrinsically use.
3353     if (valueToUse) {
3354       llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3355                                                       "icr.to-use");
3356       phiToUse->addIncoming(valueToUse, copyBB);
3357       phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3358                             originBB);
3359       valueToUse = phiToUse;
3360     }
3362     condEval.end(CGF);
3363   }
3365 args.addWriteback(srcLV, temp, valueToUse);
3366 args.add(RValue::get(finalArgument), CRE->getType());
3369 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3373 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3374 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3377 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3378   if (StackBase) {
3379     // Restore the stack after the call.
3380     llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3381     CGF.Builder.CreateCall(F, StackBase);
3382   }
3383 }
3385 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3386                                           SourceLocation ArgLoc,
3387                                           AbstractCallee AC, unsigned ParmNum) {
3389 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3390                          SanOpts.has(SanitizerKind::NullabilityArg)))
3391     return;
3393 // The param decl may be missing in a variadic function.
3394 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3395 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3397 // Prefer the nonnull attribute if it's present.
3398 const NonNullAttr *NNAttr = nullptr;
3399 if (SanOpts.has(SanitizerKind::NonnullAttribute))
3400 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3402 bool CanCheckNullability = false;
3403 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3404 auto Nullability = PVD->getType()->getNullability(getContext());
3405 CanCheckNullability = Nullability &&
3406 *Nullability == NullabilityKind::NonNull &&
3407 PVD->getTypeSourceInfo();
3408   }
3410   if (!NNAttr && !CanCheckNullability)
3411     return;
3413 SourceLocation AttrLoc;
3414 SanitizerMask CheckKind;
3415   SanitizerHandler Handler;
3416   if (NNAttr) {
3417     AttrLoc = NNAttr->getLocation();
3418     CheckKind = SanitizerKind::NonnullAttribute;
3419     Handler = SanitizerHandler::NonnullArg;
3420   } else {
3421     AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3422     CheckKind = SanitizerKind::NullabilityArg;
3423     Handler = SanitizerHandler::NullabilityArg;
3424   }
3426 SanitizerScope SanScope(this);
3427 assert(RV.isScalar());
3428 llvm::Value *V = RV.getScalarVal();
3429   llvm::Value *Cond =
3430       Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3431 llvm::Constant *StaticData[] = {
3432 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3433 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3434   };
3435   EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3436 }
3438 void CodeGenFunction::EmitCallArgs(
3439 CallArgList &Args, ArrayRef<QualType> ArgTypes,
3440 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3441 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3442 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3444 // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3445 // because arguments are destroyed left to right in the callee. As a special
3446 // case, there are certain language constructs that require left-to-right
3447 // evaluation, and in those cases we consider the evaluation order requirement
3448 // to trump the "destruction order is reverse construction order" guarantee.
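// Concretely: in the Microsoft C++ ABI we default to right-to-left emission
// here, so the first-constructed argument is also the last-destroyed one;
// only an explicit EvaluationOrder::ForceLeftToRight request overrides that.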
3449   bool LeftToRight =
3450       CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3451 ? Order == EvaluationOrder::ForceLeftToRight
3452 : Order != EvaluationOrder::ForceRightToLeft;
3454 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3455 RValue EmittedArg) {
3456     if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3457       return;
3458     auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3459     if (PS == nullptr)
3460       return;
3462 const auto &Context = getContext();
3463 auto SizeTy = Context.getSizeType();
3464 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3465 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3466 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3467                                                    EmittedArg.getScalarVal(),
3468                                                    PS->isDynamic());
3469 Args.add(RValue::get(V), SizeTy);
3470 // If we're emitting args in reverse, be sure to do so with
3471 // pass_object_size, as well.
3472     if (!LeftToRight)
3473       std::swap(Args.back(), *(&Args.back() - 1));
3474   };
3476 // Insert a stack save if we're going to need any inalloca args.
3477 bool HasInAllocaArgs = false;
3478 if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3479 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3480 I != E && !HasInAllocaArgs; ++I)
3481 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3482 if (HasInAllocaArgs) {
3483 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3484 Args.allocateArgumentMemory(*this);
3488 // Evaluate each argument in the appropriate order.
3489 size_t CallArgsStart = Args.size();
3490 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3491 unsigned Idx = LeftToRight ? I : E - I - 1;
3492 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3493 unsigned InitialArgSize = Args.size();
3494 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3495 // the argument and parameter match or the objc method is parameterized.
3496 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3497             getContext().hasSameUnqualifiedType((*Arg)->getType(),
3498                                                 ArgTypes[Idx]) ||
3499 (isa<ObjCMethodDecl>(AC.getDecl()) &&
3500 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3501 "Argument and parameter types don't match");
3502 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3503 // In particular, we depend on it being the last arg in Args, and the
3504 // objectsize bits depend on there only being one arg if !LeftToRight.
3505 assert(InitialArgSize + 1 == Args.size() &&
3506 "The code below depends on only adding one arg per EmitCallArg");
3507 (void)InitialArgSize;
3508     // Since pointer arguments are never emitted as LValues, it is safe to emit
3509     // the non-null argument check for r-values only.
3510 if (!Args.back().hasLValue()) {
3511 RValue RVArg = Args.back().getKnownRValue();
3512 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3513 ParamsToSkip + Idx);
3514 // @llvm.objectsize should never have side-effects and shouldn't need
3515 // destruction/cleanups, so we can safely "emit" it after its arg,
3516 // regardless of right-to-leftness
3517 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3518     }
3519   }
3521   if (!LeftToRight) {
3522     // Un-reverse the arguments we just evaluated so they match up with the LLVM
3523     // IR function.
3524     std::reverse(Args.begin() + CallArgsStart, Args.end());
3525   }
3526 }
3528 namespace {
3530 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3531 DestroyUnpassedArg(Address Addr, QualType Ty)
3532       : Addr(Addr), Ty(Ty) {}
3534   Address Addr;
3535   QualType Ty;
3537 void Emit(CodeGenFunction &CGF, Flags flags) override {
3538 QualType::DestructionKind DtorKind = Ty.isDestructedType();
3539 if (DtorKind == QualType::DK_cxx_destructor) {
3540 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3541 assert(!Dtor->isTrivial());
3542 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3543 /*Delegating=*/false, Addr, Ty);
3544     } else {
3545       CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3546     }
3547   }
3548 };
3550 struct DisableDebugLocationUpdates {
3551 CodeGenFunction &CGF;
3552 bool disabledDebugInfo;
3553 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3554 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3555 CGF.disableDebugInfo();
3557 ~DisableDebugLocationUpdates() {
3558 if (disabledDebugInfo)
3559 CGF.enableDebugInfo();
3563 } // end anonymous namespace
3565 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3566   if (!HasLV)
3567     return RV;
3568   LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3569   CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3570                         LV.isVolatile());
3571   IsUsed = true;
3572   return RValue::getAggregate(Copy.getAddress(CGF));
3573 }
3575 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3576 LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3577 if (!HasLV && RV.isScalar())
3578 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
3579 else if (!HasLV && RV.isComplex())
3580 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3581   else {
3582     auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
3583 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3584 // We assume that call args are never copied into subobjects.
3585 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3586 HasLV ? LV.isVolatileQualified()
3587                               : RV.isVolatileQualified());
3588   }
3589   IsUsed = true;
3590 }
3592 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3593                                   QualType type) {
3594   DisableDebugLocationUpdates Dis(*this, E);
3595 if (const ObjCIndirectCopyRestoreExpr *CRE
3596 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3597 assert(getLangOpts().ObjCAutoRefCount);
3598 return emitWritebackArg(*this, args, CRE);
3601 assert(type->isReferenceType() == E->isGLValue() &&
3602 "reference binding to unmaterialized r-value!");
3604 if (E->isGLValue()) {
3605 assert(E->getObjectKind() == OK_Ordinary);
3606 return args.add(EmitReferenceBindingToExpr(E), type);
3609 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3611 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3612 // However, we still have to push an EH-only cleanup in case we unwind before
3613 // we make it to the call.
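// That cleanup is deactivated again in deactivateArgCleanupsBeforeCall once
// all arguments have been evaluated and the call is about to be emitted,
// since at that point the callee takes over destruction.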
3614 if (HasAggregateEvalKind &&
3615 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3616     // If we're using inalloca, use the argument memory. Otherwise, use a
3617     // temporary.
3618     AggValueSlot Slot;
3619     if (args.isUsingInAlloca())
3620       Slot = createPlaceholderSlot(*this, type);
3621     else
3622       Slot = CreateAggTemp(type, "agg.tmp");
3624 bool DestroyedInCallee = true, NeedsEHCleanup = true;
3625 if (const auto *RD = type->getAsCXXRecordDecl())
3626 DestroyedInCallee = RD->hasNonTrivialDestructor();
3627     else
3628       NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3630 if (DestroyedInCallee)
3631 Slot.setExternallyDestructed();
3633 EmitAggExpr(E, Slot);
3634     RValue RV = Slot.asRValue();
3635     args.add(RV, type);
3637 if (DestroyedInCallee && NeedsEHCleanup) {
3638 // Create a no-op GEP between the placeholder and the cleanup so we can
3639 // RAUW it successfully. It also serves as a marker of the first
3640 // instruction where the cleanup is active.
3641       pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3642                                               type);
3643 // This unreachable is a temporary marker which will be removed later.
3644 llvm::Instruction *IsActive = Builder.CreateUnreachable();
3645       args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3646     }
3648     return;
3649   }
3650 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3651 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3652 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3653 assert(L.isSimple());
3654     args.addUncopiedAggregate(L, type);
3655     return;
3656   }
3658   args.add(EmitAnyExprToTemp(E), type);
3659 }
3661 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3662 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3663 // implicitly widens null pointer constants that are arguments to varargs
3664 // functions to pointer-sized ints.
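// For example (illustrative), 'printf("%p", NULL)' on Win64 should pass a
// 64-bit zero rather than a 32-bit int 0.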
3665 if (!getTarget().getTriple().isOSWindows())
3666 return Arg->getType();
3668 if (Arg->getType()->isIntegerType() &&
3669 getContext().getTypeSize(Arg->getType()) <
3670 getContext().getTargetInfo().getPointerWidth(0) &&
3671 Arg->isNullPointerConstant(getContext(),
3672 Expr::NPC_ValueDependentIsNotNull)) {
3673 return getContext().getIntPtrType();
3676 return Arg->getType();
3679 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3680 // optimizer it can aggressively ignore unwind edges.
3682 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3683 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3684 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3685 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3686 CGM.getNoObjCARCExceptionsMetadata());
3689 /// Emits a call to the given no-arguments nounwind runtime function.
3691 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3692 const llvm::Twine &name) {
3693 return EmitNounwindRuntimeCall(callee, None, name);
3696 /// Emits a call to the given nounwind runtime function.
3698 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
3699 ArrayRef<llvm::Value *> args,
3700 const llvm::Twine &name) {
3701 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3702   call->setDoesNotThrow();
3703   return call;
3704 }
3706 /// Emits a simple call (never an invoke) to the given no-arguments
3707 /// runtime function.
3708 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3709 const llvm::Twine &name) {
3710 return EmitRuntimeCall(callee, None, name);
3713 // Calls which may throw must have operand bundles indicating which funclet
3714 // they are nested within.
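// For example, a call emitted inside a cleanup funclet carries an operand
// bundle such as: call void @g() [ "funclet"(token %cleanuppad) ].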
3715 SmallVector<llvm::OperandBundleDef, 1>
3716 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
3717 SmallVector<llvm::OperandBundleDef, 1> BundleList;
3718 // There is no need for a funclet operand bundle if we aren't inside a
3719   // funclet.
3720   if (!CurrentFuncletPad)
3721     return BundleList;
3723 // Skip intrinsics which cannot throw.
3724 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3725   if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3726     return BundleList;
3728   BundleList.emplace_back("funclet", CurrentFuncletPad);
3729   return BundleList;
3730 }
3732 /// Emits a simple call (never an invoke) to the given runtime function.
3733 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
3734 ArrayRef<llvm::Value *> args,
3735 const llvm::Twine &name) {
3736 llvm::CallInst *call = Builder.CreateCall(
3737 callee, args, getBundlesForFunclet(callee.getCallee()), name);
3738   call->setCallingConv(getRuntimeCC());
3739   return call;
3740 }
3742 /// Emits a call or invoke to the given noreturn runtime function.
3743 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
3744 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
3745 SmallVector<llvm::OperandBundleDef, 1> BundleList =
3746 getBundlesForFunclet(callee.getCallee());
3748 if (getInvokeDest()) {
3749 llvm::InvokeInst *invoke =
3750 Builder.CreateInvoke(callee,
3751                            getUnreachableBlock(),
3752                            getInvokeDest(),
3753                            args,
3754                            BundleList);
3755 invoke->setDoesNotReturn();
3756 invoke->setCallingConv(getRuntimeCC());
3757   } else {
3758     llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3759 call->setDoesNotReturn();
3760 call->setCallingConv(getRuntimeCC());
3761     Builder.CreateUnreachable();
3762   }
3763 }
3765 /// Emits a call or invoke instruction to the given nullary runtime function.
3767 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3768 const Twine &name) {
3769 return EmitRuntimeCallOrInvoke(callee, None, name);
3772 /// Emits a call or invoke instruction to the given runtime function.
3774 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
3775 ArrayRef<llvm::Value *> args,
3776 const Twine &name) {
3777 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
3778   call->setCallingConv(getRuntimeCC());
3779   return call;
3780 }
3782 /// Emits a call or invoke instruction to the given function, depending
3783 /// on the current state of the EH stack.
3784 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
3785 ArrayRef<llvm::Value *> Args,
3786 const Twine &Name) {
3787 llvm::BasicBlock *InvokeDest = getInvokeDest();
3788 SmallVector<llvm::OperandBundleDef, 1> BundleList =
3789 getBundlesForFunclet(Callee.getCallee());
3791 llvm::CallBase *Inst;
3792   if (!InvokeDest)
3793     Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
3794   else {
3795     llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3796     Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
3797                                 Name);
3798     EmitBlock(ContBB);
3799   }
3801 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3802 // optimizer it can aggressively ignore unwind edges.
3803 if (CGM.getLangOpts().ObjCAutoRefCount)
3804     AddObjCARCExceptionMetadata(Inst);
3806   return Inst;
3807 }
3809 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3810                                                   llvm::Instruction *New) {
3811   DeferredReplacements.push_back(std::make_pair(Old, New));
3812 }
3814 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3815 const CGCallee &Callee,
3816 ReturnValueSlot ReturnValue,
3817 const CallArgList &CallArgs,
3818 llvm::CallBase **callOrInvoke,
3819 SourceLocation Loc) {
3820 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3822 assert(Callee.isOrdinary() || Callee.isVirtual());
3824 // Handle struct-return functions by passing a pointer to the
3825 // location that we would like to return into.
3826 QualType RetTy = CallInfo.getReturnType();
3827 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3829 llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);
3831 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
3832 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
3833 // We can only guarantee that a function is called from the correct
3834 // context/function based on the appropriate target attributes,
3835 // so only check in the case where we have both always_inline and target
3836 // since otherwise we could be making a conditional call after a check for
3837 // the proper cpu features (and it won't cause code generation issues due to
3838 // function based code generation).
3839 if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
3840 TargetDecl->hasAttr<TargetAttr>())
3841 checkTargetFeatures(Loc, FD);
3844 if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
3845 // For an inalloca varargs function, we don't expect CallInfo to match the
3846     // function pointer's type, because the inalloca struct will have extra
3847 // fields in it for the varargs parameters. Code later in this function
3848 // bitcasts the function pointer to the type derived from CallInfo.
3850 // In other cases, we assert that the types match up (until pointers stop
3851 // having pointee types).
3852 llvm::Type *TypeFromVal;
3853 if (Callee.isVirtual())
3854 TypeFromVal = Callee.getVirtualFunctionType();
3855     else
3856       TypeFromVal =
3857           Callee.getFunctionPointer()->getType()->getPointerElementType();
3858 assert(IRFuncTy == TypeFromVal);
3862 // 1. Set up the arguments.
3864 // If we're using inalloca, insert the allocation after the stack save.
3865 // FIXME: Do this earlier rather than hacking it in here!
3866 Address ArgMemory = Address::invalid();
3867 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3868 const llvm::DataLayout &DL = CGM.getDataLayout();
3869 llvm::Instruction *IP = CallArgs.getStackBase();
3870 llvm::AllocaInst *AI;
3871     if (IP) {
3872       IP = IP->getNextNode();
3873       AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
3874                                 "argmem", IP);
3875     } else {
3876       AI = CreateTempAlloca(ArgStruct, "argmem");
3877     }
3878 auto Align = CallInfo.getArgStructAlignment();
3879 AI->setAlignment(Align.getAsAlign());
3880 AI->setUsedWithInAlloca(true);
3881 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3882 ArgMemory = Address(AI, Align);
3885 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3886 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3888 // If the call returns a temporary with struct return, create a temporary
3889 // alloca to hold the result, unless one is given to us.
3890 Address SRetPtr = Address::invalid();
3891 Address SRetAlloca = Address::invalid();
3892 llvm::Value *UnusedReturnSizePtr = nullptr;
3893 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3894 if (!ReturnValue.isNull()) {
3895 SRetPtr = ReturnValue.getValue();
3896     } else {
3897       SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
3898       if (HaveInsertPoint() && ReturnValue.isUnused()) {
3899         llvm::TypeSize size =
3900             CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3901         UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
3902       }
3903     }
3904 if (IRFunctionArgs.hasSRetArg()) {
3905 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3906 } else if (RetAI.isInAlloca()) {
3907       Address Addr =
3908           Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
3909       Builder.CreateStore(SRetPtr.getPointer(), Addr);
3910     }
3911   }
3913 Address swiftErrorTemp = Address::invalid();
3914 Address swiftErrorArg = Address::invalid();
3916 // When passing arguments using temporary allocas, we need to add the
3917 // appropriate lifetime markers. This vector keeps track of all the lifetime
3918 // markers that need to be ended right after the call.
3919 SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;
3921 // Translate all of the arguments as necessary to match the IR lowering.
3922 assert(CallInfo.arg_size() == CallArgs.size() &&
3923 "Mismatch between function signature & arguments.");
3924   unsigned ArgNo = 0;
3925   CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3926 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3927 I != E; ++I, ++info_it, ++ArgNo) {
3928 const ABIArgInfo &ArgInfo = info_it->info;
3930 // Insert a padding argument to ensure proper alignment.
3931 if (IRFunctionArgs.hasPaddingArg(ArgNo))
3932 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3933 llvm::UndefValue::get(ArgInfo.getPaddingType());
3935 unsigned FirstIRArg, NumIRArgs;
3936 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3938 switch (ArgInfo.getKind()) {
3939 case ABIArgInfo::InAlloca: {
3940 assert(NumIRArgs == 0);
3941 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3942 if (I->isAggregate()) {
3943 // Replace the placeholder with the appropriate argument slot GEP.
3944 Address Addr = I->hasLValue()
3945 ? I->getKnownLValue().getAddress(*this)
3946 : I->getKnownRValue().getAggregateAddress();
3947 llvm::Instruction *Placeholder =
3948 cast<llvm::Instruction>(Addr.getPointer());
3949 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3950 Builder.SetInsertPoint(Placeholder);
3951         Addr =
3952             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3953 Builder.restoreIP(IP);
3954 deferPlaceholderReplacement(Placeholder, Addr.getPointer());
3955       } else {
3956         // Store the RValue into the argument struct.
3957         Address Addr =
3958             Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
3959 unsigned AS = Addr.getType()->getPointerAddressSpace();
3960 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3961 // There are some cases where a trivial bitcast is not avoidable. The
3962         // definition of a type later in a translation unit may change its type
3963 // from {}* to (%struct.foo*)*.
3964 if (Addr.getType() != MemType)
3965 Addr = Builder.CreateBitCast(Addr, MemType);
3966         I->copyInto(*this, Addr);
3967       }
3968       break;
3969     }
3971 case ABIArgInfo::Indirect: {
3972 assert(NumIRArgs == 1);
3973 if (!I->isAggregate()) {
3974 // Make a temporary alloca to pass the argument.
3975 Address Addr = CreateMemTempWithoutCast(
3976 I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
3977 IRCallArgs[FirstIRArg] = Addr.getPointer();
3979 I->copyInto(*this, Addr);
3980       } else {
3981         // We want to avoid creating an unnecessary temporary+copy here;
3982 // however, we need one in three cases:
3983 // 1. If the argument is not byval, and we are required to copy the
3984 // source. (This case doesn't occur on any common architecture.)
3985 // 2. If the argument is byval, RV is not sufficiently aligned, and
3986 // we cannot force it to be sufficiently aligned.
3987 // 3. If the argument is byval, but RV is not located in default
3988 // or alloca address space.
3989 Address Addr = I->hasLValue()
3990 ? I->getKnownLValue().getAddress(*this)
3991 : I->getKnownRValue().getAggregateAddress();
3992 llvm::Value *V = Addr.getPointer();
3993 CharUnits Align = ArgInfo.getIndirectAlign();
3994 const llvm::DataLayout *TD = &CGM.getDataLayout();
3996 assert((FirstIRArg >= IRFuncTy->getNumParams() ||
3997 IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
3998 TD->getAllocaAddrSpace()) &&
3999 "indirect argument must be in alloca address space");
4001 bool NeedCopy = false;
4003 if (Addr.getAlignment() < Align &&
4004 llvm::getOrEnforceKnownAlignment(V, Align.getQuantity(), *TD) <
4005               Align.getQuantity()) {
4006         NeedCopy = true;
4007 } else if (I->hasLValue()) {
4008 auto LV = I->getKnownLValue();
4009 auto AS = LV.getAddressSpace();
4011 if (!ArgInfo.getIndirectByVal() ||
4012             (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
4013           NeedCopy = true;
4014         }
4015 if (!getLangOpts().OpenCL) {
4016 if ((ArgInfo.getIndirectByVal() &&
4017 (AS != LangAS::Default &&
4018                  AS != CGM.getASTAllocaAddressSpace()))) {
4019             NeedCopy = true;
4020           }
4021         }
4022 // For OpenCL even if RV is located in default or alloca address space
4023 // we don't want to perform address space cast for it.
4024 else if ((ArgInfo.getIndirectByVal() &&
4025 Addr.getType()->getAddressSpace() != IRFuncTy->
4026                     getParamType(FirstIRArg)->getPointerAddressSpace())) {
4027           NeedCopy = true;
4028         }
4029       }
4031       if (NeedCopy) {
4032 // Create an aligned temporary, and copy to it.
4033 Address AI = CreateMemTempWithoutCast(
4034 I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
4035 IRCallArgs[FirstIRArg] = AI.getPointer();
4037 // Emit lifetime markers for the temporary alloca.
4038 uint64_t ByvalTempElementSize =
4039 CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
4040 llvm::Value *LifetimeSize =
4041 EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
4043 // Add cleanup code to emit the end lifetime marker after the call.
4044 if (LifetimeSize) // In case we disabled lifetime markers.
4045 CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
4047 // Generate the copy.
4048 I->copyInto(*this, AI);
4049       } else {
4050         // Skip the extra memcpy call.
4051 auto *T = V->getType()->getPointerElementType()->getPointerTo(
4052 CGM.getDataLayout().getAllocaAddrSpace());
4053 IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
4054             *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
4055             true);
4056       }
4057     }
4058     break;
4059   }
4061 case ABIArgInfo::Ignore:
4062     assert(NumIRArgs == 0);
4063     break;
4065 case ABIArgInfo::Extend:
4066 case ABIArgInfo::Direct: {
4067 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
4068 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
4069 ArgInfo.getDirectOffset() == 0) {
4070       assert(NumIRArgs == 1);
4071       llvm::Value *V;
4072 if (!I->isAggregate())
4073 V = I->getKnownRValue().getScalarVal();
4075 V = Builder.CreateLoad(
4076 I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4077 : I->getKnownRValue().getAggregateAddress());
4079 // Implement swifterror by copying into a new swifterror argument.
4080 // We'll write back in the normal path out of the call.
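// Concretely: load the current error value from the caller-provided slot,
// store it into a fresh alloca marked swifterror, pass that alloca to the
// callee, and copy the (possibly updated) value back after the call.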
4081 if (CallInfo.getExtParameterInfo(ArgNo).getABI()
4082 == ParameterABI::SwiftErrorResult) {
4083 assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
4085 QualType pointeeTy = I->Ty->getPointeeType();
4086         swiftErrorArg =
4087           Address(V, getContext().getTypeAlignInChars(pointeeTy));
4089         swiftErrorTemp =
4090           CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
4091 V = swiftErrorTemp.getPointer();
4092 cast<llvm::AllocaInst>(V)->setSwiftError(true);
4094 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
4095 Builder.CreateStore(errorValue, swiftErrorTemp);
4098 // We might have to widen integers, but we should never truncate.
4099 if (ArgInfo.getCoerceToType() != V->getType() &&
4100 V->getType()->isIntegerTy())
4101 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
4103 // If the argument doesn't match, perform a bitcast to coerce it. This
4104 // can happen due to trivial type mismatches.
4105 if (FirstIRArg < IRFuncTy->getNumParams() &&
4106 V->getType() != IRFuncTy->getParamType(FirstIRArg))
4107 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
4109       IRCallArgs[FirstIRArg] = V;
4110       break;
4111     }
4113 // FIXME: Avoid the conversion through memory if possible.
4114 Address Src = Address::invalid();
4115 if (!I->isAggregate()) {
4116 Src = CreateMemTemp(I->Ty, "coerce");
4117 I->copyInto(*this, Src);
4118     } else {
4119       Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4120                            : I->getKnownRValue().getAggregateAddress();
4121     }
4123 // If the value is offset in memory, apply the offset now.
4124 Src = emitAddressAtOffset(*this, Src, ArgInfo);
4126 // Fast-isel and the optimizer generally like scalar values better than
4127 // FCAs, so we flatten them if this is safe to do for this argument.
4128 llvm::StructType *STy =
4129 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
4130 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
4131 llvm::Type *SrcTy = Src.getType()->getElementType();
4132 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
4133 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
4135 // If the source type is smaller than the destination type of the
4136 // coerce-to logic, copy the source value into a temp alloca the size
4137 // of the destination type to allow loading all of it. The bits past
4138 // the source value are left undef.
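// For example (illustrative), a 12-byte struct being coerced to { i64, i64 }
// is first copied into a 16-byte temporary so both i64 elements can be
// loaded safely.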
4139 if (SrcSize < DstSize) {
4140         Address TempAlloca
4141           = CreateTempAlloca(STy, Src.getAlignment(),
4142                              Src.getName() + ".coerce");
4143         Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
4144         Src = TempAlloca;
4145       } else {
4146         Src = Builder.CreateBitCast(Src,
4147                                     STy->getPointerTo(Src.getAddressSpace()));
4148       }
4150 assert(NumIRArgs == STy->getNumElements());
4151 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
4152 Address EltPtr = Builder.CreateStructGEP(Src, i);
4153 llvm::Value *LI = Builder.CreateLoad(EltPtr);
4154 IRCallArgs[FirstIRArg + i] = LI;
4155       }
4156     } else {
4157       // In the simple case, just pass the coerced loaded value.
4158 assert(NumIRArgs == 1);
4159 IRCallArgs[FirstIRArg] =
4160           CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
4161     }
4163     break;
4164   }
4166 case ABIArgInfo::CoerceAndExpand: {
4167 auto coercionType = ArgInfo.getCoerceAndExpandType();
4168 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4170 llvm::Value *tempSize = nullptr;
4171 Address addr = Address::invalid();
4172 Address AllocaAddr = Address::invalid();
4173 if (I->isAggregate()) {
4174 addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
4175 : I->getKnownRValue().getAggregateAddress();
4177     } else {
4178       RValue RV = I->getKnownRValue();
4179 assert(RV.isScalar()); // complex should always just be direct
4181 llvm::Type *scalarType = RV.getScalarVal()->getType();
4182 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
4183 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
4185 // Materialize to a temporary.
4186 addr = CreateTempAlloca(
4187 RV.getScalarVal()->getType(),
4188 CharUnits::fromQuantity(std::max(
4189 (unsigned)layout->getAlignment().value(), scalarAlign)),
4191 /*ArraySize=*/nullptr, &AllocaAddr);
4192 tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());
4194       Builder.CreateStore(RV.getScalarVal(), addr);
4195     }
4197 addr = Builder.CreateElementBitCast(addr, coercionType);
4199 unsigned IRArgPos = FirstIRArg;
4200 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4201 llvm::Type *eltType = coercionType->getElementType(i);
4202 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4203 Address eltAddr = Builder.CreateStructGEP(addr, i);
4204 llvm::Value *elt = Builder.CreateLoad(eltAddr);
4205 IRCallArgs[IRArgPos++] = elt;
4207 assert(IRArgPos == FirstIRArg + NumIRArgs);
4209     if (tempSize) {
4210       EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
4211     }
4213     break;
4214   }
4216 case ABIArgInfo::Expand:
4217 unsigned IRArgPos = FirstIRArg;
4218 ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
4219     assert(IRArgPos == FirstIRArg + NumIRArgs);
4220     break;
4221   }
4222 }
4224 const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
4225 llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();
4227 // If we're using inalloca, set up that argument.
4228 if (ArgMemory.isValid()) {
4229 llvm::Value *Arg = ArgMemory.getPointer();
4230 if (CallInfo.isVariadic()) {
4231 // When passing non-POD arguments by value to variadic functions, we will
4232 // end up with a variadic prototype and an inalloca call site. In such
4233 // cases, we can't do any parameter mismatch checks. Give up and bitcast
4235 unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4236       CalleePtr =
4237           Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
4238     } else {
4239 llvm::Type *LastParamTy =
4240 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
4241 if (Arg->getType() != LastParamTy) {
4243 // Assert that these structs have equivalent element types.
4244 llvm::StructType *FullTy = CallInfo.getArgStruct();
4245 llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4246 cast<llvm::PointerType>(LastParamTy)->getElementType());
4247 assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4248 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4249 DE = DeclaredTy->element_end(),
4250 FI = FullTy->element_begin();
4251              DI != DE; ++DI, ++FI)
4252         assert(*DI == *FI);
4254       Arg = Builder.CreateBitCast(Arg, LastParamTy);
4255     }
4256   }
4257 assert(IRFunctionArgs.hasInallocaArg());
4258 IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
4261 // 2. Prepare the function pointer.
4263 // If the callee is a bitcast of a non-variadic function to have a
4264 // variadic function pointer type, check to see if we can remove the
4265 // bitcast. This comes up with unprototyped functions.
4267 // This makes the IR nicer, but more importantly it ensures that we
4268 // can inline the function at -O0 if it is marked always_inline.
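// For example (illustrative), calling a function declared without a prototype
// ('void f();' in C) goes through a 'void (...)*' bitcast of 'void ()'; if the
// parameter and return types of the definition match the call exactly, we can
// drop the bitcast and call the function directly.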
4269 auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
4270 llvm::Value *Ptr) -> llvm::Function * {
4271     if (!CalleeFT->isVarArg())
4272       return nullptr;
4274 // Get underlying value if it's a bitcast
4275 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
4276 if (CE->getOpcode() == llvm::Instruction::BitCast)
4277 Ptr = CE->getOperand(0);
4280     llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
4281     if (!OrigFn)
4282       return nullptr;
4284 llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4286 // If the original type is variadic, or if any of the component types
4287 // disagree, we cannot remove the cast.
4288 if (OrigFT->isVarArg() ||
4289 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
4290         OrigFT->getReturnType() != CalleeFT->getReturnType())
4291       return nullptr;
4293 for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
4294       if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
4295         return nullptr;
4297     return OrigFn;
4298   };
4300 if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
4301     CalleePtr = OrigFn;
4302     IRFuncTy = OrigFn->getFunctionType();
4303   }
4305 // 3. Perform the actual call.
4307 // Deactivate any cleanups that we're supposed to do immediately before
4309 if (!CallArgs.getCleanupsToDeactivate().empty())
4310 deactivateArgCleanupsBeforeCall(*this, CallArgs);
4312 // Assert that the arguments we computed match up. The IR verifier
4313 // will catch this, but this is a common enough source of problems
4314 // during IRGen changes that it's way better for debugging to catch
4315 // it ourselves here.
4317 assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4318 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4319 // Inalloca argument can have different type.
4320 if (IRFunctionArgs.hasInallocaArg() &&
4321         i == IRFunctionArgs.getInallocaArgNo())
4322       continue;
4323 if (i < IRFuncTy->getNumParams())
4324 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
4328 // Update the largest vector width if any arguments have vector types.
4329 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4330 if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
4331 LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
4332 VT->getPrimitiveSizeInBits().getFixedSize());
4335 // Compute the calling convention and attributes.
4336 unsigned CallingConv;
4337 llvm::AttributeList Attrs;
4338 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4339 Callee.getAbstractInfo(), Attrs, CallingConv,
4340 /*AttrOnCallSite=*/true);
4342 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
4343 if (FD->usesFPIntrin())
4344 // All calls within a strictfp function are marked strictfp
4345       Attrs =
4346           Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4347 llvm::Attribute::StrictFP);
4349 // Apply some call-site-specific attributes.
4350 // TODO: work this into building the attribute set.
4352 // Apply always_inline to all calls within flatten functions.
4353 // FIXME: should this really take priority over __try, below?
4354 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4355 !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
4356     Attrs =
4357         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4358                            llvm::Attribute::AlwaysInline);
4359   }
4361 // Disable inlining inside SEH __try blocks.
4362 if (isSEHTryScope()) {
4363     Attrs =
4364         Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4365                            llvm::Attribute::NoInline);
4366   }
4368 // Decide whether to use a call or an invoke.
4369   bool CannotThrow;
4370   if (currentFunctionUsesSEHTry()) {
4371 // SEH cares about asynchronous exceptions, so everything can "throw."
4372 CannotThrow = false;
4373 } else if (isCleanupPadScope() &&
4374 EHPersonality::get(*this).isMSVCXXPersonality()) {
4375 // The MSVC++ personality will implicitly terminate the program if an
4376 // exception is thrown during a cleanup outside of a try/catch.
4377 // We don't need to model anything in IR to get this behavior.
4378     CannotThrow = true;
4379   } else {
4380     // Otherwise, nounwind call sites will never throw.
4381     CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
4382                                      llvm::Attribute::NoUnwind);
4383   }
4385 // If we made a temporary, be sure to clean up after ourselves. Note that we
4386 // can't depend on being inside of an ExprWithCleanups, so we need to manually
4387 // pop this cleanup later on. Being eager about this is OK, since this
4388 // temporary is 'invisible' outside of the callee.
4389 if (UnusedReturnSizePtr)
4390 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
4391 UnusedReturnSizePtr);
4393 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4395 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4396 getBundlesForFunclet(CalleePtr);
4398 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
4399 if (FD->usesFPIntrin())
4400 // All calls within a strictfp function are marked strictfp
4401       Attrs =
4402           Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
4403 llvm::Attribute::StrictFP);
4405 // Emit the actual call/invoke instruction.
4406   llvm::CallBase *CI;
4407   if (!InvokeDest) {
4408     CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
4409   } else {
4410     llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
4411     CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
4412                               BundleList);
4413     EmitBlock(Cont);
4414   }
4415   if (callOrInvoke)
4416     *callOrInvoke = CI;
4418 // If this is within a function that has the guard(nocf) attribute and is an
4419 // indirect call, add the "guard_nocf" attribute to this call to indicate that
4420 // Control Flow Guard checks should not be added, even if the call is inlined.
4421 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
4422 if (const auto *A = FD->getAttr<CFGuardAttr>()) {
4423 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
4424 Attrs = Attrs.addAttribute(
4425 getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
4429 // Apply the attributes and calling convention.
4430 CI->setAttributes(Attrs);
4431 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4433 // Apply various metadata.
4435 if (!CI->getType()->isVoidTy())
4436 CI->setName("call");
4438 // Update largest vector width from the return type.
4439 if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
4440 LargestVectorWidth = std::max((uint64_t)LargestVectorWidth,
4441 VT->getPrimitiveSizeInBits().getFixedSize());
4443 // Insert instrumentation or attach profile metadata at indirect call sites.
4444 // For more details, see the comment before the definition of
4445 // IPVK_IndirectCallTarget in InstrProfData.inc.
4446 if (!CI->getCalledFunction())
4447     PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
4448                      CI, CalleePtr);
4450 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4451 // optimizer it can aggressively ignore unwind edges.
4452 if (CGM.getLangOpts().ObjCAutoRefCount)
4453 AddObjCARCExceptionMetadata(CI);
4455 // Suppress tail calls if requested.
4456 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4457 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
4458 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
4461 // Add metadata for calls to MSAllocator functions
4462 if (getDebugInfo() && TargetDecl &&
4463 TargetDecl->hasAttr<MSAllocatorAttr>())
4464 getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy, Loc);
4466 // 4. Finish the call.
4468 // If the call doesn't return, finish the basic block and clear the
4469 // insertion point; this allows the rest of IRGen to discard
4470 // unreachable code.
4471 if (CI->doesNotReturn()) {
4472     if (UnusedReturnSizePtr)
4473       PopCleanupBlock();
4475 // Strip away the noreturn attribute to better diagnose unreachable UB.
4476 if (SanOpts.has(SanitizerKind::Unreachable)) {
4477 // Also remove from function since CallBase::hasFnAttr additionally checks
4478 // attributes of the called function.
4479 if (auto *F = CI->getCalledFunction())
4480 F->removeFnAttr(llvm::Attribute::NoReturn);
4481 CI->removeAttribute(llvm::AttributeList::FunctionIndex,
4482 llvm::Attribute::NoReturn);
4484 // Avoid incompatibility with ASan which relies on the `noreturn`
4485 // attribute to insert handler calls.
4486 if (SanOpts.hasOneOf(SanitizerKind::Address |
4487 SanitizerKind::KernelAddress)) {
4488 SanitizerScope SanScope(this);
4489 llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
4490 Builder.SetInsertPoint(CI);
4491 auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
4492 llvm::FunctionCallee Fn =
4493 CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
4494 EmitNounwindRuntimeCall(Fn);
4498 EmitUnreachable(Loc);
4499 Builder.ClearInsertionPoint();
4501     // FIXME: For now, emit a dummy basic block because expr emitters
4502     // generally are not ready to handle emitting expressions at unreachable
4503     // points.
4504     EnsureInsertPoint();
4506 // Return a reasonable RValue.
4507 return GetUndefRValue(RetTy);
4510 // Perform the swifterror writeback.
4511 if (swiftErrorTemp.isValid()) {
4512 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
4513 Builder.CreateStore(errorResult, swiftErrorArg);
4516 // Emit any call-associated writebacks immediately. Arguably this
4517 // should happen after any return-value munging.
4518 if (CallArgs.hasWritebacks())
4519 emitWritebacks(*this, CallArgs);
4521 // The stack cleanup for inalloca arguments has to run out of the normal
4522 // lexical order, so deactivate it and run it manually here.
4523 CallArgs.freeArgumentMemory(*this);
4525 // Extract the return value.
4526   RValue Ret = [&] {
4527     switch (RetAI.getKind()) {
4528 case ABIArgInfo::CoerceAndExpand: {
4529 auto coercionType = RetAI.getCoerceAndExpandType();
4531 Address addr = SRetPtr;
4532 addr = Builder.CreateElementBitCast(addr, coercionType);
4534 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4535 bool requiresExtract = isa<llvm::StructType>(CI->getType());
4537 unsigned unpaddedIndex = 0;
4538 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4539 llvm::Type *eltType = coercionType->getElementType(i);
4540 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4541 Address eltAddr = Builder.CreateStructGEP(addr, i);
4542 llvm::Value *elt = CI;
4543 if (requiresExtract)
4544 elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
4545         else
4546           assert(unpaddedIndex == 0);
4547         Builder.CreateStore(elt, eltAddr);
4548       }
4550       return convertTempToRValue(addr, RetTy, SourceLocation());
4551     }
4553 case ABIArgInfo::InAlloca:
4554 case ABIArgInfo::Indirect: {
4555 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4556       if (UnusedReturnSizePtr)
4557         PopCleanupBlock();
4558       return ret;
4559     }
4561 case ABIArgInfo::Ignore:
4562 // If we are ignoring an argument that had a result, make sure to
4563 // construct the appropriate return value for our caller.
4564 return GetUndefRValue(RetTy);
4566 case ABIArgInfo::Extend:
4567 case ABIArgInfo::Direct: {
4568 llvm::Type *RetIRTy = ConvertType(RetTy);
4569 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
4570         switch (getEvaluationKind(RetTy)) {
4571         case TEK_Complex: {
4572 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4573 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
4574           return RValue::getComplex(std::make_pair(Real, Imag));
4575         }
4576 case TEK_Aggregate: {
4577 Address DestPtr = ReturnValue.getValue();
4578 bool DestIsVolatile = ReturnValue.isVolatile();
4580 if (!DestPtr.isValid()) {
4581 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
4582 DestIsVolatile = false;
4584 BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
4585 return RValue::getAggregate(DestPtr);
4586         }
4587         case TEK_Scalar: {
4588           // If the argument doesn't match, perform a bitcast to coerce it. This
4589 // can happen due to trivial type mismatches.
4590 llvm::Value *V = CI;
4591 if (V->getType() != RetIRTy)
4592 V = Builder.CreateBitCast(V, RetIRTy);
4593 return RValue::get(V);
4594         }
4595         }
4596         llvm_unreachable("bad evaluation kind");
4597       }
4599 Address DestPtr = ReturnValue.getValue();
4600 bool DestIsVolatile = ReturnValue.isVolatile();
4602 if (!DestPtr.isValid()) {
4603 DestPtr = CreateMemTemp(RetTy, "coerce");
4604 DestIsVolatile = false;
4607 // If the value is offset in memory, apply the offset now.
4608 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4609 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
4611 return convertTempToRValue(DestPtr, RetTy, SourceLocation());
4614 case ABIArgInfo::Expand:
4615 llvm_unreachable("Invalid ABI kind for return argument");
4618     llvm_unreachable("Unhandled ABIArgInfo::Kind");
4619   }();
4621 // Emit the assume_aligned check on the return value.
4622 if (Ret.isScalar() && TargetDecl) {
4623 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4624 llvm::Value *OffsetValue = nullptr;
4625 if (const auto *Offset = AA->getOffset())
4626 OffsetValue = EmitScalarExpr(Offset);
4628 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4629 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
4630 EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4631 AlignmentCI, OffsetValue);
4632 } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4633       llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
4634                                       .getRValue(*this)
4635                                       .getScalarVal();
4636       EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
4637                               AlignmentVal);
4638     }
4639   }
4641 // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
4642 // we can't use the full cleanup mechanism.
4643 for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
4644     LifetimeEnd.Emit(*this, /*Flags=*/{});
4646   return Ret;
4647 }
4649 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
4650   if (isVirtual()) {
4651     const CallExpr *CE = getVirtualCallExpr();
4652 return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
4653 CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
4654         CE ? CE->getBeginLoc() : SourceLocation());
4655   }
4657   return *this;
4658 }
4660 /* VarArg handling */
4662 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4663 VAListAddr = VE->isMicrosoftABI()
4664 ? EmitMSVAListRef(VE->getSubExpr())
4665 : EmitVAListRef(VE->getSubExpr());
4666 QualType Ty = VE->getType();
4667 if (VE->isMicrosoftABI())
4668 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
4669   return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
4670 }