1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliance.
13 //===----------------------------------------------------------------------===//
19 #include "CGCleanup.h"
20 #include "CodeGenFunction.h"
21 #include "CodeGenModule.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/Decl.h"
24 #include "clang/AST/DeclCXX.h"
25 #include "clang/AST/DeclObjC.h"
26 #include "clang/Basic/TargetBuiltins.h"
27 #include "clang/Basic/TargetInfo.h"
28 #include "clang/CodeGen/CGFunctionInfo.h"
29 #include "clang/CodeGen/SwiftCallingConv.h"
30 #include "clang/Frontend/CodeGenOptions.h"
31 #include "llvm/ADT/StringExtras.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/Attributes.h"
34 #include "llvm/IR/CallingConv.h"
35 #include "llvm/IR/CallSite.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/InlineAsm.h"
38 #include "llvm/IR/Intrinsics.h"
39 #include "llvm/IR/IntrinsicInst.h"
40 #include "llvm/Transforms/Utils/Local.h"
41 using namespace clang;
42 using namespace CodeGen;
46 unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
48 default: return llvm::CallingConv::C;
49 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
50 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
51 case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
52 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
53 case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
54 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
55 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
56 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
57 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
58 // TODO: Add support for __pascal to LLVM.
59 case CC_X86Pascal: return llvm::CallingConv::C;
60 // TODO: Add support for __vectorcall to LLVM.
61 case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
62 case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
63 case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
64 case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
65 case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
66 case CC_Swift: return llvm::CallingConv::Swift;
70 /// Derives the 'this' type for codegen purposes, i.e. ignoring method qualification.
72 /// FIXME: address space qualification?
73 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
74 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
75 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
78 /// Returns the canonical formal type of the given C++ method.
79 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
80 return MD->getType()->getCanonicalTypeUnqualified()
81 .getAs<FunctionProtoType>();
84 /// Returns the "extra-canonicalized" return type, which discards
85 /// qualifiers on the return type. Codegen doesn't care about them,
86 /// and it makes ABI code a little easier to be able to assume that
87 /// all parameter and return types are top-level unqualified.
88 static CanQualType GetReturnType(QualType RetTy) {
89 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
92 /// Arrange the argument and result information for a value of the given
93 /// unprototyped freestanding function type.
94 const CGFunctionInfo &
95 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
96 // When translating an unprototyped function type, always use a variadic type.
98 return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
99 /*instanceMethod=*/false,
100 /*chainCall=*/false, None,
101 FTNP->getExtInfo(), {}, RequiredArgs(0));
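/// Adds default-initialized ExtParameterInfos for any prefix arguments, copies
/// the infos from the prototype (emitting an extra default entry after each
/// pass_object_size parameter), and default-fills the remaining variadic or
/// suffix slots up to totalArgs.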
104 static void addExtParameterInfosForCall(
105 llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos,
106 const FunctionProtoType *proto,
108 unsigned totalArgs) {
109 assert(proto->hasExtParameterInfos());
110 assert(paramInfos.size() <= prefixArgs);
111 assert(proto->getNumParams() + prefixArgs <= totalArgs);
113 paramInfos.reserve(totalArgs);
115 // Add default infos for any prefix args that don't already have infos.
116 paramInfos.resize(prefixArgs);
118 // Add infos for the prototype.
119 for (const auto &ParamInfo : proto->getExtParameterInfos()) {
120 paramInfos.push_back(ParamInfo);
121 // The size argument synthesized for a pass_object_size param carries no parameter info.
122 if (ParamInfo.hasPassObjectSize())
123 paramInfos.emplace_back();
126 assert(paramInfos.size() <= totalArgs &&
127 "Did we forget to insert pass_object_size args?");
128 // Add default infos for the variadic and/or suffix arguments.
129 paramInfos.resize(totalArgs);
132 /// Adds the formal parameters in FPT to the given prefix. If any parameter in
133 /// FPT has pass_object_size attrs, then we'll add parameters for those, too.
134 static void appendParameterTypes(const CodeGenTypes &CGT,
135 SmallVectorImpl<CanQualType> &prefix,
136 SmallVectorImpl<FunctionProtoType::ExtParameterInfo> ¶mInfos,
137 CanQual<FunctionProtoType> FPT) {
138 // Fast path: don't touch param info if we don't need to.
139 if (!FPT->hasExtParameterInfos()) {
140 assert(paramInfos.empty() &&
141 "We have paramInfos, but the prototype doesn't?");
142 prefix.append(FPT->param_type_begin(), FPT->param_type_end());
146 unsigned PrefixSize = prefix.size();
147 // In the vast majority of cases, we'll have precisely FPT->getNumParams()
148 // parameters; the only thing that can change this is the presence of
149 // pass_object_size. So, we preallocate for the common case.
150 prefix.reserve(prefix.size() + FPT->getNumParams());
152 auto ExtInfos = FPT->getExtParameterInfos();
153 assert(ExtInfos.size() == FPT->getNumParams());
154 for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
155 prefix.push_back(FPT->getParamType(I));
156 if (ExtInfos[I].hasPassObjectSize())
157 prefix.push_back(CGT.getContext().getSizeType());
160 addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
164 /// Arrange the LLVM function layout for a value of the given function
165 /// type, on top of any implicit parameters already stored.
166 static const CGFunctionInfo &
167 arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
168 SmallVectorImpl<CanQualType> &prefix,
169 CanQual<FunctionProtoType> FTP,
170 const FunctionDecl *FD) {
171 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
172 RequiredArgs Required =
173 RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
175 appendParameterTypes(CGT, prefix, paramInfos, FTP);
176 CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
178 return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
179 /*chainCall=*/false, prefix,
180 FTP->getExtInfo(), paramInfos,
184 /// Arrange the argument and result information for a value of the
185 /// given freestanding function type.
186 const CGFunctionInfo &
187 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
188 const FunctionDecl *FD) {
189 SmallVector<CanQualType, 16> argTypes;
190 return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
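/// Map explicit calling-convention attributes on a declaration (stdcall,
/// fastcall, regcall, thiscall, vectorcall, pcs, ms_abi, sysv_abi, and
/// friends) to the corresponding Clang calling convention.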
194 static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
195 // Set the appropriate calling convention for the Function.
196 if (D->hasAttr<StdCallAttr>())
197 return CC_X86StdCall;
199 if (D->hasAttr<FastCallAttr>())
200 return CC_X86FastCall;
202 if (D->hasAttr<RegCallAttr>())
203 return CC_X86RegCall;
205 if (D->hasAttr<ThisCallAttr>())
206 return CC_X86ThisCall;
208 if (D->hasAttr<VectorCallAttr>())
209 return CC_X86VectorCall;
211 if (D->hasAttr<PascalAttr>())
214 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
215 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
217 if (D->hasAttr<IntelOclBiccAttr>())
218 return CC_IntelOclBicc;
220 if (D->hasAttr<MSABIAttr>())
221 return IsWindows ? CC_C : CC_X86_64Win64;
223 if (D->hasAttr<SysVABIAttr>())
224 return IsWindows ? CC_X86_64SysV : CC_C;
226 if (D->hasAttr<PreserveMostAttr>())
227 return CC_PreserveMost;
229 if (D->hasAttr<PreserveAllAttr>())
230 return CC_PreserveAll;
235 /// Arrange the argument and result information for a call to an
236 /// unknown C++ non-static member function of the given abstract type.
237 /// (A null RD means we don't have any meaningful "this" argument type,
238 /// so we fall back to a generic pointer type.)
239 /// The member function must be an ordinary function, i.e. not a
240 /// constructor or destructor.
241 const CGFunctionInfo &
242 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
243 const FunctionProtoType *FTP,
244 const CXXMethodDecl *MD) {
245 SmallVector<CanQualType, 16> argTypes;
247 // Add the 'this' pointer.
249 argTypes.push_back(GetThisType(Context, RD));
251 argTypes.push_back(Context.VoidPtrTy);
253 return ::arrangeLLVMFunctionInfo(
254 *this, true, argTypes,
255 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
258 /// Arrange the argument and result information for a declaration or
259 /// definition of the given C++ non-static member function. The
260 /// member function must be an ordinary function, i.e. not a
261 /// constructor or destructor.
262 const CGFunctionInfo &
263 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
264 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
265 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
267 CanQual<FunctionProtoType> prototype = GetFormalType(MD);
269 if (MD->isInstance()) {
270 // The abstract case is perfectly fine.
271 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
272 return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
275 return arrangeFreeFunctionType(prototype, MD);
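/// Returns whether an inheriting constructor of the given variant still needs
/// the forwarded parameters of the constructor it inherits.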
278 bool CodeGenTypes::inheritingCtorHasParams(
279 const InheritedConstructor &Inherited, CXXCtorType Type) {
280 // Parameters are unnecessary if we're constructing a base class subobject
281 // and the inherited constructor lives in a virtual base.
282 return Type == Ctor_Complete ||
283 !Inherited.getShadowDecl()->constructsVirtualBase() ||
284 !Target.getCXXABI().hasConstructorVariants();
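/// Arrange the IR signature of a constructor or destructor variant: the 'this'
/// pointer, the formal parameters (unless an inheriting constructor drops
/// them), and any ABI-specific structor arguments added by the C++ ABI.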
287 const CGFunctionInfo &
288 CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
291 SmallVector<CanQualType, 16> argTypes;
292 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
293 argTypes.push_back(GetThisType(Context, MD->getParent()));
295 bool PassParams = true;
298 if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
299 GD = GlobalDecl(CD, toCXXCtorType(Type));
301 // A base class inheriting constructor doesn't get forwarded arguments
302 // needed to construct a virtual base (or base class thereof).
303 if (auto Inherited = CD->getInheritedConstructor())
304 PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
306 auto *DD = dyn_cast<CXXDestructorDecl>(MD);
307 GD = GlobalDecl(DD, toCXXDtorType(Type));
310 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
312 // Add the formal parameters.
314 appendParameterTypes(*this, argTypes, paramInfos, FTP);
316 CGCXXABI::AddedStructorArgs AddedArgs =
317 TheCXXABI.buildStructorSignature(MD, Type, argTypes);
318 if (!paramInfos.empty()) {
319 // Note: 'prefix' here means immediately after the first ('this') parameter.
320 if (AddedArgs.Prefix)
321 paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
322 FunctionProtoType::ExtParameterInfo{});
323 if (AddedArgs.Suffix)
324 paramInfos.append(AddedArgs.Suffix,
325 FunctionProtoType::ExtParameterInfo{});
328 RequiredArgs required =
329 (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
330 : RequiredArgs::All);
332 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
333 CanQualType resultType = TheCXXABI.HasThisReturn(GD)
335 : TheCXXABI.hasMostDerivedReturn(GD)
336 ? CGM.getContext().VoidPtrTy
338 return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
339 /*chainCall=*/false, argTypes, extInfo,
340 paramInfos, required);
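// Helpers that collect the canonical parameter types of the arguments of a
// call or of a function declaration, and the ExtParameterInfos for a call.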
343 static SmallVector<CanQualType, 16>
344 getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
345 SmallVector<CanQualType, 16> argTypes;
346 for (auto &arg : args)
347 argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
351 static SmallVector<CanQualType, 16>
352 getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
353 SmallVector<CanQualType, 16> argTypes;
354 for (auto &arg : args)
355 argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
359 static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
360 getExtParameterInfosForCall(const FunctionProtoType *proto,
361 unsigned prefixArgs, unsigned totalArgs) {
362 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
363 if (proto->hasExtParameterInfos()) {
364 addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
369 /// Arrange a call to a C++ method, passing the given arguments.
371 /// ExtraPrefixArgs is the number of ABI-specific args passed after the `this` argument.
373 /// ExtraSuffixArgs is the number of ABI-specific args passed at the end of the call argument list.
375 /// PassProtoArgs indicates whether `args` has args for the parameters in the
376 /// given CXXConstructorDecl.
377 const CGFunctionInfo &
378 CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
379 const CXXConstructorDecl *D,
380 CXXCtorType CtorKind,
381 unsigned ExtraPrefixArgs,
382 unsigned ExtraSuffixArgs,
383 bool PassProtoArgs) {
385 SmallVector<CanQualType, 16> ArgTypes;
386 for (const auto &Arg : args)
387 ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
389 // +1 for implicit this, which should always be args[0].
390 unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
392 CanQual<FunctionProtoType> FPT = GetFormalType(D);
393 RequiredArgs Required =
394 RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
395 GlobalDecl GD(D, CtorKind);
396 CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
398 : TheCXXABI.hasMostDerivedReturn(GD)
399 ? CGM.getContext().VoidPtrTy
402 FunctionType::ExtInfo Info = FPT->getExtInfo();
403 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
404 // If the prototype args are elided, we should only have ABI-specific args,
405 // which never have param info.
406 if (PassProtoArgs && FPT->hasExtParameterInfos()) {
407 // ABI-specific suffix arguments are treated the same as variadic arguments.
408 addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
411 return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
412 /*chainCall=*/false, ArgTypes, Info,
413 ParamInfos, Required);
416 /// Arrange the argument and result information for the declaration or
417 /// definition of the given function.
418 const CGFunctionInfo &
419 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
420 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
421 if (MD->isInstance())
422 return arrangeCXXMethodDeclaration(MD);
424 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
426 assert(isa<FunctionType>(FTy));
428 // When declaring a function without a prototype, always use a
429 // non-variadic type.
430 if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
431 return arrangeLLVMFunctionInfo(
432 noProto->getReturnType(), /*instanceMethod=*/false,
433 /*chainCall=*/false, None, noProto->getExtInfo(), {}, RequiredArgs::All);
436 return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>(), FD);
439 /// Arrange the argument and result information for the declaration or
440 /// definition of an Objective-C method.
441 const CGFunctionInfo &
442 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
443 // It happens that this is the same as a call with no optional
444 // arguments, except also using the formal 'self' type.
445 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
448 /// Arrange the argument and result information for the function type
449 /// through which to perform a send to the given Objective-C method,
450 /// using the given receiver type. The receiver type is not always
451 /// the 'self' type of the method or even an Objective-C pointer type.
452 /// This is *not* the right method for actually performing such a
453 /// message send, due to the possibility of optional arguments.
454 const CGFunctionInfo &
455 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
456 QualType receiverType) {
457 SmallVector<CanQualType, 16> argTys;
458 argTys.push_back(Context.getCanonicalParamType(receiverType));
459 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
461 for (const auto *I : MD->parameters()) {
462 argTys.push_back(Context.getCanonicalParamType(I->getType()));
465 FunctionType::ExtInfo einfo;
466 bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
467 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));
469 if (getContext().getLangOpts().ObjCAutoRefCount &&
470 MD->hasAttr<NSReturnsRetainedAttr>())
471 einfo = einfo.withProducesResult(true);
473 RequiredArgs required =
474 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
476 return arrangeLLVMFunctionInfo(
477 GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
478 /*chainCall=*/false, argTys, einfo, {}, required);
481 const CGFunctionInfo &
482 CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
483 const CallArgList &args) {
484 auto argTypes = getArgTypesForCall(Context, args);
485 FunctionType::ExtInfo einfo;
487 return arrangeLLVMFunctionInfo(
488 GetReturnType(returnType), /*instanceMethod=*/false,
489 /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
492 const CGFunctionInfo &
493 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
494 // FIXME: Do we need to handle ObjCMethodDecl?
495 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
497 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
498 return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
500 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
501 return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
503 return arrangeFunctionDeclaration(FD);
506 /// Arrange a thunk that takes 'this' as the first parameter followed by
507 /// varargs. Return a void pointer, regardless of the actual return type.
508 /// The body of the thunk will end in a musttail call to a function of the
509 /// correct type, and the caller will bitcast the function to the correct prototype.
511 const CGFunctionInfo &
512 CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
513 assert(MD->isVirtual() && "only virtual memptrs have thunks");
514 CanQual<FunctionProtoType> FTP = GetFormalType(MD);
515 CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
516 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
517 /*chainCall=*/false, ArgTys,
518 FTP->getExtInfo(), {}, RequiredArgs(1));
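/// Arrange the signature of a Microsoft ABI constructor closure: 'this', the
/// source object for a copying closure, and an extra int argument when the
/// class has virtual bases.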
521 const CGFunctionInfo &
522 CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
524 assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);
526 CanQual<FunctionProtoType> FTP = GetFormalType(CD);
527 SmallVector<CanQualType, 2> ArgTys;
528 const CXXRecordDecl *RD = CD->getParent();
529 ArgTys.push_back(GetThisType(Context, RD));
530 if (CT == Ctor_CopyingClosure)
531 ArgTys.push_back(*FTP->param_type_begin());
532 if (RD->getNumVBases() > 0)
533 ArgTys.push_back(Context.IntTy);
534 CallingConv CC = Context.getDefaultCallingConvention(
535 /*IsVariadic=*/false, /*IsCXXMethod=*/true);
536 return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
537 /*chainCall=*/false, ArgTys,
538 FunctionType::ExtInfo(CC), {},
542 /// Arrange a call as unto a free function, except possibly with an
543 /// additional number of formal parameters considered required.
544 static const CGFunctionInfo &
545 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
547 const CallArgList &args,
548 const FunctionType *fnType,
549 unsigned numExtraRequiredArgs,
551 assert(args.size() >= numExtraRequiredArgs);
553 llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
555 // In most cases, there are no optional arguments.
556 RequiredArgs required = RequiredArgs::All;
558 // If we have a variadic prototype, the required arguments are the
559 // extra prefix plus the arguments in the prototype.
560 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
561 if (proto->isVariadic())
562 required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
564 if (proto->hasExtParameterInfos())
565 addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
568 // If we don't have a prototype at all, but we're supposed to
569 // explicitly use the variadic convention for unprototyped calls,
570 // treat all of the arguments as required but preserve the nominal
571 // possibility of variadics.
572 } else if (CGM.getTargetCodeGenInfo()
573 .isNoProtoCallVariadic(args,
574 cast<FunctionNoProtoType>(fnType))) {
575 required = RequiredArgs(args.size());
579 SmallVector<CanQualType, 16> argTypes;
580 for (const auto &arg : args)
581 argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
582 return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
583 /*instanceMethod=*/false, chainCall,
584 argTypes, fnType->getExtInfo(), paramInfos,
588 /// Figure out the rules for calling a function with the given formal
589 /// type using the given arguments. The arguments are necessary
590 /// because the function might be unprototyped, in which case it's
591 /// target-dependent in crazy ways.
592 const CGFunctionInfo &
593 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
594 const FunctionType *fnType,
596 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
597 chainCall ? 1 : 0, chainCall);
600 /// A block function is essentially a free function with an
601 /// extra implicit argument.
602 const CGFunctionInfo &
603 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
604 const FunctionType *fnType) {
605 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
606 /*chainCall=*/false);
609 const CGFunctionInfo &
610 CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
611 const FunctionArgList ¶ms) {
612 auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
613 auto argTypes = getArgTypesForDeclaration(Context, params);
615 return arrangeLLVMFunctionInfo(
616 GetReturnType(proto->getReturnType()),
617 /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
618 proto->getExtInfo(), paramInfos,
619 RequiredArgs::forPrototypePlus(proto, 1, nullptr));
622 const CGFunctionInfo &
623 CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
624 const CallArgList &args) {
626 SmallVector<CanQualType, 16> argTypes;
627 for (const auto &Arg : args)
628 argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
629 return arrangeLLVMFunctionInfo(
630 GetReturnType(resultType), /*instanceMethod=*/false,
631 /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
632 /*paramInfos=*/ {}, RequiredArgs::All);
635 const CGFunctionInfo &
636 CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
637 const FunctionArgList &args) {
638 auto argTypes = getArgTypesForDeclaration(Context, args);
640 return arrangeLLVMFunctionInfo(
641 GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
642 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
645 const CGFunctionInfo &
646 CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
647 ArrayRef<CanQualType> argTypes) {
648 return arrangeLLVMFunctionInfo(
649 resultType, /*instanceMethod=*/false, /*chainCall=*/false,
650 argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
653 /// Arrange a call to a C++ method, passing the given arguments.
655 /// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
656 /// does not count `this`.
657 const CGFunctionInfo &
658 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
659 const FunctionProtoType *proto,
660 RequiredArgs required,
661 unsigned numPrefixArgs) {
662 assert(numPrefixArgs + 1 <= args.size() &&
663 "Emitting a call with less args than the required prefix?");
664 // Add one to account for `this`. It's a bit awkward here, but we don't count
665 // `this` in similar places elsewhere.
667 getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
670 auto argTypes = getArgTypesForCall(Context, args);
672 FunctionType::ExtInfo info = proto->getExtInfo();
673 return arrangeLLVMFunctionInfo(
674 GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
675 /*chainCall=*/false, argTypes, info, paramInfos, required);
678 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
679 return arrangeLLVMFunctionInfo(
680 getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
681 None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
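/// Arrange a call site that passes more arguments than the signature declares;
/// the signature must allow optional (variadic) arguments.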
684 const CGFunctionInfo &
685 CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
686 const CallArgList &args) {
687 assert(signature.arg_size() <= args.size());
688 if (signature.arg_size() == args.size())
691 SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
692 auto sigParamInfos = signature.getExtParameterInfos();
693 if (!sigParamInfos.empty()) {
694 paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
695 paramInfos.resize(args.size());
698 auto argTypes = getArgTypesForCall(Context, args);
700 assert(signature.getRequiredArgs().allowsOptionalArgs());
701 return arrangeLLVMFunctionInfo(signature.getReturnType(),
702 signature.isInstanceMethod(),
703 signature.isChainCall(),
705 signature.getExtInfo(),
707 signature.getRequiredArgs());
710 /// Arrange the argument and result information for an abstract value
711 /// of a given function type. This is the method which all of the
712 /// above functions ultimately defer to.
713 const CGFunctionInfo &
714 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
717 ArrayRef<CanQualType> argTypes,
718 FunctionType::ExtInfo info,
719 ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
720 RequiredArgs required) {
721 assert(std::all_of(argTypes.begin(), argTypes.end(),
722 [](CanQualType T) { return T.isCanonicalAsParam(); }));
724 // Lookup or create unique function info.
725 llvm::FoldingSetNodeID ID;
726 CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
727 required, resultType, argTypes);
729 void *insertPos = nullptr;
730 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
734 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
736 // Construct the function info. We co-allocate the ArgInfos.
737 FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
738 paramInfos, resultType, argTypes, required);
739 FunctionInfos.InsertNode(FI, insertPos);
741 bool inserted = FunctionsBeingProcessed.insert(FI).second;
743 assert(inserted && "Recursively being processed?");
745 // Compute ABI information.
746 if (info.getCC() != CC_Swift) {
747 getABIInfo().computeInfo(*FI);
749 swiftcall::computeABIInfo(CGM, *FI);
752 // Loop over all of the computed argument and return value info. If any of
753 // them are direct or extend without a specified coerce type, specify the default now.
755 ABIArgInfo &retInfo = FI->getReturnInfo();
756 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
757 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
759 for (auto &I : FI->arguments())
760 if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
761 I.info.setCoerceToType(ConvertType(I.type));
763 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
764 assert(erased && "Not in set?");
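/// Allocate a CGFunctionInfo with trailing storage for the result type, the
/// argument types, and any ExtParameterInfos, and fill in the ABI-independent
/// fields; the per-argument ABIArgInfo is computed later by the ABI code.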
769 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
772 const FunctionType::ExtInfo &info,
773 ArrayRef<ExtParameterInfo> paramInfos,
774 CanQualType resultType,
775 ArrayRef<CanQualType> argTypes,
776 RequiredArgs required) {
777 assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
780 operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
781 argTypes.size() + 1, paramInfos.size()));
783 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
784 FI->CallingConvention = llvmCC;
785 FI->EffectiveCallingConvention = llvmCC;
786 FI->ASTCallingConvention = info.getCC();
787 FI->InstanceMethod = instanceMethod;
788 FI->ChainCall = chainCall;
789 FI->NoReturn = info.getNoReturn();
790 FI->ReturnsRetained = info.getProducesResult();
791 FI->Required = required;
792 FI->HasRegParm = info.getHasRegParm();
793 FI->RegParm = info.getRegParm();
794 FI->ArgStruct = nullptr;
795 FI->ArgStructAlign = 0;
796 FI->NumArgs = argTypes.size();
797 FI->HasExtParameterInfos = !paramInfos.empty();
798 FI->getArgsBuffer()[0].type = resultType;
799 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
800 FI->getArgsBuffer()[i + 1].type = argTypes[i];
801 for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
802 FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
809 // ABIArgInfo::Expand implementation.
811 // Specifies the way a QualType passed as ABIArgInfo::Expand is expanded.
812 struct TypeExpansion {
813 enum TypeExpansionKind {
814 // Elements of constant arrays are expanded recursively.
816 // Record fields are expanded recursively (but if record is a union, only
817 // the field with the largest size is expanded).
819 // For complex types, real and imaginary parts are expanded recursively.
821 // All other types are not expandable.
825 const TypeExpansionKind Kind;
827 TypeExpansion(TypeExpansionKind K) : Kind(K) {}
828 virtual ~TypeExpansion() {}
831 struct ConstantArrayExpansion : TypeExpansion {
835 ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
836 : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
837 static bool classof(const TypeExpansion *TE) {
838 return TE->Kind == TEK_ConstantArray;
842 struct RecordExpansion : TypeExpansion {
843 SmallVector<const CXXBaseSpecifier *, 1> Bases;
845 SmallVector<const FieldDecl *, 1> Fields;
847 RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
848 SmallVector<const FieldDecl *, 1> &&Fields)
849 : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
850 Fields(std::move(Fields)) {}
851 static bool classof(const TypeExpansion *TE) {
852 return TE->Kind == TEK_Record;
856 struct ComplexExpansion : TypeExpansion {
859 ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
860 static bool classof(const TypeExpansion *TE) {
861 return TE->Kind == TEK_Complex;
865 struct NoExpansion : TypeExpansion {
866 NoExpansion() : TypeExpansion(TEK_None) {}
867 static bool classof(const TypeExpansion *TE) {
868 return TE->Kind == TEK_None;
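/// Classify how a type is broken apart for ABIArgInfo::Expand: constant arrays
/// expand element-wise, records expand into bases and non-bit-field fields
/// (unions keep only their largest field), complex types expand into real and
/// imaginary parts, and everything else is not expanded.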
873 static std::unique_ptr<TypeExpansion>
874 getTypeExpansion(QualType Ty, const ASTContext &Context) {
875 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
876 return llvm::make_unique<ConstantArrayExpansion>(
877 AT->getElementType(), AT->getSize().getZExtValue());
879 if (const RecordType *RT = Ty->getAs<RecordType>()) {
880 SmallVector<const CXXBaseSpecifier *, 1> Bases;
881 SmallVector<const FieldDecl *, 1> Fields;
882 const RecordDecl *RD = RT->getDecl();
883 assert(!RD->hasFlexibleArrayMember() &&
884 "Cannot expand structure with flexible array.");
886 // Unions can be here only in degenerate cases - all the fields are the same
887 // after flattening. Thus we have to use the "largest" field.
888 const FieldDecl *LargestFD = nullptr;
889 CharUnits UnionSize = CharUnits::Zero();
891 for (const auto *FD : RD->fields()) {
892 // Skip zero length bitfields.
893 if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
895 assert(!FD->isBitField() &&
896 "Cannot expand structure with bit-field members.");
897 CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
898 if (UnionSize < FieldSize) {
899 UnionSize = FieldSize;
904 Fields.push_back(LargestFD);
906 if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
907 assert(!CXXRD->isDynamicClass() &&
908 "cannot expand vtable pointers in dynamic classes");
909 for (const CXXBaseSpecifier &BS : CXXRD->bases())
910 Bases.push_back(&BS);
913 for (const auto *FD : RD->fields()) {
914 // Skip zero length bitfields.
915 if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
917 assert(!FD->isBitField() &&
918 "Cannot expand structure with bit-field members.");
919 Fields.push_back(FD);
922 return llvm::make_unique<RecordExpansion>(std::move(Bases),
925 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
926 return llvm::make_unique<ComplexExpansion>(CT->getElementType());
928 return llvm::make_unique<NoExpansion>();
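/// Count how many leaf IR arguments the expansion of Ty produces, recursing
/// through array elements, bases, and fields; a complex value counts as two.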
931 static int getExpansionSize(QualType Ty, const ASTContext &Context) {
932 auto Exp = getTypeExpansion(Ty, Context);
933 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
934 return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
936 if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
938 for (auto BS : RExp->Bases)
939 Res += getExpansionSize(BS->getType(), Context);
940 for (auto FD : RExp->Fields)
941 Res += getExpansionSize(FD->getType(), Context);
944 if (isa<ComplexExpansion>(Exp.get()))
946 assert(isa<NoExpansion>(Exp.get()));
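// Append the LLVM IR types of the expanded pieces of Ty through the output
// iterator TI, in the same order getExpansionSize counts them.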
951 CodeGenTypes::getExpandedTypes(QualType Ty,
952 SmallVectorImpl<llvm::Type *>::iterator &TI) {
953 auto Exp = getTypeExpansion(Ty, Context);
954 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
955 for (int i = 0, n = CAExp->NumElts; i < n; i++) {
956 getExpandedTypes(CAExp->EltTy, TI);
958 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
959 for (auto BS : RExp->Bases)
960 getExpandedTypes(BS->getType(), TI);
961 for (auto FD : RExp->Fields)
962 getExpandedTypes(FD->getType(), TI);
963 } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
964 llvm::Type *EltTy = ConvertType(CExp->EltTy);
968 assert(isa<NoExpansion>(Exp.get()));
969 *TI++ = ConvertType(Ty);
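// Apply Fn to the address of each element of a constant-array expansion,
// computed by a GEP from the base address with the element's array alignment.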
973 static void forConstantArrayExpansion(CodeGenFunction &CGF,
974 ConstantArrayExpansion *CAE,
976 llvm::function_ref<void(Address)> Fn) {
977 CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
979 BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
981 for (int i = 0, n = CAE->NumElts; i < n; i++) {
982 llvm::Value *EltAddr =
983 CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
984 Fn(Address(EltAddr, EltAlign));
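// Reassemble an expanded parameter: walk the expansion of Ty and store the
// incoming IR argument values (consumed through AI) back into the lvalue LV.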
988 void CodeGenFunction::ExpandTypeFromArgs(
989 QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
990 assert(LV.isSimple() &&
991 "Unexpected non-simple lvalue during struct expansion.");
993 auto Exp = getTypeExpansion(Ty, getContext());
994 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
995 forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
996 [&](Address EltAddr) {
997 LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
998 ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
1000 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1001 Address This = LV.getAddress();
1002 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1003 // Perform a single step derived-to-base conversion.
1005 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1006 /*NullCheckValue=*/false, SourceLocation());
1007 LValue SubLV = MakeAddrLValue(Base, BS->getType());
1009 // Recurse onto bases.
1010 ExpandTypeFromArgs(BS->getType(), SubLV, AI);
1012 for (auto FD : RExp->Fields) {
1013 // FIXME: What are the right qualifiers here?
1014 LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
1015 ExpandTypeFromArgs(FD->getType(), SubLV, AI);
1017 } else if (isa<ComplexExpansion>(Exp.get())) {
1018 auto realValue = *AI++;
1019 auto imagValue = *AI++;
1020 EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
1022 assert(isa<NoExpansion>(Exp.get()));
1023 EmitStoreThroughLValue(RValue::get(*AI++), LV);
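// The inverse of ExpandTypeFromArgs: flatten the RValue RV into consecutive
// IR call arguments according to the expansion of Ty, bitcasting scalars to
// the IR parameter type where needed.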
1027 void CodeGenFunction::ExpandTypeToArgs(
1028 QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
1029 SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
1030 auto Exp = getTypeExpansion(Ty, getContext());
1031 if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
1032 forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
1033 [&](Address EltAddr) {
1035 convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
1036 ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
1038 } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
1039 Address This = RV.getAggregateAddress();
1040 for (const CXXBaseSpecifier *BS : RExp->Bases) {
1041 // Perform a single step derived-to-base conversion.
1043 GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
1044 /*NullCheckValue=*/false, SourceLocation());
1045 RValue BaseRV = RValue::getAggregate(Base);
1047 // Recurse onto bases.
1048 ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
1052 LValue LV = MakeAddrLValue(This, Ty);
1053 for (auto FD : RExp->Fields) {
1054 RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
1055 ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
1058 } else if (isa<ComplexExpansion>(Exp.get())) {
1059 ComplexPairTy CV = RV.getComplexVal();
1060 IRCallArgs[IRCallArgPos++] = CV.first;
1061 IRCallArgs[IRCallArgPos++] = CV.second;
1063 assert(isa<NoExpansion>(Exp.get()));
1064 assert(RV.isScalar() &&
1065 "Unexpected non-scalar rvalue during struct expansion.");
1067 // Insert a bitcast as needed.
1068 llvm::Value *V = RV.getScalarVal();
1069 if (IRCallArgPos < IRFuncTy->getNumParams() &&
1070 V->getType() != IRFuncTy->getParamType(IRCallArgPos))
1071 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
1073 IRCallArgs[IRCallArgPos++] = V;
1077 /// Create a temporary allocation for the purposes of coercion.
1078 static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
1079 CharUnits MinAlign) {
1080 // Don't use an alignment that's worse than what LLVM would prefer.
1081 auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
1082 CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));
1084 return CGF.CreateTempAlloca(Ty, Align);
1087 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
1088 /// accessing some number of bytes out of it, try to gep into the struct to get
1089 /// at its inner goodness. Dive as deep as possible without entering an element
1090 /// with an in-memory size smaller than DstSize.
1092 EnterStructPointerForCoercedAccess(Address SrcPtr,
1093 llvm::StructType *SrcSTy,
1094 uint64_t DstSize, CodeGenFunction &CGF) {
1095 // We can't dive into a zero-element struct.
1096 if (SrcSTy->getNumElements() == 0) return SrcPtr;
1098 llvm::Type *FirstElt = SrcSTy->getElementType(0);
1100 // If the first elt is at least as large as what we're looking for, or if the
1101 // first element is the same size as the whole struct, we can enter it. The
1102 // comparison must be made on the store size and not the alloca size. Using
1103 // the alloca size may overstate the size of the load.
1104 uint64_t FirstEltSize =
1105 CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
1106 if (FirstEltSize < DstSize &&
1107 FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
1110 // GEP into the first element.
1111 SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");
1113 // If the first element is a struct, recurse.
1114 llvm::Type *SrcTy = SrcPtr.getElementType();
1115 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
1116 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
1121 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
1122 /// are either integers or pointers. This does a truncation of the value if it
1123 /// is too large or a zero extension if it is too small.
1125 /// This behaves as if the value were coerced through memory, so on big-endian
1126 /// targets the high bits are preserved in a truncation, while little-endian
1127 /// targets preserve the low bits.
1128 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
1130 CodeGenFunction &CGF) {
1131 if (Val->getType() == Ty)
1134 if (isa<llvm::PointerType>(Val->getType())) {
1135 // If this is Pointer->Pointer avoid conversion to and from int.
1136 if (isa<llvm::PointerType>(Ty))
1137 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
1139 // Convert the pointer to an integer so we can play with its width.
1140 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
1143 llvm::Type *DestIntTy = Ty;
1144 if (isa<llvm::PointerType>(DestIntTy))
1145 DestIntTy = CGF.IntPtrTy;
1147 if (Val->getType() != DestIntTy) {
1148 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
1149 if (DL.isBigEndian()) {
1150 // Preserve the high bits on big-endian targets.
1151 // That is what memory coercion does.
1152 uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
1153 uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);
1155 if (SrcSize > DstSize) {
1156 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
1157 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
1159 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
1160 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
1163 // Little-endian targets preserve the low bits. No shifts required.
1164 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
1168 if (isa<llvm::PointerType>(Ty))
1169 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
1175 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
1176 /// a pointer to an object of type \arg Ty, known to be aligned to
1177 /// \arg SrcAlign bytes.
1179 /// This safely handles the case when the src type is smaller than the
1180 /// destination type; in this situation the values of bits that are not
1181 /// present in the src are undefined.
1182 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1183 CodeGenFunction &CGF) {
1184 llvm::Type *SrcTy = Src.getElementType();
1186 // If SrcTy and Ty are the same, just do a load.
1188 return CGF.Builder.CreateLoad(Src);
1190 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1192 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1193 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1194 SrcTy = Src.getType()->getElementType();
1197 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1199 // If the source and destination are integer or pointer types, just do an
1200 // extension or truncation to the desired type.
1201 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1202 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1203 llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1204 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1207 // If load is legal, just bitcast the src pointer.
1208 if (SrcSize >= DstSize) {
1209 // Generally SrcSize is never greater than DstSize, since this means we are
1210 // losing bits. However, this can happen in cases where the structure has
1211 // additional padding, for example due to a user specified alignment.
1213 // FIXME: Assert that we aren't truncating non-padding bits when we have access
1214 // to that information.
1215 Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
1216 return CGF.Builder.CreateLoad(Src);
1219 // Otherwise do coercion through memory. This is stupid, but simple.
1220 Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1221 Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
1222 Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
1223 CGF.Builder.CreateMemCpy(Casted, SrcCasted,
1224 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
1226 return CGF.Builder.CreateLoad(Tmp);
1229 // Function to store a first-class aggregate into memory. We prefer to
1230 // store the elements rather than the aggregate to be more friendly to fast-isel.
1232 // FIXME: Do we need to recurse here?
1233 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
1234 Address Dest, bool DestIsVolatile) {
1235 // Prefer scalar stores to first-class aggregate stores.
1236 if (llvm::StructType *STy =
1237 dyn_cast<llvm::StructType>(Val->getType())) {
1238 const llvm::StructLayout *Layout =
1239 CGF.CGM.getDataLayout().getStructLayout(STy);
1241 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1242 auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
1243 Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
1244 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
1245 CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1248 CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
1252 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1253 /// where the source and destination may have different types. The
1254 /// destination is known to be aligned to \arg DstAlign bytes.
1256 /// This safely handles the case when the src type is larger than the
1257 /// destination type; the upper bits of the src will be lost.
1258 static void CreateCoercedStore(llvm::Value *Src,
1261 CodeGenFunction &CGF) {
1262 llvm::Type *SrcTy = Src->getType();
1263 llvm::Type *DstTy = Dst.getType()->getElementType();
1264 if (SrcTy == DstTy) {
1265 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1269 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1271 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1272 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
1273 DstTy = Dst.getType()->getElementType();
1276 // If the source and destination are integer or pointer types, just do an
1277 // extension or truncation to the desired type.
1278 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1279 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1280 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1281 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1285 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1287 // If store is legal, just bitcast the src pointer.
1288 if (SrcSize <= DstSize) {
1289 Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
1290 BuildAggStore(CGF, Src, Dst, DstIsVolatile);
1292 // Otherwise do coercion through memory. This is stupid, but simple.
1295 // Generally SrcSize is never greater than DstSize, since this means we are
1296 // losing bits. However, this can happen in cases where the structure has
1297 // additional padding, for example due to a user specified alignment.
1299 // FIXME: Assert that we aren't truncating non-padding bits when we have access
1300 // to that information.
1301 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1302 CGF.Builder.CreateStore(Src, Tmp);
1303 Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
1304 Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
1305 CGF.Builder.CreateMemCpy(DstCasted, Casted,
1306 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
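// For direct/extend arguments with a nonzero direct offset, advance the
// address by that many bytes and bitcast it to the coerce-to type.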
1311 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1312 const ABIArgInfo &info) {
1313 if (unsigned offset = info.getDirectOffset()) {
1314 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1315 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1316 CharUnits::fromQuantity(offset));
1317 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1324 /// Encapsulates information about the way function arguments from
1325 /// CGFunctionInfo should be passed to the actual LLVM IR function.
1326 class ClangToLLVMArgMapping {
1327 static const unsigned InvalidIndex = ~0U;
1328 unsigned InallocaArgNo;
1330 unsigned TotalIRArgs;
1332 /// Arguments of the LLVM IR function corresponding to a single Clang argument.
1334 unsigned PaddingArgIndex;
1335 // Argument is expanded to IR arguments at positions
1336 // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1337 unsigned FirstArgIndex;
1338 unsigned NumberOfArgs;
1341 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1345 SmallVector<IRArgs, 8> ArgInfo;
1348 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1349 bool OnlyRequiredArgs = false)
1350 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1351 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1352 construct(Context, FI, OnlyRequiredArgs);
1355 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1356 unsigned getInallocaArgNo() const {
1357 assert(hasInallocaArg());
1358 return InallocaArgNo;
1361 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1362 unsigned getSRetArgNo() const {
1363 assert(hasSRetArg());
1367 unsigned totalIRArgs() const { return TotalIRArgs; }
1369 bool hasPaddingArg(unsigned ArgNo) const {
1370 assert(ArgNo < ArgInfo.size());
1371 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1373 unsigned getPaddingArgNo(unsigned ArgNo) const {
1374 assert(hasPaddingArg(ArgNo));
1375 return ArgInfo[ArgNo].PaddingArgIndex;
1378 /// Returns the index of the first IR argument corresponding to ArgNo, and the number of IR arguments it expands to.
1380 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1381 assert(ArgNo < ArgInfo.size());
1382 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1383 ArgInfo[ArgNo].NumberOfArgs);
1387 void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1388 bool OnlyRequiredArgs);
1391 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1392 const CGFunctionInfo &FI,
1393 bool OnlyRequiredArgs) {
1394 unsigned IRArgNo = 0;
1395 bool SwapThisWithSRet = false;
1396 const ABIArgInfo &RetAI = FI.getReturnInfo();
1398 if (RetAI.getKind() == ABIArgInfo::Indirect) {
1399 SwapThisWithSRet = RetAI.isSRetAfterThis();
1400 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1404 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1405 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1407 assert(I != FI.arg_end());
1408 QualType ArgType = I->type;
1409 const ABIArgInfo &AI = I->info;
1410 // Collect data about IR arguments corresponding to Clang argument ArgNo.
1411 auto &IRArgs = ArgInfo[ArgNo];
1413 if (AI.getPaddingType())
1414 IRArgs.PaddingArgIndex = IRArgNo++;
1416 switch (AI.getKind()) {
1417 case ABIArgInfo::Extend:
1418 case ABIArgInfo::Direct: {
1419 // FIXME: handle sseregparm someday...
1420 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1421 if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1422 IRArgs.NumberOfArgs = STy->getNumElements();
1424 IRArgs.NumberOfArgs = 1;
1428 case ABIArgInfo::Indirect:
1429 IRArgs.NumberOfArgs = 1;
1431 case ABIArgInfo::Ignore:
1432 case ABIArgInfo::InAlloca:
1433 // Ignore and inalloca arguments don't have matching LLVM parameters.
1434 IRArgs.NumberOfArgs = 0;
1436 case ABIArgInfo::CoerceAndExpand:
1437 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1439 case ABIArgInfo::Expand:
1440 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1444 if (IRArgs.NumberOfArgs > 0) {
1445 IRArgs.FirstArgIndex = IRArgNo;
1446 IRArgNo += IRArgs.NumberOfArgs;
1449 // Skip over the sret parameter when it comes second. We already handled it above.
1451 if (IRArgNo == 1 && SwapThisWithSRet)
1454 assert(ArgNo == ArgInfo.size());
1456 if (FI.usesInAlloca())
1457 InallocaArgNo = IRArgNo++;
1459 TotalIRArgs = IRArgNo;
1465 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1466 return FI.getReturnInfo().isIndirect();
1469 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1470 return ReturnTypeUsesSRet(FI) &&
1471 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
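// Whether the target requires the floating-point-returning variant of the
// Objective-C message send for this result type.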
1474 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1475 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1476 switch (BT->getKind()) {
1479 case BuiltinType::Float:
1480 return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1481 case BuiltinType::Double:
1482 return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1483 case BuiltinType::LongDouble:
1484 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1491 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1492 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1493 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1494 if (BT->getKind() == BuiltinType::LongDouble)
1495 return getTarget().useObjCFP2RetForComplexLongDouble();
1502 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1503 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1504 return GetFunctionType(FI);
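// Build the LLVM function type for a CGFunctionInfo: derive the IR result type
// from the return ABIArgInfo, then lay out the sret, inalloca, padding, and
// per-argument IR parameter types for the required arguments.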
1507 llvm::FunctionType *
1508 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1510 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1512 assert(Inserted && "Recursively being processed?");
1514 llvm::Type *resultType = nullptr;
1515 const ABIArgInfo &retAI = FI.getReturnInfo();
1516 switch (retAI.getKind()) {
1517 case ABIArgInfo::Expand:
1518 llvm_unreachable("Invalid ABI kind for return argument");
1520 case ABIArgInfo::Extend:
1521 case ABIArgInfo::Direct:
1522 resultType = retAI.getCoerceToType();
1525 case ABIArgInfo::InAlloca:
1526 if (retAI.getInAllocaSRet()) {
1527 // sret things on win32 aren't void, they return the sret pointer.
1528 QualType ret = FI.getReturnType();
1529 llvm::Type *ty = ConvertType(ret);
1530 unsigned addressSpace = Context.getTargetAddressSpace(ret);
1531 resultType = llvm::PointerType::get(ty, addressSpace);
1533 resultType = llvm::Type::getVoidTy(getLLVMContext());
1537 case ABIArgInfo::Indirect:
1538 case ABIArgInfo::Ignore:
1539 resultType = llvm::Type::getVoidTy(getLLVMContext());
1542 case ABIArgInfo::CoerceAndExpand:
1543 resultType = retAI.getUnpaddedCoerceAndExpandType();
1547 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1548 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1550 // Add type for sret argument.
1551 if (IRFunctionArgs.hasSRetArg()) {
1552 QualType Ret = FI.getReturnType();
1553 llvm::Type *Ty = ConvertType(Ret);
1554 unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1555 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1556 llvm::PointerType::get(Ty, AddressSpace);
1559 // Add type for inalloca argument.
1560 if (IRFunctionArgs.hasInallocaArg()) {
1561 auto ArgStruct = FI.getArgStruct();
1563 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1566 // Add in all of the required arguments.
1568 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1569 ie = it + FI.getNumRequiredArgs();
1570 for (; it != ie; ++it, ++ArgNo) {
1571 const ABIArgInfo &ArgInfo = it->info;
1573 // Insert a padding type to ensure proper alignment.
1574 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1575 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1576 ArgInfo.getPaddingType();
1578 unsigned FirstIRArg, NumIRArgs;
1579 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1581 switch (ArgInfo.getKind()) {
1582 case ABIArgInfo::Ignore:
1583 case ABIArgInfo::InAlloca:
1584 assert(NumIRArgs == 0);
1587 case ABIArgInfo::Indirect: {
1588 assert(NumIRArgs == 1);
1589 // indirect arguments are always on the stack, which is addr space #0.
1590 llvm::Type *LTy = ConvertTypeForMem(it->type);
1591 ArgTypes[FirstIRArg] = LTy->getPointerTo();
1595 case ABIArgInfo::Extend:
1596 case ABIArgInfo::Direct: {
1597 // Fast-isel and the optimizer generally like scalar values better than
1598 // FCAs, so we flatten them if this is safe to do for this argument.
1599 llvm::Type *argType = ArgInfo.getCoerceToType();
1600 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1601 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1602 assert(NumIRArgs == st->getNumElements());
1603 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1604 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1606 assert(NumIRArgs == 1);
1607 ArgTypes[FirstIRArg] = argType;
1612 case ABIArgInfo::CoerceAndExpand: {
1613 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1614 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1615 *ArgTypesIter++ = EltTy;
1617 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1621 case ABIArgInfo::Expand:
1622 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1623 getExpandedTypes(it->type, ArgTypesIter);
1624 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1629 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1630 assert(Erased && "Not in set?");
1632 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
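// Return the IR function type to use for a vtable slot. If the method's type
// can't be converted yet (e.g. it depends on an incomplete type), an empty
// struct type is used as an opaque placeholder.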
1635 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1636 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1637 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1639 if (!isFuncTypeConvertible(FPT))
1640 return llvm::StructType::get(getLLVMContext());
1642 const CGFunctionInfo *Info;
1643 if (isa<CXXDestructorDecl>(MD))
1645 &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
1647 Info = &arrangeCXXMethodDeclaration(MD);
1648 return GetFunctionType(*Info);
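// Add function attributes that are implied by the formal prototype, such as
// nounwind for a non-throwing exception specification.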
1651 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1652 llvm::AttrBuilder &FuncAttrs,
1653 const FunctionProtoType *FPT) {
1657 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1658 FPT->isNothrow(Ctx))
1659 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
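// Build the default set of function attributes: optimization-size and
// floating-point options, frame-pointer and stack settings, and other
// target-independent defaults shared by function definitions and call sites.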
1662 void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
1663 bool AttrOnCallSite,
1664 llvm::AttrBuilder &FuncAttrs) {
1665 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1667 if (CodeGenOpts.OptimizeSize)
1668 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1669 if (CodeGenOpts.OptimizeSize == 2)
1670 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1673 if (CodeGenOpts.DisableRedZone)
1674 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1675 if (CodeGenOpts.NoImplicitFloat)
1676 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1678 if (AttrOnCallSite) {
1679 // Attributes that should go on the call site only.
1680 if (!CodeGenOpts.SimplifyLibCalls ||
1681 CodeGenOpts.isNoBuiltinFunc(Name.data()))
1682 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1683 if (!CodeGenOpts.TrapFuncName.empty())
1684 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1686 // Attributes that should go on the function, but not the call site.
1687 if (!CodeGenOpts.DisableFPElim) {
1688 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1689 } else if (CodeGenOpts.OmitLeafFramePointer) {
1690 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1691 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1693 FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1694 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1697 FuncAttrs.addAttribute("less-precise-fpmad",
1698 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1700 if (!CodeGenOpts.FPDenormalMode.empty())
1701 FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
1703 FuncAttrs.addAttribute("no-trapping-math",
1704 llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1706 // TODO: Are these all needed?
1707 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1708 FuncAttrs.addAttribute("no-infs-fp-math",
1709 llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1710 FuncAttrs.addAttribute("no-nans-fp-math",
1711 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1712 FuncAttrs.addAttribute("unsafe-fp-math",
1713 llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1714 FuncAttrs.addAttribute("use-soft-float",
1715 llvm::toStringRef(CodeGenOpts.SoftFloat));
1716 FuncAttrs.addAttribute("stack-protector-buffer-size",
1717 llvm::utostr(CodeGenOpts.SSPBufferSize));
1718 FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1719 llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1720 FuncAttrs.addAttribute(
1721 "correctly-rounded-divide-sqrt-fp-math",
1722 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1724 // TODO: Reciprocal estimate codegen options should apply to instructions?
1725 std::vector<std::string> &Recips = getTarget().getTargetOpts().Reciprocals;
1726 if (!Recips.empty())
1727 FuncAttrs.addAttribute("reciprocal-estimates",
1728 llvm::join(Recips.begin(), Recips.end(), ","));
1730 if (CodeGenOpts.StackRealignment)
1731 FuncAttrs.addAttribute("stackrealign");
1732 if (CodeGenOpts.Backchain)
1733 FuncAttrs.addAttribute("backchain");
1736 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1737 // Conservatively, mark all functions and calls in CUDA as convergent
1738 // (meaning, they may call an intrinsically convergent op, such as
1739 // __syncthreads(), and so can't have certain optimizations applied around
1740 // them). LLVM will remove this attribute where it safely can.
1741 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1743 // Exceptions aren't supported in CUDA device code.
1744 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1746 // Respect -fcuda-flush-denormals-to-zero.
1747 if (getLangOpts().CUDADeviceFlushDenormalsToZero)
1748 FuncAttrs.addAttribute("nvptx-f32ftz", "true");
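// Apply the default function attributes to an already-created IR function.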
1752 void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
1753 llvm::AttrBuilder FuncAttrs;
1754 ConstructDefaultFnAttrList(F.getName(),
1755 F.hasFnAttribute(llvm::Attribute::OptimizeNone),
1756 /* AttrOnCallsite = */ false, FuncAttrs);
1757 llvm::AttributeList AS = llvm::AttributeList::get(
1758 getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
1759 F.addAttributes(llvm::AttributeList::FunctionIndex, AS);
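// Build the full attribute list for a function definition or call site:
// function-level attributes, return-value attributes, and per-argument
// attributes, derived from the CGFunctionInfo, the callee declaration, and
// the codegen options.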
1762 void CodeGenModule::ConstructAttributeList(
1763 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1764 AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
1765 llvm::AttrBuilder FuncAttrs;
1766 llvm::AttrBuilder RetAttrs;
1768 CallingConv = FI.getEffectiveCallingConvention();
1769 if (FI.isNoReturn())
1770 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1772 // If we have information about the function prototype, we can learn
1773   // attributes from there.
1774 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1775 CalleeInfo.getCalleeFunctionProtoType());
1777 const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1779 bool HasOptnone = false;
1780 // FIXME: handle sseregparm someday...
1782 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1783 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1784 if (TargetDecl->hasAttr<NoThrowAttr>())
1785 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1786 if (TargetDecl->hasAttr<NoReturnAttr>())
1787 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1788 if (TargetDecl->hasAttr<NoDuplicateAttr>())
1789 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1790 if (TargetDecl->hasAttr<ConvergentAttr>())
1791 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1793 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1794 AddAttributesFromFunctionProtoType(
1795 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1796 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1797     // These attributes are not inherited by overrides.
1798 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1799 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1800 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1803 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1804 if (TargetDecl->hasAttr<ConstAttr>()) {
1805 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1806 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1807 } else if (TargetDecl->hasAttr<PureAttr>()) {
1808 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1809 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1810 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1811 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1812 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1814 if (TargetDecl->hasAttr<RestrictAttr>())
1815 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1816 if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1817 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1819 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1820 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1821 Optional<unsigned> NumElemsParam;
1822 // alloc_size args are base-1, 0 means not present.
1823 if (unsigned N = AllocSize->getNumElemsParam())
1824 NumElemsParam = N - 1;
1825 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1,
1830 ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
1832 if (CodeGenOpts.EnableSegmentedStacks &&
1833 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1834 FuncAttrs.addAttribute("split-stack");
1836 if (!AttrOnCallSite) {
1837 bool DisableTailCalls =
1838 CodeGenOpts.DisableTailCalls ||
1839 (TargetDecl && (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
1840 TargetDecl->hasAttr<AnyX86InterruptAttr>()));
1841 FuncAttrs.addAttribute("disable-tail-calls",
1842 llvm::toStringRef(DisableTailCalls));
1844 // Add target-cpu and target-features attributes to functions. If
1845 // we have a decl for the function and it has a target attribute then
1846 // parse that and add it to the feature set.
1847 StringRef TargetCPU = getTarget().getTargetOpts().CPU;
1848 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
1849 if (FD && FD->hasAttr<TargetAttr>()) {
1850 llvm::StringMap<bool> FeatureMap;
1851 getFunctionFeatureMap(FeatureMap, FD);
1853 // Produce the canonical string for this set of features.
1854 std::vector<std::string> Features;
1855 for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
1856 ie = FeatureMap.end();
1858 Features.push_back((it->second ? "+" : "-") + it->first().str());
1860 // Now add the target-cpu and target-features to the function.
1861 // While we populated the feature map above, we still need to
1862       // get and parse the target attribute so we can get the cpu for it.
1864 const auto *TD = FD->getAttr<TargetAttr>();
1865 TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
1866 if (ParsedAttr.second != "")
1867 TargetCPU = ParsedAttr.second;
1868 if (TargetCPU != "")
1869 FuncAttrs.addAttribute("target-cpu", TargetCPU);
1870 if (!Features.empty()) {
1871 std::sort(Features.begin(), Features.end());
1872 FuncAttrs.addAttribute(
1874 llvm::join(Features.begin(), Features.end(), ","));
1877       // Otherwise just add the existing target cpu and target features to the
       // function.
1879 std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
1880 if (TargetCPU != "")
1881 FuncAttrs.addAttribute("target-cpu", TargetCPU);
1882 if (!Features.empty()) {
1883 std::sort(Features.begin(), Features.end());
1884 FuncAttrs.addAttribute(
1886 llvm::join(Features.begin(), Features.end(), ","));
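  // Translate the ABI classification of the return value into IR attributes:
  // sign/zero extension for promoted integers, inreg, and the sret/inalloca
  // interactions handled below.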
1891 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1893 QualType RetTy = FI.getReturnType();
1894 const ABIArgInfo &RetAI = FI.getReturnInfo();
1895 switch (RetAI.getKind()) {
1896 case ABIArgInfo::Extend:
1897 if (RetTy->hasSignedIntegerRepresentation())
1898 RetAttrs.addAttribute(llvm::Attribute::SExt);
1899 else if (RetTy->hasUnsignedIntegerRepresentation())
1900 RetAttrs.addAttribute(llvm::Attribute::ZExt);
1902 case ABIArgInfo::Direct:
1903 if (RetAI.getInReg())
1904 RetAttrs.addAttribute(llvm::Attribute::InReg);
1906 case ABIArgInfo::Ignore:
1909 case ABIArgInfo::InAlloca:
1910 case ABIArgInfo::Indirect: {
1911 // inalloca and sret disable readnone and readonly
1912 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1913 .removeAttribute(llvm::Attribute::ReadNone);
1917 case ABIArgInfo::CoerceAndExpand:
1920 case ABIArgInfo::Expand:
1921 llvm_unreachable("Invalid ABI kind for return argument");
1924 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1925 QualType PTy = RefTy->getPointeeType();
1926 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1927 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1929 else if (getContext().getTargetAddressSpace(PTy) == 0)
1930 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1933 // Attach return attributes.
1934 if (RetAttrs.hasAttributes()) {
1935 PAL.push_back(llvm::AttributeList::get(
1936 getLLVMContext(), llvm::AttributeList::ReturnIndex, RetAttrs));
1939 bool hasUsedSRet = false;
1941 // Attach attributes to sret.
1942 if (IRFunctionArgs.hasSRetArg()) {
1943 llvm::AttrBuilder SRETAttrs;
1944 SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1946 if (RetAI.getInReg())
1947 SRETAttrs.addAttribute(llvm::Attribute::InReg);
1948 PAL.push_back(llvm::AttributeList::get(
1949 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
1952 // Attach attributes to inalloca argument.
1953 if (IRFunctionArgs.hasInallocaArg()) {
1954 llvm::AttrBuilder Attrs;
1955 Attrs.addAttribute(llvm::Attribute::InAlloca);
1956 PAL.push_back(llvm::AttributeList::get(
1957 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
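  // Walk the Clang-level arguments and attach attributes to each of the IR
  // arguments they expand to (extension, inreg, byval, alignment,
  // dereferenceability, and the Swift parameter ABIs).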
1961 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1963 I != E; ++I, ++ArgNo) {
1964 QualType ParamType = I->type;
1965 const ABIArgInfo &AI = I->info;
1966 llvm::AttrBuilder Attrs;
1968 // Add attribute for padding argument, if necessary.
1969 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1970 if (AI.getPaddingInReg())
1971 PAL.push_back(llvm::AttributeList::get(
1972 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
1973 llvm::Attribute::InReg));
1976 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1977 // have the corresponding parameter variable. It doesn't make
1978 // sense to do it here because parameters are so messed up.
1979 switch (AI.getKind()) {
1980 case ABIArgInfo::Extend:
1981 if (ParamType->isSignedIntegerOrEnumerationType())
1982 Attrs.addAttribute(llvm::Attribute::SExt);
1983 else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
1984 if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
1985 Attrs.addAttribute(llvm::Attribute::SExt);
1987 Attrs.addAttribute(llvm::Attribute::ZExt);
1990 case ABIArgInfo::Direct:
1991 if (ArgNo == 0 && FI.isChainCall())
1992 Attrs.addAttribute(llvm::Attribute::Nest);
1993 else if (AI.getInReg())
1994 Attrs.addAttribute(llvm::Attribute::InReg);
1997 case ABIArgInfo::Indirect: {
1999 Attrs.addAttribute(llvm::Attribute::InReg);
2001 if (AI.getIndirectByVal())
2002 Attrs.addAttribute(llvm::Attribute::ByVal);
2004 CharUnits Align = AI.getIndirectAlign();
2006 // In a byval argument, it is important that the required
2007 // alignment of the type is honored, as LLVM might be creating a
2008 // *new* stack object, and needs to know what alignment to give
2009 // it. (Sometimes it can deduce a sensible alignment on its own,
2010 // but not if clang decides it must emit a packed struct, or the
2011 // user specifies increased alignment requirements.)
2013 // This is different from indirect *not* byval, where the object
2014       // exists already, and the align attribute is purely informative.
2016 assert(!Align.isZero());
2018 // For now, only add this when we have a byval argument.
2019 // TODO: be less lazy about updating test cases.
2020 if (AI.getIndirectByVal())
2021 Attrs.addAlignmentAttr(Align.getQuantity());
2023 // byval disables readnone and readonly.
2024 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2025 .removeAttribute(llvm::Attribute::ReadNone);
2028 case ABIArgInfo::Ignore:
2029 case ABIArgInfo::Expand:
2030 case ABIArgInfo::CoerceAndExpand:
2033 case ABIArgInfo::InAlloca:
2034 // inalloca disables readnone and readonly.
2035 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2036 .removeAttribute(llvm::Attribute::ReadNone);
2040 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2041 QualType PTy = RefTy->getPointeeType();
2042 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2043 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
2045 else if (getContext().getTargetAddressSpace(PTy) == 0)
2046 Attrs.addAttribute(llvm::Attribute::NonNull);
2049 switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2050 case ParameterABI::Ordinary:
2053 case ParameterABI::SwiftIndirectResult: {
2054 // Add 'sret' if we haven't already used it for something, but
2055 // only if the result is void.
2056 if (!hasUsedSRet && RetTy->isVoidType()) {
2057 Attrs.addAttribute(llvm::Attribute::StructRet);
2061 // Add 'noalias' in either case.
2062 Attrs.addAttribute(llvm::Attribute::NoAlias);
2064 // Add 'dereferenceable' and 'alignment'.
2065 auto PTy = ParamType->getPointeeType();
2066 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2067 auto info = getContext().getTypeInfoInChars(PTy);
2068 Attrs.addDereferenceableAttr(info.first.getQuantity());
2069 Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2070 info.second.getQuantity()));
2075 case ParameterABI::SwiftErrorResult:
2076 Attrs.addAttribute(llvm::Attribute::SwiftError);
2079 case ParameterABI::SwiftContext:
2080 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2084 if (Attrs.hasAttributes()) {
2085 unsigned FirstIRArg, NumIRArgs;
2086 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2087 for (unsigned i = 0; i < NumIRArgs; i++)
2088 PAL.push_back(llvm::AttributeList::get(getLLVMContext(),
2089 FirstIRArg + i + 1, Attrs));
2092 assert(ArgNo == FI.arg_size());
2094 if (FuncAttrs.hasAttributes())
2095 PAL.push_back(llvm::AttributeList::get(
2096 getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs));
2099 /// An argument came in as a promoted argument; demote it back to its
/// declared type.
2101 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2103 llvm::Value *value) {
2104 llvm::Type *varType = CGF.ConvertType(var->getType());
2106 // This can happen with promotions that actually don't change the
2107 // underlying type, like the enum promotions.
2108 if (value->getType() == varType) return value;
2110 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2111 && "unexpected promotion type");
2113 if (isa<llvm::IntegerType>(varType))
2114 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2116 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2119 /// Returns the attribute (either a parameter attribute or a function
2120 /// attribute) that declares argument ArgNo to be non-null.
2121 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2122 QualType ArgType, unsigned ArgNo) {
2123 // FIXME: __attribute__((nonnull)) can also be applied to:
2124 // - references to pointers, where the pointee is known to be
2125 // nonnull (apparently a Clang extension)
2126 // - transparent unions containing pointers
2127 // In the former case, LLVM IR cannot represent the constraint. In
2128 // the latter case, we have no guarantee that the transparent union
2129 // is in fact passed as a pointer.
2130 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2132 // First, check attribute on parameter itself.
2134 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2137 // Check function attributes.
2140 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2141 if (NNAttr->isNonNull(ArgNo))
2148 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
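  // Cleanup that copies the swifterror value from the function-local
  // temporary back into the caller-provided swifterror argument on exit.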
2151 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2152 void Emit(CodeGenFunction &CGF, Flags flags) override {
2153 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2154 CGF.Builder.CreateStore(errorValue, Arg);
2159 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2161 const FunctionArgList &Args) {
2162 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2163 // Naked functions don't have prologues.
2166 // If this is an implicit-return-zero function, go ahead and
2167 // initialize the return value. TODO: it might be nice to have
2168 // a more general mechanism for this that didn't require synthesized
2169 // return statements.
2170 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2171 if (FD->hasImplicitReturnZero()) {
2172 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2173 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2174 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2175 Builder.CreateStore(Zero, ReturnValue);
2179   // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.
2182 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2183 // Flattened function arguments.
2184 SmallVector<llvm::Value *, 16> FnArgs;
2185 FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2186 for (auto &Arg : Fn->args()) {
2187 FnArgs.push_back(&Arg);
2189 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2191 // If we're using inalloca, all the memory arguments are GEPs off of the last
2192 // parameter, which is a pointer to the complete memory area.
2193 Address ArgStruct = Address::invalid();
2194 const llvm::StructLayout *ArgStructLayout = nullptr;
2195 if (IRFunctionArgs.hasInallocaArg()) {
2196 ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2197 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2198 FI.getArgStructAlignment());
2200 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2203 // Name the struct return parameter.
2204 if (IRFunctionArgs.hasSRetArg()) {
2205 auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2206 AI->setName("agg.result");
2207 AI->addAttr(llvm::AttributeList::get(getLLVMContext(), AI->getArgNo() + 1,
2208 llvm::Attribute::NoAlias));
2211 // Track if we received the parameter as a pointer (indirect, byval, or
2212   // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
2213 // into a local alloca for us.
2214 SmallVector<ParamValue, 16> ArgVals;
2215 ArgVals.reserve(Args.size());
2217 // Create a pointer value for every parameter declaration. This usually
2218 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2219 // any cleanups or do anything that might unwind. We do that separately, so
2220 // we can push the cleanups in the correct order for the ABI.
2221 assert(FI.arg_size() == Args.size() &&
2222 "Mismatch between function signature & arguments.");
2224 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2225 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2226 i != e; ++i, ++info_it, ++ArgNo) {
2227 const VarDecl *Arg = *i;
2228 QualType Ty = info_it->type;
2229 const ABIArgInfo &ArgI = info_it->info;
2232 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2234 unsigned FirstIRArg, NumIRArgs;
2235 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2237 switch (ArgI.getKind()) {
2238 case ABIArgInfo::InAlloca: {
2239 assert(NumIRArgs == 0);
2240 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2241 CharUnits FieldOffset =
2242 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2243 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2245 ArgVals.push_back(ParamValue::forIndirect(V));
2249 case ABIArgInfo::Indirect: {
2250 assert(NumIRArgs == 1);
2251 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2253 if (!hasScalarEvaluationKind(Ty)) {
2254 // Aggregates and complex variables are accessed by reference. All we
2255 // need to do is realign the value, if requested.
2256 Address V = ParamAddr;
2257 if (ArgI.getIndirectRealign()) {
2258 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2260 // Copy from the incoming argument pointer to the temporary with the
2261 // appropriate alignment.
2263         // FIXME: We should have a common utility for generating an aggregate
        // copy.
2265 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2266 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2267 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2268 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2269 Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2272 ArgVals.push_back(ParamValue::forIndirect(V));
2274 // Load scalar value from indirect argument.
2276 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
2279 V = emitArgumentDemotion(*this, Arg, V);
2280 ArgVals.push_back(ParamValue::forDirect(V));
2285 case ABIArgInfo::Extend:
2286 case ABIArgInfo::Direct: {
2288 // If we have the trivial case, handle it with no muss and fuss.
2289 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2290 ArgI.getCoerceToType() == ConvertType(Ty) &&
2291 ArgI.getDirectOffset() == 0) {
2292 assert(NumIRArgs == 1);
2293 llvm::Value *V = FnArgs[FirstIRArg];
2294 auto AI = cast<llvm::Argument>(V);
2296 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2297 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2298 PVD->getFunctionScopeIndex()))
2299 AI->addAttr(llvm::AttributeList::get(getLLVMContext(),
2301 llvm::Attribute::NonNull));
2303 QualType OTy = PVD->getOriginalType();
2304 if (const auto *ArrTy =
2305 getContext().getAsConstantArrayType(OTy)) {
2306 // A C99 array parameter declaration with the static keyword also
2307 // indicates dereferenceability, and if the size is constant we can
2308           // use the dereferenceable attribute (which requires the size in
          // bytes).
2310 if (ArrTy->getSizeModifier() == ArrayType::Static) {
2311 QualType ETy = ArrTy->getElementType();
2312 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2313 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2315 llvm::AttrBuilder Attrs;
2316 Attrs.addDereferenceableAttr(
2317 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2318 AI->addAttr(llvm::AttributeList::get(
2319 getLLVMContext(), AI->getArgNo() + 1, Attrs));
2320 } else if (getContext().getTargetAddressSpace(ETy) == 0) {
2321 AI->addAttr(llvm::AttributeList::get(getLLVMContext(),
2323 llvm::Attribute::NonNull));
2326 } else if (const auto *ArrTy =
2327 getContext().getAsVariableArrayType(OTy)) {
2328 // For C99 VLAs with the static keyword, we don't know the size so
2329 // we can't use the dereferenceable attribute, but in addrspace(0)
2330 // we know that it must be nonnull.
2331 if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2332 !getContext().getTargetAddressSpace(ArrTy->getElementType()))
2333 AI->addAttr(llvm::AttributeList::get(getLLVMContext(),
2335 llvm::Attribute::NonNull));
2338 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2340 if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2341 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2343 llvm::Value *AlignmentValue =
2344 EmitScalarExpr(AVAttr->getAlignment());
2345 llvm::ConstantInt *AlignmentCI =
2346 cast<llvm::ConstantInt>(AlignmentValue);
2347 unsigned Alignment =
2348 std::min((unsigned) AlignmentCI->getZExtValue(),
2349 +llvm::Value::MaximumAlignment);
2351 llvm::AttrBuilder Attrs;
2352 Attrs.addAlignmentAttr(Alignment);
2353 AI->addAttr(llvm::AttributeList::get(getLLVMContext(),
2354 AI->getArgNo() + 1, Attrs));
2358 if (Arg->getType().isRestrictQualified())
2359 AI->addAttr(llvm::AttributeList::get(
2360 getLLVMContext(), AI->getArgNo() + 1, llvm::Attribute::NoAlias));
2362 // LLVM expects swifterror parameters to be used in very restricted
2363 // ways. Copy the value into a less-restricted temporary.
2364 if (FI.getExtParameterInfo(ArgNo).getABI()
2365 == ParameterABI::SwiftErrorResult) {
2366 QualType pointeeTy = Ty->getPointeeType();
2367 assert(pointeeTy->isPointerType());
2369 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2370 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2371 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2372 Builder.CreateStore(incomingErrorValue, temp);
2373 V = temp.getPointer();
2375 // Push a cleanup to copy the value back at the end of the function.
2376 // The convention does not guarantee that the value will be written
2377 // back if the function exits with an unwind exception.
2378 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2381 // Ensure the argument is the correct type.
2382 if (V->getType() != ArgI.getCoerceToType())
2383 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2386 V = emitArgumentDemotion(*this, Arg, V);
2388 // Because of merging of function types from multiple decls it is
2389 // possible for the type of an argument to not match the corresponding
2390 // type in the function type. Since we are codegening the callee
2391 // in here, add a cast to the argument type.
2392 llvm::Type *LTy = ConvertType(Arg->getType());
2393 if (V->getType() != LTy)
2394 V = Builder.CreateBitCast(V, LTy);
2396 ArgVals.push_back(ParamValue::forDirect(V));
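      // Otherwise the argument was coerced: materialize a temporary with the
      // Clang-level type and store the (possibly flattened) IR arguments into it.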
2400 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2403 // Pointer to store into.
2404 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2406 // Fast-isel and the optimizer generally like scalar values better than
2407 // FCAs, so we flatten them if this is safe to do for this argument.
2408 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2409 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2410 STy->getNumElements() > 1) {
2411 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2412 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2413 llvm::Type *DstTy = Ptr.getElementType();
2414 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2416 Address AddrToStoreInto = Address::invalid();
2417 if (SrcSize <= DstSize) {
2419 Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
2422 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2425 assert(STy->getNumElements() == NumIRArgs);
2426 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2427 auto AI = FnArgs[FirstIRArg + i];
2428 AI->setName(Arg->getName() + ".coerce" + Twine(i));
2429 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2431 Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2432 Builder.CreateStore(AI, EltPtr);
2435 if (SrcSize > DstSize) {
2436 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2440 // Simple case, just do a coerced store of the argument into the alloca.
2441 assert(NumIRArgs == 1);
2442 auto AI = FnArgs[FirstIRArg];
2443 AI->setName(Arg->getName() + ".coerce");
2444 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2447 // Match to what EmitParmDecl is expecting for this type.
2448 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2450 EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2452 V = emitArgumentDemotion(*this, Arg, V);
2453 ArgVals.push_back(ParamValue::forDirect(V));
2455 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2460 case ABIArgInfo::CoerceAndExpand: {
2461 // Reconstruct into a temporary.
2462 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2463 ArgVals.push_back(ParamValue::forIndirect(alloca));
2465 auto coercionType = ArgI.getCoerceAndExpandType();
2466 alloca = Builder.CreateElementBitCast(alloca, coercionType);
2467 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2469 unsigned argIndex = FirstIRArg;
2470 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2471 llvm::Type *eltType = coercionType->getElementType(i);
2472 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2475 auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2476 auto elt = FnArgs[argIndex++];
2477 Builder.CreateStore(elt, eltAddr);
2479 assert(argIndex == FirstIRArg + NumIRArgs);
2483 case ABIArgInfo::Expand: {
2484 // If this structure was expanded into multiple arguments then
2485       // we need to create a temporary and reconstruct it from the arguments.
2487 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2488 LValue LV = MakeAddrLValue(Alloca, Ty);
2489 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2491 auto FnArgIter = FnArgs.begin() + FirstIRArg;
2492 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2493 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2494 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2495 auto AI = FnArgs[FirstIRArg + i];
2496 AI->setName(Arg->getName() + "." + Twine(i));
2501 case ABIArgInfo::Ignore:
2502 assert(NumIRArgs == 0);
2503 // Initialize the local variable appropriately.
2504 if (!hasScalarEvaluationKind(Ty)) {
2505 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2507 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2508 ArgVals.push_back(ParamValue::forDirect(U));
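  // Emit the parameter declarations. If the callee destroys its arguments
  // left to right, emit them in reverse so their cleanups are pushed in the
  // order the ABI requires.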
2514 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2515 for (int I = Args.size() - 1; I >= 0; --I)
2516 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2518 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2519 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2523 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2524 while (insn->use_empty()) {
2525 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2526 if (!bitcast) return;
2528 // This is "safe" because we would have used a ConstantExpr otherwise.
2529 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2530 bitcast->eraseFromParent();
2534 /// Try to emit a fused autorelease of a return result.
2535 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2536 llvm::Value *result) {
2537   // We must be immediately followed by the cast.
2538 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2539 if (BB->empty()) return nullptr;
2540 if (&BB->back() != result) return nullptr;
2542 llvm::Type *resultType = result->getType();
2544 // result is in a BasicBlock and is therefore an Instruction.
2545 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2547 SmallVector<llvm::Instruction *, 4> InstsToKill;
2550 // %generator = bitcast %type1* %generator2 to %type2*
2551 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2552     // We would have emitted this as a constant if the operand weren't
    // an Instruction.
2554 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2556 // Require the generator to be immediately followed by the cast.
2557 if (generator->getNextNode() != bitcast)
2560 InstsToKill.push_back(bitcast);
2564 // %generator = call i8* @objc_retain(i8* %originalResult)
2566 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2567 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2568 if (!call) return nullptr;
2570 bool doRetainAutorelease;
2572 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2573 doRetainAutorelease = true;
2574 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2575 .objc_retainAutoreleasedReturnValue) {
2576 doRetainAutorelease = false;
2578 // If we emitted an assembly marker for this call (and the
2579 // ARCEntrypoints field should have been set if so), go looking
2580 // for that call. If we can't find it, we can't do this
2581 // optimization. But it should always be the immediately previous
2582 // instruction, unless we needed bitcasts around the call.
2583 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2584 llvm::Instruction *prev = call->getPrevNode();
2586 if (isa<llvm::BitCastInst>(prev)) {
2587 prev = prev->getPrevNode();
2590 assert(isa<llvm::CallInst>(prev));
2591 assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2592 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2593 InstsToKill.push_back(prev);
2599 result = call->getArgOperand(0);
2600 InstsToKill.push_back(call);
2602 // Keep killing bitcasts, for sanity. Note that we no longer care
2603 // about precise ordering as long as there's exactly one use.
2604 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2605 if (!bitcast->hasOneUse()) break;
2606 InstsToKill.push_back(bitcast);
2607 result = bitcast->getOperand(0);
2610 // Delete all the unnecessary instructions, from latest to earliest.
2611 for (auto *I : InstsToKill)
2612 I->eraseFromParent();
2614 // Do the fused retain/autorelease if we were asked to.
2615 if (doRetainAutorelease)
2616 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2618 // Cast back to the result type.
2619 return CGF.Builder.CreateBitCast(result, resultType);
2622 /// If this is a +1 of the value of an immutable 'self', remove it.
2623 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2624 llvm::Value *result) {
2625 // This is only applicable to a method with an immutable 'self'.
2626 const ObjCMethodDecl *method =
2627 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2628 if (!method) return nullptr;
2629 const VarDecl *self = method->getSelfDecl();
2630 if (!self->getType().isConstQualified()) return nullptr;
2632 // Look for a retain call.
2633 llvm::CallInst *retainCall =
2634 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2636 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2639 // Look for an ordinary load of 'self'.
2640 llvm::Value *retainedValue = retainCall->getArgOperand(0);
2641 llvm::LoadInst *load =
2642 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2643 if (!load || load->isAtomic() || load->isVolatile() ||
2644 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2647 // Okay! Burn it all down. This relies for correctness on the
2648 // assumption that the retain is emitted as part of the return and
2649 // that thereafter everything is used "linearly".
2650 llvm::Type *resultType = result->getType();
2651 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2652 assert(retainCall->use_empty());
2653 retainCall->eraseFromParent();
2654 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2656 return CGF.Builder.CreateBitCast(load, resultType);
2659 /// Emit an ARC autorelease of the result of a function.
2661 /// \return the value to actually return from the function
2662 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2663 llvm::Value *result) {
2664 // If we're returning 'self', kill the initial retain. This is a
2665 // heuristic attempt to "encourage correctness" in the really unfortunate
2666 // case where we have a return of self during a dealloc and we desperately
2667 // need to avoid the possible autorelease.
2668 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2671 // At -O0, try to emit a fused retain/autorelease.
2672 if (CGF.shouldUseFusedARCCalls())
2673 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2676 return CGF.EmitARCAutoreleaseReturnValue(result);
2679 /// Heuristically search for a dominating store to the return-value slot.
2680 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2681   // Check if a User is a store whose pointer operand is the ReturnValue.
2682 // We are looking for stores to the ReturnValue, not for stores of the
2683 // ReturnValue to some other location.
2684 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2685 auto *SI = dyn_cast<llvm::StoreInst>(U);
2686 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2688 // These aren't actually possible for non-coerced returns, and we
2689 // only care about non-coerced returns on this code path.
2690 assert(!SI->isAtomic() && !SI->isVolatile());
2693 // If there are multiple uses of the return-value slot, just check
2694 // for something immediately preceding the IP. Sometimes this can
2695 // happen with how we generate implicit-returns; it can also happen
2696 // with noreturn cleanups.
2697 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2698 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2699 if (IP->empty()) return nullptr;
2700 llvm::Instruction *I = &IP->back();
2702 // Skip lifetime markers
2703 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2706 if (llvm::IntrinsicInst *Intrinsic =
2707 dyn_cast<llvm::IntrinsicInst>(&*II)) {
2708 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2709 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2713 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2721 return GetStoreIfValid(I);
2724 llvm::StoreInst *store =
2725 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2726 if (!store) return nullptr;
2728   // Now do a quick-and-dirty dominance check: just walk up the
2729   // single-predecessor chain from the current insertion point.
2730 llvm::BasicBlock *StoreBB = store->getParent();
2731 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2732 while (IP != StoreBB) {
2733 if (!(IP = IP->getSinglePredecessor()))
2737 // Okay, the store's basic block dominates the insertion point; we
2738 // can do our thing.
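// Emit the function epilogue: materialize the return value according to its
// ABI classification, apply an ARC autorelease if required, and emit the ret
// instruction with an appropriate debug location.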
2742 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2744 SourceLocation EndLoc) {
2745 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2746 // Naked functions don't have epilogues.
2747 Builder.CreateUnreachable();
2751 // Functions with no result always return void.
2752 if (!ReturnValue.isValid()) {
2753 Builder.CreateRetVoid();
2757 llvm::DebugLoc RetDbgLoc;
2758 llvm::Value *RV = nullptr;
2759 QualType RetTy = FI.getReturnType();
2760 const ABIArgInfo &RetAI = FI.getReturnInfo();
2762 switch (RetAI.getKind()) {
2763 case ABIArgInfo::InAlloca:
2764     // Aggregates get evaluated directly into the destination. Sometimes we
2765 // need to return the sret value in a register, though.
2766 assert(hasAggregateEvaluationKind(RetTy));
2767 if (RetAI.getInAllocaSRet()) {
2768 llvm::Function::arg_iterator EI = CurFn->arg_end();
2770 llvm::Value *ArgStruct = &*EI;
2771 llvm::Value *SRet = Builder.CreateStructGEP(
2772 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2773 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2777 case ABIArgInfo::Indirect: {
2778 auto AI = CurFn->arg_begin();
2779 if (RetAI.isSRetAfterThis())
2781 switch (getEvaluationKind(RetTy)) {
2784 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2785 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2790       // Do nothing; aggregates get evaluated directly into the destination.
2793 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2794 MakeNaturalAlignAddrLValue(&*AI, RetTy),
2801 case ABIArgInfo::Extend:
2802 case ABIArgInfo::Direct:
2803 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2804 RetAI.getDirectOffset() == 0) {
2805       // The internal return value temp will always have pointer-to-return-type
2806       // type, so just do a load.
2808 // If there is a dominating store to ReturnValue, we can elide
2809 // the load, zap the store, and usually zap the alloca.
2810 if (llvm::StoreInst *SI =
2811 findDominatingStoreToReturnValue(*this)) {
2812 // Reuse the debug location from the store unless there is
2813         // cleanup code to be emitted between the store and return statement.
2815 if (EmitRetDbgLoc && !AutoreleaseResult)
2816 RetDbgLoc = SI->getDebugLoc();
2817 // Get the stored value and nuke the now-dead store.
2818 RV = SI->getValueOperand();
2819 SI->eraseFromParent();
2821 // If that was the only use of the return value, nuke it as well now.
2822 auto returnValueInst = ReturnValue.getPointer();
2823 if (returnValueInst->use_empty()) {
2824 if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2825 alloca->eraseFromParent();
2826 ReturnValue = Address::invalid();
2830 // Otherwise, we have to do a simple load.
2832 RV = Builder.CreateLoad(ReturnValue);
2835 // If the value is offset in memory, apply the offset now.
2836 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2838 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2841 // In ARC, end functions that return a retainable type with a call
2842 // to objc_autoreleaseReturnValue.
2843 if (AutoreleaseResult) {
2845       // Type::isObjCRetainableType has to be called on a QualType that hasn't
2846       // been stripped of the typedefs, so we cannot use RetTy here. Get the
2847       // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
2848       // from CurCodeDecl or BlockInfo.
2851 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2852 RT = FD->getReturnType();
2853 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2854 RT = MD->getReturnType();
2855 else if (isa<BlockDecl>(CurCodeDecl))
2856 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2858 llvm_unreachable("Unexpected function/method type");
2860 assert(getLangOpts().ObjCAutoRefCount &&
2861 !FI.isReturnsRetained() &&
2862 RT->isObjCRetainableType());
2864 RV = emitAutoreleaseOfResult(*this, RV);
2869 case ABIArgInfo::Ignore:
2872 case ABIArgInfo::CoerceAndExpand: {
2873 auto coercionType = RetAI.getCoerceAndExpandType();
2874 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2876 // Load all of the coerced elements out into results.
2877 llvm::SmallVector<llvm::Value*, 4> results;
2878 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2879 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2880 auto coercedEltType = coercionType->getElementType(i);
2881 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2884 auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2885 auto elt = Builder.CreateLoad(eltAddr);
2886 results.push_back(elt);
2889 // If we have one result, it's the single direct result type.
2890 if (results.size() == 1) {
2893 // Otherwise, we need to make a first-class aggregate.
2895 // Construct a return type that lacks padding elements.
2896 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2898 RV = llvm::UndefValue::get(returnType);
2899 for (unsigned i = 0, e = results.size(); i != e; ++i) {
2900 RV = Builder.CreateInsertValue(RV, results[i], i);
2906 case ABIArgInfo::Expand:
2907 llvm_unreachable("Invalid ABI kind for return argument");
2910 llvm::Instruction *Ret;
2912 EmitReturnValueCheck(RV, EndLoc);
2913 Ret = Builder.CreateRet(RV);
2915 Ret = Builder.CreateRetVoid();
2919 Ret->setDebugLoc(std::move(RetDbgLoc));
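// Emit a sanitizer check that the return value is non-null when the function
// is annotated with returns_nonnull or has a _Nonnull return type.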
2922 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV,
2923 SourceLocation EndLoc) {
2924 // A current decl may not be available when emitting vtable thunks.
2928 ReturnsNonNullAttr *RetNNAttr = nullptr;
2929 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
2930 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
2932 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
2935 // Prefer the returns_nonnull attribute if it's present.
2936 SourceLocation AttrLoc;
2937 SanitizerMask CheckKind;
2938 SanitizerHandler Handler;
2940 assert(!requiresReturnValueNullabilityCheck() &&
2941 "Cannot check nullability and the nonnull attribute");
2942 AttrLoc = RetNNAttr->getLocation();
2943 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
2944 Handler = SanitizerHandler::NonnullReturn;
2946 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
2947 if (auto *TSI = DD->getTypeSourceInfo())
2948 if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
2949 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
2950 CheckKind = SanitizerKind::NullabilityReturn;
2951 Handler = SanitizerHandler::NullabilityReturn;
2954 SanitizerScope SanScope(this);
2956 llvm::BasicBlock *Check = nullptr;
2957 llvm::BasicBlock *NoCheck = nullptr;
2958 if (requiresReturnValueNullabilityCheck()) {
2959 // Before doing the nullability check, make sure that the preconditions for
2960 // the check are met.
2961 Check = createBasicBlock("nullcheck");
2962 NoCheck = createBasicBlock("no.nullcheck");
2963 Builder.CreateCondBr(RetValNullabilityPrecondition, Check, NoCheck);
2967 // Now do the null check. If the returns_nonnull attribute is present, this
2968 // is done unconditionally.
2969 llvm::Value *Cond = Builder.CreateIsNotNull(RV);
2970 llvm::Constant *StaticData[] = {
2971 EmitCheckSourceLocation(EndLoc), EmitCheckSourceLocation(AttrLoc),
2973 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
2975 if (requiresReturnValueNullabilityCheck())
2979 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2980 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2981 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
2984 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
2986   // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
2988 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
2989 llvm::Type *IRPtrTy = IRTy->getPointerTo();
2990 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
2992 // FIXME: When we generate this IR in one pass, we shouldn't need
2993 // this win32-specific alignment hack.
2994 CharUnits Align = CharUnits::fromQuantity(4);
2995 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
2997 return AggValueSlot::forAddr(Address(Placeholder, Align),
2999 AggValueSlot::IsNotDestructed,
3000 AggValueSlot::DoesNotNeedGCBarriers,
3001 AggValueSlot::IsNotAliased);
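// Forward a parameter of the current function as an argument to a delegated
// call, reusing the local that the prologue created for it.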
3004 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3005 const VarDecl *param,
3006 SourceLocation loc) {
3007 // StartFunction converted the ABI-lowered parameter(s) into a
3008   // local alloca. We need to turn that into an r-value suitable
  // for EmitCall().
3010 Address local = GetAddrOfLocalVar(param);
3012 QualType type = param->getType();
3014 assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
3015 "cannot emit delegate call arguments for inalloca arguments!");
3017 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3018 // but the argument needs to be the original pointer.
3019 if (type->isReferenceType()) {
3020 args.add(RValue::get(Builder.CreateLoad(local)), type);
3022 // In ARC, move out of consumed arguments so that the release cleanup
3023 // entered by StartFunction doesn't cause an over-release. This isn't
3024 // optimal -O0 code generation, but it should get cleaned up when
3025 // optimization is enabled. This also assumes that delegate calls are
3026 // performed exactly once for a set of arguments, but that should be safe.
3027 } else if (getLangOpts().ObjCAutoRefCount &&
3028 param->hasAttr<NSConsumedAttr>() &&
3029 type->isObjCRetainableType()) {
3030 llvm::Value *ptr = Builder.CreateLoad(local);
3032 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3033 Builder.CreateStore(null, local);
3034 args.add(RValue::get(ptr), type);
3036 // For the most part, we just need to load the alloca, except that
3037 // aggregate r-values are actually pointers to temporaries.
3039 args.add(convertTempToRValue(local, type, loc), type);
3043 static bool isProvablyNull(llvm::Value *addr) {
3044 return isa<llvm::ConstantPointerNull>(addr);
3047 /// Emit the actual writing-back of a writeback.
3048 static void emitWriteback(CodeGenFunction &CGF,
3049 const CallArgList::Writeback &writeback) {
3050 const LValue &srcLV = writeback.Source;
3051 Address srcAddr = srcLV.getAddress();
3052 assert(!isProvablyNull(srcAddr.getPointer()) &&
3053 "shouldn't have writeback for provably null argument");
3055 llvm::BasicBlock *contBB = nullptr;
3057 // If the argument wasn't provably non-null, we need to null check
3058 // before doing the store.
3059 bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer());
3060 if (!provablyNonNull) {
3061 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3062 contBB = CGF.createBasicBlock("icr.done");
3064 llvm::Value *isNull =
3065 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3066 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3067 CGF.EmitBlock(writebackBB);
3070 // Load the value to writeback.
3071 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3073 // Cast it back, in case we're writing an id to a Foo* or something.
3074 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3075 "icr.writeback-cast");
3077 // Perform the writeback.
3079 // If we have a "to use" value, it's something we need to emit a use
3080 // of. This has to be carefully threaded in: if it's done after the
3081 // release it's potentially undefined behavior (and the optimizer
3082 // will ignore it), and if it happens before the retain then the
3083 // optimizer could move the release there.
3084 if (writeback.ToUse) {
3085 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3087 // Retain the new value. No need to block-copy here: the block's
3088 // being passed up the stack.
3089 value = CGF.EmitARCRetainNonBlock(value);
3091 // Emit the intrinsic use here.
3092 CGF.EmitARCIntrinsicUse(writeback.ToUse);
3094 // Load the old value (primitively).
3095 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3097 // Put the new value in place (primitively).
3098 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3100 // Release the old value.
3101 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3103 // Otherwise, we can just do a normal lvalue store.
3105 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3108 // Jump to the continuation block.
3109 if (!provablyNonNull)
3110 CGF.EmitBlock(contBB);
3113 static void emitWritebacks(CodeGenFunction &CGF,
3114 const CallArgList &args) {
3115 for (const auto &I : args.writebacks())
3116 emitWriteback(CGF, I);
3119 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3120 const CallArgList &CallArgs) {
3121 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
3122 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3123 CallArgs.getCleanupsToDeactivate();
3124 // Iterate in reverse to increase the likelihood of popping the cleanup.
3125 for (const auto &I : llvm::reverse(Cleanups)) {
3126 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3127 I.IsActiveIP->eraseFromParent();
3131 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3132 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3133 if (uop->getOpcode() == UO_AddrOf)
3134 return uop->getSubExpr();
3138 /// Emit an argument that's being passed call-by-writeback. That is,
3139 /// we are passing the address of an __autoreleased temporary; it
3140 /// might be copy-initialized with the current value of the given
3141 /// address, but it will definitely be copied out of after the call.
3142 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3143 const ObjCIndirectCopyRestoreExpr *CRE) {
3146 // Make an optimistic effort to emit the address as an l-value.
3147 // This can fail if the argument expression is more complicated.
3148 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3149 srcLV = CGF.EmitLValue(lvExpr);
3151 // Otherwise, just emit it as a scalar.
3153 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3155 QualType srcAddrType =
3156 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3157 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3159 Address srcAddr = srcLV.getAddress();
3161 // The dest and src types don't necessarily match in LLVM terms
3162 // because of the crazy ObjC compatibility rules.
3164 llvm::PointerType *destType =
3165 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3167 // If the address is a constant null, just pass the appropriate null.
3168 if (isProvablyNull(srcAddr.getPointer())) {
3169 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3174 // Create the temporary.
3175 Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3176 CGF.getPointerAlign(),
3178 // Loading an l-value can introduce a cleanup if the l-value is __weak,
3179 // and that cleanup will be conditional if we can't prove that the l-value
3180 // isn't null, so we need to register a dominating point so that the cleanups
3181 // system will make valid IR.
3182 CodeGenFunction::ConditionalEvaluation condEval(CGF);
3184 // Zero-initialize it if we're not doing a copy-initialization.
3185 bool shouldCopy = CRE->shouldCopy();
3188 llvm::ConstantPointerNull::get(
3189 cast<llvm::PointerType>(destType->getElementType()));
3190 CGF.Builder.CreateStore(null, temp);
3193 llvm::BasicBlock *contBB = nullptr;
3194 llvm::BasicBlock *originBB = nullptr;
3196 // If the address is *not* known to be non-null, we need to switch.
3197 llvm::Value *finalArgument;
3199 bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer());
3200 if (provablyNonNull) {
3201 finalArgument = temp.getPointer();
3203 llvm::Value *isNull =
3204 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3206 finalArgument = CGF.Builder.CreateSelect(isNull,
3207 llvm::ConstantPointerNull::get(destType),
3208 temp.getPointer(), "icr.argument");
3210 // If we need to copy, then the load has to be conditional, which
3211 // means we need control flow.
3213 originBB = CGF.Builder.GetInsertBlock();
3214 contBB = CGF.createBasicBlock("icr.cont");
3215 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3216 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3217 CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  llvm::Value *valueToUse = nullptr;
  // Perform a copy if necessary.
  if (shouldCopy) {
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3227 assert(srcRV.isScalar());
3229 llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
3234 CGF.Builder.CreateStore(src, temp);
3236 // If optimization is enabled, and the value was held in a
3237 // __strong variable, we need to tell the optimizer that this
3238 // value has to stay alive until we're doing the store back.
3239 // This is because the temporary is effectively unretained,
3240 // and so otherwise we can violate the high-level semantics.
3241 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
      valueToUse = src;
    }
  }

  // Finish the control flow if we needed it.
3248 if (shouldCopy && !provablyNonNull) {
3249 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3250 CGF.EmitBlock(contBB);
    // Make a phi for the value to intrinsically use.
    if (valueToUse) {
      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                      "icr.to-use");
      phiToUse->addIncoming(valueToUse, copyBB);
      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
                            originBB);
      valueToUse = phiToUse;
    }

    condEval.end(CGF);
  }

  args.addWriteback(srcLV, temp, valueToUse);
  args.add(RValue::get(finalArgument), CRE->getType());
}

void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
  assert(!StackBase);

  // Save the stack.
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
}

void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
  if (StackBase) {
    // Restore the stack after the call.
    llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    CGF.Builder.CreateCall(F, StackBase);
  }
}
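// The emitted IR brackets the inalloca argument area roughly like this
// (illustrative sketch):
//
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ ... }>
//   call void @f(<{ ... }>* inalloca %argmem)
//   call void @llvm.stackrestore(i8* %inalloca.save)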
void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
                                          SourceLocation ArgLoc,
                                          AbstractCallee AC,
                                          unsigned ParmNum) {
  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
                         SanOpts.has(SanitizerKind::NullabilityArg)))
    return;

  // The param decl may be missing in a variadic function.
3294 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3295 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3297 // Prefer the nonnull attribute if it's present.
3298 const NonNullAttr *NNAttr = nullptr;
3299 if (SanOpts.has(SanitizerKind::NonnullAttribute))
3300 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3302 bool CanCheckNullability = false;
3303 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3304 auto Nullability = PVD->getType()->getNullability(getContext());
3305 CanCheckNullability = Nullability &&
3306 *Nullability == NullabilityKind::NonNull &&
                          PVD->getTypeSourceInfo();
  }

  if (!NNAttr && !CanCheckNullability)
    return;

  SourceLocation AttrLoc;
3314 SanitizerMask CheckKind;
  SanitizerHandler Handler;
  if (NNAttr) {
    AttrLoc = NNAttr->getLocation();
3318 CheckKind = SanitizerKind::NonnullAttribute;
    Handler = SanitizerHandler::NonnullArg;
  } else {
    AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3322 CheckKind = SanitizerKind::NullabilityArg;
    Handler = SanitizerHandler::NullabilityArg;
  }

  SanitizerScope SanScope(this);
3327 assert(RV.isScalar());
  llvm::Value *V = RV.getScalarVal();
  llvm::Value *Cond =
      Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3331 llvm::Constant *StaticData[] = {
3332 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
  };
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
}
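// For example (illustrative): with -fsanitize=nullability-arg, passing a null
// pointer for a parameter declared 'int * _Nonnull p' makes the check above
// fail at runtime and report the argument index and the source locations
// recorded in StaticData.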
3338 void CodeGenFunction::EmitCallArgs(
3339 CallArgList &Args, ArrayRef<QualType> ArgTypes,
3340 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3341 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3342 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3344 // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3345 // because arguments are destroyed left to right in the callee. As a special
3346 // case, there are certain language constructs that require left-to-right
3347 // evaluation, and in those cases we consider the evaluation order requirement
  // to trump the "destruction order is reverse construction order" guarantee.
  bool LeftToRight =
      CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3351 ? Order == EvaluationOrder::ForceLeftToRight
3352 : Order != EvaluationOrder::ForceRightToLeft;
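  // For example (illustrative): for a call 'f(g(), h())' under the Microsoft
  // C++ ABI we evaluate h() before g(), whereas a braced initializer such as
  // 'T t{g(), h()}' is one of the constructs that forces left-to-right
  // evaluation regardless of the ABI.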
3354 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3355 RValue EmittedArg) {
    if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
      return;
    auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
    if (PS == nullptr)
      return;

    const auto &Context = getContext();
3363 auto SizeTy = Context.getSizeType();
3364 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3365 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3366 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3367 EmittedArg.getScalarVal());
3368 Args.add(RValue::get(V), SizeTy);
3369 // If we're emitting args in reverse, be sure to do so with
    // pass_object_size, as well.
    if (!LeftToRight)
      std::swap(Args.back(), *(&Args.back() - 1));
  };
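  // For example (illustrative): given
  //   void fill(char *buf __attribute__((pass_object_size(0))));
  // a call 'fill(array)' also passes __builtin_object_size(array, 0) as an
  // extra implicit argument immediately after 'buf'.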
3375 // Insert a stack save if we're going to need any inalloca args.
3376 bool HasInAllocaArgs = false;
3377 if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3378 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3379 I != E && !HasInAllocaArgs; ++I)
3380 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3381 if (HasInAllocaArgs) {
3382 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      Args.allocateArgumentMemory(*this);
    }
  }

  // Evaluate each argument in the appropriate order.
3388 size_t CallArgsStart = Args.size();
3389 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3390 unsigned Idx = LeftToRight ? I : E - I - 1;
3391 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3392 unsigned InitialArgSize = Args.size();
3393 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3394 // In particular, we depend on it being the last arg in Args, and the
3395 // objectsize bits depend on there only being one arg if !LeftToRight.
3396 assert(InitialArgSize + 1 == Args.size() &&
3397 "The code below depends on only adding one arg per EmitCallArg");
3398 (void)InitialArgSize;
3399 RValue RVArg = Args.back().RV;
3400 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3401 ParamsToSkip + Idx);
3402 // @llvm.objectsize should never have side-effects and shouldn't need
3403 // destruction/cleanups, so we can safely "emit" it after its arg,
3404 // regardless of right-to-leftness
    MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
  }

  if (!LeftToRight) {
    // Un-reverse the arguments we just evaluated so they match up with the LLVM
    // IR function.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
  }
}

namespace {

struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3418 DestroyUnpassedArg(Address Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  Address Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
3425 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3426 assert(!Dtor->isTrivial());
3427 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
                              /*Delegating=*/false, Addr);
  }
};

struct DisableDebugLocationUpdates {
3433 CodeGenFunction &CGF;
3434 bool disabledDebugInfo;
3435 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3436 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
      CGF.disableDebugInfo();
  }
  ~DisableDebugLocationUpdates() {
3440 if (disabledDebugInfo)
      CGF.enableDebugInfo();
  }
};

} // end anonymous namespace
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  DisableDebugLocationUpdates Dis(*this, E);
3450 if (const ObjCIndirectCopyRestoreExpr *CRE
3451 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3452 assert(getLangOpts().ObjCAutoRefCount);
3453 assert(getContext().hasSameUnqualifiedType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
3458 "reference binding to unmaterialized r-value!");
3460 if (E->isGLValue()) {
3461 assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E), type);
  }

  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3467 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3468 // However, we still have to push an EH-only cleanup in case we unwind before
3469 // we make it to the call.
3470 if (HasAggregateEvalKind &&
3471 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // If we're using inalloca, use the argument memory. Otherwise, use a
    // temporary.
    AggValueSlot Slot;
    if (args.isUsingInAlloca())
      Slot = createPlaceholderSlot(*this, type);
    else
      Slot = CreateAggTemp(type, "agg.tmp");
3480 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3481 bool DestroyedInCallee =
3482 RD && RD->hasNonTrivialDestructor() &&
3483 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
3484 if (DestroyedInCallee)
3485 Slot.setExternallyDestructed();
3487 EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    if (DestroyedInCallee) {
3492 // Create a no-op GEP between the placeholder and the cleanup so we can
3493 // RAUW it successfully. It also serves as a marker of the first
3494 // instruction where the cleanup is active.
      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
                                              type);
      // This unreachable is a temporary marker which will be removed later.
3498 llvm::Instruction *IsActive = Builder.CreateUnreachable();
      args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
    }
    return;
  }

  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3505 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3506 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3507 assert(L.isSimple());
3508 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
      args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    } else {
      // We can't represent a misaligned lvalue in the CallArgList, so copy
3512 // to an aligned temporary now.
3513 Address tmp = CreateMemTemp(type);
3514 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
      args.add(RValue::getAggregate(tmp), type);
    }
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3524 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3525 // implicitly widens null pointer constants that are arguments to varargs
3526 // functions to pointer-sized ints.
3527 if (!getTarget().getTriple().isOSWindows())
3528 return Arg->getType();
3530 if (Arg->getType()->isIntegerType() &&
3531 getContext().getTypeSize(Arg->getType()) <
3532 getContext().getTargetInfo().getPointerWidth(0) &&
3533 Arg->isNullPointerConstant(getContext(),
3534 Expr::NPC_ValueDependentIsNotNull)) {
    return getContext().getIntPtrType();
  }

  return Arg->getType();
}
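// For example (illustrative): on Win64, 'printf("%p", NULL)' passes the 'int'
// literal 0 through the varargs area; widening it to the pointer-sized
// intptr_t here keeps a callee that reads it with va_arg(ap, void *) from
// seeing undefined upper bits.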
// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3545 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3546 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3547 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3554 const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, None, name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3561 ArrayRef<llvm::Value*> args,
3562 const llvm::Twine &name) {
3563 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3572 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, None, name);
}

// Calls which may throw must have operand bundles indicating which funclet
// they are nested within.
static void
getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad,
3580 SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
  // There is no need for a funclet operand bundle if we aren't inside a
  // funclet.
  if (!CurrentFuncletPad)
    return;

  // Skip intrinsics which cannot throw.
3587 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
    return;

  BundleList.emplace_back("funclet", CurrentFuncletPad);
}
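// The resulting call sites carry the enclosing funclet token, roughly
// (illustrative sketch):
//
//   %pad = cleanuppad within none []
//   call void @g() [ "funclet"(token %pad) ]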
/// Emits a simple call (never an invoke) to the given runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3597 ArrayRef<llvm::Value*> args,
3598 const llvm::Twine &name) {
3599 SmallVector<llvm::OperandBundleDef, 1> BundleList;
3600 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3602 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

/// Emits a call or invoke to the given noreturn runtime function.
3608 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3609 ArrayRef<llvm::Value*> args) {
3610 SmallVector<llvm::OperandBundleDef, 1> BundleList;
3611 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3613 if (getInvokeDest()) {
3614 llvm::InvokeInst *invoke =
3615 Builder.CreateInvoke(callee,
                           getUnreachableBlock(),
                           getInvokeDest(),
                           args,
                           BundleList);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3624 call->setDoesNotReturn();
3625 call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
}

/// Emits a call or invoke instruction to the given nullary runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3633 const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, None, name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3640 ArrayRef<llvm::Value*> args,
3641 const Twine &name) {
3642 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
  callSite.setCallingConv(getRuntimeCC());
  return callSite;
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3651 ArrayRef<llvm::Value *> Args,
3652 const Twine &Name) {
3653 llvm::BasicBlock *InvokeDest = getInvokeDest();
3654 SmallVector<llvm::OperandBundleDef, 1> BundleList;
3655 getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
                                Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3668 // optimizer it can aggressively ignore unwind edges.
3669 if (CGM.getLangOpts().ObjCAutoRefCount)
3670 AddObjCARCExceptionMetadata(Inst);
  return llvm::CallSite(Inst);
}

/// \brief Store a non-aggregate value to an address to initialize it. For
3676 /// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
                                        LValue Dst) {
  if (Src.isScalar())
    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
  else
    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
}

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(std::make_pair(Old, New));
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3691 const CGCallee &Callee,
3692 ReturnValueSlot ReturnValue,
3693 const CallArgList &CallArgs,
3694 llvm::Instruction **callOrInvoke) {
3695 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3697 assert(Callee.isOrdinary());
3699 // Handle struct-return functions by passing a pointer to the
3700 // location that we would like to return into.
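  // For example (illustrative): a call 'S s = f();' where S must be returned
  // indirectly is lowered to 'call void @f(%struct.S* sret %s)', and the call
  // instruction itself produces no SSA value.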
3701 QualType RetTy = CallInfo.getReturnType();
3702 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3704 llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
3706 // 1. Set up the arguments.
3708 // If we're using inalloca, insert the allocation after the stack save.
3709 // FIXME: Do this earlier rather than hacking it in here!
3710 Address ArgMemory = Address::invalid();
3711 const llvm::StructLayout *ArgMemoryLayout = nullptr;
3712 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3713 const llvm::DataLayout &DL = CGM.getDataLayout();
3714 ArgMemoryLayout = DL.getStructLayout(ArgStruct);
3715 llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
                                "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    auto Align = CallInfo.getArgStructAlignment();
3725 AI->setAlignment(Align.getQuantity());
3726 AI->setUsedWithInAlloca(true);
3727 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = Address(AI, Align);
  }

  // Helper function to drill into the inalloca allocation.
  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
    CharUnits FieldOffset =
      CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
    return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
  };

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3739 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3741 // If the call returns a temporary with struct return, create a temporary
3742 // alloca to hold the result, unless one is given to us.
3743 Address SRetPtr = Address::invalid();
3744 size_t UnusedReturnSize = 0;
3745 if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
3746 if (!ReturnValue.isNull()) {
      SRetPtr = ReturnValue.getValue();
    } else {
      SRetPtr = CreateMemTemp(RetTy);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        uint64_t size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        if (EmitLifetimeStart(size, SRetPtr.getPointer()))
          UnusedReturnSize = size;
      }
    }
    if (IRFunctionArgs.hasSRetArg()) {
3758 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3759 } else if (RetAI.isInAlloca()) {
3760 Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr.getPointer(), Addr);
    }
  }

  Address swiftErrorTemp = Address::invalid();
3766 Address swiftErrorArg = Address::invalid();
3768 // Translate all of the arguments as necessary to match the IR lowering.
3769 assert(CallInfo.arg_size() == CallArgs.size() &&
3770 "Mismatch between function signature & arguments.");
3772 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3773 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3774 I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    // Insert a padding argument to ensure proper alignment.
3779 if (IRFunctionArgs.hasPaddingArg(ArgNo))
3780 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3781 llvm::UndefValue::get(ArgInfo.getPaddingType());
3783 unsigned FirstIRArg, NumIRArgs;
3784 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
3786 switch (ArgInfo.getKind()) {
3787 case ABIArgInfo::InAlloca: {
3788 assert(NumIRArgs == 0);
3789 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3790 if (RV.isAggregate()) {
3791 // Replace the placeholder with the appropriate argument slot GEP.
3792 llvm::Instruction *Placeholder =
3793 cast<llvm::Instruction>(RV.getAggregatePointer());
3794 CGBuilderTy::InsertPoint IP = Builder.saveIP();
3795 Builder.SetInsertPoint(Placeholder);
3796 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3797 Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else {
        // Store the RValue into the argument struct.
3801 Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
3802 unsigned AS = Addr.getType()->getPointerAddressSpace();
3803 llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
3804 // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
3806 // from {}* to (%struct.foo*)*.
3807 if (Addr.getType() != MemType)
3808 Addr = Builder.CreateBitCast(Addr, MemType);
3809 LValue argLV = MakeAddrLValue(Addr, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;
    }

    case ABIArgInfo::Indirect: {
3816 assert(NumIRArgs == 1);
3817 if (RV.isScalar() || RV.isComplex()) {
3818 // Make a temporary alloca to pass the argument.
3819 Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
3820 IRCallArgs[FirstIRArg] = Addr.getPointer();
3822 LValue argLV = MakeAddrLValue(Addr, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
3826 // however, we need one in three cases:
3827 // 1. If the argument is not byval, and we are required to copy the
3828 // source. (This case doesn't occur on any common architecture.)
3829 // 2. If the argument is byval, RV is not sufficiently aligned, and
3830 // we cannot force it to be sufficiently aligned.
3831 // 3. If the argument is byval, but RV is located in an address space
3832 // different than that of the argument (0).
3833 Address Addr = RV.getAggregateAddress();
3834 CharUnits Align = ArgInfo.getIndirectAlign();
3835 const llvm::DataLayout *TD = &CGM.getDataLayout();
3836 const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
3837 const unsigned ArgAddrSpace =
3838 (FirstIRArg < IRFuncTy->getNumParams()
                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
                 : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
3842 (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
3843 llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
3844 Align.getQuantity(), *TD)
3845 < Align.getQuantity()) ||
3846 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
3847 // Create an aligned temporary, and copy to it.
3848 Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
3849 IRCallArgs[FirstIRArg] = AI.getPointer();
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
        } else {
          // Skip the extra memcpy call.
          IRCallArgs[FirstIRArg] = Addr.getPointer();
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
3864 case ABIArgInfo::Direct: {
3865 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
3866 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
3867 ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddress());
3875 // Implement swifterror by copying into a new swifterror argument.
3876 // We'll write back in the normal path out of the call.
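        // In pseudo-IR (illustrative): the caller's error slot is loaded into
        // a fresh alloca marked 'swifterror', the alloca's address is what we
        // actually pass, and its final contents are stored back to the real
        // slot once the call returns (see the writeback after the call).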
3877 if (CallInfo.getExtParameterInfo(ArgNo).getABI()
3878 == ParameterABI::SwiftErrorResult) {
3879 assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
          QualType pointeeTy = I->Ty->getPointeeType();
          swiftErrorArg =
            Address(V, getContext().getTypeAlignInChars(pointeeTy));

          swiftErrorTemp =
            CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
3887 V = swiftErrorTemp.getPointer();
3888 cast<llvm::AllocaInst>(V)->setSwiftError(true);
3890 llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
          Builder.CreateStore(errorValue, swiftErrorTemp);
        }

        // We might have to widen integers, but we should never truncate.
3895 if (ArgInfo.getCoerceToType() != V->getType() &&
3896 V->getType()->isIntegerTy())
3897 V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
3899 // If the argument doesn't match, perform a bitcast to coerce it. This
3900 // can happen due to trivial type mismatches.
3901 if (FirstIRArg < IRFuncTy->getNumParams() &&
3902 V->getType() != IRFuncTy->getParamType(FirstIRArg))
3903 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
3910 Address Src = Address::invalid();
3911 if (RV.isScalar() || RV.isComplex()) {
3912 Src = CreateMemTemp(I->Ty, "coerce");
3913 LValue SrcLV = MakeAddrLValue(Src, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else {
        Src = RV.getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
3920 Src = emitAddressAtOffset(*this, Src, ArgInfo);
3922 // Fast-isel and the optimizer generally like scalar values better than
3923 // FCAs, so we flatten them if this is safe to do for this argument.
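      // For example (illustrative): an argument coerced to '{ i64, i64 }' is
      // passed as two separate i64 values instead of one first-class aggregate.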
3924 llvm::StructType *STy =
3925 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
3926 if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
3927 llvm::Type *SrcTy = Src.getType()->getElementType();
3928 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
3929 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
3931 // If the source type is smaller than the destination type of the
3932 // coerce-to logic, copy the source value into a temp alloca the size
3933 // of the destination type to allow loading all of it. The bits past
3934 // the source value are left undef.
        if (SrcSize < DstSize) {
          Address TempAlloca
            = CreateTempAlloca(STy, Src.getAlignment(),
                               Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
        }

        auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
3946 assert(NumIRArgs == STy->getNumElements());
3947 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3948 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
3949 Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
3950 llvm::Value *LI = Builder.CreateLoad(EltPtr);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        IRCallArgs[FirstIRArg] =
          CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
      }

      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
3964 auto coercionType = ArgInfo.getCoerceAndExpandType();
3965 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
3967 llvm::Value *tempSize = nullptr;
3968 Address addr = Address::invalid();
3969 if (RV.isAggregate()) {
        addr = RV.getAggregateAddress();
      } else {
        assert(RV.isScalar()); // complex should always just be direct
3974 llvm::Type *scalarType = RV.getScalarVal()->getType();
3975 auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
3976 auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);
3978 tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize);
3980 // Materialize to a temporary.
3981 addr = CreateTempAlloca(RV.getScalarVal()->getType(),
                 CharUnits::fromQuantity(std::max(layout->getAlignment(),
                                                  scalarAlign)));
        EmitLifetimeStart(scalarSize, addr.getPointer());
        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = Builder.CreateElementBitCast(addr, coercionType);
3991 unsigned IRArgPos = FirstIRArg;
3992 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3993 llvm::Type *eltType = coercionType->getElementType(i);
3994 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
3995 Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
3996 llvm::Value *elt = Builder.CreateLoad(eltAddr);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, addr.getPointer());
      }

      break;
    }

    case ABIArgInfo::Expand:
4009 unsigned IRArgPos = FirstIRArg;
4010 ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }

  llvm::Value *CalleePtr = Callee.getFunctionPointer();
4018 // If we're using inalloca, set up that argument.
4019 if (ArgMemory.isValid()) {
4020 llvm::Value *Arg = ArgMemory.getPointer();
4021 if (CallInfo.isVariadic()) {
4022 // When passing non-POD arguments by value to variadic functions, we will
4023 // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
4027 auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
      CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
    } else {
      llvm::Type *LastParamTy =
4031 IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
4035 llvm::StructType *FullTy = CallInfo.getArgStruct();
4036 llvm::StructType *DeclaredTy = cast<llvm::StructType>(
4037 cast<llvm::PointerType>(LastParamTy)->getElementType());
4038 assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
4039 for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
4040 DE = DeclaredTy->element_end(),
4041 FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  // 2. Prepare the function pointer.
4054 // If the callee is a bitcast of a non-variadic function to have a
4055 // variadic function pointer type, check to see if we can remove the
4056 // bitcast. This comes up with unprototyped functions.
4058 // This makes the IR nicer, but more importantly it ensures that we
4059 // can inline the function at -O0 if it is marked always_inline.
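  // For example (illustrative): in C, 'void f(); ... f();' calls f through a
  // pointer of variadic type; if f is actually defined as 'void f(void)', the
  // callee here is 'bitcast (void ()* @f to void (...)*)' and we can strip the
  // cast and call @f directly.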
4060 auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
4061 llvm::FunctionType *CalleeFT =
4062 cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
    if (!CalleeFT->isVarArg())
      return Ptr;

    llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
    if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
      return Ptr;

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
    if (!OrigFn)
      return Ptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
4076 // If the original type is variadic, or if any of the component types
4077 // disagree, we cannot remove the cast.
4078 if (OrigFT->isVarArg() ||
4079 OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return Ptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return Ptr;

    return OrigFn;
  };
  CalleePtr = simplifyVariadicCallee(CalleePtr);
4091 // 3. Perform the actual call.
  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
4096 deactivateArgCleanupsBeforeCall(*this, CallArgs);
4098 // Assert that the arguments we computed match up. The IR verifier
4099 // will catch this, but this is a common enough source of problems
4100 // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
4104 for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
4105 // Inalloca argument can have different type.
4106 if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }
#endif

  // Compute the calling convention and attributes.
4115 unsigned CallingConv;
4116 CodeGen::AttributeListType AttributeList;
4117 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
4118 Callee.getAbstractInfo(),
4119 AttributeList, CallingConv,
4120 /*AttrOnCallSite=*/true);
4121 llvm::AttributeList Attrs =
4122 llvm::AttributeList::get(getLLVMContext(), AttributeList);
4124 // Apply some call-site-specific attributes.
4125 // TODO: work this into building the attribute set.
4127 // Apply always_inline to all calls within flatten functions.
4128 // FIXME: should this really take priority over __try, below?
4129 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
4130 !(Callee.getAbstractInfo().getCalleeDecl() &&
        Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::AlwaysInline);
  }

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope()) {
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::NoInline);
  }

  // Decide whether to use a call or an invoke.
  bool CannotThrow;
4146 if (currentFunctionUsesSEHTry()) {
4147 // SEH cares about asynchronous exceptions, so everything can "throw."
4148 CannotThrow = false;
4149 } else if (isCleanupPadScope() &&
4150 EHPersonality::get(*this).isMSVCXXPersonality()) {
4151 // The MSVC++ personality will implicitly terminate the program if an
4152 // exception is thrown during a cleanup outside of a try/catch.
    // We don't need to model anything in IR to get this behavior.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
4157 CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
                                     llvm::Attribute::NoUnwind);
  }
  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
4162 SmallVector<llvm::OperandBundleDef, 1> BundleList;
4163 getBundlesForFunclet(CalleePtr, CurrentFuncletPad, BundleList);
  // Emit the actual call/invoke instruction.
  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  llvm::Instruction *CI = CS.getInstruction();
  if (callOrInvoke)
    *callOrInvoke = CI;

  // Apply the attributes and calling convention.
4180 CS.setAttributes(Attrs);
4181 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
4183 // Apply various metadata.
4185 if (!CI->getType()->isVoidTy())
4186 CI->setName("call");
4188 // Insert instrumentation or attach profile metadata at indirect call sites.
4189 // For more details, see the comment before the definition of
4190 // IPVK_IndirectCallTarget in InstrProfData.inc.
4191 if (!CS.getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CI, CalleePtr);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4196 // optimizer it can aggressively ignore unwind edges.
4197 if (CGM.getLangOpts().ObjCAutoRefCount)
4198 AddObjCARCExceptionMetadata(CI);
4200 // Suppress tail calls if requested.
4201 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
4202 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4203 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
  }

  // 4. Finish the call.
4209 // If the call doesn't return, finish the basic block and clear the
4210 // insertion point; this allows the rest of IRGen to discard
4211 // unreachable code.
4212 if (CS.doesNotReturn()) {
4213 if (UnusedReturnSize)
4214 EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
4215 SRetPtr.getPointer());
4217 Builder.CreateUnreachable();
4218 Builder.ClearInsertionPoint();
4220 // FIXME: For now, emit a dummy basic block because expr emitters in
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  // Perform the swifterror writeback.
4230 if (swiftErrorTemp.isValid()) {
4231 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any call-associated writebacks immediately. Arguably this
4236 // should happen after any return-value munging.
4237 if (CallArgs.hasWritebacks())
4238 emitWritebacks(*this, CallArgs);
4240 // The stack cleanup for inalloca arguments has to run out of the normal
4241 // lexical order, so deactivate it and run it manually here.
4242 CallArgs.freeArgumentMemory(*this);
  // Extract the return value.
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
4247 case ABIArgInfo::CoerceAndExpand: {
4248 auto coercionType = RetAI.getCoerceAndExpandType();
4249 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
4251 Address addr = SRetPtr;
4252 addr = Builder.CreateElementBitCast(addr, coercionType);
4254 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
4255 bool requiresExtract = isa<llvm::StructType>(CI->getType());
4257 unsigned unpaddedIndex = 0;
4258 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
4259 llvm::Type *eltType = coercionType->getElementType(i);
4260 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
4261 Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
4262 llvm::Value *elt = CI;
4263 if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      // FALLTHROUGH
      LLVM_FALLTHROUGH;
    }

    case ABIArgInfo::InAlloca:
4273 case ABIArgInfo::Indirect: {
4274 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
4275 if (UnusedReturnSize)
4276 EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                        SRetPtr.getPointer());
      return ret;
    }

    case ABIArgInfo::Ignore:
4282 // If we are ignoring an argument that had a result, make sure to
4283 // construct the appropriate return value for our caller.
4284 return GetUndefRValue(RetTy);
4286 case ABIArgInfo::Extend:
4287 case ABIArgInfo::Direct: {
4288 llvm::Type *RetIRTy = ConvertType(RetTy);
4289 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
4293 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
4297 Address DestPtr = ReturnValue.getValue();
4298 bool DestIsVolatile = ReturnValue.isVolatile();
4300 if (!DestPtr.isValid()) {
4301 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the argument doesn't match, perform a bitcast to coerce it. This
4309 // can happen due to trivial type mismatches.
4310 llvm::Value *V = CI;
4311 if (V->getType() != RetIRTy)
4312 V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
4320 bool DestIsVolatile = ReturnValue.isVolatile();
4322 if (!DestPtr.isValid()) {
4323 DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
4328 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
4329 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  } ();

  // Emit the assume_aligned check on the return value.
4342 const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
4343 if (Ret.isScalar() && TargetDecl) {
4344 if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
4345 llvm::Value *OffsetValue = nullptr;
4346 if (const auto *Offset = AA->getOffset())
4347 OffsetValue = EmitScalarExpr(Offset);
4349 llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
4350 llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
4354 llvm::Value *ParamVal =
4355 CallArgs[AA->getParamIndex() - 1].RV.getScalarVal();
      EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
    }
  }

  return Ret;
}

/* VarArg handling */
4365 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
4366 VAListAddr = VE->isMicrosoftABI()
4367 ? EmitMSVAListRef(VE->getSubExpr())
4368 : EmitVAListRef(VE->getSubExpr());
4369 QualType Ty = VE->getType();
4370 if (VE->isMicrosoftABI())
4371 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}
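// For example (illustrative): for 'va_arg(ap, double)' this returns the
// address of the properly aligned slot computed by the target's ABIInfo, and
// the caller of EmitVAArg then loads the value from that address.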