//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualifiers.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(
    const CodeGenTypes &CGT, SmallVectorImpl<CanQualType> &prefix,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    CanQual<FunctionProtoType> FPT, const FunctionDecl *FD) {
  // Fill out paramInfos.
  if (FPT->hasExtParameterInfos() || !paramInfos.empty()) {
    assert(paramInfos.size() <= prefix.size());
    auto protoParamInfos = FPT->getExtParameterInfos();
    paramInfos.reserve(prefix.size() + protoParamInfos.size());
    paramInfos.resize(prefix.size());
    paramInfos.append(protoParamInfos.begin(), protoParamInfos.end());
  }

  // Fast path: without a declaration there can be no pass_object_size
  // attributes, so just append the prototype's parameter types.
  if (FD == nullptr) {
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  assert(FD->getNumParams() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
      prefix.push_back(CGT.getContext().getSizeType());
  }
}

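// For example (illustrative): given
//   void f(void *p __attribute__((pass_object_size(0))));
// the arranged parameter list becomes (void *, size_t) -- one extra size_t
// slot per pass_object_size parameter, as appended above.
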
/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required =
      RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP, FD);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

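// For example (illustrative): a declaration such as
//   void f() __attribute__((stdcall));
// yields CC_X86StdCall here, which ClangCallConvToLLVMCallConv above then
// maps to llvm::CallingConv::X86_StdCall.
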
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

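// For example (illustrative): with
//   struct B { B(int); };  struct D : virtual B { using B::B; };
// the base-object variant of the inherited D(int) does not need the int
// parameter, since only the complete-object constructor initializes the
// virtual base B.
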
const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  bool PassParams = true;

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));

    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP, MD);

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto,
    unsigned prefixArgs,
    unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  auto protoInfos = proto->getExtParameterInfos();
  paramInfos.append(protoInfos.begin(), protoInfos.end());

  // Add default infos for the variadic arguments.
  paramInfos.resize(totalArgs);
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ constructor, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs, D);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  auto ParamInfos = getExtParameterInfosForCall(FPT.getTypePtr(), 1 + ExtraArgs,
                                                ArgTypes.size());
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, {}, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// type.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()),
      /*instanceMethod*/ false, /*chainCall*/ false, argTypes,
      proto->getExtInfo(), paramInfos,
      RequiredArgs::forPrototypePlus(proto, 1, nullptr));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/ {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required) {
  unsigned numRequiredArgs =
      (proto->isVariadic() ? required.getNumRequiredArgs() : args.size());
  unsigned numPrefixArgs = numRequiredArgs - proto->getNumParams();
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (info.getCC() != CC_Swift) {
    getABIInfo().computeInfo(*FI);
  } else {
    swiftcall::computeABIInfo(CGM, *FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

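// Note: the ArgInfo buffer co-allocated above stores the return type at
// index 0 followed by the argument types, which is why the allocation
// reserves argTypes.size() + 1 ArgInfo slots.
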
/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

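// For example (illustrative): 'struct S { int a[2]; _Complex float c; };'
// expands to four IR values -- a[0], a[1], and the real and imaginary halves
// of c -- so getExpansionSize below returns 4 for S.
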
static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Value *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

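// For example (illustrative): given an address of IR type
// { { i32, i32 }, float } and DstSize == 4, this dives to the innermost
// leading i32, since each enclosing first element is at least 4 bytes.
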
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");

  return Val;
}

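// Worked example (illustrative): truncating the i64 0x0102030405060708 to i32
// on a big-endian target shifts right by 32 first, yielding 0x01020304 -- the
// same bytes a 4-byte load from the value's memory image would see. On a
// little-endian target a plain truncation yields 0x05060708, again matching
// a 4-byte load of the leading bytes.
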
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// the optimizer.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

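// For example (illustrative): storing an IR value of type { i32, i64 } emits
// two scalar stores at the elements' StructLayout offsets (typically 0 and 8)
// instead of a single first-class-aggregate store.
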
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                             CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
}  // namespace

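// For example (illustrative): for a function whose return is indirect (sret)
// and whose single Clang argument expands to three IR values, IR argument 0
// is the sret pointer and getIRArgs(0) returns {1, 3}.
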
/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::CoerceAndExpand:
    resultType = retAI.getUnpaddedCoerceAndExpandType();
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
        *ArgTypesIter++ = EltTy;
      }
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

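// For example (illustrative): a parameter whose ABIArgInfo is Direct with
// coerce type { double, double } and CanBeFlattened set contributes two
// separate double parameters to the IR signature built above.
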
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
                                               llvm::AttrBuilder &FuncAttrs,
                                               const FunctionProtoType *FPT) {
  if (!FPT)
    return;

  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
      FPT->isNothrow(Ctx))
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}

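// For example (illustrative): a function declared 'void f() noexcept' has a
// non-throwing prototype, so it picks up the LLVM 'nounwind' attribute here.
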
1625 void CodeGenModule::ConstructAttributeList(
1626 StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
1627 AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
1628 llvm::AttrBuilder FuncAttrs;
1629 llvm::AttrBuilder RetAttrs;
1630 bool HasOptnone = false;
1632 CallingConv = FI.getEffectiveCallingConvention();
1634 if (FI.isNoReturn())
1635 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1637 // If we have information about the function prototype, we can learn
1638 // attributes from there.
1639 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
1640 CalleeInfo.getCalleeFunctionProtoType());
1642 const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
1644 bool HasAnyX86InterruptAttr = false;
1645 // FIXME: handle sseregparm someday...
1646 if (TargetDecl) {
1647 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1648 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1649 if (TargetDecl->hasAttr<NoThrowAttr>())
1650 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1651 if (TargetDecl->hasAttr<NoReturnAttr>())
1652 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1653 if (TargetDecl->hasAttr<NoDuplicateAttr>())
1654 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1655 if (TargetDecl->hasAttr<ConvergentAttr>())
1656 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1658 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1659 AddAttributesFromFunctionProtoType(
1660 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
1661 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1662 // These attributes are not inherited by overloads.
1663 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1664 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1665 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1666 }
1668 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
1669 if (TargetDecl->hasAttr<ConstAttr>()) {
1670 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1671 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1672 } else if (TargetDecl->hasAttr<PureAttr>()) {
1673 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1674 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1675 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
1676 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
1677 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1678 }
1679 if (TargetDecl->hasAttr<RestrictAttr>())
1680 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1681 if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1682 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1684 HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>();
1685 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1686 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
1687 Optional<unsigned> NumElemsParam;
1688 // alloc_size args are base-1, 0 means not present.
1689 if (unsigned N = AllocSize->getNumElemsParam())
1690 NumElemsParam = N - 1;
1691 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1,
1692 NumElemsParam);
1693 }
1694 }
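// Illustrative example (not in the original source): given
//   void *my_alloc(size_t n) __attribute__((alloc_size(1)));
// the 1-based Clang index becomes the 0-based LLVM 'allocsize(0)';
// alloc_size(1, 2) would likewise become 'allocsize(0, 1)'.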
1696 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1697 if (!HasOptnone) {
1698 if (CodeGenOpts.OptimizeSize)
1699 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1700 if (CodeGenOpts.OptimizeSize == 2)
1701 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1702 }
1704 if (CodeGenOpts.DisableRedZone)
1705 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1706 if (CodeGenOpts.NoImplicitFloat)
1707 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1708 if (CodeGenOpts.EnableSegmentedStacks &&
1709 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1710 FuncAttrs.addAttribute("split-stack");
1712 if (AttrOnCallSite) {
1713 // Attributes that should go on the call site only.
1714 if (!CodeGenOpts.SimplifyLibCalls ||
1715 CodeGenOpts.isNoBuiltinFunc(Name.data()))
1716 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1717 if (!CodeGenOpts.TrapFuncName.empty())
1718 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1720 // Attributes that should go on the function, but not the call site.
1721 if (!CodeGenOpts.DisableFPElim) {
1722 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1723 } else if (CodeGenOpts.OmitLeafFramePointer) {
1724 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1725 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1727 FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1728 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1731 bool DisableTailCalls =
1732 CodeGenOpts.DisableTailCalls || HasAnyX86InterruptAttr ||
1733 (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>());
1734 FuncAttrs.addAttribute(
1735 "disable-tail-calls",
1736 llvm::toStringRef(DisableTailCalls));
1738 FuncAttrs.addAttribute("less-precise-fpmad",
1739 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1741 if (!CodeGenOpts.FPDenormalMode.empty())
1742 FuncAttrs.addAttribute("denormal-fp-math",
1743 CodeGenOpts.FPDenormalMode);
1745 FuncAttrs.addAttribute("no-trapping-math",
1746 llvm::toStringRef(CodeGenOpts.NoTrappingMath));
1748 // TODO: Are these all needed?
1749 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1750 FuncAttrs.addAttribute("no-infs-fp-math",
1751 llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1752 FuncAttrs.addAttribute("no-nans-fp-math",
1753 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1754 FuncAttrs.addAttribute("unsafe-fp-math",
1755 llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1756 FuncAttrs.addAttribute("use-soft-float",
1757 llvm::toStringRef(CodeGenOpts.SoftFloat));
1758 FuncAttrs.addAttribute("stack-protector-buffer-size",
1759 llvm::utostr(CodeGenOpts.SSPBufferSize));
1760 FuncAttrs.addAttribute("no-signed-zeros-fp-math",
1761 llvm::toStringRef(CodeGenOpts.NoSignedZeros));
1762 FuncAttrs.addAttribute(
1763 "correctly-rounded-divide-sqrt-fp-math",
1764 llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
1766 // TODO: Reciprocal estimate codegen options should apply to instructions?
1767 std::vector<std::string> &Recips = getTarget().getTargetOpts().Reciprocals;
1768 if (!Recips.empty())
1769 FuncAttrs.addAttribute("reciprocal-estimates",
1770 llvm::join(Recips.begin(), Recips.end(), ","));
1772 if (CodeGenOpts.StackRealignment)
1773 FuncAttrs.addAttribute("stackrealign");
1774 if (CodeGenOpts.Backchain)
1775 FuncAttrs.addAttribute("backchain");
1777 // Add target-cpu and target-features attributes to functions. If
1778 // we have a decl for the function and it has a target attribute then
1779 // parse that and add it to the feature set.
1780 StringRef TargetCPU = getTarget().getTargetOpts().CPU;
1781 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
1782 if (FD && FD->hasAttr<TargetAttr>()) {
1783 llvm::StringMap<bool> FeatureMap;
1784 getFunctionFeatureMap(FeatureMap, FD);
1786 // Produce the canonical string for this set of features.
1787 std::vector<std::string> Features;
1788 for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
1789 ie = FeatureMap.end();
1790 it != ie; ++it)
1791 Features.push_back((it->second ? "+" : "-") + it->first().str());
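// Illustrative example (not in the original source): a feature map of
// {sse4.2: true, avx: false} produces the entries "+sse4.2" and "-avx",
// which after the sort below are joined into
//   "target-features"="+sse4.2,-avx"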
1793 // Now add the target-cpu and target-features to the function.
1794 // While we populated the feature map above, we still need to
1795 // get and parse the target attribute so we can get the cpu for
1796 // the function.
1797 const auto *TD = FD->getAttr<TargetAttr>();
1798 TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
1799 if (ParsedAttr.second != "")
1800 TargetCPU = ParsedAttr.second;
1801 if (TargetCPU != "")
1802 FuncAttrs.addAttribute("target-cpu", TargetCPU);
1803 if (!Features.empty()) {
1804 std::sort(Features.begin(), Features.end());
1805 FuncAttrs.addAttribute(
1806 "target-features",
1807 llvm::join(Features.begin(), Features.end(), ","));
1808 }
1809 } else {
1810 // Otherwise just add the existing target cpu and target features to the
1811 // function.
1812 std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
1813 if (TargetCPU != "")
1814 FuncAttrs.addAttribute("target-cpu", TargetCPU);
1815 if (!Features.empty()) {
1816 std::sort(Features.begin(), Features.end());
1817 FuncAttrs.addAttribute(
1818 "target-features",
1819 llvm::join(Features.begin(), Features.end(), ","));
1820 }
1821 }
1822 }
1824 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1825 // Conservatively, mark all functions and calls in CUDA as convergent
1826 // (meaning, they may call an intrinsically convergent op, such as
1827 // __syncthreads(), and so can't have certain optimizations applied around
1828 // them). LLVM will remove this attribute where it safely can.
1829 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1831 // Exceptions aren't supported in CUDA device code.
1832 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1834 // Respect -fcuda-flush-denormals-to-zero.
1835 if (getLangOpts().CUDADeviceFlushDenormalsToZero)
1836 FuncAttrs.addAttribute("nvptx-f32ftz", "true");
1839 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1841 QualType RetTy = FI.getReturnType();
1842 const ABIArgInfo &RetAI = FI.getReturnInfo();
1843 switch (RetAI.getKind()) {
1844 case ABIArgInfo::Extend:
1845 if (RetTy->hasSignedIntegerRepresentation())
1846 RetAttrs.addAttribute(llvm::Attribute::SExt);
1847 else if (RetTy->hasUnsignedIntegerRepresentation())
1848 RetAttrs.addAttribute(llvm::Attribute::ZExt);
1849 // FALL THROUGH
1850 case ABIArgInfo::Direct:
1851 if (RetAI.getInReg())
1852 RetAttrs.addAttribute(llvm::Attribute::InReg);
1853 break;
1854 case ABIArgInfo::Ignore:
1855 break;
1857 case ABIArgInfo::InAlloca:
1858 case ABIArgInfo::Indirect: {
1859 // inalloca and sret disable readnone and readonly
1860 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1861 .removeAttribute(llvm::Attribute::ReadNone);
1862 break;
1863 }
1865 case ABIArgInfo::CoerceAndExpand:
1866 break;
1868 case ABIArgInfo::Expand:
1869 llvm_unreachable("Invalid ABI kind for return argument");
1872 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1873 QualType PTy = RefTy->getPointeeType();
1874 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1875 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1876 .getQuantity());
1877 else if (getContext().getTargetAddressSpace(PTy) == 0)
1878 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1879 }
1881 // Attach return attributes.
1882 if (RetAttrs.hasAttributes()) {
1883 PAL.push_back(llvm::AttributeSet::get(
1884 getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
1885 }
1887 bool hasUsedSRet = false;
1889 // Attach attributes to sret.
1890 if (IRFunctionArgs.hasSRetArg()) {
1891 llvm::AttrBuilder SRETAttrs;
1892 SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1894 if (RetAI.getInReg())
1895 SRETAttrs.addAttribute(llvm::Attribute::InReg);
1896 PAL.push_back(llvm::AttributeSet::get(
1897 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
1898 }
1900 // Attach attributes to inalloca argument.
1901 if (IRFunctionArgs.hasInallocaArg()) {
1902 llvm::AttrBuilder Attrs;
1903 Attrs.addAttribute(llvm::Attribute::InAlloca);
1904 PAL.push_back(llvm::AttributeSet::get(
1905 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
1906 }
1908 unsigned ArgNo = 0;
1909 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1910 E = FI.arg_end();
1911 I != E; ++I, ++ArgNo) {
1912 QualType ParamType = I->type;
1913 const ABIArgInfo &AI = I->info;
1914 llvm::AttrBuilder Attrs;
1916 // Add attribute for padding argument, if necessary.
1917 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1918 if (AI.getPaddingInReg())
1919 PAL.push_back(llvm::AttributeSet::get(
1920 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
1921 llvm::Attribute::InReg));
1922 }
1924 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1925 // have the corresponding parameter variable. It doesn't make
1926 // sense to do it here because parameters are so messed up.
1927 switch (AI.getKind()) {
1928 case ABIArgInfo::Extend:
1929 if (ParamType->isSignedIntegerOrEnumerationType())
1930 Attrs.addAttribute(llvm::Attribute::SExt);
1931 else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
1932 if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
1933 Attrs.addAttribute(llvm::Attribute::SExt);
1934 else
1935 Attrs.addAttribute(llvm::Attribute::ZExt);
1936 }
1937 break;
1938 case ABIArgInfo::Direct:
1939 if (ArgNo == 0 && FI.isChainCall())
1940 Attrs.addAttribute(llvm::Attribute::Nest);
1941 else if (AI.getInReg())
1942 Attrs.addAttribute(llvm::Attribute::InReg);
1943 break;
1945 case ABIArgInfo::Indirect: {
1946 if (AI.getInReg())
1947 Attrs.addAttribute(llvm::Attribute::InReg);
1949 if (AI.getIndirectByVal())
1950 Attrs.addAttribute(llvm::Attribute::ByVal);
1952 CharUnits Align = AI.getIndirectAlign();
1954 // In a byval argument, it is important that the required
1955 // alignment of the type is honored, as LLVM might be creating a
1956 // *new* stack object, and needs to know what alignment to give
1957 // it. (Sometimes it can deduce a sensible alignment on its own,
1958 // but not if clang decides it must emit a packed struct, or the
1959 // user specifies increased alignment requirements.)
1961 // This is different from indirect *not* byval, where the object
1962 // exists already, and the align attribute is purely
1963 // informative.
1964 assert(!Align.isZero());
1966 // For now, only add this when we have a byval argument.
1967 // TODO: be less lazy about updating test cases.
1968 if (AI.getIndirectByVal())
1969 Attrs.addAlignmentAttr(Align.getQuantity());
1971 // byval disables readnone and readonly.
1972 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1973 .removeAttribute(llvm::Attribute::ReadNone);
1974 break;
1975 }
1976 case ABIArgInfo::Ignore:
1977 case ABIArgInfo::Expand:
1978 case ABIArgInfo::CoerceAndExpand:
1979 break;
1981 case ABIArgInfo::InAlloca:
1982 // inalloca disables readnone and readonly.
1983 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1984 .removeAttribute(llvm::Attribute::ReadNone);
1985 break;
1986 }
1988 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
1989 QualType PTy = RefTy->getPointeeType();
1990 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1991 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1992 .getQuantity());
1993 else if (getContext().getTargetAddressSpace(PTy) == 0)
1994 Attrs.addAttribute(llvm::Attribute::NonNull);
1995 }
1997 switch (FI.getExtParameterInfo(ArgNo).getABI()) {
1998 case ParameterABI::Ordinary:
1999 break;
2001 case ParameterABI::SwiftIndirectResult: {
2002 // Add 'sret' if we haven't already used it for something, but
2003 // only if the result is void.
2004 if (!hasUsedSRet && RetTy->isVoidType()) {
2005 Attrs.addAttribute(llvm::Attribute::StructRet);
2006 hasUsedSRet = true;
2007 }
2009 // Add 'noalias' in either case.
2010 Attrs.addAttribute(llvm::Attribute::NoAlias);
2012 // Add 'dereferenceable' and 'alignment'.
2013 auto PTy = ParamType->getPointeeType();
2014 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2015 auto info = getContext().getTypeInfoInChars(PTy);
2016 Attrs.addDereferenceableAttr(info.first.getQuantity());
2017 Attrs.addAttribute(llvm::Attribute::getWithAlignment(getLLVMContext(),
2018 info.second.getQuantity()));
2019 }
2020 break;
2021 }
2023 case ParameterABI::SwiftErrorResult:
2024 Attrs.addAttribute(llvm::Attribute::SwiftError);
2025 break;
2027 case ParameterABI::SwiftContext:
2028 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2029 break;
2030 }
2032 if (Attrs.hasAttributes()) {
2033 unsigned FirstIRArg, NumIRArgs;
2034 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2035 for (unsigned i = 0; i < NumIRArgs; i++)
2036 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
2037 FirstIRArg + i + 1, Attrs));
2038 }
2039 }
2040 assert(ArgNo == FI.arg_size());
2042 if (FuncAttrs.hasAttributes())
2043 PAL.push_back(llvm::
2044 AttributeSet::get(getLLVMContext(),
2045 llvm::AttributeSet::FunctionIndex,
2046 FuncAttrs));
2047 }
2049 /// An argument came in as a promoted argument; demote it back to its
2050 /// declared type.
2051 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2052 const VarDecl *var,
2053 llvm::Value *value) {
2054 llvm::Type *varType = CGF.ConvertType(var->getType());
2056 // This can happen with promotions that actually don't change the
2057 // underlying type, like the enum promotions.
2058 if (value->getType() == varType) return value;
2060 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2061 && "unexpected promotion type");
2063 if (isa<llvm::IntegerType>(varType))
2064 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2066 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2067 }
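// Illustrative example (not in the original source): for the K&R definition
//   void f(x) float x; { ... }
// the caller passes 'x' promoted to double, so the incoming double is
// demoted back to float here with an FP truncation ("arg.unpromote").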
2069 /// Returns the attribute (either parameter attribute, or function
2070 /// attribute), which declares argument ArgNo to be non-null.
2071 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2072 QualType ArgType, unsigned ArgNo) {
2073 // FIXME: __attribute__((nonnull)) can also be applied to:
2074 // - references to pointers, where the pointee is known to be
2075 // nonnull (apparently a Clang extension)
2076 // - transparent unions containing pointers
2077 // In the former case, LLVM IR cannot represent the constraint. In
2078 // the latter case, we have no guarantee that the transparent union
2079 // is in fact passed as a pointer.
2080 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2081 return nullptr;
2082 // First, check attribute on parameter itself.
2083 if (PVD) {
2084 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2085 return ParmNNAttr;
2086 }
2087 // Check function attributes.
2088 if (!FD)
2089 return nullptr;
2090 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2091 if (NNAttr->isNonNull(ArgNo))
2092 return NNAttr;
2093 }
2094 return nullptr;
2095 }
2097 namespace {
2098 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2099 Address Temp;
2100 Address Arg;
2101 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2102 void Emit(CodeGenFunction &CGF, Flags flags) override {
2103 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2104 CGF.Builder.CreateStore(errorValue, Arg);
2105 }
2106 };
2107 }
2109 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2110 llvm::Function *Fn,
2111 const FunctionArgList &Args) {
2112 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2113 // Naked functions don't have prologues.
2114 return;
2116 // If this is an implicit-return-zero function, go ahead and
2117 // initialize the return value. TODO: it might be nice to have
2118 // a more general mechanism for this that didn't require synthesized
2119 // return statements.
2120 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2121 if (FD->hasImplicitReturnZero()) {
2122 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2123 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2124 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2125 Builder.CreateStore(Zero, ReturnValue);
2126 }
2127 }
2129 // FIXME: We no longer need the types from FunctionArgList; lift up and
2130 // simplify.
2132 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2133 // Flattened function arguments.
2134 SmallVector<llvm::Value *, 16> FnArgs;
2135 FnArgs.reserve(IRFunctionArgs.totalIRArgs());
2136 for (auto &Arg : Fn->args()) {
2137 FnArgs.push_back(&Arg);
2138 }
2139 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
2141 // If we're using inalloca, all the memory arguments are GEPs off of the last
2142 // parameter, which is a pointer to the complete memory area.
2143 Address ArgStruct = Address::invalid();
2144 const llvm::StructLayout *ArgStructLayout = nullptr;
2145 if (IRFunctionArgs.hasInallocaArg()) {
2146 ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
2147 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
2148 FI.getArgStructAlignment());
2150 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2151 }
2153 // Name the struct return parameter.
2154 if (IRFunctionArgs.hasSRetArg()) {
2155 auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
2156 AI->setName("agg.result");
2157 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
2158 llvm::Attribute::NoAlias));
2159 }
2161 // Track if we received the parameter as a pointer (indirect, byval, or
2162 // inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it
2163 // into a local alloca for us.
2164 SmallVector<ParamValue, 16> ArgVals;
2165 ArgVals.reserve(Args.size());
2167 // Create a pointer value for every parameter declaration. This usually
2168 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2169 // any cleanups or do anything that might unwind. We do that separately, so
2170 // we can push the cleanups in the correct order for the ABI.
2171 assert(FI.arg_size() == Args.size() &&
2172 "Mismatch between function signature & arguments.");
2174 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2175 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2176 i != e; ++i, ++info_it, ++ArgNo) {
2177 const VarDecl *Arg = *i;
2178 QualType Ty = info_it->type;
2179 const ABIArgInfo &ArgI = info_it->info;
2181 bool isPromoted =
2182 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2184 unsigned FirstIRArg, NumIRArgs;
2185 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2187 switch (ArgI.getKind()) {
2188 case ABIArgInfo::InAlloca: {
2189 assert(NumIRArgs == 0);
2190 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2191 CharUnits FieldOffset =
2192 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
2193 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
2194 Arg->getName());
2195 ArgVals.push_back(ParamValue::forIndirect(V));
2196 break;
2197 }
2199 case ABIArgInfo::Indirect: {
2200 assert(NumIRArgs == 1);
2201 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
2203 if (!hasScalarEvaluationKind(Ty)) {
2204 // Aggregates and complex variables are accessed by reference. All we
2205 // need to do is realign the value, if requested.
2206 Address V = ParamAddr;
2207 if (ArgI.getIndirectRealign()) {
2208 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2210 // Copy from the incoming argument pointer to the temporary with the
2211 // appropriate alignment.
2213 // FIXME: We should have a common utility for generating an aggregate
2214 // copy.
2215 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2216 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
2217 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
2218 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
2219 Builder.CreateMemCpy(Dst, Src, SizeVal, false);
2220 V = AlignedTemp;
2221 }
2222 ArgVals.push_back(ParamValue::forIndirect(V));
2223 } else {
2224 // Load scalar value from indirect argument.
2225 llvm::Value *V =
2226 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
2228 if (isPromoted)
2229 V = emitArgumentDemotion(*this, Arg, V);
2230 ArgVals.push_back(ParamValue::forDirect(V));
2231 }
2232 break;
2233 }
2235 case ABIArgInfo::Extend:
2236 case ABIArgInfo::Direct: {
2238 // If we have the trivial case, handle it with no muss and fuss.
2239 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2240 ArgI.getCoerceToType() == ConvertType(Ty) &&
2241 ArgI.getDirectOffset() == 0) {
2242 assert(NumIRArgs == 1);
2243 llvm::Value *V = FnArgs[FirstIRArg];
2244 auto AI = cast<llvm::Argument>(V);
2246 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2247 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2248 PVD->getFunctionScopeIndex()))
2249 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2250 AI->getArgNo() + 1,
2251 llvm::Attribute::NonNull));
2253 QualType OTy = PVD->getOriginalType();
2254 if (const auto *ArrTy =
2255 getContext().getAsConstantArrayType(OTy)) {
2256 // A C99 array parameter declaration with the static keyword also
2257 // indicates dereferenceability, and if the size is constant we can
2258 // use the dereferenceable attribute (which requires the size in
2259 // bytes).
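// Illustrative example (not in the original source): for
//   void f(int a[static 10]);
// the caller must pass at least 10 valid ints, so with 4-byte ints the
// parameter gets 'dereferenceable(40)'.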
2260 if (ArrTy->getSizeModifier() == ArrayType::Static) {
2261 QualType ETy = ArrTy->getElementType();
2262 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2263 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2264 ArrSize) {
2265 llvm::AttrBuilder Attrs;
2266 Attrs.addDereferenceableAttr(
2267 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
2268 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2269 AI->getArgNo() + 1, Attrs));
2270 } else if (getContext().getTargetAddressSpace(ETy) == 0) {
2271 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2272 AI->getArgNo() + 1,
2273 llvm::Attribute::NonNull));
2274 }
2275 }
2276 } else if (const auto *ArrTy =
2277 getContext().getAsVariableArrayType(OTy)) {
2278 // For C99 VLAs with the static keyword, we don't know the size so
2279 // we can't use the dereferenceable attribute, but in addrspace(0)
2280 // we know that it must be nonnull.
2281 if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
2282 !getContext().getTargetAddressSpace(ArrTy->getElementType()))
2283 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2284 AI->getArgNo() + 1,
2285 llvm::Attribute::NonNull));
2286 }
2288 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2289 if (!AVAttr)
2290 if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2291 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2292 if (AVAttr) {
2293 llvm::Value *AlignmentValue =
2294 EmitScalarExpr(AVAttr->getAlignment());
2295 llvm::ConstantInt *AlignmentCI =
2296 cast<llvm::ConstantInt>(AlignmentValue);
2297 unsigned Alignment =
2298 std::min((unsigned) AlignmentCI->getZExtValue(),
2299 +llvm::Value::MaximumAlignment);
2301 llvm::AttrBuilder Attrs;
2302 Attrs.addAlignmentAttr(Alignment);
2303 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2304 AI->getArgNo() + 1, Attrs));
2305 }
2306 }
2308 if (Arg->getType().isRestrictQualified())
2309 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2310 AI->getArgNo() + 1,
2311 llvm::Attribute::NoAlias));
2313 // LLVM expects swifterror parameters to be used in very restricted
2314 // ways. Copy the value into a less-restricted temporary.
2315 if (FI.getExtParameterInfo(ArgNo).getABI()
2316 == ParameterABI::SwiftErrorResult) {
2317 QualType pointeeTy = Ty->getPointeeType();
2318 assert(pointeeTy->isPointerType());
2319 Address temp =
2320 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2321 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2322 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2323 Builder.CreateStore(incomingErrorValue, temp);
2324 V = temp.getPointer();
2326 // Push a cleanup to copy the value back at the end of the function.
2327 // The convention does not guarantee that the value will be written
2328 // back if the function exits with an unwind exception.
2329 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2330 }
2332 // Ensure the argument is the correct type.
2333 if (V->getType() != ArgI.getCoerceToType())
2334 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2336 if (isPromoted)
2337 V = emitArgumentDemotion(*this, Arg, V);
2339 // Because of merging of function types from multiple decls it is
2340 // possible for the type of an argument to not match the corresponding
2341 // type in the function type. Since we are codegening the callee
2342 // in here, add a cast to the argument type.
2343 llvm::Type *LTy = ConvertType(Arg->getType());
2344 if (V->getType() != LTy)
2345 V = Builder.CreateBitCast(V, LTy);
2347 ArgVals.push_back(ParamValue::forDirect(V));
2348 break;
2349 }
2351 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2352 Arg->getName());
2354 // Pointer to store into.
2355 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2357 // Fast-isel and the optimizer generally like scalar values better than
2358 // FCAs, so we flatten them if this is safe to do for this argument.
2359 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2360 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2361 STy->getNumElements() > 1) {
2362 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2363 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2364 llvm::Type *DstTy = Ptr.getElementType();
2365 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2367 Address AddrToStoreInto = Address::invalid();
2368 if (SrcSize <= DstSize) {
2369 AddrToStoreInto =
2370 Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
2371 } else {
2372 AddrToStoreInto =
2373 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2374 }
2376 assert(STy->getNumElements() == NumIRArgs);
2377 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2378 auto AI = FnArgs[FirstIRArg + i];
2379 AI->setName(Arg->getName() + ".coerce" + Twine(i));
2380 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2381 Address EltPtr =
2382 Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2383 Builder.CreateStore(AI, EltPtr);
2384 }
2386 if (SrcSize > DstSize) {
2387 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2388 }
2389 } else {
2391 // Simple case, just do a coerced store of the argument into the alloca.
2392 assert(NumIRArgs == 1);
2393 auto AI = FnArgs[FirstIRArg];
2394 AI->setName(Arg->getName() + ".coerce");
2395 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2396 }
2398 // Match to what EmitParmDecl is expecting for this type.
2399 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2400 llvm::Value *V =
2401 EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2402 if (isPromoted)
2403 V = emitArgumentDemotion(*this, Arg, V);
2404 ArgVals.push_back(ParamValue::forDirect(V));
2405 } else {
2406 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2407 }
2408 break;
2409 }
2411 case ABIArgInfo::CoerceAndExpand: {
2412 // Reconstruct into a temporary.
2413 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2414 ArgVals.push_back(ParamValue::forIndirect(alloca));
2416 auto coercionType = ArgI.getCoerceAndExpandType();
2417 alloca = Builder.CreateElementBitCast(alloca, coercionType);
2418 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2420 unsigned argIndex = FirstIRArg;
2421 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2422 llvm::Type *eltType = coercionType->getElementType(i);
2423 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2424 continue;
2426 auto eltAddr = Builder.CreateStructGEP(alloca, i, layout);
2427 auto elt = FnArgs[argIndex++];
2428 Builder.CreateStore(elt, eltAddr);
2429 }
2430 assert(argIndex == FirstIRArg + NumIRArgs);
2431 break;
2432 }
2434 case ABIArgInfo::Expand: {
2435 // If this structure was expanded into multiple arguments then
2436 // we need to create a temporary and reconstruct it from the
2437 // arguments.
2438 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2439 LValue LV = MakeAddrLValue(Alloca, Ty);
2440 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2442 auto FnArgIter = FnArgs.begin() + FirstIRArg;
2443 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2444 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2445 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2446 auto AI = FnArgs[FirstIRArg + i];
2447 AI->setName(Arg->getName() + "." + Twine(i));
2448 }
2449 break;
2450 }
2452 case ABIArgInfo::Ignore:
2453 assert(NumIRArgs == 0);
2454 // Initialize the local variable appropriately.
2455 if (!hasScalarEvaluationKind(Ty)) {
2456 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2457 } else {
2458 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2459 ArgVals.push_back(ParamValue::forDirect(U));
2460 }
2461 break;
2462 }
2463 }
2465 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2466 for (int I = Args.size() - 1; I >= 0; --I)
2467 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2468 } else {
2469 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2470 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2471 }
2472 }
2474 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2475 while (insn->use_empty()) {
2476 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2477 if (!bitcast) return;
2479 // This is "safe" because we would have used a ConstantExpr otherwise.
2480 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2481 bitcast->eraseFromParent();
2482 }
2483 }
2485 /// Try to emit a fused autorelease of a return result.
2486 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2487 llvm::Value *result) {
2488 // We must be immediately followed by the cast.
2489 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2490 if (BB->empty()) return nullptr;
2491 if (&BB->back() != result) return nullptr;
2493 llvm::Type *resultType = result->getType();
2495 // result is in a BasicBlock and is therefore an Instruction.
2496 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2498 SmallVector<llvm::Instruction *, 4> InstsToKill;
2500 // Look for:
2501 // %generator = bitcast %type1* %generator2 to %type2*
2502 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2503 // We would have emitted this as a constant if the operand weren't
2504 // an Instruction.
2505 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2507 // Require the generator to be immediately followed by the cast.
2508 if (generator->getNextNode() != bitcast)
2509 return nullptr;
2511 InstsToKill.push_back(bitcast);
2512 }
2514 // Look for:
2515 // %generator = call i8* @objc_retain(i8* %originalResult)
2516 // or
2517 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2518 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2519 if (!call) return nullptr;
2521 bool doRetainAutorelease;
2523 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2524 doRetainAutorelease = true;
2525 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2526 .objc_retainAutoreleasedReturnValue) {
2527 doRetainAutorelease = false;
2529 // If we emitted an assembly marker for this call (and the
2530 // ARCEntrypoints field should have been set if so), go looking
2531 // for that call. If we can't find it, we can't do this
2532 // optimization. But it should always be the immediately previous
2533 // instruction, unless we needed bitcasts around the call.
2534 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2535 llvm::Instruction *prev = call->getPrevNode();
2536 assert(prev);
2537 if (isa<llvm::BitCastInst>(prev)) {
2538 prev = prev->getPrevNode();
2539 assert(prev);
2540 }
2541 assert(isa<llvm::CallInst>(prev));
2542 assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2543 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2544 InstsToKill.push_back(prev);
2545 }
2546 } else {
2547 return nullptr;
2548 }
2550 result = call->getArgOperand(0);
2551 InstsToKill.push_back(call);
2553 // Keep killing bitcasts, for sanity. Note that we no longer care
2554 // about precise ordering as long as there's exactly one use.
2555 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2556 if (!bitcast->hasOneUse()) break;
2557 InstsToKill.push_back(bitcast);
2558 result = bitcast->getOperand(0);
2559 }
2561 // Delete all the unnecessary instructions, from latest to earliest.
2562 for (auto *I : InstsToKill)
2563 I->eraseFromParent();
2565 // Do the fused retain/autorelease if we were asked to.
2566 if (doRetainAutorelease)
2567 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2569 // Cast back to the result type.
2570 return CGF.Builder.CreateBitCast(result, resultType);
2571 }
2573 /// If this is a +1 of the value of an immutable 'self', remove it.
2574 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2575 llvm::Value *result) {
2576 // This is only applicable to a method with an immutable 'self'.
2577 const ObjCMethodDecl *method =
2578 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2579 if (!method) return nullptr;
2580 const VarDecl *self = method->getSelfDecl();
2581 if (!self->getType().isConstQualified()) return nullptr;
2583 // Look for a retain call.
2584 llvm::CallInst *retainCall =
2585 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2586 if (!retainCall ||
2587 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2588 return nullptr;
2590 // Look for an ordinary load of 'self'.
2591 llvm::Value *retainedValue = retainCall->getArgOperand(0);
2592 llvm::LoadInst *load =
2593 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2594 if (!load || load->isAtomic() || load->isVolatile() ||
2595 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2596 return nullptr;
2598 // Okay! Burn it all down. This relies for correctness on the
2599 // assumption that the retain is emitted as part of the return and
2600 // that thereafter everything is used "linearly".
2601 llvm::Type *resultType = result->getType();
2602 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2603 assert(retainCall->use_empty());
2604 retainCall->eraseFromParent();
2605 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2607 return CGF.Builder.CreateBitCast(load, resultType);
2608 }
2610 /// Emit an ARC autorelease of the result of a function.
2612 /// \return the value to actually return from the function
2613 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2614 llvm::Value *result) {
2615 // If we're returning 'self', kill the initial retain. This is a
2616 // heuristic attempt to "encourage correctness" in the really unfortunate
2617 // case where we have a return of self during a dealloc and we desperately
2618 // need to avoid the possible autorelease.
2619 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2620 return self;
2622 // At -O0, try to emit a fused retain/autorelease.
2623 if (CGF.shouldUseFusedARCCalls())
2624 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2625 return fused;
2627 return CGF.EmitARCAutoreleaseReturnValue(result);
2628 }
2630 /// Heuristically search for a dominating store to the return-value slot.
2631 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2632 // Check if a User is a store which pointerOperand is the ReturnValue.
2633 // We are looking for stores to the ReturnValue, not for stores of the
2634 // ReturnValue to some other location.
2635 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2636 auto *SI = dyn_cast<llvm::StoreInst>(U);
2637 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2638 return nullptr;
2639 // These aren't actually possible for non-coerced returns, and we
2640 // only care about non-coerced returns on this code path.
2641 assert(!SI->isAtomic() && !SI->isVolatile());
2642 return SI;
2643 };
2644 // If there are multiple uses of the return-value slot, just check
2645 // for something immediately preceding the IP. Sometimes this can
2646 // happen with how we generate implicit-returns; it can also happen
2647 // with noreturn cleanups.
2648 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2649 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2650 if (IP->empty()) return nullptr;
2651 llvm::Instruction *I = &IP->back();
2653 // Skip lifetime markers
2654 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2655 IE = IP->rend();
2656 II != IE; ++II) {
2657 if (llvm::IntrinsicInst *Intrinsic =
2658 dyn_cast<llvm::IntrinsicInst>(&*II)) {
2659 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2660 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2661 ++II;
2662 if (II == IE)
2663 break;
2664 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2665 continue;
2666 }
2667 }
2668 I = &*II;
2669 break;
2670 }
2672 return GetStoreIfValid(I);
2673 }
2675 llvm::StoreInst *store =
2676 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2677 if (!store) return nullptr;
2679 // Now do a quick-and-dirty dominance check: just walk up the
2680 // single-predecessors chain from the current insertion point.
2681 llvm::BasicBlock *StoreBB = store->getParent();
2682 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2683 while (IP != StoreBB) {
2684 if (!(IP = IP->getSinglePredecessor()))
2685 return nullptr;
2686 }
2688 // Okay, the store's basic block dominates the insertion point; we
2689 // can do our thing.
2690 return store;
2691 }
2693 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2694 bool EmitRetDbgLoc,
2695 SourceLocation EndLoc) {
2696 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2697 // Naked functions don't have epilogues.
2698 Builder.CreateUnreachable();
2699 return;
2700 }
2702 // Functions with no result always return void.
2703 if (!ReturnValue.isValid()) {
2704 Builder.CreateRetVoid();
2705 return;
2706 }
2708 llvm::DebugLoc RetDbgLoc;
2709 llvm::Value *RV = nullptr;
2710 QualType RetTy = FI.getReturnType();
2711 const ABIArgInfo &RetAI = FI.getReturnInfo();
2713 switch (RetAI.getKind()) {
2714 case ABIArgInfo::InAlloca:
2715 // Aggregates get evaluated directly into the destination. Sometimes we
2716 // need to return the sret value in a register, though.
2717 assert(hasAggregateEvaluationKind(RetTy));
2718 if (RetAI.getInAllocaSRet()) {
2719 llvm::Function::arg_iterator EI = CurFn->arg_end();
2720 --EI;
2721 llvm::Value *ArgStruct = &*EI;
2722 llvm::Value *SRet = Builder.CreateStructGEP(
2723 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2724 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2728 case ABIArgInfo::Indirect: {
2729 auto AI = CurFn->arg_begin();
2730 if (RetAI.isSRetAfterThis())
2731 ++AI;
2732 switch (getEvaluationKind(RetTy)) {
2733 case TEK_Complex: {
2734 ComplexPairTy RT =
2735 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2736 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2737 /*isInit*/ true);
2738 break;
2739 }
2740 case TEK_Aggregate:
2741 // Do nothing; aggregates get evaluated directly into the destination.
2742 break;
2743 case TEK_Scalar:
2744 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2745 MakeNaturalAlignAddrLValue(&*AI, RetTy),
2746 /*isInit*/ true);
2747 break;
2748 }
2749 break;
2750 }
2752 case ABIArgInfo::Extend:
2753 case ABIArgInfo::Direct:
2754 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2755 RetAI.getDirectOffset() == 0) {
2756 // The internal return value temp always will have pointer-to-return-type
2757 // type, just do a load.
2759 // If there is a dominating store to ReturnValue, we can elide
2760 // the load, zap the store, and usually zap the alloca.
2761 if (llvm::StoreInst *SI =
2762 findDominatingStoreToReturnValue(*this)) {
2763 // Reuse the debug location from the store unless there is
2764 // cleanup code to be emitted between the store and return
2765 // instruction.
2766 if (EmitRetDbgLoc && !AutoreleaseResult)
2767 RetDbgLoc = SI->getDebugLoc();
2768 // Get the stored value and nuke the now-dead store.
2769 RV = SI->getValueOperand();
2770 SI->eraseFromParent();
2772 // If that was the only use of the return value, nuke it as well now.
2773 auto returnValueInst = ReturnValue.getPointer();
2774 if (returnValueInst->use_empty()) {
2775 if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2776 alloca->eraseFromParent();
2777 ReturnValue = Address::invalid();
2778 }
2779 }
2781 // Otherwise, we have to do a simple load.
2782 } else {
2783 RV = Builder.CreateLoad(ReturnValue);
2784 }
2785 } else {
2786 // If the value is offset in memory, apply the offset now.
2787 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2789 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2790 }
2792 // In ARC, end functions that return a retainable type with a call
2793 // to objc_autoreleaseReturnValue.
2794 if (AutoreleaseResult) {
2795 #ifndef NDEBUG
2796 // Type::isObjCRetainableType has to be called on a QualType that hasn't
2797 // been stripped of the typedefs, so we cannot use RetTy here. Get the
2798 // original return type of FunctionDecl, CurCodeDecl, and BlockDecl from
2799 // CurCodeDecl or BlockInfo.
2800 QualType RT;
2802 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
2803 RT = FD->getReturnType();
2804 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
2805 RT = MD->getReturnType();
2806 else if (isa<BlockDecl>(CurCodeDecl))
2807 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
2808 else
2809 llvm_unreachable("Unexpected function/method type");
2811 assert(getLangOpts().ObjCAutoRefCount &&
2812 !FI.isReturnsRetained() &&
2813 RT->isObjCRetainableType());
2814 #endif
2815 RV = emitAutoreleaseOfResult(*this, RV);
2816 }
2818 break;
2820 case ABIArgInfo::Ignore:
2821 break;
2823 case ABIArgInfo::CoerceAndExpand: {
2824 auto coercionType = RetAI.getCoerceAndExpandType();
2825 auto layout = CGM.getDataLayout().getStructLayout(coercionType);
2827 // Load all of the coerced elements out into results.
2828 llvm::SmallVector<llvm::Value*, 4> results;
2829 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
2830 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2831 auto coercedEltType = coercionType->getElementType(i);
2832 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
2835 auto eltAddr = Builder.CreateStructGEP(addr, i, layout);
2836 auto elt = Builder.CreateLoad(eltAddr);
2837 results.push_back(elt);
2838 }
2840 // If we have one result, it's the single direct result type.
2841 if (results.size() == 1) {
2842 RV = results[0];
2844 // Otherwise, we need to make a first-class aggregate.
2845 } else {
2846 // Construct a return type that lacks padding elements.
2847 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
2849 RV = llvm::UndefValue::get(returnType);
2850 for (unsigned i = 0, e = results.size(); i != e; ++i) {
2851 RV = Builder.CreateInsertValue(RV, results[i], i);
2852 }
2853 }
2854 break;
2855 }
2857 case ABIArgInfo::Expand:
2858 llvm_unreachable("Invalid ABI kind for return argument");
2861 llvm::Instruction *Ret;
2862 if (RV) {
2863 if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
2864 if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) {
2865 SanitizerScope SanScope(this);
2866 llvm::Value *Cond = Builder.CreateICmpNE(
2867 RV, llvm::Constant::getNullValue(RV->getType()));
2868 llvm::Constant *StaticData[] = {
2869 EmitCheckSourceLocation(EndLoc),
2870 EmitCheckSourceLocation(RetNNAttr->getLocation()),
2871 };
2872 EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
2873 SanitizerHandler::NonnullReturn, StaticData, None);
2874 }
2875 }
2876 Ret = Builder.CreateRet(RV);
2877 } else {
2878 Ret = Builder.CreateRetVoid();
2879 }
2881 if (RetDbgLoc)
2882 Ret->setDebugLoc(std::move(RetDbgLoc));
2883 }
2885 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2886 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2887 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
2888 }
2890 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
2891 QualType Ty) {
2892 // FIXME: Generate IR in one pass, rather than going back and fixing up these
2893 // placeholders.
2894 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
2895 llvm::Type *IRPtrTy = IRTy->getPointerTo();
2896 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
2898 // FIXME: When we generate this IR in one pass, we shouldn't need
2899 // this win32-specific alignment hack.
2900 CharUnits Align = CharUnits::fromQuantity(4);
2901 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
2903 return AggValueSlot::forAddr(Address(Placeholder, Align),
2904 Ty.getQualifiers(),
2905 AggValueSlot::IsNotDestructed,
2906 AggValueSlot::DoesNotNeedGCBarriers,
2907 AggValueSlot::IsNotAliased);
2908 }
2910 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
2911 const VarDecl *param,
2912 SourceLocation loc) {
2913 // StartFunction converted the ABI-lowered parameter(s) into a
2914 // local alloca. We need to turn that into an r-value suitable
2915 // for EmitCall.
2916 Address local = GetAddrOfLocalVar(param);
2918 QualType type = param->getType();
2920 assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
2921 "cannot emit delegate call arguments for inalloca arguments!");
2923 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
2924 // but the argument needs to be the original pointer.
2925 if (type->isReferenceType()) {
2926 args.add(RValue::get(Builder.CreateLoad(local)), type);
2928 // In ARC, move out of consumed arguments so that the release cleanup
2929 // entered by StartFunction doesn't cause an over-release. This isn't
2930 // optimal -O0 code generation, but it should get cleaned up when
2931 // optimization is enabled. This also assumes that delegate calls are
2932 // performed exactly once for a set of arguments, but that should be safe.
2933 } else if (getLangOpts().ObjCAutoRefCount &&
2934 param->hasAttr<NSConsumedAttr>() &&
2935 type->isObjCRetainableType()) {
2936 llvm::Value *ptr = Builder.CreateLoad(local);
2937 auto null =
2938 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
2939 Builder.CreateStore(null, local);
2940 args.add(RValue::get(ptr), type);
2942 // For the most part, we just need to load the alloca, except that
2943 // aggregate r-values are actually pointers to temporaries.
2944 } else {
2945 args.add(convertTempToRValue(local, type, loc), type);
2946 }
2947 }
2949 static bool isProvablyNull(llvm::Value *addr) {
2950 return isa<llvm::ConstantPointerNull>(addr);
2951 }
2953 /// Emit the actual writing-back of a writeback.
2954 static void emitWriteback(CodeGenFunction &CGF,
2955 const CallArgList::Writeback &writeback) {
2956 const LValue &srcLV = writeback.Source;
2957 Address srcAddr = srcLV.getAddress();
2958 assert(!isProvablyNull(srcAddr.getPointer()) &&
2959 "shouldn't have writeback for provably null argument");
2961 llvm::BasicBlock *contBB = nullptr;
2963 // If the argument wasn't provably non-null, we need to null check
2964 // before doing the store.
2965 bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer());
2966 if (!provablyNonNull) {
2967 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
2968 contBB = CGF.createBasicBlock("icr.done");
2970 llvm::Value *isNull =
2971 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
2972 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
2973 CGF.EmitBlock(writebackBB);
2974 }
2976 // Load the value to writeback.
2977 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
2979 // Cast it back, in case we're writing an id to a Foo* or something.
2980 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
2981 "icr.writeback-cast");
2983 // Perform the writeback.
2985 // If we have a "to use" value, it's something we need to emit a use
2986 // of. This has to be carefully threaded in: if it's done after the
2987 // release it's potentially undefined behavior (and the optimizer
2988 // will ignore it), and if it happens before the retain then the
2989 // optimizer could move the release there.
2990 if (writeback.ToUse) {
2991 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
2993 // Retain the new value. No need to block-copy here: the block's
2994 // being passed up the stack.
2995 value = CGF.EmitARCRetainNonBlock(value);
2997 // Emit the intrinsic use here.
2998 CGF.EmitARCIntrinsicUse(writeback.ToUse);
3000 // Load the old value (primitively).
3001 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3003 // Put the new value in place (primitively).
3004 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3006 // Release the old value.
3007 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3009 // Otherwise, we can just do a normal lvalue store.
3010 } else {
3011 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3012 }
3014 // Jump to the continuation block.
3015 if (!provablyNonNull)
3016 CGF.EmitBlock(contBB);
3017 }
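// Illustrative example (not in the original source): in ARC, for
//   NSError *err = nil;
//   [obj doThingAndReturnError:&err];  // parameter: NSError *__autoreleasing *
// the call receives the address of a temporary; this function stores the
// temporary's final value back into 'err' after the call, emitting the
// null check above only when the source address isn't provably non-null.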
3019 static void emitWritebacks(CodeGenFunction &CGF,
3020 const CallArgList &args) {
3021 for (const auto &I : args.writebacks())
3022 emitWriteback(CGF, I);
3023 }
3025 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3026 const CallArgList &CallArgs) {
3027 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
3028 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3029 CallArgs.getCleanupsToDeactivate();
3030 // Iterate in reverse to increase the likelihood of popping the cleanup.
3031 for (const auto &I : llvm::reverse(Cleanups)) {
3032 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3033 I.IsActiveIP->eraseFromParent();
3034 }
3035 }
3037 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3038 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3039 if (uop->getOpcode() == UO_AddrOf)
3040 return uop->getSubExpr();
3041 return nullptr;
3042 }
3044 /// Emit an argument that's being passed call-by-writeback. That is,
3045 /// we are passing the address of an __autoreleased temporary; it
3046 /// might be copy-initialized with the current value of the given
3047 /// address, but it will definitely be copied out of after the call.
3048 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3049 const ObjCIndirectCopyRestoreExpr *CRE) {
3050 LValue srcLV;
3052 // Make an optimistic effort to emit the address as an l-value.
3053 // This can fail if the argument expression is more complicated.
3054 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3055 srcLV = CGF.EmitLValue(lvExpr);
3057 // Otherwise, just emit it as a scalar.
3058 } else {
3059 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3061 QualType srcAddrType =
3062 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3063 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3064 }
3065 Address srcAddr = srcLV.getAddress();
3067 // The dest and src types don't necessarily match in LLVM terms
3068 // because of the crazy ObjC compatibility rules.
3070 llvm::PointerType *destType =
3071 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3073 // If the address is a constant null, just pass the appropriate null.
3074 if (isProvablyNull(srcAddr.getPointer())) {
3075 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3076 CRE->getType());
3077 return;
3078 }
3080 // Create the temporary.
3081 Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3082 CGF.getPointerAlign(),
3083 "icr.temp");
3084 // Loading an l-value can introduce a cleanup if the l-value is __weak,
3085 // and that cleanup will be conditional if we can't prove that the l-value
3086 // isn't null, so we need to register a dominating point so that the cleanups
3087 // system will make valid IR.
3088 CodeGenFunction::ConditionalEvaluation condEval(CGF);
3090 // Zero-initialize it if we're not doing a copy-initialization.
3091 bool shouldCopy = CRE->shouldCopy();
3092 if (!shouldCopy) {
3093 llvm::Value *null =
3094 llvm::ConstantPointerNull::get(
3095 cast<llvm::PointerType>(destType->getElementType()));
3096 CGF.Builder.CreateStore(null, temp);
3097 }
3099 llvm::BasicBlock *contBB = nullptr;
3100 llvm::BasicBlock *originBB = nullptr;
3102 // If the address is *not* known to be non-null, we need to switch.
3103 llvm::Value *finalArgument;
3105 bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer());
3106 if (provablyNonNull) {
3107 finalArgument = temp.getPointer();
3108 } else {
3109 llvm::Value *isNull =
3110 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3112 finalArgument = CGF.Builder.CreateSelect(isNull,
3113 llvm::ConstantPointerNull::get(destType),
3114 temp.getPointer(), "icr.argument");
3116 // If we need to copy, then the load has to be conditional, which
3117 // means we need control flow.
3118 if (shouldCopy) {
3119 originBB = CGF.Builder.GetInsertBlock();
3120 contBB = CGF.createBasicBlock("icr.cont");
3121 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3122 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3123 CGF.EmitBlock(copyBB);
3124 condEval.begin(CGF);
3125 }
3126 }
3128 llvm::Value *valueToUse = nullptr;
3130 // Perform a copy if necessary.
3131 if (shouldCopy) {
3132 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3133 assert(srcRV.isScalar());
3135 llvm::Value *src = srcRV.getScalarVal();
3136 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3137 "icr.cast");
3139 // Use an ordinary store, not a store-to-lvalue.
3140 CGF.Builder.CreateStore(src, temp);
3142 // If optimization is enabled, and the value was held in a
3143 // __strong variable, we need to tell the optimizer that this
3144 // value has to stay alive until we're doing the store back.
3145 // This is because the temporary is effectively unretained,
3146 // and so otherwise we can violate the high-level semantics.
3147 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3148 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3149 valueToUse = src;
3150 }
3151 }
3153 // Finish the control flow if we needed it.
3154 if (shouldCopy && !provablyNonNull) {
3155 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3156 CGF.EmitBlock(contBB);
3158 // Make a phi for the value to intrinsically use.
3159 if (valueToUse) {
3160 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3161 "icr.to-use");
3162 phiToUse->addIncoming(valueToUse, copyBB);
3163 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3164 originBB);
3165 valueToUse = phiToUse;
3166 }
3168 condEval.end(CGF);
3169 }
3171 args.addWriteback(srcLV, temp, valueToUse);
3172 args.add(RValue::get(finalArgument), CRE->getType());
3173 }
3175 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3176 assert(!StackBase);
3178 // Save the stack.
3179 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3180 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3183 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3184 if (StackBase) {
3185 // Restore the stack after the call.
3186 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3187 CGF.Builder.CreateCall(F, StackBase);
3188 }
3189 }
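// Illustrative IR shape (not in the original source): a call site with
// inalloca arguments is bracketed roughly as
//   %save = call i8* @llvm.stacksave()
//   %mem  = alloca <{ %struct.A }>
//   ... construct the arguments in %mem ...
//   call void @f(<{ %struct.A }>* inalloca %mem)
//   call void @llvm.stackrestore(i8* %save)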
3191 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3192 SourceLocation ArgLoc,
3193 const FunctionDecl *FD,
3194 unsigned ParmNum) {
3195 if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
3196 return;
3197 auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
3198 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3199 auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
3200 if (!NNAttr)
3201 return;
3202 SanitizerScope SanScope(this);
3203 assert(RV.isScalar());
3204 llvm::Value *V = RV.getScalarVal();
3205 llvm::Value *Cond =
3206 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3207 llvm::Constant *StaticData[] = {
3208 EmitCheckSourceLocation(ArgLoc),
3209 EmitCheckSourceLocation(NNAttr->getLocation()),
3210 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3211 };
3212 EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
3213 SanitizerHandler::NonnullArg, StaticData, None);
3214 }
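// Illustrative example (not in the original source): with
//   void f(int *p) __attribute__((nonnull(1)));
// and -fsanitize=nonnull-attribute, each call site emits an
// 'icmp ne i32* %p, null' guard that reports through the nonnull-arg
// sanitizer handler before the actual call is made.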
3216 void CodeGenFunction::EmitCallArgs(
3217 CallArgList &Args, ArrayRef<QualType> ArgTypes,
3218 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3219 const FunctionDecl *CalleeDecl, unsigned ParamsToSkip,
3220 EvaluationOrder Order) {
3221 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3223 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
3224 if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams())
3225 return;
3226 auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3227 if (PS == nullptr)
3228 return;
3230 const auto &Context = getContext();
3231 auto SizeTy = Context.getSizeType();
3232 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3233 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T);
3234 Args.add(RValue::get(V), SizeTy);
3235 };
  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
  // because arguments are destroyed left to right in the callee. As a special
  // case, there are certain language constructs that require left-to-right
  // evaluation, and in those cases we consider the evaluation order
  // requirement to trump the "destruction order is reverse construction
  // order" guarantee.
  bool LeftToRight =
      CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
          ? Order == EvaluationOrder::ForceLeftToRight
          : Order != EvaluationOrder::ForceRightToLeft;

  // Insert a stack save if we're going to need any inalloca args.
  bool HasInAllocaArgs = false;
  if (CGM.getTarget().getCXXABI().isMicrosoft()) {
    for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
         I != E && !HasInAllocaArgs; ++I)
      HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
    if (HasInAllocaArgs) {
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      Args.allocateArgumentMemory(*this);
    }
  }

  // Evaluate each argument in the appropriate order.
  size_t CallArgsStart = Args.size();
  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    unsigned Idx = LeftToRight ? I : E - I - 1;
    CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
    if (!LeftToRight) MaybeEmitImplicitObjectSize(Idx, *Arg);
    EmitCallArg(Args, *Arg, ArgTypes[Idx]);
    EmitNonNullArgCheck(Args.back().RV, ArgTypes[Idx], (*Arg)->getExprLoc(),
                        CalleeDecl, ParamsToSkip + Idx);
    if (LeftToRight) MaybeEmitImplicitObjectSize(Idx, *Arg);
  }

  if (!LeftToRight) {
    // Un-reverse the arguments we just evaluated so they match up with the
    // LLVM IR function.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
  }
}
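
// For example, in the MS C++ ABI a call f(g(), h()) evaluates h() before
// g(): the callee destroys its arguments left to right, so constructing them
// right to left preserves "destruction order is reverse construction order".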
namespace {

struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
  DestroyUnpassedArg(Address Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  Address Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
    assert(!Dtor->isTrivial());
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                              /*Delegating=*/false, Addr);
  }
};

struct DisableDebugLocationUpdates {
  CodeGenFunction &CGF;
  bool disabledDebugInfo;
  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
    if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
      CGF.disableDebugInfo();
  }
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)
      CGF.enableDebugInfo();
  }
};

} // end anonymous namespace
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  DisableDebugLocationUpdates Dis(*this, E);
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameUnqualifiedType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E), type);
  }

  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);

  // In the Microsoft C++ ABI, aggregate arguments are destructed by the
  // callee. However, we still have to push an EH-only cleanup in case we
  // unwind before we make it to the call.
  if (HasAggregateEvalKind &&
      CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // If we're using inalloca, use the argument memory. Otherwise, use a
    // temporary.
    AggValueSlot Slot;
    if (args.isUsingInAlloca())
      Slot = createPlaceholderSlot(*this, type);
    else
      Slot = CreateAggTemp(type, "agg.tmp");

    const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
    bool DestroyedInCallee =
        RD && RD->hasNonTrivialDestructor() &&
        CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
    if (DestroyedInCallee)
      Slot.setExternallyDestructed();

    EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    if (DestroyedInCallee) {
      // Create a no-op GEP between the placeholder and the cleanup so we can
      // RAUW it successfully. It also serves as a marker of the first
      // instruction where the cleanup is active.
      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
                                              type);
      // This unreachable is a temporary marker which will be removed later.
      llvm::Instruction *IsActive = Builder.CreateUnreachable();
      args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
    }
    return;
  }

  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
      args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    } else {
      // We can't represent a misaligned lvalue in the CallArgList, so copy
      // to an aligned temporary now.
      Address tmp = CreateMemTemp(type);
      EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
      args.add(RValue::getAggregate(tmp), type);
    }
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}
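
// For example, in the MS C++ ABI with
//   struct S { ~S(); };
//   void f(S);
//   f(S());
// the temporary is destroyed by f itself; the EH-only cleanup pushed above
// fires only if a later argument throws before the call is actually made.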
QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
  // implicitly widens null pointer constants that are arguments to varargs
  // functions to pointer-sized ints.
  if (!getTarget().getTriple().isOSWindows())
    return Arg->getType();

  if (Arg->getType()->isIntegerType() &&
      getContext().getTypeSize(Arg->getType()) <
          getContext().getTargetInfo().getPointerWidth(0) &&
      Arg->isNullPointerConstant(getContext(),
                                 Expr::NPC_ValueDependentIsNotNull)) {
    return getContext().getIntPtrType();
  }

  return Arg->getType();
}
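
// For example, on Win64:
//   printf("%p", NULL);   // NULL expands to plain 0, a 32-bit int
// is given intptr_t type here so the varargs slot is pointer-sized, matching
// MSVC's behavior.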
// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}
/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, None, name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, None, name);
}
// Calls which may throw must have operand bundles indicating which funclet
// they are nested within.
static void
getBundlesForFunclet(llvm::Value *Callee, llvm::Instruction *CurrentFuncletPad,
                     SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
  // There is no need for a funclet operand bundle if we aren't inside a
  // funclet.
  if (!CurrentFuncletPad)
    return;

  // Skip intrinsics which cannot throw.
  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
    return;

  BundleList.emplace_back("funclet", CurrentFuncletPad);
}
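
// Illustrative IR (sketch): inside a cleanup funclet, call sites carry
//   %pad = cleanuppad within none []
//   call void @g() [ "funclet"(token %pad) ]
// so the unwinder knows which funclet the call belongs to.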
/// Emits a simple call (never an invoke) to the given runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 ArrayRef<llvm::Value*> args,
                                 const llvm::Twine &name) {
  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);

  llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}
/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
                                               ArrayRef<llvm::Value*> args) {
  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);

  if (getInvokeDest()) {
    llvm::InvokeInst *invoke =
      Builder.CreateInvoke(callee,
                           getUnreachableBlock(),
                           getInvokeDest(),
                           args,
                           BundleList);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
}
/// Emits a call or invoke instruction to the given nullary runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, None, name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const Twine &name) {
  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
  callSite.setCallingConv(getRuntimeCC());
  return callSite;
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
                                Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return llvm::CallSite(Inst);
}
/// \brief Store a non-aggregate value to an address to initialize it. For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
                                        LValue Dst) {
  if (Src.isScalar())
    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
  else
    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
}

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(std::make_pair(Old, New));
}
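
// Note (sketch): placeholders created by createPlaceholderSlot are recorded
// here and RAUW'd with the real inalloca argument GEPs once the argument
// block is known; the deferred list is processed when the enclosing function
// is finished.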
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 const CGCallee &Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  assert(Callee.isOrdinary());

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy = Callee.getFunctionType();

  // 1. Set up the arguments.

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  Address ArgMemory = Address::invalid();
  const llvm::StructLayout *ArgMemoryLayout = nullptr;
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct);
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    auto Align = CallInfo.getArgStructAlignment();
    AI->setAlignment(Align.getQuantity());
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = Address(AI, Align);
  }

  // Helper function to drill into the inalloca allocation.
  auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
    auto FieldOffset =
      CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
    return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
  };
  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  Address SRetPtr = Address::invalid();
  size_t UnusedReturnSize = 0;
  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
    if (!ReturnValue.isNull()) {
      SRetPtr = ReturnValue.getValue();
    } else {
      SRetPtr = CreateMemTemp(RetTy);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        uint64_t size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        if (EmitLifetimeStart(size, SRetPtr.getPointer()))
          UnusedReturnSize = size;
      }
    }
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
    } else if (RetAI.isInAlloca()) {
      Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr.getPointer(), Addr);
    }
  }
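
  // Illustrative IR (sketch) for the indirect-return case:
  //   %tmp = alloca %struct.S
  //   call void @f(%struct.S* sret %tmp)
  // The callee constructs its result directly into %tmp.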
  Address swiftErrorTemp = Address::invalid();
  Address swiftErrorArg = Address::invalid();

  // Translate all of the arguments as necessary to match the IR lowering.
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (RV.isAggregate()) {
        // Replace the placeholder with the appropriate argument slot GEP.
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(RV.getAggregatePointer());
        CGBuilderTy::InsertPoint IP = Builder.saveIP();
        Builder.SetInsertPoint(Placeholder);
        Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
        Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else {
        // Store the RValue into the argument struct.
        Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr.getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr.getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        LValue argLV = MakeAddrLValue(Addr, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;
    }
    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
        IRCallArgs[FirstIRArg] = Addr.getPointer();

        LValue argLV = MakeAddrLValue(Addr, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different than that of the argument (0).
        Address Addr = RV.getAggregateAddress();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
        const unsigned ArgAddrSpace =
            (FirstIRArg < IRFuncTy->getNumParams()
                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
                 : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
                                              Align.getQuantity(), *TD)
               < Align.getQuantity()) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
          IRCallArgs[FirstIRArg] = AI.getPointer();
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
        } else {
          // Skip the extra memcpy call.
          IRCallArgs[FirstIRArg] = Addr.getPointer();
        }
      }
      break;
    }
    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddress());

        // Implement swifterror by copying into a new swifterror argument.
        // We'll write back in the normal path out of the call.
        if (CallInfo.getExtParameterInfo(ArgNo).getABI()
              == ParameterABI::SwiftErrorResult) {
          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");

          QualType pointeeTy = I->Ty->getPointeeType();
          swiftErrorArg =
            Address(V, getContext().getTypeAlignInChars(pointeeTy));

          swiftErrorTemp =
            CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          V = swiftErrorTemp.getPointer();
          cast<llvm::AllocaInst>(V)->setSwiftError(true);

          llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
          Builder.CreateStore(errorValue, swiftErrorTemp);
        }

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));

        IRCallArgs[FirstIRArg] = V;
        break;
      }
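
      // Illustrative lowering (sketch) for a swifterror argument:
      //   %swifterror.temp = alloca swifterror %swift.error*
      //   store %swift.error* %err, %swift.error** %swifterror.temp
      //   call void @f(%swift.error** swifterror %swifterror.temp)
      //   ; the final error value is loaded and written back after the call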
      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (RV.isScalar() || RV.isComplex()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        LValue SrcLV = MakeAddrLValue(Src, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else {
        Src = RV.getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getType()->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          Address TempAlloca
            = CreateTempAlloca(STy, Src.getAlignment(),
                               Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
        }

        auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
          Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        IRCallArgs[FirstIRArg] =
          CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
      }

      break;
    }
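
    // For example, on x86-64 a struct { int a; double b; } coerces to the
    // type { i32, double } and is flattened into two IR arguments:
    //   define void @f(i32 %s.coerce0, double %s.coerce1)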
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = ArgInfo.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      llvm::Value *tempSize = nullptr;
      Address addr = Address::invalid();
      if (RV.isAggregate()) {
        addr = RV.getAggregateAddress();
      } else {
        assert(RV.isScalar()); // complex should always just be direct

        llvm::Type *scalarType = RV.getScalarVal()->getType();
        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);

        tempSize = llvm::ConstantInt::get(CGM.Int64Ty, scalarSize);

        // Materialize to a temporary.
        addr = CreateTempAlloca(RV.getScalarVal()->getType(),
                 CharUnits::fromQuantity(std::max(layout->getAlignment(),
                                                  scalarAlign)));
        EmitLifetimeStart(scalarSize, addr.getPointer());

        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = Builder.CreateElementBitCast(addr, coercionType);

      unsigned IRArgPos = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
        llvm::Value *elt = Builder.CreateLoad(eltAddr);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, addr.getPointer());
      }

      break;
    }
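
    // Note: CoerceAndExpand is used primarily by the Swift calling
    // convention (see clang/CodeGen/SwiftCallingConv.h) to pass a value as a
    // sequence of legal IR types while skipping the padding elements.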
    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }
  llvm::Value *CalleePtr = Callee.getFunctionPointer();

  // If we're using inalloca, set up that argument.
  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
      auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
      CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                DE = DeclaredTy->element_end(),
                                                FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }
  // 2. Prepare the function pointer.

  // If the callee is a bitcast of a non-variadic function to have a
  // variadic function pointer type, check to see if we can remove the
  // bitcast. This comes up with unprototyped functions.
  //
  // This makes the IR nicer, but more importantly it ensures that we
  // can inline the function at -O0 if it is marked always_inline.
  auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
    llvm::FunctionType *CalleeFT =
      cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
    if (!CalleeFT->isVarArg())
      return Ptr;

    llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
    if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
      return Ptr;

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
    if (!OrigFn)
      return Ptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();

    // If the original type is variadic, or if any of the component types
    // disagree, we cannot remove the cast.
    if (OrigFT->isVarArg() ||
        OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return Ptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return Ptr;

    return OrigFn;
  };
  CalleePtr = simplifyVariadicCallee(CalleePtr);
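
  // For example (sketch), with an unprototyped definition and call:
  //   void f() {}               // emitted as 'define void @f()'
  //   void g(void) { f(); }     // call site type is 'void (...)'
  // the callee would be 'bitcast (void ()* @f to void (...)*)'; the lambda
  // above strips the bitcast so @f can still be inlined at -O0.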
  // 3. Perform the actual call.

  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // Assert that the arguments we computed match up. The IR verifier
  // will catch this, but this is a common enough source of problems
  // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // Inalloca argument can have different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }
#endif
  // Compute the calling convention and attributes.
  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
                             Callee.getAbstractInfo(),
                             AttributeList, CallingConv,
                             /*AttrOnCallSite=*/true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  // Apply some call-site-specific attributes.
  // TODO: work this into building the attribute set.

  // Apply always_inline to all calls within flatten functions.
  // FIXME: should this really take priority over __try, below?
  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !(Callee.getAbstractInfo().getCalleeDecl() &&
        Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
    Attrs =
        Attrs.addAttribute(getLLVMContext(),
                           llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);
  }

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope()) {
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::NoInline);
  }
  // Decide whether to use a call or an invoke.
  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown during a cleanup outside of a try/catch.
    // We don't need to model anything in IR to get this behavior.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                                     llvm::Attribute::NoUnwind);
  }
  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  getBundlesForFunclet(CalleePtr, CurrentFuncletPad, BundleList);
  // Emit the actual call/invoke instruction.
  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  llvm::Instruction *CI = CS.getInstruction();
  if (callOrInvoke)
    *callOrInvoke = CI;

  // Apply the attributes and calling convention.
  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Apply various metadata.

  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CS.getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CI, CalleePtr);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CI);

  // Suppress tail calls if requested.
  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
  }
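
  // For example:
  //   __attribute__((not_tail_called)) int f(void);
  // marks every direct call to f as 'notail', preventing tail-call
  // optimization of that call site.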
  // 4. Finish the call.

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRGen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    if (UnusedReturnSize)
      EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                      SRetPtr.getPointer());

    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }
  // Perform the swifterror writeback.
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any call-associated writebacks immediately. Arguably this
  // should happen after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);
  // Extract the return value.
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      Address addr = SRetPtr;
      addr = Builder.CreateElementBitCast(addr, coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i, layout);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      LLVM_FALLTHROUGH;
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSize)
        EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                        SRetPtr.getPointer());
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring an argument that had a result, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the argument doesn't match, perform a bitcast to coerce it.
          // This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  } ();
  // Emit the assume_aligned check on the return value.
  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
  if (Ret.isScalar() && TargetDecl) {
    if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
      llvm::Value *OffsetValue = nullptr;
      if (const auto *Offset = AA->getOffset())
        OffsetValue = EmitScalarExpr(Offset);

      llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
      llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    }
  }

  return Ret;
}
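
// For example:
//   void *my_alloc(size_t n) __attribute__((assume_aligned(64)));
// lets IRGen emit an llvm.assume-based alignment assumption on the returned
// pointer, so later loads and stores can be optimized more aggressively.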
/* VarArg handling */

Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                 ? EmitMSVAListRef(VE->getSubExpr())
                 : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}
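
// For example, 'va_arg(ap, int)' reaches this point as a VAArgExpr; the
// __builtin_ms_va_list flavor (e.g. under __attribute__((ms_abi))) is routed
// to EmitMSVAArg, while everything else goes to the target's EmitVAArg.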