1 //===--- CGCall.cpp - Encapsulate calling convention details --------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // These classes wrap the information about a call or function
11 // definition used to handle ABI compliance.
13 //===----------------------------------------------------------------------===//
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "TargetInfo.h"
21 #include "clang/AST/Decl.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Basic/TargetInfo.h"
25 #include "clang/CodeGen/CGFunctionInfo.h"
26 #include "clang/Frontend/CodeGenOptions.h"
27 #include "llvm/ADT/StringExtras.h"
28 #include "llvm/IR/Attributes.h"
29 #include "llvm/IR/DataLayout.h"
30 #include "llvm/IR/InlineAsm.h"
31 #include "llvm/MC/SubtargetFeature.h"
32 #include "llvm/Support/CallSite.h"
33 #include "llvm/Transforms/Utils/Local.h"
34 using namespace clang;
35 using namespace CodeGen;
39 static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
40 switch (CC) {
41 default: return llvm::CallingConv::C;
42 case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
43 case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
44 case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
45 case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
46 case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
47 case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
48 case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
49 case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
50 // TODO: add support for CC_X86Pascal to llvm
54 /// Derives the 'this' type for codegen purposes, i.e. ignoring method
55 /// qualification.
56 /// FIXME: address space qualification?
57 static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
58 QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
59 return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
62 /// Returns the canonical formal type of the given C++ method.
63 static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
64 return MD->getType()->getCanonicalTypeUnqualified()
65 .getAs<FunctionProtoType>();
68 /// Returns the "extra-canonicalized" return type, which discards
69 /// qualifiers on the return type. Codegen doesn't care about them,
70 /// and it makes ABI code a little easier to be able to assume that
71 /// all parameter and return types are top-level unqualified.
72 static CanQualType GetReturnType(QualType RetTy) {
73 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
76 /// Arrange the argument and result information for a value of the given
77 /// unprototyped freestanding function type.
78 const CGFunctionInfo &
79 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
80 // When translating an unprototyped function type, always use a
81 // variadic type.
82 return arrangeLLVMFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
83 None, FTNP->getExtInfo(), RequiredArgs(0));
86 /// Arrange the LLVM function layout for a value of the given function
87 /// type, on top of any implicit parameters already stored. Use the
88 /// given ExtInfo instead of the ExtInfo from the function type.
89 static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
90 SmallVectorImpl<CanQualType> &prefix,
91 CanQual<FunctionProtoType> FTP,
92 FunctionType::ExtInfo extInfo) {
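// Note (annotation): RequiredArgs::forPrototypePlus treats every argument as
// required for a non-variadic prototype; for a variadic one it fixes the
// required count at the implicit prefix plus the declared parameters, so any
// further arguments travel through the ellipsis.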
93 RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
95 for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
96 prefix.push_back(FTP->getArgType(i));
97 CanQualType resultType = FTP->getResultType().getUnqualifiedType();
98 return CGT.arrangeLLVMFunctionInfo(resultType, prefix, extInfo, required);
101 /// Arrange the argument and result information for a free function (i.e.
102 /// not a C++ or ObjC instance method) of the given type.
103 static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
104 SmallVectorImpl<CanQualType> &prefix,
105 CanQual<FunctionProtoType> FTP) {
106 return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
109 /// Arrange the argument and result information for a C++ instance method
110 /// of the given type, on top of any implicit parameters already stored.
111 static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
112 SmallVectorImpl<CanQualType> &prefix,
113 CanQual<FunctionProtoType> FTP) {
114 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
115 return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
118 /// Arrange the argument and result information for a value of the
119 /// given freestanding function type.
120 const CGFunctionInfo &
121 CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
122 SmallVector<CanQualType, 16> argTypes;
123 return ::arrangeFreeFunctionType(*this, argTypes, FTP);
126 static CallingConv getCallingConventionForDecl(const Decl *D) {
127 // Set the appropriate calling convention for the Function.
128 if (D->hasAttr<StdCallAttr>())
129 return CC_X86StdCall;
131 if (D->hasAttr<FastCallAttr>())
132 return CC_X86FastCall;
134 if (D->hasAttr<ThisCallAttr>())
135 return CC_X86ThisCall;
137 if (D->hasAttr<PascalAttr>())
138 return CC_X86Pascal;
140 if (PcsAttr *PCS = D->getAttr<PcsAttr>())
141 return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
143 if (D->hasAttr<PnaclCallAttr>())
144 return CC_PnaclCall;
146 if (D->hasAttr<IntelOclBiccAttr>())
147 return CC_IntelOclBicc;
149 return CC_C;
152 /// Arrange the argument and result information for a call to an
153 /// unknown C++ non-static member function of the given abstract type.
154 /// (Zero value of RD means we don't have any meaningful "this" argument type,
155 /// so fall back to a generic pointer type).
156 /// The member function must be an ordinary function, i.e. not a
157 /// constructor or destructor.
158 const CGFunctionInfo &
159 CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
160 const FunctionProtoType *FTP) {
161 SmallVector<CanQualType, 16> argTypes;
163 // Add the 'this' pointer.
164 if (RD)
165 argTypes.push_back(GetThisType(Context, RD));
166 else
167 argTypes.push_back(Context.VoidPtrTy);
169 return ::arrangeCXXMethodType(*this, argTypes,
170 FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
173 /// Arrange the argument and result information for a declaration or
174 /// definition of the given C++ non-static member function. The
175 /// member function must be an ordinary function, i.e. not a
176 /// constructor or destructor.
177 const CGFunctionInfo &
178 CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
179 assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
180 assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
182 CanQual<FunctionProtoType> prototype = GetFormalType(MD);
184 if (MD->isInstance()) {
185 // The abstract case is perfectly fine.
186 const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
187 return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
190 return arrangeFreeFunctionType(prototype);
193 /// Arrange the argument and result information for a declaration
194 /// or definition to the given constructor variant.
195 const CGFunctionInfo &
196 CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
197 CXXCtorType ctorKind) {
198 SmallVector<CanQualType, 16> argTypes;
199 argTypes.push_back(GetThisType(Context, D->getParent()));
201 GlobalDecl GD(D, ctorKind);
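// Some C++ ABIs (e.g. the Microsoft ABI) have constructors return 'this'; in
// that case the result type below is the 'this' pointer type rather than void.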
202 CanQualType resultType =
203 TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
205 TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
207 CanQual<FunctionProtoType> FTP = GetFormalType(D);
209 RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
211 // Add the formal parameters.
212 for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
213 argTypes.push_back(FTP->getArgType(i));
215 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
216 return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
219 /// Arrange the argument and result information for a declaration,
220 /// definition, or call to the given destructor variant. It so
221 /// happens that all three cases produce the same information.
222 const CGFunctionInfo &
223 CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
224 CXXDtorType dtorKind) {
225 SmallVector<CanQualType, 2> argTypes;
226 argTypes.push_back(GetThisType(Context, D->getParent()));
228 GlobalDecl GD(D, dtorKind);
229 CanQualType resultType =
230 TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
232 TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
234 CanQual<FunctionProtoType> FTP = GetFormalType(D);
235 assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
236 assert(FTP->isVariadic() == 0 && "dtor with variadic prototype");
238 FunctionType::ExtInfo extInfo = FTP->getExtInfo();
239 return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
240 RequiredArgs::All);
243 /// Arrange the argument and result information for the declaration or
244 /// definition of the given function.
245 const CGFunctionInfo &
246 CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
247 if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
248 if (MD->isInstance())
249 return arrangeCXXMethodDeclaration(MD);
251 CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
253 assert(isa<FunctionType>(FTy));
255 // When declaring a function without a prototype, always use a
256 // non-variadic type.
257 if (isa<FunctionNoProtoType>(FTy)) {
258 CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
259 return arrangeLLVMFunctionInfo(noProto->getResultType(), None,
260 noProto->getExtInfo(), RequiredArgs::All);
263 assert(isa<FunctionProtoType>(FTy));
264 return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
267 /// Arrange the argument and result information for the declaration or
268 /// definition of an Objective-C method.
269 const CGFunctionInfo &
270 CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
271 // It happens that this is the same as a call with no optional
272 // arguments, except also using the formal 'self' type.
273 return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
276 /// Arrange the argument and result information for the function type
277 /// through which to perform a send to the given Objective-C method,
278 /// using the given receiver type. The receiver type is not always
279 /// the 'self' type of the method or even an Objective-C pointer type.
280 /// This is *not* the right method for actually performing such a
281 /// message send, due to the possibility of optional arguments.
282 const CGFunctionInfo &
283 CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
284 QualType receiverType) {
285 SmallVector<CanQualType, 16> argTys;
286 argTys.push_back(Context.getCanonicalParamType(receiverType));
287 argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
289 for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
290 e = MD->param_end(); i != e; ++i) {
291 argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
294 FunctionType::ExtInfo einfo;
295 einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
297 if (getContext().getLangOpts().ObjCAutoRefCount &&
298 MD->hasAttr<NSReturnsRetainedAttr>())
299 einfo = einfo.withProducesResult(true);
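// For a variadic method only the receiver, the selector, and the declared
// parameters are required; anything beyond them is passed through the ellipsis.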
301 RequiredArgs required =
302 (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
304 return arrangeLLVMFunctionInfo(GetReturnType(MD->getResultType()), argTys,
305 einfo, required);
308 const CGFunctionInfo &
309 CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
310 // FIXME: Do we need to handle ObjCMethodDecl?
311 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
313 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
314 return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
316 if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
317 return arrangeCXXDestructor(DD, GD.getDtorType());
319 return arrangeFunctionDeclaration(FD);
322 /// Arrange a call as unto a free function, except possibly with an
323 /// additional number of formal parameters considered required.
324 static const CGFunctionInfo &
325 arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
326 CodeGenModule &CGM,
327 const CallArgList &args,
328 const FunctionType *fnType,
329 unsigned numExtraRequiredArgs) {
330 assert(args.size() >= numExtraRequiredArgs);
332 // In most cases, there are no optional arguments.
333 RequiredArgs required = RequiredArgs::All;
335 // If we have a variadic prototype, the required arguments are the
336 // extra prefix plus the arguments in the prototype.
337 if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
338 if (proto->isVariadic())
339 required = RequiredArgs(proto->getNumArgs() + numExtraRequiredArgs);
341 // If we don't have a prototype at all, but we're supposed to
342 // explicitly use the variadic convention for unprototyped calls,
343 // treat all of the arguments as required but preserve the nominal
344 // possibility of variadics.
345 } else if (CGM.getTargetCodeGenInfo()
346 .isNoProtoCallVariadic(args,
347 cast<FunctionNoProtoType>(fnType))) {
348 required = RequiredArgs(args.size());
351 return CGT.arrangeFreeFunctionCall(fnType->getResultType(), args,
352 fnType->getExtInfo(), required);
355 /// Figure out the rules for calling a function with the given formal
356 /// type using the given arguments. The arguments are necessary
357 /// because the function might be unprototyped, in which case it's
358 /// target-dependent in crazy ways.
359 const CGFunctionInfo &
360 CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
361 const FunctionType *fnType) {
362 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 0);
365 /// A block function call is essentially a free-function call with an
366 /// extra implicit argument.
367 const CGFunctionInfo &
368 CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
369 const FunctionType *fnType) {
370 return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1);
373 const CGFunctionInfo &
374 CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
375 const CallArgList &args,
376 FunctionType::ExtInfo info,
377 RequiredArgs required) {
379 SmallVector<CanQualType, 16> argTypes;
380 for (CallArgList::const_iterator i = args.begin(), e = args.end();
381 i != e; ++i)
382 argTypes.push_back(Context.getCanonicalParamType(i->Ty));
383 return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
384 required);
387 /// Arrange a call to a C++ method, passing the given arguments.
388 const CGFunctionInfo &
389 CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
390 const FunctionProtoType *FPT,
391 RequiredArgs required) {
393 SmallVector<CanQualType, 16> argTypes;
394 for (CallArgList::const_iterator i = args.begin(), e = args.end();
395 i != e; ++i)
396 argTypes.push_back(Context.getCanonicalParamType(i->Ty));
398 FunctionType::ExtInfo info = FPT->getExtInfo();
399 return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
400 argTypes, info, required);
403 const CGFunctionInfo &
404 CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
405 const FunctionArgList &args,
406 const FunctionType::ExtInfo &info,
407 bool isVariadic) {
409 SmallVector<CanQualType, 16> argTypes;
410 for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
411 i != e; ++i)
412 argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
414 RequiredArgs required =
415 (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
416 return arrangeLLVMFunctionInfo(GetReturnType(resultType), argTypes, info,
417 required);
420 const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
421 return arrangeLLVMFunctionInfo(getContext().VoidTy, None,
422 FunctionType::ExtInfo(), RequiredArgs::All);
425 /// Arrange the argument and result information for an abstract value
426 /// of a given function type. This is the method which all of the
427 /// above functions ultimately defer to.
428 const CGFunctionInfo &
429 CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
430 ArrayRef<CanQualType> argTypes,
431 FunctionType::ExtInfo info,
432 RequiredArgs required) {
434 for (ArrayRef<CanQualType>::const_iterator
435 I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
436 assert(I->isCanonicalAsParam());
439 unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
441 // Lookup or create unique function info.
442 llvm::FoldingSetNodeID ID;
443 CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
445 void *insertPos = 0;
446 CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
447 if (FI)
448 return *FI;
450 // Construct the function info. We co-allocate the ArgInfos.
451 FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
452 FunctionInfos.InsertNode(FI, insertPos);
454 bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
455 assert(inserted && "Recursively being processed?");
457 // Compute ABI information.
458 getABIInfo().computeInfo(*FI);
460 // Loop over all of the computed argument and return value info. If any of
461 // them are direct or extend without a specified coerce type, specify the
462 // default now.
463 ABIArgInfo &retInfo = FI->getReturnInfo();
464 if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
465 retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
467 for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
468 I != E; ++I)
469 if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
470 I->info.setCoerceToType(ConvertType(I->type));
472 bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
473 assert(erased && "Not in set?");
475 return *FI;
478 CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
479 const FunctionType::ExtInfo &info,
480 CanQualType resultType,
481 ArrayRef<CanQualType> argTypes,
482 RequiredArgs required) {
483 void *buffer = operator new(sizeof(CGFunctionInfo) +
484 sizeof(ArgInfo) * (argTypes.size() + 1));
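// The trailing ArgInfo array co-allocated above holds the return type in
// slot 0 followed by one entry per argument, hence argTypes.size() + 1.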
485 CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
486 FI->CallingConvention = llvmCC;
487 FI->EffectiveCallingConvention = llvmCC;
488 FI->ASTCallingConvention = info.getCC();
489 FI->NoReturn = info.getNoReturn();
490 FI->ReturnsRetained = info.getProducesResult();
491 FI->Required = required;
492 FI->HasRegParm = info.getHasRegParm();
493 FI->RegParm = info.getRegParm();
494 FI->NumArgs = argTypes.size();
495 FI->getArgsBuffer()[0].type = resultType;
496 for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
497 FI->getArgsBuffer()[i + 1].type = argTypes[i];
498 return FI;
503 void CodeGenTypes::GetExpandedTypes(QualType type,
504 SmallVectorImpl<llvm::Type*> &expandedTypes) {
505 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
506 uint64_t NumElts = AT->getSize().getZExtValue();
507 for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
508 GetExpandedTypes(AT->getElementType(), expandedTypes);
509 } else if (const RecordType *RT = type->getAs<RecordType>()) {
510 const RecordDecl *RD = RT->getDecl();
511 assert(!RD->hasFlexibleArrayMember() &&
512 "Cannot expand structure with flexible array.");
513 if (RD->isUnion()) {
514 // Unions can be here only in degenerate cases - all the fields are the same
515 // after flattening. Thus we have to use the "largest" field.
516 const FieldDecl *LargestFD = 0;
517 CharUnits UnionSize = CharUnits::Zero();
519 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
520 i != e; ++i) {
521 const FieldDecl *FD = *i;
522 assert(!FD->isBitField() &&
523 "Cannot expand structure with bit-field members.");
524 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
525 if (UnionSize < FieldSize) {
526 UnionSize = FieldSize;
527 LargestFD = FD;
528 }
529 }
530 if (LargestFD)
531 GetExpandedTypes(LargestFD->getType(), expandedTypes);
532 } else {
533 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
534 i != e; ++i) {
535 assert(!i->isBitField() &&
536 "Cannot expand structure with bit-field members.");
537 GetExpandedTypes(i->getType(), expandedTypes);
540 } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
541 llvm::Type *EltTy = ConvertType(CT->getElementType());
542 expandedTypes.push_back(EltTy);
543 expandedTypes.push_back(EltTy);
545 expandedTypes.push_back(ConvertType(type));
548 llvm::Function::arg_iterator
549 CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
550 llvm::Function::arg_iterator AI) {
551 assert(LV.isSimple() &&
552 "Unexpected non-simple lvalue during struct expansion.");
554 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
555 unsigned NumElts = AT->getSize().getZExtValue();
556 QualType EltTy = AT->getElementType();
557 for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
558 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
559 LValue LV = MakeAddrLValue(EltAddr, EltTy);
560 AI = ExpandTypeFromArgs(EltTy, LV, AI);
562 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
563 RecordDecl *RD = RT->getDecl();
564 if (RD->isUnion()) {
565 // Unions can be here only in degenerate cases - all the fields are the same
566 // after flattening. Thus we have to use the "largest" field.
567 const FieldDecl *LargestFD = 0;
568 CharUnits UnionSize = CharUnits::Zero();
570 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
571 i != e; ++i) {
572 const FieldDecl *FD = *i;
573 assert(!FD->isBitField() &&
574 "Cannot expand structure with bit-field members.");
575 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
576 if (UnionSize < FieldSize) {
577 UnionSize = FieldSize;
578 LargestFD = FD;
579 }
580 }
581 if (LargestFD) {
582 // FIXME: What are the right qualifiers here?
583 LValue SubLV = EmitLValueForField(LV, LargestFD);
584 AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
585 }
586 } else {
587 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
588 i != e; ++i) {
589 FieldDecl *FD = *i;
590 QualType FT = FD->getType();
592 // FIXME: What are the right qualifiers here?
593 LValue SubLV = EmitLValueForField(LV, FD);
594 AI = ExpandTypeFromArgs(FT, SubLV, AI);
597 } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
598 QualType EltTy = CT->getElementType();
599 llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
600 EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
601 llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
602 EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
604 EmitStoreThroughLValue(RValue::get(AI), LV);
611 /// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
612 /// accessing some number of bytes out of it, try to gep into the struct to get
613 /// at its inner goodness. Dive as deep as possible without entering an element
614 /// with an in-memory size smaller than DstSize.
615 static llvm::Value *
616 EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
617 llvm::StructType *SrcSTy,
618 uint64_t DstSize, CodeGenFunction &CGF) {
619 // We can't dive into a zero-element struct.
620 if (SrcSTy->getNumElements() == 0) return SrcPtr;
622 llvm::Type *FirstElt = SrcSTy->getElementType(0);
624 // If the first elt is at least as large as what we're looking for, or if the
625 // first element is the same size as the whole struct, we can enter it.
626 uint64_t FirstEltSize =
627 CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
628 if (FirstEltSize < DstSize &&
629 FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
630 return SrcPtr;
632 // GEP into the first element.
633 SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
635 // If the first element is a struct, recurse.
636 llvm::Type *SrcTy =
637 cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
638 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
639 return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
644 /// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
645 /// are either integers or pointers. This does a truncation of the value if it
646 /// is too large or a zero extension if it is too small.
648 /// This behaves as if the value were coerced through memory, so on big-endian
649 /// targets the high bits are preserved in a truncation, while little-endian
650 /// targets preserve the low bits.
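// Illustrative example (annotation): coercing the i64 value 0xAABBCCDD11223344
// to i32 yields 0xAABBCCDD on a big-endian target but 0x11223344 on a
// little-endian one, exactly as a store followed by a narrower load would.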
651 static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
652 llvm::Type *Ty,
653 CodeGenFunction &CGF) {
654 if (Val->getType() == Ty)
655 return Val;
657 if (isa<llvm::PointerType>(Val->getType())) {
658 // If this is Pointer->Pointer avoid conversion to and from int.
659 if (isa<llvm::PointerType>(Ty))
660 return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
662 // Convert the pointer to an integer so we can play with its width.
663 Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
666 llvm::Type *DestIntTy = Ty;
667 if (isa<llvm::PointerType>(DestIntTy))
668 DestIntTy = CGF.IntPtrTy;
670 if (Val->getType() != DestIntTy) {
671 const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
672 if (DL.isBigEndian()) {
673 // Preserve the high bits on big-endian targets.
674 // That is what memory coercion does.
675 uint64_t SrcSize = DL.getTypeAllocSizeInBits(Val->getType());
676 uint64_t DstSize = DL.getTypeAllocSizeInBits(DestIntTy);
677 if (SrcSize > DstSize) {
678 Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
679 Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
680 } else {
681 Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
682 Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
683 }
684 } else {
685 // Little-endian targets preserve the low bits. No shifts required.
686 Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
690 if (isa<llvm::PointerType>(Ty))
691 Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
693 return Val;
697 /// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
698 /// a pointer to an object of type \arg Ty.
700 /// This safely handles the case when the src type is smaller than the
701 /// destination type; in this situation the values of bits which are not
702 /// present in the src are undefined.
703 static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
704 llvm::Type *Ty,
705 CodeGenFunction &CGF) {
706 llvm::Type *SrcTy =
707 cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
709 // If SrcTy and Ty are the same, just do a load.
710 if (SrcTy == Ty)
711 return CGF.Builder.CreateLoad(SrcPtr);
713 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
715 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
716 SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
717 SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
720 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
722 // If the source and destination are integer or pointer types, just do an
723 // extension or truncation to the desired type.
724 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
725 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
726 llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
727 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
730 // If load is legal, just bitcast the src pointer.
731 if (SrcSize >= DstSize) {
732 // Generally SrcSize is never greater than DstSize, since this means we are
733 // losing bits. However, this can happen in cases where the structure has
734 // additional padding, for example due to a user specified alignment.
736 // FIXME: Assert that we aren't truncating non-padding bits when we have
737 // access to that information.
738 llvm::Value *Casted =
739 CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
740 llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
741 // FIXME: Use better alignment / avoid requiring aligned load.
742 Load->setAlignment(1);
743 return Load;
746 // Otherwise do coercion through memory. This is stupid, but
747 // simple.
748 llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
749 llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
750 llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
751 llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
752 // FIXME: Use better alignment.
753 CGF.Builder.CreateMemCpy(Casted, SrcCasted,
754 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
756 return CGF.Builder.CreateLoad(Tmp);
759 // Function to store a first-class aggregate into memory. We prefer to
760 // store the elements rather than the aggregate to be more friendly to
761 // fast-isel.
762 // FIXME: Do we need to recurse here?
763 static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
764 llvm::Value *DestPtr, bool DestIsVolatile,
765 bool LowAlignment) {
766 // Prefer scalar stores to first-class aggregate stores.
767 if (llvm::StructType *STy =
768 dyn_cast<llvm::StructType>(Val->getType())) {
769 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
770 llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
771 llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
772 llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
773 DestIsVolatile);
774 if (LowAlignment)
775 SI->setAlignment(1);
776 }
777 } else {
778 llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
779 if (LowAlignment)
780 SI->setAlignment(1);
784 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
785 /// where the source and destination may have different types.
787 /// This safely handles the case when the src type is larger than the
788 /// destination type; the upper bits of the src will be lost.
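// (Annotation: when no cheaper path below applies, the value is spilled to a
// temporary alloca and copied byte-wise into the destination, mirroring the
// memory fallback in CreateCoercedLoad above.)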
789 static void CreateCoercedStore(llvm::Value *Src,
790 llvm::Value *DstPtr,
791 bool DstIsVolatile,
792 CodeGenFunction &CGF) {
793 llvm::Type *SrcTy = Src->getType();
794 llvm::Type *DstTy =
795 cast<llvm::PointerType>(DstPtr->getType())->getElementType();
796 if (SrcTy == DstTy) {
797 CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
801 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
803 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
804 DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
805 DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
808 // If the source and destination are integer or pointer types, just do an
809 // extension or truncation to the desired type.
810 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
811 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
812 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
813 CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
817 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
819 // If store is legal, just bitcast the src pointer.
820 if (SrcSize <= DstSize) {
821 llvm::Value *Casted =
822 CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
823 // FIXME: Use better alignment / avoid requiring aligned store.
824 BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
825 } else {
826 // Otherwise do coercion through memory. This is stupid, but
827 // simple.
829 // Generally SrcSize is never greater than DstSize, since this means we are
830 // losing bits. However, this can happen in cases where the structure has
831 // additional padding, for example due to a user specified alignment.
833 // FIXME: Assert that we aren't truncating non-padding bits when we have
834 // access to that information.
835 llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
836 CGF.Builder.CreateStore(Src, Tmp);
837 llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
838 llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
839 llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
840 // FIXME: Use better alignment.
841 CGF.Builder.CreateMemCpy(DstCasted, Casted,
842 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
849 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
850 return FI.getReturnInfo().isIndirect();
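// An indirect ('sret') return means the callee writes its result through a
// hidden pointer argument instead of returning it in registers.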
853 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
854 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
855 switch (BT->getKind()) {
858 case BuiltinType::Float:
859 return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
860 case BuiltinType::Double:
861 return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
862 case BuiltinType::LongDouble:
863 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
870 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
871 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
872 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
873 if (BT->getKind() == BuiltinType::LongDouble)
874 return getTarget().useObjCFP2RetForComplexLongDouble();
881 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
882 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
883 return GetFunctionType(FI);
887 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
889 bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
890 assert(Inserted && "Recursively being processed?");
892 SmallVector<llvm::Type*, 8> argTypes;
893 llvm::Type *resultType = 0;
895 const ABIArgInfo &retAI = FI.getReturnInfo();
896 switch (retAI.getKind()) {
897 case ABIArgInfo::Expand:
898 llvm_unreachable("Invalid ABI kind for return argument");
900 case ABIArgInfo::Extend:
901 case ABIArgInfo::Direct:
902 resultType = retAI.getCoerceToType();
905 case ABIArgInfo::Indirect: {
906 assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
907 resultType = llvm::Type::getVoidTy(getLLVMContext());
909 QualType ret = FI.getReturnType();
910 llvm::Type *ty = ConvertType(ret);
911 unsigned addressSpace = Context.getTargetAddressSpace(ret);
912 argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
916 case ABIArgInfo::Ignore:
917 resultType = llvm::Type::getVoidTy(getLLVMContext());
921 // Add in all of the required arguments.
922 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
923 if (FI.isVariadic()) {
924 ie = it + FI.getRequiredArgs().getNumRequiredArgs();
925 } else {
926 ie = FI.arg_end();
927 }
928 for (; it != ie; ++it) {
929 const ABIArgInfo &argAI = it->info;
931 // Insert a padding type to ensure proper alignment.
932 if (llvm::Type *PaddingType = argAI.getPaddingType())
933 argTypes.push_back(PaddingType);
935 switch (argAI.getKind()) {
936 case ABIArgInfo::Ignore:
939 case ABIArgInfo::Indirect: {
940 // indirect arguments are always on the stack, which is addr space #0.
941 llvm::Type *LTy = ConvertTypeForMem(it->type);
942 argTypes.push_back(LTy->getPointerTo());
946 case ABIArgInfo::Extend:
947 case ABIArgInfo::Direct: {
948 // If the coerce-to type is a first class aggregate, flatten it. Either
949 // way is semantically identical, but fast-isel and the optimizer
950 // generally likes scalar values better than FCAs.
951 llvm::Type *argType = argAI.getCoerceToType();
952 if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
953 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
954 argTypes.push_back(st->getElementType(i));
955 } else {
956 argTypes.push_back(argType);
961 case ABIArgInfo::Expand:
962 GetExpandedTypes(it->type, argTypes);
967 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
968 assert(Erased && "Not in set?");
970 return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
973 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
974 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
975 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
977 if (!isFuncTypeConvertible(FPT))
978 return llvm::StructType::get(getLLVMContext());
980 const CGFunctionInfo *Info;
981 if (isa<CXXDestructorDecl>(MD))
982 Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
983 else
984 Info = &arrangeCXXMethodDeclaration(MD);
985 return GetFunctionType(*Info);
988 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
989 const Decl *TargetDecl,
990 AttributeListType &PAL,
991 unsigned &CallingConv,
992 bool AttrOnCallSite) {
993 llvm::AttrBuilder FuncAttrs;
994 llvm::AttrBuilder RetAttrs;
996 CallingConv = FI.getEffectiveCallingConvention();
998 if (FI.isNoReturn())
999 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1001 // FIXME: handle sseregparm someday...
1002 if (TargetDecl) {
1003 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1004 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1005 if (TargetDecl->hasAttr<NoThrowAttr>())
1006 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1007 if (TargetDecl->hasAttr<NoReturnAttr>())
1008 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1010 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1011 const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
1012 if (FPT && FPT->isNothrow(getContext()))
1013 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1014 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1015 // These attributes are not inherited by overloads.
1016 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1017 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1018 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1021 // 'const' and 'pure' attribute functions are also nounwind.
1022 if (TargetDecl->hasAttr<ConstAttr>()) {
1023 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1024 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1025 } else if (TargetDecl->hasAttr<PureAttr>()) {
1026 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1027 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1029 if (TargetDecl->hasAttr<MallocAttr>())
1030 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1033 if (CodeGenOpts.OptimizeSize)
1034 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1035 if (CodeGenOpts.OptimizeSize == 2)
1036 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1037 if (CodeGenOpts.DisableRedZone)
1038 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1039 if (CodeGenOpts.NoImplicitFloat)
1040 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1042 if (AttrOnCallSite) {
1043 // Attributes that should go on the call site only.
1044 if (!CodeGenOpts.SimplifyLibCalls)
1045 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1046 } else {
1047 // Attributes that should go on the function, but not the call site.
1048 if (!CodeGenOpts.DisableFPElim) {
1049 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1050 } else if (CodeGenOpts.OmitLeafFramePointer) {
1051 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1052 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1053 } else {
1054 FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1055 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1058 FuncAttrs.addAttribute("less-precise-fpmad",
1059 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1060 FuncAttrs.addAttribute("no-infs-fp-math",
1061 llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1062 FuncAttrs.addAttribute("no-nans-fp-math",
1063 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1064 FuncAttrs.addAttribute("unsafe-fp-math",
1065 llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1066 FuncAttrs.addAttribute("use-soft-float",
1067 llvm::toStringRef(CodeGenOpts.SoftFloat));
1068 FuncAttrs.addAttribute("stack-protector-buffer-size",
1069 llvm::utostr(CodeGenOpts.SSPBufferSize));
1071 if (!CodeGenOpts.StackRealignment)
1072 FuncAttrs.addAttribute("no-realign-stack");
1075 QualType RetTy = FI.getReturnType();
1076 unsigned Index = 1;
1077 const ABIArgInfo &RetAI = FI.getReturnInfo();
1078 switch (RetAI.getKind()) {
1079 case ABIArgInfo::Extend:
1080 if (RetTy->hasSignedIntegerRepresentation())
1081 RetAttrs.addAttribute(llvm::Attribute::SExt);
1082 else if (RetTy->hasUnsignedIntegerRepresentation())
1083 RetAttrs.addAttribute(llvm::Attribute::ZExt);
1085 case ABIArgInfo::Direct:
1086 if (RetAI.getInReg())
1087 RetAttrs.addAttribute(llvm::Attribute::InReg);
1089 case ABIArgInfo::Ignore:
1092 case ABIArgInfo::Indirect: {
1093 llvm::AttrBuilder SRETAttrs;
1094 SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1095 if (RetAI.getInReg())
1096 SRETAttrs.addAttribute(llvm::Attribute::InReg);
1097 PAL.push_back(llvm::
1098 AttributeSet::get(getLLVMContext(), Index, SRETAttrs));
1100 ++Index;
1101 // sret disables readnone and readonly
1102 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1103 .removeAttribute(llvm::Attribute::ReadNone);
1107 case ABIArgInfo::Expand:
1108 llvm_unreachable("Invalid ABI kind for return argument");
1111 if (RetAttrs.hasAttributes())
1112 PAL.push_back(llvm::
1113 AttributeSet::get(getLLVMContext(),
1114 llvm::AttributeSet::ReturnIndex,
1115 RetAttrs));
1117 for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1118 ie = FI.arg_end(); it != ie; ++it) {
1119 QualType ParamType = it->type;
1120 const ABIArgInfo &AI = it->info;
1121 llvm::AttrBuilder Attrs;
1123 if (AI.getPaddingType()) {
1124 if (AI.getPaddingInReg())
1125 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
1126 llvm::Attribute::InReg));
1127 // Increment Index if there is padding.
1128 ++Index;
1131 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1132 // have the corresponding parameter variable. It doesn't make
1133 // sense to do it here because parameters are so messed up.
1134 switch (AI.getKind()) {
1135 case ABIArgInfo::Extend:
1136 if (ParamType->isSignedIntegerOrEnumerationType())
1137 Attrs.addAttribute(llvm::Attribute::SExt);
1138 else if (ParamType->isUnsignedIntegerOrEnumerationType())
1139 Attrs.addAttribute(llvm::Attribute::ZExt);
1141 case ABIArgInfo::Direct:
1142 if (AI.getInReg())
1143 Attrs.addAttribute(llvm::Attribute::InReg);
1145 // FIXME: handle sseregparm someday...
1147 if (llvm::StructType *STy =
1148 dyn_cast<llvm::StructType>(AI.getCoerceToType())) {
1149 unsigned Extra = STy->getNumElements()-1; // 1 will be added below.
1150 if (Attrs.hasAttributes())
1151 for (unsigned I = 0; I < Extra; ++I)
1152 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
1158 case ABIArgInfo::Indirect:
1159 if (AI.getInReg())
1160 Attrs.addAttribute(llvm::Attribute::InReg);
1162 if (AI.getIndirectByVal())
1163 Attrs.addAttribute(llvm::Attribute::ByVal);
1165 Attrs.addAlignmentAttr(AI.getIndirectAlign());
1167 // byval disables readnone and readonly.
1168 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1169 .removeAttribute(llvm::Attribute::ReadNone);
1172 case ABIArgInfo::Ignore:
1173 // Skip increment, no matching LLVM parameter.
1174 continue;
1176 case ABIArgInfo::Expand: {
1177 SmallVector<llvm::Type*, 8> types;
1178 // FIXME: This is rather inefficient. Do we ever actually need to do
1179 // anything here? The result should be just reconstructed on the other
1180 // side, so extension should be a non-issue.
1181 getTypes().GetExpandedTypes(ParamType, types);
1182 Index += types.size();
1183 continue;
1187 if (Attrs.hasAttributes())
1188 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
1189 ++Index;
1191 if (FuncAttrs.hasAttributes())
1192 PAL.push_back(llvm::
1193 AttributeSet::get(getLLVMContext(),
1194 llvm::AttributeSet::FunctionIndex,
1195 FuncAttrs));
1198 /// An argument came in as a promoted argument; demote it back to its
1199 /// declared type.
1200 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1201 const VarDecl *var,
1202 llvm::Value *value) {
1203 llvm::Type *varType = CGF.ConvertType(var->getType());
1205 // This can happen with promotions that actually don't change the
1206 // underlying type, like the enum promotions.
1207 if (value->getType() == varType) return value;
1209 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1210 && "unexpected promotion type");
1212 if (isa<llvm::IntegerType>(varType))
1213 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1215 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1218 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1219 llvm::Function *Fn,
1220 const FunctionArgList &Args) {
1221 // If this is an implicit-return-zero function, go ahead and
1222 // initialize the return value. TODO: it might be nice to have
1223 // a more general mechanism for this that didn't require synthesized
1224 // return statements.
1225 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
1226 if (FD->hasImplicitReturnZero()) {
1227 QualType RetTy = FD->getResultType().getUnqualifiedType();
1228 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1229 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1230 Builder.CreateStore(Zero, ReturnValue);
1234 // FIXME: We no longer need the types from FunctionArgList; lift up and
1235 // simplify.
1237 // Emit allocs for param decls. Give the LLVM Argument nodes names.
1238 llvm::Function::arg_iterator AI = Fn->arg_begin();
1240 // Name the struct return argument.
1241 if (CGM.ReturnTypeUsesSRet(FI)) {
1242 AI->setName("agg.result");
1243 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1244 AI->getArgNo() + 1,
1245 llvm::Attribute::NoAlias));
1246 ++AI;
1249 assert(FI.arg_size() == Args.size() &&
1250 "Mismatch between function signature & arguments.");
1251 unsigned ArgNo = 1;
1252 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1253 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1254 i != e; ++i, ++info_it, ++ArgNo) {
1255 const VarDecl *Arg = *i;
1256 QualType Ty = info_it->type;
1257 const ABIArgInfo &ArgI = info_it->info;
1259 bool isPromoted =
1260 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1262 // Skip the dummy padding argument.
1263 if (ArgI.getPaddingType())
1264 ++AI;
1266 switch (ArgI.getKind()) {
1267 case ABIArgInfo::Indirect: {
1268 llvm::Value *V = AI;
1270 if (!hasScalarEvaluationKind(Ty)) {
1271 // Aggregates and complex variables are accessed by reference. All we
1272 // need to do is realign the value, if requested
1273 if (ArgI.getIndirectRealign()) {
1274 llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
1276 // Copy from the incoming argument pointer to the temporary with the
1277 // appropriate alignment.
1279 // FIXME: We should have a common utility for generating an aggregate
1281 llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
1282 CharUnits Size = getContext().getTypeSizeInChars(Ty);
1283 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
1284 llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
1285 Builder.CreateMemCpy(Dst,
1286 Src,
1287 llvm::ConstantInt::get(IntPtrTy,
1288 Size.getQuantity()),
1289 ArgI.getIndirectAlign(),
1290 false);
1291 V = AlignedTemp;
1292 }
1293 } else {
1294 // Load scalar value from indirect argument.
1295 CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
1296 V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty,
1297 Arg->getLocStart());
1299 if (isPromoted)
1300 V = emitArgumentDemotion(*this, Arg, V);
1302 EmitParmDecl(*Arg, V, ArgNo);
1306 case ABIArgInfo::Extend:
1307 case ABIArgInfo::Direct: {
1309 // If we have the trivial case, handle it with no muss and fuss.
1310 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1311 ArgI.getCoerceToType() == ConvertType(Ty) &&
1312 ArgI.getDirectOffset() == 0) {
1313 assert(AI != Fn->arg_end() && "Argument mismatch!");
1314 llvm::Value *V = AI;
1316 if (Arg->getType().isRestrictQualified())
1317 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1318 AI->getArgNo() + 1,
1319 llvm::Attribute::NoAlias));
1321 // Ensure the argument is the correct type.
1322 if (V->getType() != ArgI.getCoerceToType())
1323 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
1325 if (isPromoted)
1326 V = emitArgumentDemotion(*this, Arg, V);
1328 if (const CXXMethodDecl *MD =
1329 dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
1330 if (MD->isVirtual() && Arg == CXXABIThisDecl)
1331 V = CGM.getCXXABI().
1332 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
1335 // Because of merging of function types from multiple decls it is
1336 // possible for the type of an argument to not match the corresponding
1337 // type in the function type. Since we are codegening the callee
1338 // in here, add a cast to the argument type.
1339 llvm::Type *LTy = ConvertType(Arg->getType());
1340 if (V->getType() != LTy)
1341 V = Builder.CreateBitCast(V, LTy);
1343 EmitParmDecl(*Arg, V, ArgNo);
1347 llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
1349 // The alignment we need to use is the max of the requested alignment for
1350 // the argument plus the alignment required by our access code below.
1351 unsigned AlignmentToUse =
1352 CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
1353 AlignmentToUse = std::max(AlignmentToUse,
1354 (unsigned)getContext().getDeclAlign(Arg).getQuantity());
1356 Alloca->setAlignment(AlignmentToUse);
1357 llvm::Value *V = Alloca;
1358 llvm::Value *Ptr = V; // Pointer to store into.
1360 // If the value is offset in memory, apply the offset now.
1361 if (unsigned Offs = ArgI.getDirectOffset()) {
1362 Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
1363 Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
1364 Ptr = Builder.CreateBitCast(Ptr,
1365 llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
1368 // If the coerce-to type is a first class aggregate, we flatten it and
1369 // pass the elements. Either way is semantically identical, but fast-isel
1370 // and the optimizer generally likes scalar values better than FCAs.
1371 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
1372 if (STy && STy->getNumElements() > 1) {
1373 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
1374 llvm::Type *DstTy =
1375 cast<llvm::PointerType>(Ptr->getType())->getElementType();
1376 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
1378 if (SrcSize <= DstSize) {
1379 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
1381 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1382 assert(AI != Fn->arg_end() && "Argument mismatch!");
1383 AI->setName(Arg->getName() + ".coerce" + Twine(i));
1384 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
1385 Builder.CreateStore(AI++, EltPtr);
1386 }
1387 } else {
1388 llvm::AllocaInst *TempAlloca =
1389 CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
1390 TempAlloca->setAlignment(AlignmentToUse);
1391 llvm::Value *TempV = TempAlloca;
1393 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1394 assert(AI != Fn->arg_end() && "Argument mismatch!");
1395 AI->setName(Arg->getName() + ".coerce" + Twine(i));
1396 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
1397 Builder.CreateStore(AI++, EltPtr);
1400 Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
1401 }
1402 } else {
1403 // Simple case, just do a coerced store of the argument into the alloca.
1404 assert(AI != Fn->arg_end() && "Argument mismatch!");
1405 AI->setName(Arg->getName() + ".coerce");
1406 CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
1410 // Match to what EmitParmDecl is expecting for this type.
1411 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
1412 V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
1413 if (isPromoted)
1414 V = emitArgumentDemotion(*this, Arg, V);
1416 EmitParmDecl(*Arg, V, ArgNo);
1417 continue; // Skip ++AI increment, already done.
1420 case ABIArgInfo::Expand: {
1421 // If this structure was expanded into multiple arguments then
1422 // we need to create a temporary and reconstruct it from the
1423 // arguments.
1424 llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
1425 CharUnits Align = getContext().getDeclAlign(Arg);
1426 Alloca->setAlignment(Align.getQuantity());
1427 LValue LV = MakeAddrLValue(Alloca, Ty, Align);
1428 llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
1429 EmitParmDecl(*Arg, Alloca, ArgNo);
1431 // Name the arguments used in expansion and increment AI.
1432 unsigned Index = 0;
1433 for (; AI != End; ++AI, ++Index)
1434 AI->setName(Arg->getName() + "." + Twine(Index));
1438 case ABIArgInfo::Ignore:
1439 // Initialize the local variable appropriately.
1440 if (!hasScalarEvaluationKind(Ty))
1441 EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
1442 else
1443 EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
1444 ArgNo);
1446 // Skip increment, no matching LLVM parameter.
1447 continue;
1448 }
1450 ++AI;
1452 assert(AI == Fn->arg_end() && "Argument mismatch!");
1455 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
1456 while (insn->use_empty()) {
1457 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
1458 if (!bitcast) return;
1460 // This is "safe" because we would have used a ConstantExpr otherwise.
1461 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
1462 bitcast->eraseFromParent();
1466 /// Try to emit a fused autorelease of a return result.
1467 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
1468 llvm::Value *result) {
1469 // We must immediately follow the cast.
1470 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
1471 if (BB->empty()) return 0;
1472 if (&BB->back() != result) return 0;
1474 llvm::Type *resultType = result->getType();
1476 // result is in a BasicBlock and is therefore an Instruction.
1477 llvm::Instruction *generator = cast<llvm::Instruction>(result);
1479 SmallVector<llvm::Instruction*,4> insnsToKill;
1481 // Look for:
1482 // %generator = bitcast %type1* %generator2 to %type2*
1483 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
1484 // We would have emitted this as a constant if the operand weren't
1485 // an Instruction.
1486 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
1488 // Require the generator to be immediately followed by the cast.
1489 if (generator->getNextNode() != bitcast)
1490 return 0;
1492 insnsToKill.push_back(bitcast);
1495 // Look for:
1496 // %generator = call i8* @objc_retain(i8* %originalResult)
1497 // or
1498 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
1499 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
1500 if (!call) return 0;
1502 bool doRetainAutorelease;
1504 if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
1505 doRetainAutorelease = true;
1506 } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
1507 .objc_retainAutoreleasedReturnValue) {
1508 doRetainAutorelease = false;
1510 // If we emitted an assembly marker for this call (and the
1511 // ARCEntrypoints field should have been set if so), go looking
1512 // for that call. If we can't find it, we can't do this
1513 // optimization. But it should always be the immediately previous
1514 // instruction, unless we needed bitcasts around the call.
1515 if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
1516 llvm::Instruction *prev = call->getPrevNode();
1518 if (isa<llvm::BitCastInst>(prev)) {
1519 prev = prev->getPrevNode();
1522 assert(isa<llvm::CallInst>(prev));
1523 assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
1524 CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
1525 insnsToKill.push_back(prev);
1526 }
1527 } else {
1528 return 0;
1529 }
1531 result = call->getArgOperand(0);
1532 insnsToKill.push_back(call);
1534 // Keep killing bitcasts, for sanity. Note that we no longer care
1535 // about precise ordering as long as there's exactly one use.
1536 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
1537 if (!bitcast->hasOneUse()) break;
1538 insnsToKill.push_back(bitcast);
1539 result = bitcast->getOperand(0);
1542 // Delete all the unnecessary instructions, from latest to earliest.
1543 for (SmallVectorImpl<llvm::Instruction*>::iterator
1544 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
1545 (*i)->eraseFromParent();
1547 // Do the fused retain/autorelease if we were asked to.
1548 if (doRetainAutorelease)
1549 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
1551 // Cast back to the result type.
1552 return CGF.Builder.CreateBitCast(result, resultType);
1555 /// If this is a +1 of the value of an immutable 'self', remove it.
1556 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
1557 llvm::Value *result) {
1558 // This is only applicable to a method with an immutable 'self'.
1559 const ObjCMethodDecl *method =
1560 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
1561 if (!method) return 0;
1562 const VarDecl *self = method->getSelfDecl();
1563 if (!self->getType().isConstQualified()) return 0;
1565 // Look for a retain call.
1566 llvm::CallInst *retainCall =
1567 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
1568 if (!retainCall ||
1569 retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
1570 return 0;
1572 // Look for an ordinary load of 'self'.
1573 llvm::Value *retainedValue = retainCall->getArgOperand(0);
1574 llvm::LoadInst *load =
1575 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
1576 if (!load || load->isAtomic() || load->isVolatile() ||
1577 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
1578 return 0;
1580 // Okay! Burn it all down. This relies for correctness on the
1581 // assumption that the retain is emitted as part of the return and
1582 // that thereafter everything is used "linearly".
1583 llvm::Type *resultType = result->getType();
1584 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
1585 assert(retainCall->use_empty());
1586 retainCall->eraseFromParent();
1587 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
1589 return CGF.Builder.CreateBitCast(load, resultType);
1592 /// Emit an ARC autorelease of the result of a function.
1594 /// \return the value to actually return from the function
1595 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
1596 llvm::Value *result) {
1597 // If we're returning 'self', kill the initial retain. This is a
1598 // heuristic attempt to "encourage correctness" in the really unfortunate
1599 // case where we have a return of self during a dealloc and we desperately
1600 // need to avoid the possible autorelease.
1601 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
1604 // At -O0, try to emit a fused retain/autorelease.
1605 if (CGF.shouldUseFusedARCCalls())
1606 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
1609 return CGF.EmitARCAutoreleaseReturnValue(result);
1612 /// Heuristically search for a dominating store to the return-value slot.
1613 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
1614 // If there are multiple uses of the return-value slot, just check
1615 // for something immediately preceding the IP. Sometimes this can
1616 // happen with how we generate implicit-returns; it can also happen
1617 // with noreturn cleanups.
1618 if (!CGF.ReturnValue->hasOneUse()) {
1619 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1620 if (IP->empty()) return 0;
1621 llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
1622 if (!store) return 0;
1623 if (store->getPointerOperand() != CGF.ReturnValue) return 0;
1624 assert(!store->isAtomic() && !store->isVolatile()); // see below
1628 llvm::StoreInst *store =
1629 dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
1630 if (!store) return 0;
1632 // These aren't actually possible for non-coerced returns, and we
1633 // only care about non-coerced returns on this code path.
1634 assert(!store->isAtomic() && !store->isVolatile());
1636 // Now do a quick-and-dirty dominance check: just walk up the
1637 // single-predecessor chain from the current insertion point.
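// E.g. (block names illustrative):
//   store.bb:   store i32 %v, i32* %retval ; then br label %cleanup.bb
//   cleanup.bb: br label %return.bb
//   return.bb:  <- current insertion point
// Walking single predecessors from return.bb reaches store.bb, so the store
// dominates the insertion point.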
1638 llvm::BasicBlock *StoreBB = store->getParent();
1639 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
1640 while (IP != StoreBB) {
1641 if (!(IP = IP->getSinglePredecessor()))
1645 // Okay, the store's basic block dominates the insertion point; the
1646 // store is safe to reuse.
1650 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
1652 SourceLocation EndLoc) {
1653 // Functions with no result always return void.
1654 if (ReturnValue == 0) {
1655 Builder.CreateRetVoid();
1659 llvm::DebugLoc RetDbgLoc;
1660 llvm::Value *RV = 0;
1661 QualType RetTy = FI.getReturnType();
1662 const ABIArgInfo &RetAI = FI.getReturnInfo();
1664 switch (RetAI.getKind()) {
1665 case ABIArgInfo::Indirect: {
1666 switch (getEvaluationKind(RetTy)) {
1669 EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
1671 EmitStoreOfComplex(RT,
1672 MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
1677 // Do nothing; aggregates get evaluated directly into the destination.
1680 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
1681 MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
1688 case ABIArgInfo::Extend:
1689 case ABIArgInfo::Direct:
1690 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
1691 RetAI.getDirectOffset() == 0) {
1692 // The internal return value temp will always have pointer-to-return-type
1693 // type; just do a load.
1695 // If there is a dominating store to ReturnValue, we can elide
1696 // the load, zap the store, and usually zap the alloca.
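// Roughly (illustrative), instead of
//   store i32 %x, i32* %retval
//   %0 = load i32* %retval
//   ret i32 %0
// we emit just 'ret i32 %x', drop the dead store, and usually drop the
// alloca as well.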
1697 if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
1698 // Reuse the debug location from the store unless there is
1699 // cleanup code to be emitted between the store and the return.
1701 if (EmitRetDbgLoc && !AutoreleaseResult)
1702 RetDbgLoc = SI->getDebugLoc();
1703 // Get the stored value and nuke the now-dead store.
1704 RV = SI->getValueOperand();
1705 SI->eraseFromParent();
1707 // If that was the only use of the return value, nuke it as well now.
1708 if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
1709 cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
1713 // Otherwise, we have to do a simple load.
1715 RV = Builder.CreateLoad(ReturnValue);
1718 llvm::Value *V = ReturnValue;
1719 // If the value is offset in memory, apply the offset now.
1720 if (unsigned Offs = RetAI.getDirectOffset()) {
1721 V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
1722 V = Builder.CreateConstGEP1_32(V, Offs);
1723 V = Builder.CreateBitCast(V,
1724 llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
1727 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
1730 // In ARC, end functions that return a retainable type with a call
1731 // to objc_autoreleaseReturnValue.
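// e.g. (illustrative) the epilogue of such a function ends up as:
//   %1 = call i8* @objc_autoreleaseReturnValue(i8* %0)
//   ret i8* %1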
1732 if (AutoreleaseResult) {
1733 assert(getLangOpts().ObjCAutoRefCount &&
1734 !FI.isReturnsRetained() &&
1735 RetTy->isObjCRetainableType());
1736 RV = emitAutoreleaseOfResult(*this, RV);
1741 case ABIArgInfo::Ignore:
1744 case ABIArgInfo::Expand:
1745 llvm_unreachable("Invalid ABI kind for return argument");
1748 llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
1749 if (!RetDbgLoc.isUnknown())
1750 Ret->setDebugLoc(RetDbgLoc);
1753 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
1754 const VarDecl *param,
1755 SourceLocation loc) {
1756 // StartFunction converted the ABI-lowered parameter(s) into a
1757 // local alloca. We need to turn that into an r-value suitable for EmitCall.
1759 llvm::Value *local = GetAddrOfLocalVar(param);
1761 QualType type = param->getType();
1763 // For the most part, we just need to load the alloca, except:
1764 // 1) aggregate r-values are actually pointers to temporaries, and
1765 // 2) references to non-scalars are pointers directly to the aggregate.
1766 // I don't know why references to scalars are different here.
1767 if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
1768 if (!hasScalarEvaluationKind(ref->getPointeeType()))
1769 return args.add(RValue::getAggregate(local), type);
1771 // Locals which are references to scalars are represented
1772 // with allocas holding the pointer.
1773 return args.add(RValue::get(Builder.CreateLoad(local)), type);
1776 args.add(convertTempToRValue(local, type, loc), type);
1779 static bool isProvablyNull(llvm::Value *addr) {
1780 return isa<llvm::ConstantPointerNull>(addr);
1783 static bool isProvablyNonNull(llvm::Value *addr) {
1784 return isa<llvm::AllocaInst>(addr);
1787 /// Emit the actual writing-back of a writeback.
1788 static void emitWriteback(CodeGenFunction &CGF,
1789 const CallArgList::Writeback &writeback) {
1790 const LValue &srcLV = writeback.Source;
1791 llvm::Value *srcAddr = srcLV.getAddress();
1792 assert(!isProvablyNull(srcAddr) &&
1793 "shouldn't have writeback for provably null argument");
1795 llvm::BasicBlock *contBB = 0;
1797 // If the argument wasn't provably non-null, we need to null check
1798 // before doing the store.
1799 bool provablyNonNull = isProvablyNonNull(srcAddr);
1800 if (!provablyNonNull) {
1801 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
1802 contBB = CGF.createBasicBlock("icr.done");
1804 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1805 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
1806 CGF.EmitBlock(writebackBB);
1809 // Load the value to writeback.
1810 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
1812 // Cast it back, in case we're writing an id to a Foo* or something.
1813 value = CGF.Builder.CreateBitCast(value,
1814 cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
1815 "icr.writeback-cast");
1817 // Perform the writeback.
1819 // If we have a "to use" value, it's something we need to emit a use
1820 // of. This has to be carefully threaded in: if it's done after the
1821 // release it's potentially undefined behavior (and the optimizer
1822 // will ignore it), and if it happens before the retain then the
1823 // optimizer could move the release there.
1824 if (writeback.ToUse) {
1825 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
1827 // Retain the new value. No need to block-copy here: the block's
1828 // being passed up the stack.
1829 value = CGF.EmitARCRetainNonBlock(value);
1831 // Emit the intrinsic use here.
1832 CGF.EmitARCIntrinsicUse(writeback.ToUse);
1834 // Load the old value (primitively).
1835 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
1837 // Put the new value in place (primitively).
1838 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
1840 // Release the old value.
1841 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
1843 // Otherwise, we can just do a normal lvalue store.
1845 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
1848 // Jump to the continuation block.
1849 if (!provablyNonNull)
1850 CGF.EmitBlock(contBB);
1853 static void emitWritebacks(CodeGenFunction &CGF,
1854 const CallArgList &args) {
1855 for (CallArgList::writeback_iterator
1856 i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
1857 emitWriteback(CGF, *i);
1860 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
1861 const CallArgList &CallArgs) {
1862 assert(CGF.getTarget().getCXXABI().isArgumentDestroyedByCallee());
1863 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
1864 CallArgs.getCleanupsToDeactivate();
1865 // Iterate in reverse to increase the likelihood of popping the cleanup while it is still the innermost one.
1866 for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
1867 I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
1868 CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
1869 I->IsActiveIP->eraseFromParent();
1873 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
1874 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
1875 if (uop->getOpcode() == UO_AddrOf)
1876 return uop->getSubExpr();
1880 /// Emit an argument that's being passed call-by-writeback. That is,
1881 /// we are passing the address of a temporary that is copied back into the original l-value after the call.
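/// A typical source-level example under ARC (method name illustrative):
///   NSError *err;
///   [obj doSomethingAndReturnError:&err]; // parameter is NSError * __autoreleasing *
/// Instead of &err we pass the address of a fresh temporary, then copy the
/// temporary's value back into 'err' once the call returns.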
1882 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
1883 const ObjCIndirectCopyRestoreExpr *CRE) {
1886 // Make an optimistic effort to emit the address as an l-value.
1887 // This can fail if the argument expression is more complicated.
1888 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
1889 srcLV = CGF.EmitLValue(lvExpr);
1891 // Otherwise, just emit it as a scalar.
1893 llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
1895 QualType srcAddrType =
1896 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
1897 srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
1899 llvm::Value *srcAddr = srcLV.getAddress();
1901 // The dest and src types don't necessarily match in LLVM terms
1902 // because of the crazy ObjC compatibility rules.
1904 llvm::PointerType *destType =
1905 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
1907 // If the address is a constant null, just pass the appropriate null.
1908 if (isProvablyNull(srcAddr)) {
1909 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
1914 // Create the temporary.
1915 llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
1917 // Loading an l-value can introduce a cleanup if the l-value is __weak,
1918 // and that cleanup will be conditional if we can't prove that the l-value
1919 // isn't null, so we need to register a dominating point so that the cleanups
1920 // system will make valid IR.
1921 CodeGenFunction::ConditionalEvaluation condEval(CGF);
1923 // Zero-initialize it if we're not doing a copy-initialization.
1924 bool shouldCopy = CRE->shouldCopy();
1927 llvm::ConstantPointerNull::get(
1928 cast<llvm::PointerType>(destType->getElementType()));
1929 CGF.Builder.CreateStore(null, temp);
1932 llvm::BasicBlock *contBB = 0;
1933 llvm::BasicBlock *originBB = 0;
1935 // If the address is *not* known to be non-null, we have to null-check it dynamically.
1936 llvm::Value *finalArgument;
1938 bool provablyNonNull = isProvablyNonNull(srcAddr);
1939 if (provablyNonNull) {
1940 finalArgument = temp;
1942 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
1944 finalArgument = CGF.Builder.CreateSelect(isNull,
1945 llvm::ConstantPointerNull::get(destType),
1946 temp, "icr.argument");
1948 // If we need to copy, then the load has to be conditional, which
1949 // means we need control flow.
1951 originBB = CGF.Builder.GetInsertBlock();
1952 contBB = CGF.createBasicBlock("icr.cont");
1953 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
1954 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
1955 CGF.EmitBlock(copyBB);
1956 condEval.begin(CGF);
1960 llvm::Value *valueToUse = 0;
1962 // Perform a copy if necessary.
1964 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
1965 assert(srcRV.isScalar());
1967 llvm::Value *src = srcRV.getScalarVal();
1968 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
1971 // Use an ordinary store, not a store-to-lvalue.
1972 CGF.Builder.CreateStore(src, temp);
1974 // If optimization is enabled, and the value was held in a
1975 // __strong variable, we need to tell the optimizer that this
1976 // value has to stay alive until we do the store back.
1977 // This is because the temporary is effectively unretained,
1978 // and so otherwise we could violate the high-level semantics.
1979 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
1980 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
1985 // Finish the control flow if we needed it.
1986 if (shouldCopy && !provablyNonNull) {
1987 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
1988 CGF.EmitBlock(contBB);
1990 // Make a phi for the value to intrinsically use.
1992 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
1994 phiToUse->addIncoming(valueToUse, copyBB);
1995 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
1997 valueToUse = phiToUse;
2003 args.addWriteback(srcLV, temp, valueToUse);
2004 args.add(RValue::get(finalArgument), CRE->getType());
2007 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
2009 if (const ObjCIndirectCopyRestoreExpr *CRE
2010 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
2011 assert(getLangOpts().ObjCAutoRefCount);
2012 assert(getContext().hasSameType(E->getType(), type));
2013 return emitWritebackArg(*this, args, CRE);
2016 assert(type->isReferenceType() == E->isGLValue() &&
2017 "reference binding to unmaterialized r-value!");
2019 if (E->isGLValue()) {
2020 assert(E->getObjectKind() == OK_Ordinary);
2021 return args.add(EmitReferenceBindingToExpr(E), type);
2024 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
2026 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
2027 // However, we still have to push an EH-only cleanup in case we unwind before
2028 // we make it to the call.
2029 if (HasAggregateEvalKind &&
2030 CGM.getTarget().getCXXABI().isArgumentDestroyedByCallee()) {
2031 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2032 if (RD && RD->hasNonTrivialDestructor()) {
2033 AggValueSlot Slot = CreateAggTemp(type, "agg.arg.tmp");
2034 Slot.setExternallyDestructed();
2035 EmitAggExpr(E, Slot);
2036 RValue RV = Slot.asRValue();
2039 pushDestroy(EHCleanup, RV.getAggregateAddr(), type, destroyCXXObject,
2040 /*useEHCleanupForArray*/ true);
2041 // This unreachable is a temporary marker which will be removed later.
2042 llvm::Instruction *IsActive = Builder.CreateUnreachable();
2043 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
2048 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
2049 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
2050 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
2051 assert(L.isSimple());
2052 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
2053 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
2055 // We can't represent a misaligned lvalue in the CallArgList, so copy
2056 // to an aligned temporary now.
2057 llvm::Value *tmp = CreateMemTemp(type);
2058 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
2060 args.add(RValue::getAggregate(tmp), type);
2065 args.add(EmitAnyExprToTemp(E), type);
2068 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2069 // optimizer it can aggressively ignore unwind edges.
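// For instance (illustrative), an annotated call prints as:
//   call void @objc_release(i8* %x), !clang.arc.no_objc_arc_exceptions !0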
2071 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
2072 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
2073 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
2074 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
2075 CGM.getNoObjCARCExceptionsMetadata());
2078 /// Emits a call to the given no-arguments nounwind runtime function.
2080 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
2081 const llvm::Twine &name) {
2082 return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
2085 /// Emits a call to the given nounwind runtime function.
2087 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
2088 ArrayRef<llvm::Value*> args,
2089 const llvm::Twine &name) {
2090 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
2091 call->setDoesNotThrow();
2095 /// Emits a simple call (never an invoke) to the given no-arguments
2096 /// runtime function.
2098 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
2099 const llvm::Twine &name) {
2100 return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
2103 /// Emits a simple call (never an invoke) to the given runtime function.
2106 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
2107 ArrayRef<llvm::Value*> args,
2108 const llvm::Twine &name) {
2109 llvm::CallInst *call = Builder.CreateCall(callee, args, name);
2110 call->setCallingConv(getRuntimeCC());
2114 /// Emits a call or invoke to the given noreturn runtime function.
2115 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
2116 ArrayRef<llvm::Value*> args) {
2117 if (getInvokeDest()) {
2118 llvm::InvokeInst *invoke =
2119 Builder.CreateInvoke(callee,
2120 getUnreachableBlock(),
2123 invoke->setDoesNotReturn();
2124 invoke->setCallingConv(getRuntimeCC());
2126 llvm::CallInst *call = Builder.CreateCall(callee, args);
2127 call->setDoesNotReturn();
2128 call->setCallingConv(getRuntimeCC());
2129 Builder.CreateUnreachable();
2133 /// Emits a call or invoke instruction to the given nullary runtime function.
2136 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
2137 const Twine &name) {
2138 return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name);
2141 /// Emits a call or invoke instruction to the given runtime function.
2143 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
2144 ArrayRef<llvm::Value*> args,
2145 const Twine &name) {
2146 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
2147 callSite.setCallingConv(getRuntimeCC());
2152 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
2153 const Twine &Name) {
2154 return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
2157 /// Emits a call or invoke instruction to the given function, depending
2158 /// on the current state of the EH stack.
2160 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
2161 ArrayRef<llvm::Value *> Args,
2162 const Twine &Name) {
2163 llvm::BasicBlock *InvokeDest = getInvokeDest();
2165 llvm::Instruction *Inst;
2167 Inst = Builder.CreateCall(Callee, Args, Name);
2169 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
2170 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
2174 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2175 // optimizer it can aggressively ignore unwind edges.
2176 if (CGM.getLangOpts().ObjCAutoRefCount)
2177 AddObjCARCExceptionMetadata(Inst);
2182 static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
2183 llvm::FunctionType *FTy) {
2184 if (ArgNo < FTy->getNumParams())
2185 assert(Elt->getType() == FTy->getParamType(ArgNo));
2186 else
2187 assert(FTy->isVarArg());
2188 ++ArgNo;
2191 void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
2192 SmallVectorImpl<llvm::Value *> &Args,
2193 llvm::FunctionType *IRFuncTy) {
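// Illustrative examples of expansion (types made up): a 'struct { float x, y; }'
// argument becomes two float arguments, a 'T[2]' argument becomes two T
// arguments, and a union passes only its largest field; expansion recurses.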
2194 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2195 unsigned NumElts = AT->getSize().getZExtValue();
2196 QualType EltTy = AT->getElementType();
2197 llvm::Value *Addr = RV.getAggregateAddr();
2198 for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
2199 llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
2200 RValue EltRV = convertTempToRValue(EltAddr, EltTy, SourceLocation());
2201 ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
2203 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
2204 RecordDecl *RD = RT->getDecl();
2205 assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
2206 LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
2208 if (RD->isUnion()) {
2209 const FieldDecl *LargestFD = 0;
2210 CharUnits UnionSize = CharUnits::Zero();
2212 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2214 const FieldDecl *FD = *i;
2215 assert(!FD->isBitField() &&
2216 "Cannot expand structure with bit-field members.");
2217 CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
2218 if (UnionSize < FieldSize) {
2219 UnionSize = FieldSize;
2224 RValue FldRV = EmitRValueForField(LV, LargestFD, SourceLocation());
2225 ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
2228 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2232 RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
2233 ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
2236 } else if (Ty->isAnyComplexType()) {
2237 ComplexPairTy CV = RV.getComplexVal();
2238 Args.push_back(CV.first);
2239 Args.push_back(CV.second);
2241 assert(RV.isScalar() &&
2242 "Unexpected non-scalar rvalue during struct expansion.");
2244 // Insert a bitcast as needed.
2245 llvm::Value *V = RV.getScalarVal();
2246 if (Args.size() < IRFuncTy->getNumParams() &&
2247 V->getType() != IRFuncTy->getParamType(Args.size()))
2248 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
2255 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
2256 llvm::Value *Callee,
2257 ReturnValueSlot ReturnValue,
2258 const CallArgList &CallArgs,
2259 const Decl *TargetDecl,
2260 llvm::Instruction **callOrInvoke) {
2261 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
2262 SmallVector<llvm::Value*, 16> Args;
2264 // Handle struct-return functions by passing a pointer to the
2265 // location that we would like to return into.
2266 QualType RetTy = CallInfo.getReturnType();
2267 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
2269 // IRArgNo - Keep track of the argument number in the callee we're looking at.
2270 unsigned IRArgNo = 0;
2271 llvm::FunctionType *IRFuncTy =
2272 cast<llvm::FunctionType>(
2273 cast<llvm::PointerType>(Callee->getType())->getElementType());
2275 // If the call returns a temporary with struct return, create a temporary
2276 // alloca to hold the result, unless one is given to us.
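// e.g. (illustrative) 'struct Big f(void)' is lowered roughly as
//   void @f(%struct.Big* sret %agg.result)
// and the slot created (or supplied) here becomes that leading sret argument.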
2277 if (CGM.ReturnTypeUsesSRet(CallInfo)) {
2278 llvm::Value *Value = ReturnValue.getValue();
2280 Value = CreateMemTemp(RetTy);
2281 Args.push_back(Value);
2282 checkArgMatches(Value, IRArgNo, IRFuncTy);
2285 assert(CallInfo.arg_size() == CallArgs.size() &&
2286 "Mismatch between function signature & arguments.");
2287 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
2288 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
2289 I != E; ++I, ++info_it) {
2290 const ABIArgInfo &ArgInfo = info_it->info;
2293 CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
2295 // Insert a padding argument to ensure proper alignment.
2296 if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
2297 Args.push_back(llvm::UndefValue::get(PaddingType));
2301 switch (ArgInfo.getKind()) {
2302 case ABIArgInfo::Indirect: {
2303 if (RV.isScalar() || RV.isComplex()) {
2304 // Make a temporary alloca to pass the argument.
2305 llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2306 if (ArgInfo.getIndirectAlign() > AI->getAlignment())
2307 AI->setAlignment(ArgInfo.getIndirectAlign());
2311 MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
2314 EmitStoreOfScalar(RV.getScalarVal(), argLV, /*init*/ true);
2316 EmitStoreOfComplex(RV.getComplexVal(), argLV, /*init*/ true);
2318 // Validate argument match.
2319 checkArgMatches(AI, IRArgNo, IRFuncTy);
2321 // We want to avoid creating an unnecessary temporary+copy here;
2322 // however, we need one in three cases:
2323 // 1. If the argument is not byval, and we are required to copy the
2324 // source. (This case doesn't occur on any common architecture.)
2325 // 2. If the argument is byval, RV is not sufficiently aligned, and
2326 // we cannot force it to be sufficiently aligned.
2327 // 3. If the argument is byval, but RV is located in an address space
2328 // different than that of the argument (0).
2329 llvm::Value *Addr = RV.getAggregateAddr();
2330 unsigned Align = ArgInfo.getIndirectAlign();
2331 const llvm::DataLayout *TD = &CGM.getDataLayout();
2332 const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
2333 const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
2334 IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
2335 if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
2336 (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
2337 llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
2338 (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
2339 // Create an aligned temporary, and copy to it.
2340 llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
2341 if (Align > AI->getAlignment())
2342 AI->setAlignment(Align);
2344 EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
2346 // Validate argument match.
2347 checkArgMatches(AI, IRArgNo, IRFuncTy);
2349 // Skip the extra memcpy call.
2350 Args.push_back(Addr);
2352 // Validate argument match.
2353 checkArgMatches(Addr, IRArgNo, IRFuncTy);
2359 case ABIArgInfo::Ignore:
2362 case ABIArgInfo::Extend:
2363 case ABIArgInfo::Direct: {
2364 if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
2365 ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
2366 ArgInfo.getDirectOffset() == 0) {
2369 V = RV.getScalarVal();
2371 V = Builder.CreateLoad(RV.getAggregateAddr());
2373 // If the argument doesn't match, perform a bitcast to coerce it. This
2374 // can happen due to trivial type mismatches.
2375 if (IRArgNo < IRFuncTy->getNumParams() &&
2376 V->getType() != IRFuncTy->getParamType(IRArgNo))
2377 V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
2380 checkArgMatches(V, IRArgNo, IRFuncTy);
2384 // FIXME: Avoid the conversion through memory if possible.
2385 llvm::Value *SrcPtr;
2386 if (RV.isScalar() || RV.isComplex()) {
2387 SrcPtr = CreateMemTemp(I->Ty, "coerce");
2388 LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
2389 if (RV.isScalar()) {
2390 EmitStoreOfScalar(RV.getScalarVal(), SrcLV, /*init*/ true);
2392 EmitStoreOfComplex(RV.getComplexVal(), SrcLV, /*init*/ true);
2395 SrcPtr = RV.getAggregateAddr();
2397 // If the value is offset in memory, apply the offset now.
2398 if (unsigned Offs = ArgInfo.getDirectOffset()) {
2399 SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
2400 SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
2401 SrcPtr = Builder.CreateBitCast(SrcPtr,
2402 llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
2406 // If the coerce-to type is a first-class aggregate, we flatten it and
2407 // pass the elements. Either way is semantically identical, but fast-isel
2408 // and the optimizer generally like scalar values better than FCAs.
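// Illustrative IR for a two-field coercion (types and names made up):
//   %elt0.addr = getelementptr { i64, i64 }* %coerce, i32 0, i32 0
//   %elt0 = load i64* %elt0.addr
//   %elt1.addr = getelementptr { i64, i64 }* %coerce, i32 0, i32 1
//   %elt1 = load i64* %elt1.addr
//   call void @f(i64 %elt0, i64 %elt1)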
2409 if (llvm::StructType *STy =
2410 dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
2412 cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
2413 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
2414 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);
2416 // If the source type is smaller than the destination type of the
2417 // coerce-to logic, copy the source value into a temp alloca the size
2418 // of the destination type to allow loading all of it. The bits past
2419 // the source value are left undef.
2420 if (SrcSize < DstSize) {
2421 llvm::AllocaInst *TempAlloca
2422 = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
2423 Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
2424 SrcPtr = TempAlloca;
2426 SrcPtr = Builder.CreateBitCast(SrcPtr,
2427 llvm::PointerType::getUnqual(STy));
2430 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2431 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
2432 llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
2433 // We don't know what we're loading from.
2434 LI->setAlignment(1);
2437 // Validate argument match.
2438 checkArgMatches(LI, IRArgNo, IRFuncTy);
2441 // In the simple case, just pass the coerced loaded value.
2442 Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
2445 // Validate argument match.
2446 checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
2452 case ABIArgInfo::Expand:
2453 ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
2454 IRArgNo = Args.size();
2459 if (!CallArgs.getCleanupsToDeactivate().empty())
2460 deactivateArgCleanupsBeforeCall(*this, CallArgs);
2462 // If the callee is a bitcast of a function to a varargs pointer to function
2463 // type, check to see if we can remove the bitcast. This handles some cases
2464 // with unprototyped functions.
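// e.g. (illustrative) given
//   void foo();         // unprototyped declaration used at this call site
//   void foo(int x) {}  // definition already emitted with a real prototype
// the call 'foo(42)' may see its callee as
//   bitcast (void (i32)* @foo to void (...)*)
// and stripping that bitcast lets us call @foo directly.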
2465 if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
2466 if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
2467 llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
2468 llvm::FunctionType *CurFT =
2469 cast<llvm::FunctionType>(CurPT->getElementType());
2470 llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
2472 if (CE->getOpcode() == llvm::Instruction::BitCast &&
2473 ActualFT->getReturnType() == CurFT->getReturnType() &&
2474 ActualFT->getNumParams() == CurFT->getNumParams() &&
2475 ActualFT->getNumParams() == Args.size() &&
2476 (CurFT->isVarArg() || !ActualFT->isVarArg())) {
2477 bool ArgsMatch = true;
2478 for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
2479 if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
2484 // Strip the cast if we can get away with it. This is a nice cleanup,
2485 // but also allows us to inline the function at -O0 if it is marked always_inline.
2492 unsigned CallingConv;
2493 CodeGen::AttributeListType AttributeList;
2494 CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
2496 llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
2499 llvm::BasicBlock *InvokeDest = 0;
2500 if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
2501 llvm::Attribute::NoUnwind))
2502 InvokeDest = getInvokeDest();
2506 CS = Builder.CreateCall(Callee, Args);
2508 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
2509 CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
2513 *callOrInvoke = CS.getInstruction();
2515 CS.setAttributes(Attrs);
2516 CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
2518 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
2519 // optimizer it can aggressively ignore unwind edges.
2520 if (CGM.getLangOpts().ObjCAutoRefCount)
2521 AddObjCARCExceptionMetadata(CS.getInstruction());
2523 // If the call doesn't return, finish the basic block and clear the
2524 // insertion point; this allows the rest of IRgen to discard
2525 // unreachable code.
2526 if (CS.doesNotReturn()) {
2527 Builder.CreateUnreachable();
2528 Builder.ClearInsertionPoint();
2530 // FIXME: For now, emit a dummy basic block because expr emitters
2531 // generally are not ready to handle emitting expressions at unreachable points.
2533 EnsureInsertPoint();
2535 // Return a reasonable RValue.
2536 return GetUndefRValue(RetTy);
2539 llvm::Instruction *CI = CS.getInstruction();
2540 if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
2541 CI->setName("call");
2543 // Emit any writebacks immediately. Arguably this should happen
2544 // after any return-value munging.
2545 if (CallArgs.hasWritebacks())
2546 emitWritebacks(*this, CallArgs);
2548 switch (RetAI.getKind()) {
2549 case ABIArgInfo::Indirect:
2550 return convertTempToRValue(Args[0], RetTy, SourceLocation());
2552 case ABIArgInfo::Ignore:
2553 // If we are ignoring an argument that had a result, make sure to
2554 // construct the appropriate return value for our caller.
2555 return GetUndefRValue(RetTy);
2557 case ABIArgInfo::Extend:
2558 case ABIArgInfo::Direct: {
2559 llvm::Type *RetIRTy = ConvertType(RetTy);
2560 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
2561 switch (getEvaluationKind(RetTy)) {
2563 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
2564 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
2565 return RValue::getComplex(std::make_pair(Real, Imag));
2567 case TEK_Aggregate: {
2568 llvm::Value *DestPtr = ReturnValue.getValue();
2569 bool DestIsVolatile = ReturnValue.isVolatile();
2572 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
2573 DestIsVolatile = false;
2575 BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
2576 return RValue::getAggregate(DestPtr);
2579 // If the call's return value doesn't match the expected IR type, perform a
2580 // bitcast to coerce it. This can happen due to trivial type mismatches.
2581 llvm::Value *V = CI;
2582 if (V->getType() != RetIRTy)
2583 V = Builder.CreateBitCast(V, RetIRTy);
2584 return RValue::get(V);
2587 llvm_unreachable("bad evaluation kind");
2590 llvm::Value *DestPtr = ReturnValue.getValue();
2591 bool DestIsVolatile = ReturnValue.isVolatile();
2594 DestPtr = CreateMemTemp(RetTy, "coerce");
2595 DestIsVolatile = false;
2598 // If the value is offset in memory, apply the offset now.
2599 llvm::Value *StorePtr = DestPtr;
2600 if (unsigned Offs = RetAI.getDirectOffset()) {
2601 StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
2602 StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
2603 StorePtr = Builder.CreateBitCast(StorePtr,
2604 llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2606 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
2608 return convertTempToRValue(DestPtr, RetTy, SourceLocation());
2611 case ABIArgInfo::Expand:
2612 llvm_unreachable("Invalid ABI kind for return argument");
2615 llvm_unreachable("Unhandled ABIArgInfo::Kind");
2618 /* VarArg handling */
2620 llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
2621 return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);