//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "CGCXXABI.h"
#include "ABIInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/Attributes.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/InlineAsm.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  // TODO: add support for CC_X86Pascal to llvm
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}
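
// For example, a function declared to return 'const int' is treated here
// exactly like one returning 'int'; the top-level qualifier has no ABI
// effect.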

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                         llvm::SmallVector<CanQualType, 16>(),
                         FTNP->getExtInfo());
}

/// \param Args - contains any initial parameters besides those
///   in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
                                  llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                             CanQual<FunctionProtoType> FTP) {
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  return ::getFunctionInfo(*this, ArgTys, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  return CC_C;
}
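
// Note that the ARM AAPCS variants come from the GNU pcs("...") attribute,
// while the x86 conventions each have a dedicated attribute; a declaration
// with no calling-convention attribute falls through to CC_C.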

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, RD));

  return ::getFunctionInfo(*this, ArgTys,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(GetThisType(Context, MD->getParent()));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));
  CanQualType ResTy = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));

  return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  llvm::SmallVector<CanQualType, 2> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));
  CanQualType ResTy = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");

  return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOptions().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  return getFunctionInfo(GetReturnType(MD->getResultType()), ArgTys, einfo);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->Ty));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
  llvm::SmallVector<CanQualType, 1> args;
  return getFunctionInfo(getContext().VoidTy, args, FunctionType::ExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                           const llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info) {
#ifndef NDEBUG
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy, ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getProducesResult(),
                          Info.getHasRegParm(), Info.getRegParm(), ResTy,
                          ArgTys.data(), ArgTys.size());
  FunctionInfos.InsertNode(FI, InsertPos);

  bool Inserted = FunctionsBeingProcessed.insert(FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &RetInfo = FI->getReturnInfo();
  if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
    RetInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool Erased = FunctionsBeingProcessed.erase(FI); (void)Erased;
  assert(Erased && "Not in set?");

  return *FI;
}
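
// The CGFunctionInfo nodes built here are uniqued in the FunctionInfos
// folding set, so repeated queries for the same signature return the same
// object; FunctionsBeingProcessed only guards against the ABI code
// recursively requesting the signature it is currently computing.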

CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn, bool returnsRetained,
                               bool _HasRegParm, unsigned _RegParm,
                               CanQualType ResTy,
                               const CanQualType *ArgTys,
                               unsigned NumArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn), ReturnsRetained(returnsRetained),
    HasRegParm(_HasRegParm), RegParm(_RegParm)
{
  NumArgs = NumArgTys;

  // FIXME: Co-allocate with the CGFunctionInfo object.
  Args = new ArgInfo[1 + NumArgTys];
  Args[0].type = ResTy;
  for (unsigned i = 0; i != NumArgTys; ++i)
    Args[1 + i].type = ArgTys[i];
}
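
// Note the layout: Args[0] holds the return type and its ABIArgInfo, while
// the formal parameters live in Args[1..NumArgs]; arg_begin()/arg_end()
// iterate over the parameter slots only.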

void CodeGenTypes::GetExpandedTypes(QualType type,
                     llvm::SmallVectorImpl<llvm::Type*> &expandedTypes) {
  const RecordType *RT = type->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType fieldType = FD->getType();
    if (fieldType->isRecordType())
      GetExpandedTypes(fieldType, expandedTypes);
    else
      expandedTypes.push_back(ConvertType(fieldType));
  }
}
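
// For example (illustrative): 'struct S { int a; struct { float b; } c; }'
// expands to the flat type list [i32, float], recursing through the nested
// record.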

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV);
      ++AI;
    }
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an
/// element with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   const llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  const llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
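
// For example (illustrative): given %p of type {{i32}, i32}* and
// DstSize == 4, this dives twice (emitting "coerce.dive" GEPs) and hands
// back a pointer to the innermost i32.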

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where
/// both are either integers or pointers.  This does a truncation of the value
/// if it is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             const llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  const llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");

  return Val;
}
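
// For example (illustrative), coercing an i8* to an i32 on a 32-bit target
// emits a single ptrtoint ("coerce.val.pi"); coercing i64 to i32 truncates,
// and i16 to i64 zero-extends (the IntCast above is unsigned).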

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}
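
// For example (illustrative): loading a { i32, i32 } in memory as a single
// i64 just bitcasts the source pointer to i64* and issues one (unaligned)
// load; the alloca round-trip at the end is only needed when the source is
// smaller than the destination and neither side is int-or-pointer.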

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (const llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().Target.useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}
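
// In practice this is an x86-32 Objective-C concern: message sends returning
// float/double/long double must go through objc_msgSend_fpret so the x87
// stack is handled correctly even for a nil receiver.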

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  if (const FunctionProtoType *FPT =
        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
    Variadic = FPT->isVariadic();

  return GetFunctionType(FI, Variadic);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {
  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::SmallVector<llvm::Type*, 8> argTypes;
  const llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    const llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (const llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, isVariadic);
}
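
// For example (illustrative): on x86-64, a 'struct { double x; long y; }'
// passed by value is typically coerced to { double, i64 } and therefore
// shows up here as two IR parameters, double and i64.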

const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &getFunctionInfo(MD);
  return GetFunctionType(*Info, FPT->isVariadic());
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs |= llvm::Attribute::NoUnwind;
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::SExt;
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::ZExt;
    // FALL THROUGH
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: RegParm should be reduced in case of global register variable.
  signed RegParm;
  if (FI.getHasRegParm())
    RegParm = FI.getRegParm();
  else
    RegParm = CodeGenOpts.NumRegisterParameters;

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attributes |= llvm::Attribute::SExt;
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attributes |= llvm::Attribute::ZExt;
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...

      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType()))
        Index += STy->getNumElements()-1;  // 1 will be added below.
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      llvm::SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}
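
// In this old-style attribute list, index 0 describes the return value,
// indices 1..N the formal parameters, and index ~0 the function itself;
// that is why Index starts at 1 and sret/byval/expanded arguments consume
// parameter slots.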

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  const llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
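
// For example, a K&R-promoted 'float' parameter arrives as a double and is
// truncated back with the FPCast above; 'short' and 'char' arrive as int
// and are handled by the integer truncation.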

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          const llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");

      // The alignment we need to use is the max of the requested alignment
      // for the argument and the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
        Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          assert(AI != Fn->arg_end() && "Argument mismatch!");
          AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
          Builder.CreateStore(AI++, EltPtr);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
      EmitParmDecl(*Arg, Temp, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // The cast must be the last instruction emitted in the current block.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  const llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  llvm::SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // Look for an inline asm immediately preceding the call and kill it, too.
    llvm::Instruction *prev = call->getPrevNode();
    if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
      if (asmCall->getCalledValue()
            == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
        insnsToKill.push_back(prev);
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (llvm::SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// Emit an ARC autorelease of the result of a function.
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, so just do a load.
      //
      // If the instruction right before the insertion point is a store to the
      // return value, we can elide the load, zap the store, and usually zap
      // the alloca.
      llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
      llvm::StoreInst *SI = 0;
      if (InsertBB->empty() ||
          !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
          SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
        RV = Builder.CreateLoad(ReturnValue);
      } else {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOptions().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (type->isAnyComplexType()) {
    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
    return args.add(RValue::getComplex(complex), type);
  }

  if (hasAggregateLLVMType(type))
    return args.add(RValue::getAggregate(local), type);

  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  return args.add(RValue::get(value), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}
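
// These are deliberately conservative syntactic checks: a
// ConstantPointerNull is certainly null and the address of a local alloca
// certainly is not; anything else is treated as unknown and gets a runtime
// null check.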

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  llvm::Value *srcAddr = writeback.Address;
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = 0;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to write back.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.
  QualType srcAddrType = writeback.AddressType;
  CGF.EmitStoreThroughLValue(RValue::get(value),
                             CGF.MakeAddrLValue(srcAddr, srcAddrType));

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of a temporary that is (possibly)
/// copy-initialized from the original l-value and written back into it
/// after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  const llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  QualType srcAddrType =
    CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
    }
  }

  // Perform a copy if necessary.
  if (shouldCopy) {
    LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull)
    CGF.EmitBlock(contBB);

  args.addWriteback(srcAddr, srcAddrType, temp);
  args.add(RValue::get(finalArgument), CRE->getType());
}

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getContext().getLangOptions().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  if (type->isReferenceType())
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);

  if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
      isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(RValue::getAggregate(L.getAddress(), L.isVolatileQualified()),
             type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  llvm::ArrayRef<llvm::Value *> Args,
                                  const llvm::Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  if (!InvokeDest)
    return Builder.CreateCall(Callee, Args, Name);

  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
  llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
                                                  Args, Name);
  EmitBlock(ContBB);
  return Invoke;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const llvm::Twine &Name) {
  return EmitCallOrInvoke(Callee, llvm::ArrayRef<llvm::Value *>(), Name);
}

static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}
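
// Note that checkArgMatches also advances ArgNo, so callers thread one
// counter through every IR argument they push; in NDEBUG builds the asserts
// compile away and only the increment remains.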

void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*,16> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()),
                       Args, IRFuncTy);
      continue;
    }

    RValue RV = EmitLoadOfLValue(LV);
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
    checkArgMatches(Value, IRArgNo, IRFuncTy);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned TypeAlign =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            TypeAlign, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in two cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::TargetData *TD = &CGM.getTargetData();
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                                       llvm::PointerType::getUnqual(STy));
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::BasicBlock *InvokeDest = 0;
  if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }

      // If the argument doesn't match, perform a bitcast to coerce it.  This
      // can happen due to trivial type mismatches.
      llvm::Value *V = CI;
      if (V->getType() != RetIRTy)
        V = Builder.CreateBitCast(V, RetIRTy);
      return RValue::get(V);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}