//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "clang/Basic/TargetInfo.h"
21 #include "clang/AST/Decl.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Frontend/CodeGenOptions.h"
25 #include "llvm/Attributes.h"
26 #include "llvm/Support/CallSite.h"
27 #include "llvm/Target/TargetData.h"
28 #include "llvm/InlineAsm.h"
29 #include "llvm/Transforms/Utils/Local.h"
30 using namespace clang;
31 using namespace CodeGen;
static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  // TODO: add support for CC_X86Pascal to llvm
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

61 /// Returns the "extra-canonicalized" return type, which discards
62 /// qualifiers on the return type. Codegen doesn't care about them,
63 /// and it makes ABI code a little easier to be able to assume that
64 /// all parameter and return types are top-level unqualified.
65 static CanQualType GetReturnType(QualType RetTy) {
66 return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                         SmallVector<CanQualType, 16>(),
                         FTNP->getExtInfo());
}

/// \param Args - contains any initial parameters besides those
///   in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
                                      SmallVectorImpl<CanQualType> &ArgTys,
                                             CanQual<FunctionProtoType> FTP) {
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> ArgTys;
  return ::getFunctionInfo(*this, ArgTys, FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  return CC_C;
}

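// Illustrative example (not from the original source): for a declaration
//   void f(int) __attribute__((fastcall));
// this returns CC_X86FastCall, which ClangCallConvToLLVMCallConv above maps
// to llvm::CallingConv::X86_FastCall; a declaration with no such attribute
// falls through to CC_C.
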
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, RD));

  return ::getFunctionInfo(*this, ArgTys,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> ArgTys;

  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(GetThisType(Context, MD->getParent()));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));
  CanQualType ResTy = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));

  return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  SmallVector<CanQualType, 2> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));
  CanQualType ResTy = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");

  return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  FunctionType::ExtInfo einfo;
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));

  if (getContext().getLangOptions().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  return getFunctionInfo(GetReturnType(MD->getResultType()), ArgTys, einfo);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->Ty));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
  SmallVector<CanQualType, 1> args;
  return getFunctionInfo(getContext().VoidTy, args, FunctionType::ExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                           const SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info) {
#ifndef NDEBUG
  for (SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy, ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getProducesResult(),
                          Info.getHasRegParm(), Info.getRegParm(), ResTy,
                          ArgTys.data(), ArgTys.size());
  FunctionInfos.InsertNode(FI, InsertPos);

  bool Inserted = FunctionsBeingProcessed.insert(FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &RetInfo = FI->getReturnInfo();
  if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
    RetInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertType(I->type));

  bool Erased = FunctionsBeingProcessed.erase(FI); (void)Erased;
  assert(Erased && "Not in set?");

  return *FI;
}

CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn, bool returnsRetained,
                               bool _HasRegParm, unsigned _RegParm,
                               CanQualType ResTy,
                               const CanQualType *ArgTys,
                               unsigned NumArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn), ReturnsRetained(returnsRetained),
    HasRegParm(_HasRegParm), RegParm(_RegParm)
{
  NumArgs = NumArgTys;

  // FIXME: Co-allocate with the CGFunctionInfo object.
  Args = new ArgInfo[1 + NumArgTys];
  Args[0].type = ResTy;
  for (unsigned i = 0; i != NumArgTys; ++i)
    Args[1 + i].type = ArgTys[i];
}

void CodeGenTypes::GetExpandedTypes(QualType type,
                     SmallVectorImpl<llvm::Type*> &expandedTypes) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
    uint64_t NumElts = AT->getSize().getZExtValue();
    for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
      GetExpandedTypes(AT->getElementType(), expandedTypes);
  } else if (const RecordType *RT = type->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
      const FieldDecl *FD = *i;
      assert(!FD->isBitField() &&
             "Cannot expand structure with bit-field members.");
      GetExpandedTypes(FD->getType(), expandedTypes);
    }
  } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
    llvm::Type *EltTy = ConvertType(CT->getElementType());
    expandedTypes.push_back(EltTy);
    expandedTypes.push_back(EltTy);
  } else
    expandedTypes.push_back(ConvertType(type));
}

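// Illustrative example (hypothetical type, assumed lowering): for
//   struct P { float f; _Complex double c; };
// expansion flattens the parameter into three IR types, roughly
// (float, double, double): the complex member contributes its element type
// twice, and nested constant arrays and structures recurse field by field.
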
llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      AI = ExpandTypeFromArgs(EltTy, LV, AI);
    }
  } else if (const RecordType *RT = Ty->getAsStructureType()) {
    RecordDecl *RD = RT->getDecl();
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
      FieldDecl *FD = *i;
      QualType FT = FD->getType();

      // FIXME: What are the right qualifiers here?
      LValue LV = EmitLValueForField(Addr, FD, 0);
      AI = ExpandTypeFromArgs(FT, LV, AI);
    }
  } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType EltTy = CT->getElementType();
    llvm::Value *RealAddr = Builder.CreateStructGEP(Addr, 0, "real");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 1, "imag");
    EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
  } else {
    EmitStoreThroughLValue(RValue::get(AI), LV);
    ++AI;
  }

  return AI;
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

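// Illustrative example: with SrcSTy = { { i32, i8 }, float } and DstSize = 4,
// this dives twice, emitting two "coerce.dive" GEPs, and returns a pointer to
// the inner i32, since each first element entered still has an in-memory size
// of at least DstSize bytes.
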
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");

  return Val;
}

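// Illustrative example: coercing an i8* to i64 on a target with 32-bit
// pointers emits
//   %coerce.val.pi = ptrtoint i8* %val to i32
//   %coerce.val.ii = zext i32 %coerce.val.pi to i64
// while a pointer-to-pointer coercion stays a single bitcast.
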
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}

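// Illustrative example: coercing a 12-byte struct to the type { i64, i32 }
// (alloc size 16) cannot simply bitcast and load, since SrcSize (12) is less
// than DstSize (16); the value is bounced through a 16-byte temporary instead,
// and the four bytes not present in the source are undefined, as documented
// above.
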
// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}

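// Illustrative example: storing an i64 coerced value back into a 6-byte
// struct takes the through-memory path above (SrcSize 8 > DstSize 6), so the
// two high bytes of the source are simply never read back out of the
// temporary.
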
bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().getTargetInfo().useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

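// Illustrative example: on i386 Darwin, where the target reports that real
// types use an fpret convention, an ObjC message returning 'double' is sent
// through objc_msgSend_fpret rather than objc_msgSend; that choice is keyed
// off this predicate.
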
llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  if (const FunctionProtoType *FPT =
        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
    Variadic = FPT->isVariadic();

  return GetFunctionType(FI, Variadic);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  SmallVector<llvm::Type*, 8> argTypes;
  llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    llvm::Type *ty = ConvertType(ret);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally likes scalar values better than FCAs.
      llvm::Type *argType = argAI.getCoerceToType();
      if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, argTypes, isVariadic);
}

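// Illustrative example (assumed SysV x86-64-style lowering, not taken from
// the original source): for
//   struct S { double x, y; };  struct S f(struct S s);
// an ABI that marks both the return and the argument Direct with coerce type
// { double, double } produces the IR signature
//   { double, double } @f(double, double)
// because Direct arguments whose coerce type is a first-class aggregate are
// flattened above, while the return keeps the aggregate coerce type.
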
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
  else
    Info = &getFunctionInfo(MD);
  return GetFunctionType(*Info, FPT->isVariadic());
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs |= llvm::Attribute::ReturnsTwice;
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs |= llvm::Attribute::NoUnwind;
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;

    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs |= llvm::Attribute::ReturnsTwice;

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs |= llvm::Attribute::ReadNone;
      FuncAttrs |= llvm::Attribute::NoUnwind;
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs |= llvm::Attribute::ReadOnly;
      FuncAttrs |= llvm::Attribute::NoUnwind;
    }
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::SExt;
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::ZExt;
    break;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: RegParm should be reduced in case of global register variable.
  signed RegParm;
  if (FI.getHasRegParm())
    RegParm = FI.getRegParm();
  else
    RegParm = CodeGenOpts.NumRegisterParameters;

  unsigned PointerWidth = getContext().getTargetInfo().getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attributes |= llvm::Attribute::SExt;
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attributes |= llvm::Attribute::ZExt;
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...

      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType()))
        Index += STy->getNumElements()-1;  // 1 will be added below.
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      SmallVector<llvm::Type*, 8> types;
      // FIXME: This is rather inefficient. Do we ever actually need to do
      // anything here? The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types);
      Index += types.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

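// Illustrative example: for a K&R-style definition
//   void f(x) float x; { ... }
// the caller promotes the argument to double, so the prolog receives a double
// and this helper emits an fptrunc back down to float.
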
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");

      // The alignment we need to use is the max of the requested alignment for
      // the argument plus the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                        (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally likes scalar values better than FCAs.
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
        Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          assert(AI != Fn->arg_end() && "Argument mismatch!");
          AI->setName(Arg->getName() + ".coerce" + Twine(i));
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
          Builder.CreateStore(AI++, EltPtr);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
      EmitParmDecl(*Arg, Temp, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // We must be immediately following the cast.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return 0;
  if (&BB->back() != result) return 0;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //  %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return 0;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return 0;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // Look for an inline asm immediately preceding the call and kill it, too.
    llvm::Instruction *prev = call->getPrevNode();
    if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
      if (asmCall->getCalledValue()
            == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
        insnsToKill.push_back(prev);
  } else {
    return 0;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

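// Illustrative example of the rewrite performed above (when fused ARC calls
// are enabled, i.e. at -O0): a block ending in
//   %retained = call i8* @objc_retain(i8* %value)
// followed by an autorelease at the return is collapsed into a single
// objc_retainAutoreleaseReturnValue call on %value.
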
/// Emit an ARC autorelease of the result of a function.
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, just do a load.

      // If the instruction right before the insertion point is a store to the
      // return value, we can elide the load, zap the store, and usually zap the
      // alloca.
      llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
      llvm::StoreInst *SI = 0;
      if (InsertBB->empty() ||
          !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
          SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
        RV = Builder.CreateLoad(ReturnValue);
      } else {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOptions().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (type->isAnyComplexType()) {
    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
    return args.add(RValue::getComplex(complex), type);
  }

  if (hasAggregateLLVMType(type))
    return args.add(RValue::getAggregate(local), type);

  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  return args.add(RValue::get(value), type);
}

static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  llvm::Value *srcAddr = writeback.Address;
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = 0;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to writeback.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.
  QualType srcAddrType = writeback.AddressType;
  CGF.EmitStoreThroughLValue(RValue::get(value),
                             CGF.MakeAddrLValue(srcAddr, srcAddrType));

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (CallArgList::writeback_iterator
         i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
    emitWriteback(CGF, *i);
}

/// Emit an argument that's being passed call-by-writeback.  That is,
/// we are passing the address of an __autoreleasing temporary.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
    cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  QualType srcAddrType =
    CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
      llvm::ConstantPointerNull::get(
        cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = 0;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
    }
  }

  // Perform a copy if necessary.
  if (shouldCopy) {
    LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull)
    CGF.EmitBlock(contBB);

  args.addWriteback(srcAddr, srcAddrType, temp);
  args.add(RValue::get(finalArgument), CRE->getType());
}

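// Illustrative example: for a call like
//   NSError *err; [obj doSomethingAndReturnError:&err];
// under ARC, the out-parameter is passed via the "icr.temp" alloca created
// above rather than &err directly, guarded by the "icr.isnull" check, and the
// final value is copied back to &err by emitWriteback after the call.
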
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getContext().getLangOptions().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);
  }

  if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
      isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(RValue::getAggregate(L.getAddress(), L.isVolatileQualified()),
             type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  if (!InvokeDest)
    return Builder.CreateCall(Callee, Args, Name);

  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
  llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
                                                  Args, Name);
  EmitBlock(ContBB);
  return Invoke;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
}

static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
                            llvm::FunctionType *FTy) {
  if (ArgNo < FTy->getNumParams())
    assert(Elt->getType() == FTy->getParamType(ArgNo));
  else
    assert(FTy->isVarArg());
  ++ArgNo;
}

void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                       SmallVector<llvm::Value*,16> &Args,
                                       llvm::FunctionType *IRFuncTy) {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    unsigned NumElts = AT->getSize().getZExtValue();
    QualType EltTy = AT->getElementType();
    llvm::Value *Addr = RV.getAggregateAddr();
    for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
      LValue LV = MakeAddrLValue(EltAddr, EltTy);
      RValue EltRV;
      if (CodeGenFunction::hasAggregateLLVMType(EltTy))
        EltRV = RValue::getAggregate(LV.getAddress());
      else
        EltRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
    }
  } else if (const RecordType *RT = Ty->getAsStructureType()) {
    RecordDecl *RD = RT->getDecl();
    assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
    llvm::Value *Addr = RV.getAggregateAddr();
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i) {
      FieldDecl *FD = *i;
      QualType FT = FD->getType();

      // FIXME: What are the right qualifiers here?
      LValue LV = EmitLValueForField(Addr, FD, 0);
      RValue FldRV;
      if (CodeGenFunction::hasAggregateLLVMType(FT))
        FldRV = RValue::getAggregate(LV.getAddress());
      else
        FldRV = EmitLoadOfLValue(LV);
      ExpandTypeToArgs(FT, FldRV, Args, IRFuncTy);
    }
  } else if (isa<ComplexType>(Ty)) {
    ComplexPairTy CV = RV.getComplexVal();
    Args.push_back(CV.first);
    Args.push_back(CV.second);
  } else {
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (Args.size() < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(Args.size()))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));

    Args.push_back(V);
  }
}

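// Illustrative example: expanding the hypothetical struct P from the
// GetExpandedTypes example pushes three IR values onto Args (f, then the real
// and imaginary halves of c), mirroring the flattened type list built there.
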
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // IRArgNo - Keep track of the argument number in the callee we're looking at.
  unsigned IRArgNo = 0;
  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
                  cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
    checkArgMatches(Value, IRArgNo, IRFuncTy);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned TypeAlign =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        Args.push_back(AI);

        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            TypeAlign, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);

        // Validate argument match.
        checkArgMatches(AI, IRArgNo, IRFuncTy);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in two cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source.  (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::TargetData *TD = &CGM.getTargetData();
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          Args.push_back(AI);
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());

          // Validate argument match.
          checkArgMatches(AI, IRArgNo, IRFuncTy);
        } else {
          // Skip the extra memcpy call.
          Args.push_back(Addr);

          // Validate argument match.
          checkArgMatches(Addr, IRArgNo, IRFuncTy);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // If the argument doesn't match, perform a bitcast to coerce it.  This
        // can happen due to trivial type mismatches.
        if (IRArgNo < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(IRArgNo))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
        Args.push_back(V);

        checkArgMatches(V, IRArgNo, IRFuncTy);
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements. Either way is semantically identical, but fast-isel
      // and the optimizer generally likes scalar values better than FCAs.
      if (llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                                       llvm::PointerType::getUnqual(STy));
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);

          // Validate argument match.
          checkArgMatches(LI, IRArgNo, IRFuncTy);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));

        // Validate argument match.
        checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
      IRArgNo = Args.size();
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::BasicBlock *InvokeDest = 0;
  if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately.  Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    llvm::Type *RetIRTy = ConvertType(RetTy);
    if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }

      // If the argument doesn't match, perform a bitcast to coerce it.  This
      // can happen due to trivial type mismatches.
      llvm::Value *V = CI;
      if (V->getType() != RetIRTy)
        V = Builder.CreateBitCast(V, RetIRTy);
      return RValue::get(V);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm_unreachable("Unhandled ABIArgInfo::Kind");
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}