//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//
18 #include "CodeGenFunction.h"
19 #include "CodeGenModule.h"
20 #include "clang/Basic/TargetInfo.h"
21 #include "clang/AST/Decl.h"
22 #include "clang/AST/DeclCXX.h"
23 #include "clang/AST/DeclObjC.h"
24 #include "clang/Frontend/CodeGenOptions.h"
25 #include "llvm/Attributes.h"
26 #include "llvm/Support/CallSite.h"
27 #include "llvm/Target/TargetData.h"
28 using namespace clang;
29 using namespace CodeGen;

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  // TODO: add support for CC_X86Pascal to llvm
  }
}
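
// For example, a declaration such as
//
//   void __attribute__((fastcall)) f(int x);
//
// is lowered on x86-32 through this mapping, so the IR declaration carries
// the corresponding convention, roughly:
//
//   declare x86_fastcallcc void @f(i32)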

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type.  Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}
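
// For example, for a function declared as
//
//   const int f(void);
//
// codegen treats the result type as plain 'int'; the top-level 'const' has
// no ABI effect.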

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP,
                              bool IsRecursive) {
  return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
                         llvm::SmallVector<CanQualType, 16>(),
                         FTNP->getExtInfo(), IsRecursive);
}

/// \param Args - contains any initial parameters besides those
///   in the formal type
static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
                                  llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                             CanQual<FunctionProtoType> FTP,
                                             bool IsRecursive = false) {
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));
  CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
  return CGT.getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo(), IsRecursive);
}

const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP,
                              bool IsRecursive) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  return ::getFunctionInfo(*this, ArgTys, FTP, IsRecursive);
}

static CallingConv getCallingConventionForDecl(const Decl *D) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  return CC_C;
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
                                                 const FunctionProtoType *FTP) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  // Add the 'this' pointer.
  ArgTys.push_back(GetThisType(Context, RD));

  return ::getFunctionInfo(*this, ArgTys,
              FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;

  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  // Add the 'this' pointer unless this is a static method.
  if (MD->isInstance())
    ArgTys.push_back(GetThisType(Context, MD->getParent()));

  return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
                                                    CXXCtorType Type) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));
  CanQualType ResTy = Context.VoidTy;

  TheCXXABI.BuildConstructorSignature(D, Type, ResTy, ArgTys);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
    ArgTys.push_back(FTP->getArgType(i));

  return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
                                                    CXXDtorType Type) {
  llvm::SmallVector<CanQualType, 2> ArgTys;
  ArgTys.push_back(GetThisType(Context, D->getParent()));
  CanQualType ResTy = Context.VoidTy;

  TheCXXABI.BuildDestructorSignature(D, Type, ResTy, ArgTys);

  CanQual<FunctionProtoType> FTP = GetFormalType(D);
  assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");

  return getFunctionInfo(ResTy, ArgTys, FTP->getExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return getFunctionInfo(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
  assert(isa<FunctionType>(FTy));
  if (isa<FunctionNoProtoType>(FTy))
    return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
  assert(isa<FunctionProtoType>(FTy));
  return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
  ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));

  for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
         e = MD->param_end(); i != e; ++i) {
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  }

  return getFunctionInfo(GetReturnType(MD->getResultType()),
                         ArgTys,
                         FunctionType::ExtInfo(
                             /*NoReturn*/ false,
                             /*HasRegParm*/ false,
                             /*RegParm*/ 0,
                             getCallingConventionForDecl(MD)));
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return getFunctionInfo(CD, GD.getCtorType());

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return getFunctionInfo(DD, GD.getDtorType());

  return getFunctionInfo(FD);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const CallArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType(i->Ty));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
                                                    const FunctionArgList &Args,
                                            const FunctionType::ExtInfo &Info) {
  llvm::SmallVector<CanQualType, 16> ArgTys;
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i)
    ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
  return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}

const CGFunctionInfo &CodeGenTypes::getNullaryFunctionInfo() {
  llvm::SmallVector<CanQualType, 1> args;
  return getFunctionInfo(getContext().VoidTy, args, FunctionType::ExtInfo());
}

const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
                           const llvm::SmallVectorImpl<CanQualType> &ArgTys,
                                            const FunctionType::ExtInfo &Info,
                                                    bool IsRecursive) {
#ifndef NDEBUG
  for (llvm::SmallVectorImpl<CanQualType>::const_iterator
         I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
    assert(I->isCanonicalAsParam());
#endif

  unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, Info, ResTy,
                          ArgTys.begin(), ArgTys.end());

  void *InsertPos = 0;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, InsertPos);
  if (FI)
    return *FI;

  // Construct the function info.
  FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getHasRegParm(),
                          Info.getRegParm(), ResTy,
                          ArgTys.data(), ArgTys.size());
  FunctionInfos.InsertNode(FI, InsertPos);

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info.  If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &RetInfo = FI->getReturnInfo();
  if (RetInfo.canHaveCoerceToType() && RetInfo.getCoerceToType() == 0)
    RetInfo.setCoerceToType(ConvertTypeRecursive(FI->getReturnType()));

  for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
       I != E; ++I)
    if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
      I->info.setCoerceToType(ConvertTypeRecursive(I->type));

  // If this is a top-level call and ConvertTypeRecursive hit unresolved
  // pointer types, resolve them now.  These pointers may point to this
  // function, which we *just* filled in the FunctionInfo for.
  if (!IsRecursive && !PointersToResolve.empty())
    HandleLateResolvedPointers();

  return *FI;
}

CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
                               bool _NoReturn, bool _HasRegParm,
                               unsigned _RegParm,
                               CanQualType ResTy,
                               const CanQualType *ArgTys,
                               unsigned NumArgTys)
  : CallingConvention(_CallingConvention),
    EffectiveCallingConvention(_CallingConvention),
    NoReturn(_NoReturn), HasRegParm(_HasRegParm), RegParm(_RegParm)
{
  NumArgs = NumArgTys;

  // FIXME: Coallocate with the CGFunctionInfo object.
  Args = new ArgInfo[1 + NumArgTys];
  Args[0].type = ResTy;
  for (unsigned i = 0; i != NumArgTys; ++i)
    Args[1 + i].type = ArgTys[i];
}

void CodeGenTypes::GetExpandedTypes(QualType type,
                     llvm::SmallVectorImpl<const llvm::Type*> &expandedTypes,
                                    bool isRecursive) {
  const RecordType *RT = type->getAsStructureType();
  assert(RT && "Can only expand structure types.");
  const RecordDecl *RD = RT->getDecl();
  assert(!RD->hasFlexibleArrayMember() &&
         "Cannot expand structure with flexible array.");

  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    const FieldDecl *FD = *i;
    assert(!FD->isBitField() &&
           "Cannot expand structure with bit-field members.");

    QualType fieldType = FD->getType();
    if (fieldType->isRecordType())
      GetExpandedTypes(fieldType, expandedTypes, isRecursive);
    else
      expandedTypes.push_back(ConvertType(fieldType, isRecursive));
  }
}
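
// For example, assuming a target where 'int' is i32, a parameter of type
//
//   struct S { int a; struct { float b; } inner; };
//
// expands (recursing through the nested record) to the IR types i32, float.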

llvm::Function::arg_iterator
CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                    llvm::Function::arg_iterator AI) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");
  llvm::Value *Addr = LV.getAddress();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      AI = ExpandTypeFromArgs(FT, LV, AI);
    } else {
      EmitStoreThroughLValue(RValue::get(AI), LV, FT);
      ++AI;
    }
  }

  return AI;
}

void
CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
                                  llvm::SmallVector<llvm::Value*, 16> &Args) {
  const RecordType *RT = Ty->getAsStructureType();
  assert(RT && "Can only expand structure types.");

  RecordDecl *RD = RT->getDecl();
  assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
  llvm::Value *Addr = RV.getAggregateAddr();
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i) {
    FieldDecl *FD = *i;
    QualType FT = FD->getType();

    // FIXME: What are the right qualifiers here?
    LValue LV = EmitLValueForField(Addr, FD, 0);
    if (CodeGenFunction::hasAggregateLLVMType(FT)) {
      ExpandTypeToArgs(FT, RValue::getAggregate(LV.getAddress()), Args);
    } else {
      RValue RV = EmitLoadOfLValue(LV, FT);
      assert(RV.isScalar() &&
             "Unexpected non-scalar rvalue during struct expansion.");
      Args.push_back(RV.getScalarVal());
    }
  }
}

/// EnterStructPointerForCoercedAccess - Given a pointer to a struct from which
/// we are accessing some number of bytes, try to gep into the struct to get at
/// its inner goodness.  Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   const llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  const llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it.
  uint64_t FirstEltSize =
    CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
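
// For example, given a SrcPtr of type {{i32, i8}, i8}* and DstSize == 4, this
// dives twice (the first element is large enough at each level) and returns a
// pointer to the innermost i32, roughly:
//
//   %coerce.dive = getelementptr {{i32, i8}, i8}* %p, i32 0, i32 0
//   %coerce.dive1 = getelementptr {i32, i8}* %coerce.dive, i32 0, i32 0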

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers.  This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             const llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  const llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy)
    Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");

  return Val;
}
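
// For example, coercing an i8* value to i64 on a 64-bit target emits a single
// ptrtoint:
//
//   %coerce.val.pi = ptrtoint i8* %val to i64
//
// while coercing i64 to i32 emits a trunc, and i32 to i8* goes through
// IntPtrTy before the final inttoptr.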

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits not present in
/// the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      const llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  const llvm::Type *SrcTy =
    cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);

  if (const llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory.  This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Value *Casted =
    CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
  llvm::StoreInst *Store =
    CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
  // FIXME: Use better alignment / avoid requiring aligned store.
  Store->setAlignment(1);
  return CGF.Builder.CreateLoad(Tmp);
}
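
// For example, loading an i64 out of a {i32, i32}* source (both 8 bytes)
// takes the SrcSize >= DstSize path above and becomes, roughly:
//
//   %0 = bitcast {i32, i32}* %src to i64*
//   %1 = load i64* %0, align 1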

// Function to store a first-class aggregate into memory.  We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (const llvm::StructType *STy =
        dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
  }
}
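
// For example, storing a value of type {i64, double} becomes two scalar
// stores, roughly:
//
//   %elt0 = extractvalue {i64, double} %val, 0
//   store i64 %elt0, i64* %dst0
//   %elt1 = extractvalue {i64, double} %val, 1
//   store double %elt1, double* %dst1
//
// where %dst0/%dst1 are element GEPs of DestPtr.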

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  const llvm::Type *SrcTy = Src->getType();
  const llvm::Type *DstTy =
    cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);

  if (const llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory.  This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits.  However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Value *Casted =
      CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
  }
}
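
// For example, storing an i64 coerced value into a {i32, i32}* destination
// (SrcSize <= DstSize) bitcasts the destination and stores directly:
//
//   %0 = bitcast {i32, i32}* %dst to i64*
//   store i64 %src, i64* %0
//
// with the store emitted through BuildAggStore.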

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getContext().Target.useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getContext().Target.useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getContext().Target.useObjCFPRetForRealType(
        TargetInfo::LongDouble);
    }
  }

  return false;
}

const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = getFunctionInfo(GD);

  // For definition purposes, don't consider a K&R function variadic.
  bool Variadic = false;
  if (const FunctionProtoType *FPT =
        cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
    Variadic = FPT->isVariadic();

  return GetFunctionType(FI, Variadic, false);
}

const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool isVariadic,
                              bool isRecursive) {
  llvm::SmallVector<const llvm::Type*, 8> argTypes;
  const llvm::Type *resultType = 0;

  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());

    QualType ret = FI.getReturnType();
    const llvm::Type *ty = ConvertType(ret, isRecursive);
    unsigned addressSpace = Context.getTargetAddressSpace(ret);
    argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    const ABIArgInfo &argAI = it->info;

    switch (argAI.getKind()) {
    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Indirect: {
      // Indirect arguments are always on the stack, which is addr space #0.
      const llvm::Type *LTy = ConvertTypeForMem(it->type, isRecursive);
      argTypes.push_back(LTy->getPointerTo());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If the coerce-to type is a first class aggregate, flatten it.  Either
      // way is semantically identical, but fast-isel and the optimizer
      // generally like scalar values better than FCAs.
      const llvm::Type *argType = argAI.getCoerceToType();
      if (const llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          argTypes.push_back(st->getElementType(i));
      } else {
        argTypes.push_back(argType);
      }
      break;
    }

    case ABIArgInfo::Expand:
      GetExpandedTypes(it->type, argTypes, isRecursive);
      break;
    }
  }

  return llvm::FunctionType::get(resultType, argTypes, isVariadic);
}
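
// For example, on a target where a struct Big is returned indirectly and a
// pair of doubles is coerced to the FCA {double, double}, a C function such
// as
//
//   struct Big f(struct Pair p);
//
// might lower to the IR signature (the sret attribute is added separately by
// ConstructAttributeList):
//
//   void @f(%struct.Big*, double, double)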

const llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!VerifyFuncTypeComplete(FPT)) {
    const CGFunctionInfo *Info;
    if (isa<CXXDestructorDecl>(MD))
      Info = &getFunctionInfo(cast<CXXDestructorDecl>(MD), GD.getDtorType());
    else
      Info = &getFunctionInfo(MD);
    return GetFunctionType(*Info, FPT->isVariadic(), false);
  }

  return llvm::OpaqueType::get(getLLVMContext());
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv) {
  unsigned FuncAttrs = 0;
  unsigned RetAttrs = 0;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs |= llvm::Attribute::NoReturn;

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs |= llvm::Attribute::NoUnwind;
    else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs |= llvm::Attribute::NoUnwind;
    }

    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs |= llvm::Attribute::NoReturn;
    if (TargetDecl->hasAttr<ConstAttr>())
      FuncAttrs |= llvm::Attribute::ReadNone;
    else if (TargetDecl->hasAttr<PureAttr>())
      FuncAttrs |= llvm::Attribute::ReadOnly;
    if (TargetDecl->hasAttr<MallocAttr>())
      RetAttrs |= llvm::Attribute::NoAlias;
  }

  if (CodeGenOpts.OptimizeSize)
    FuncAttrs |= llvm::Attribute::OptimizeForSize;
  if (CodeGenOpts.DisableRedZone)
    FuncAttrs |= llvm::Attribute::NoRedZone;
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs |= llvm::Attribute::NoImplicitFloat;

  QualType RetTy = FI.getReturnType();
  unsigned Index = 1;
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::SExt;
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs |= llvm::Attribute::ZExt;
    // FALL THROUGH
  case ABIArgInfo::Direct:
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Indirect:
    PAL.push_back(llvm::AttributeWithIndex::get(Index,
                                                llvm::Attribute::StructRet));
    ++Index;
    // sret disables readnone and readonly
    FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                   llvm::Attribute::ReadNone);
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  if (RetAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));

  // FIXME: RegParm should be reduced in case of global register variable.
  signed RegParm;
  if (FI.getHasRegParm())
    RegParm = FI.getRegParm();
  else
    RegParm = CodeGenOpts.NumRegisterParameters;

  unsigned PointerWidth = getContext().Target.getPointerWidth(0);
  for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
         ie = FI.arg_end(); it != ie; ++it) {
    QualType ParamType = it->type;
    const ABIArgInfo &AI = it->info;
    unsigned Attributes = 0;

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attributes |= llvm::Attribute::SExt;
      else if (ParamType->isUnsignedIntegerOrEnumerationType())
        Attributes |= llvm::Attribute::ZExt;
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (RegParm > 0 &&
          (ParamType->isIntegerType() || ParamType->isPointerType())) {
        RegParm -=
          (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
        if (RegParm >= 0)
          Attributes |= llvm::Attribute::InReg;
      }
      // FIXME: handle sseregparm someday...

      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(AI.getCoerceToType()))
        Index += STy->getNumElements()-1;  // 1 will be added below.
      break;

    case ABIArgInfo::Indirect:
      if (AI.getIndirectByVal())
        Attributes |= llvm::Attribute::ByVal;

      Attributes |=
        llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
      // byval disables readnone and readonly.
      FuncAttrs &= ~(llvm::Attribute::ReadOnly |
                     llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
      // Skip increment, no matching LLVM parameter.
      continue;

    case ABIArgInfo::Expand: {
      llvm::SmallVector<const llvm::Type*, 8> types;
      // FIXME: This is rather inefficient.  Do we ever actually need to do
      // anything here?  The result should be just reconstructed on the other
      // side, so extension should be a non-issue.
      getTypes().GetExpandedTypes(ParamType, types, false);
      Index += types.size();
      continue;
    }
    }

    if (Attributes)
      PAL.push_back(llvm::AttributeWithIndex::get(Index, Attributes));
    ++Index;
  }
  if (FuncAttrs)
    PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
}
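
// For example, for a function with an indirect (sret) return and one signed
// 16-bit parameter that the ABI widens, the resulting list is roughly:
//
//   index 1:  StructRet          (the hidden return pointer)
//   index 2:  SExt               (the extended parameter)
//   index ~0: NoUnwind | ...     (function-level attributes)
//
// matching the AttributeWithIndex entries pushed above.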

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  const llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
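
// For example, a K&R-style 'float' parameter arrives promoted to double; the
// demotion back to the declared type is a single fptrunc:
//
//   %arg.unpromote = fptrunc double %value to float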

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getResultType().getUnqualifiedType();
      const llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  // Emit allocs for param decls.  Give the LLVM Argument nodes names.
  llvm::Function::arg_iterator AI = Fn->arg_begin();

  // Name the struct return argument.
  if (CGM.ReturnTypeUsesSRet(FI)) {
    AI->setName("agg.result");
    ++AI;
  }

  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 1;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
      isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    switch (ArgI.getKind()) {
    case ABIArgInfo::Indirect: {
      llvm::Value *V = AI;

      if (hasAggregateLLVMType(Ty)) {
        // Aggregates and complex variables are accessed by reference.  All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          const llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst,
                               Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               false);
          V = AlignedTemp;
        }
      } else {
        // Load scalar value from indirect argument.
        CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
        V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        llvm::Value *V = AI;

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        EmitParmDecl(*Arg, V, ArgNo);
        break;
      }

      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, "coerce");

      // The alignment we need to use is the max of the requested alignment for
      // the argument plus the alignment required by our access code below.
      unsigned AlignmentToUse =
        CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                     (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V;    // Pointer to store into.

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                          llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgI.getCoerceToType())) {
        Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          assert(AI != Fn->arg_end() && "Argument mismatch!");
          AI->setName(Arg->getName() + ".coerce" + llvm::Twine(i));
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
          Builder.CreateStore(AI++, EltPtr);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(AI != Fn->arg_end() && "Argument mismatch!");
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
      }
      EmitParmDecl(*Arg, V, ArgNo);
      continue;  // Skip ++AI increment, already done.
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::Value *Temp = CreateMemTemp(Ty, Arg->getName() + ".addr");
      llvm::Function::arg_iterator End =
        ExpandTypeFromArgs(Ty, MakeAddrLValue(Temp, Ty), AI);
      EmitParmDecl(*Arg, Temp, ArgNo);

      // Name the arguments used in expansion and increment AI.
      unsigned Index = 0;
      for (; AI != End; ++AI, ++Index)
        AI->setName(Arg->getName() + "." + llvm::Twine(Index));
      continue;
    }

    case ABIArgInfo::Ignore:
      // Initialize the local variable appropriately.
      if (hasAggregateLLVMType(Ty))
        EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
      else
        EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
                     ArgNo);

      // Skip increment, no matching LLVM parameter.
      continue;
    }

    ++AI;
  }
  assert(AI == Fn->arg_end() && "Argument mismatch!");
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
  // Functions with no result always return void.
  if (ReturnValue == 0) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = 0;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType()) {
      ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
      StoreComplexToAddr(RT, CurFn->arg_begin(), false);
    } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
      // Do nothing; aggregates get evaluated directly into the destination.
    } else {
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
                        false, Alignment, RetTy);
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp always will have pointer-to-return-type
      // type, just do a load.

      // If the instruction right before the insertion point is a store to the
      // return value, we can elide the load, zap the store, and usually zap
      // the alloca.
      llvm::BasicBlock *InsertBB = Builder.GetInsertBlock();
      llvm::StoreInst *SI = 0;
      if (InsertBB->empty() ||
          !(SI = dyn_cast<llvm::StoreInst>(&InsertBB->back())) ||
          SI->getPointerOperand() != ReturnValue || SI->isVolatile()) {
        RV = Builder.CreateLoad(ReturnValue);
      } else {
        // Get the stored value and nuke the now-dead store.
        RetDbgLoc = SI->getDebugLoc();
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = 0;
        }
      }
    } else {
      llvm::Value *V = ReturnValue;
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(V, Offs);
        V = Builder.CreateBitCast(V,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }
    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
  if (!RetDbgLoc.isUnknown())
    Ret->setDebugLoc(RetDbgLoc);
}
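
// For example, for a function whose body ends by storing to the return slot,
// the elision above turns
//
//   store i32 %add, i32* %retval
//   %0 = load i32* %retval
//   ret i32 %0
//
// into a direct 'ret i32 %add', deleting the store and (when it is dead) the
// %retval alloca itself.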

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to aggregates are pointers directly to the aggregate.
  // I don't know why references to non-aggregates are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (hasAggregateLLVMType(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  if (type->isAnyComplexType()) {
    ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
    return args.add(RValue::getComplex(complex), type);
  }

  if (hasAggregateLLVMType(type))
    return args.add(RValue::getAggregate(local), type);

  unsigned alignment = getContext().getDeclAlign(param).getQuantity();
  llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
  return args.add(RValue::get(value), type);
}

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  if (type->isReferenceType())
    return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
                    type);

  if (hasAggregateLLVMType(type) && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.add(RValue::getAggregate(L.getAddress(), L.isVolatileQualified()),
             type, /*NeedsCopy*/true);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  llvm::Value * const *ArgBegin,
                                  llvm::Value * const *ArgEnd,
                                  const llvm::Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  if (!InvokeDest)
    return Builder.CreateCall(Callee, ArgBegin, ArgEnd, Name);

  llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
  llvm::InvokeInst *Invoke = Builder.CreateInvoke(Callee, ContBB, InvokeDest,
                                                  ArgBegin, ArgEnd, Name);
  EmitBlock(ContBB);
  return Invoke;
}
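
// For example, within a scope that has a cleanup or EH handler pushed,
// getInvokeDest() is non-null and the call is emitted as, roughly:
//
//   invoke void @g() to label %invoke.cont unwind label %lpad
//
// (%lpad being whatever landing pad the EH stack currently designates);
// otherwise a plain 'call void @g()' is produced.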

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.
  llvm::SmallVector<llvm::Value*, 16> Args;

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  if (CGM.ReturnTypeUsesSRet(CallInfo)) {
    llvm::Value *Value = ReturnValue.getValue();
    if (!Value)
      Value = CreateMemTemp(RetTy);
    Args.push_back(Value);
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    unsigned Alignment =
      getContext().getTypeAlignInChars(I->Ty).getQuantity();
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Indirect: {
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Args.push_back(CreateMemTemp(I->Ty));
        if (RV.isScalar())
          EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
                            Alignment, I->Ty);
        else
          StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
      } else if (I->NeedsCopy && !ArgInfo.getIndirectByVal()) {
        Args.push_back(CreateMemTemp(I->Ty));
        EmitAggregateCopy(Args.back(), RV.getAggregateAddr(), I->Ty,
                          RV.isVolatileQualified());
      } else {
        Args.push_back(RV.getAggregateAddr());
      }
      break;
    }

    case ABIArgInfo::Ignore:
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        if (RV.isScalar())
          Args.push_back(RV.getScalarVal());
        else
          Args.push_back(Builder.CreateLoad(RV.getAggregateAddr()));
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, Alignment, I->Ty);
      } else if (RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                       llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // If the coerce-to type is a first class aggregate, we flatten it and
      // pass the elements.  Either way is semantically identical, but fast-isel
      // and the optimizer generally like scalar values better than FCAs.
      if (const llvm::StructType *STy =
            dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                                       llvm::PointerType::getUnqual(STy));
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          Args.push_back(LI);
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                                         *this));
      }

      break;
    }

    case ABIArgInfo::Expand:
      ExpandTypeToArgs(I->Ty, RV, Args);
      break;
    }
  }

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast.  This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      const llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
      const llvm::FunctionType *CurFT =
        cast<llvm::FunctionType>(CurPT->getElementType());
      const llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == Args.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it.  This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
  llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
                                                   AttributeList.end());

  llvm::BasicBlock *InvokeDest = 0;
  if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, Args.data(), Args.data()+Args.size());
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest,
                              Args.data(), Args.data()+Args.size());
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  switch (RetAI.getKind()) {
  case ABIArgInfo::Indirect: {
    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(Args[0]);
    return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
  }

  case ABIArgInfo::Ignore:
    // If we are ignoring an argument that had a result, make sure to
    // construct the appropriate return value for our caller.
    return GetUndefRValue(RetTy);

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct: {
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      if (RetTy->isAnyComplexType()) {
        llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
        llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
        return RValue::getComplex(std::make_pair(Real, Imag));
      }
      if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
        llvm::Value *DestPtr = ReturnValue.getValue();
        bool DestIsVolatile = ReturnValue.isVolatile();

        if (!DestPtr) {
          DestPtr = CreateMemTemp(RetTy, "agg.tmp");
          DestIsVolatile = false;
        }
        BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
        return RValue::getAggregate(DestPtr);
      }
      return RValue::get(CI);
    }

    llvm::Value *DestPtr = ReturnValue.getValue();
    bool DestIsVolatile = ReturnValue.isVolatile();

    if (!DestPtr) {
      DestPtr = CreateMemTemp(RetTy, "coerce");
      DestIsVolatile = false;
    }

    // If the value is offset in memory, apply the offset now.
    llvm::Value *StorePtr = DestPtr;
    if (unsigned Offs = RetAI.getDirectOffset()) {
      StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
      StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
      StorePtr = Builder.CreateBitCast(StorePtr,
                         llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
    }
    CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

    unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
    if (RetTy->isAnyComplexType())
      return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
    if (CodeGenFunction::hasAggregateLLVMType(RetTy))
      return RValue::getAggregate(DestPtr);
    return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
  }

  case ABIArgInfo::Expand:
    assert(0 && "Invalid ABI kind for return argument");
  }

  assert(0 && "Unhandled ABIArgInfo::Kind");
  return RValue::get(0);
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}