//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "call-lowering"

using namespace llvm;

void CallLowering::anchor() {}
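
// Gather per-argument info from the IR call, resolve the callee to either a
// global or a register, and hand the populated CallLoweringInfo to the
// target's lowerCall implementation.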
bool CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallBase &CB,
                             ArrayRef<Register> ResRegs,
                             ArrayRef<ArrayRef<Register>> ArgRegs,
                             Register SwiftErrorVReg,
                             std::function<unsigned()> GetCalleeReg) const {
  CallLoweringInfo Info;
  const DataLayout &DL = MIRBuilder.getDataLayout();

  // First step is to marshall all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  unsigned i = 0;
  unsigned NumFixedArgs = CB.getFunctionType()->getNumParams();
  for (auto &Arg : CB.args()) {
    // Arguments past the fixed parameter list are the variadic ones.
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CB);
    Info.OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Try looking through a bitcast from one function type to another.
  // Commonly happens with calls to objc_msgSend().
  const Value *CalleeV = CB.getCalledOperand()->stripPointerCasts();
  if (const Function *F = dyn_cast<Function>(CalleeV))
    Info.Callee = MachineOperand::CreateGA(F, 0);
  else
    Info.Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  Info.OrigRet = ArgInfo{ResRegs, CB.getType(), ISD::ArgFlagsTy{}};
  if (!Info.OrigRet.Ty->isVoidTy())
    setArgFlags(Info.OrigRet, AttributeList::ReturnIndex, DL, CB);

  MachineFunction &MF = MIRBuilder.getMF();
  Info.KnownCallees = CB.getMetadata(LLVMContext::MD_callees);
  Info.CallConv = CB.getCallingConv();
  Info.SwiftErrorVReg = SwiftErrorVReg;
  Info.IsMustTailCall = CB.isMustTailCall();
  Info.IsTailCall =
      CB.isTailCall() && isInTailCallPosition(CB, MF.getTarget()) &&
      (MF.getFunction()
           .getFnAttribute("disable-tail-calls")
           .getValueAsString() != "true");
  Info.IsVarArg = CB.getFunctionType()->isVarArg();
  return lowerCall(MIRBuilder, Info);
}
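
// Translate IR parameter/return attributes (zext, sext, byval, swifterror,
// etc.) on the OpIdx'th operand of FuncInfo into the ISD::ArgFlagsTy used by
// the calling-convention assigner.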
template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  auto &Flags = Arg.Flags[0];
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::Preallocated))
    Flags.setPreallocated();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Flags.setInAlloca();

  if (Flags.isByVal() || Flags.isInAlloca() || Flags.isPreallocated()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();

    auto Ty = Attrs.getAttribute(OpIdx, Attribute::ByVal).getValueAsType();
    Flags.setByValSize(DL.getTypeAllocSize(Ty ? Ty : ElementTy));

    // For ByVal, alignment should be passed from FE. BE will guess if
    // this info is not there but there are cases it cannot get right.
    Align FrameAlign;
    if (auto ParamAlign = FuncInfo.getParamAlign(OpIdx - 2))
      FrameAlign = *ParamAlign;
    else
      FrameAlign = Align(getTLI()->getByValTypeAlignment(ElementTy, DL));
    Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Flags.setNest();
  Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallBase>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallBase &FuncInfo) const;
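
// Build a single value of type PackedTy by inserting each source register at
// its offset within the packed value, threading a fresh vreg through each
// G_INSERT.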
Register CallLowering::packRegs(ArrayRef<Register> SrcRegs, Type *PackedTy,
                                MachineIRBuilder &MIRBuilder) const {
  assert(SrcRegs.size() > 1 && "Nothing to pack");

  const DataLayout &DL = MIRBuilder.getMF().getDataLayout();
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  LLT PackedLLT = getLLTForType(*PackedTy, DL);

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == SrcRegs.size() && "Regs / types mismatch");

  Register Dst = MRI->createGenericVirtualRegister(PackedLLT);
  MIRBuilder.buildUndef(Dst);
  for (unsigned i = 0; i < SrcRegs.size(); ++i) {
    Register NewDst = MRI->createGenericVirtualRegister(PackedLLT);
    MIRBuilder.buildInsert(NewDst, Dst, SrcRegs[i], Offsets[i]);
    Dst = NewDst;
  }

  return Dst;
}
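
// Inverse of packRegs: extract each component of SrcReg (a value of type
// PackedTy) into its corresponding destination register.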
void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
                              Type *PackedTy,
                              MachineIRBuilder &MIRBuilder) const {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const DataLayout &DL = MIRBuilder.getDataLayout();

  SmallVector<LLT, 8> LLTs;
  SmallVector<uint64_t, 8> Offsets;
  computeValueLLTs(DL, *PackedTy, LLTs, &Offsets);
  assert(LLTs.size() == DstRegs.size() && "Regs / types mismatch");

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    MIRBuilder.buildExtract(DstRegs[i], SrcReg, Offsets[i]);
}
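
// Convenience overload that sets up a CCState for the current function's
// calling convention before delegating to the main implementation below.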
bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
}
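
// Core assignment loop. First, each argument is offered to the handler's
// assignArg; values the calling convention passes in multiple registers are
// split here. Second, every resulting CCValAssign is materialized as register
// copies or stack accesses via the handler.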
bool CallLowering::handleAssignments(CCState &CCInfo,
                                     SmallVectorImpl<CCValAssign> &ArgLocs,
                                     MachineIRBuilder &MIRBuilder,
                                     SmallVectorImpl<ArgInfo> &Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    EVT CurVT = EVT::getEVT(Args[i].Ty);
    if (!CurVT.isSimple() ||
        Handler.assignArg(i, CurVT.getSimpleVT(), CurVT.getSimpleVT(),
                          CCValAssign::Full, Args[i], Args[i].Flags[0],
                          CCInfo)) {
      MVT NewVT = TLI->getRegisterTypeForCallingConv(
          F.getContext(), F.getCallingConv(), EVT(CurVT));

      // If we need to split the type over multiple regs, check it's a scenario
      // we currently support.
      unsigned NumParts = TLI->getNumRegistersForCallingConv(
          F.getContext(), F.getCallingConv(), CurVT);

      // For now only handle exact splits.
      if (NewVT.getSizeInBits() * NumParts != CurVT.getSizeInBits())
        return false;

      // For incoming arguments (physregs to vregs), we could have values in
      // physregs (or memlocs) which we want to extract and copy to vregs.
      // During this, we might have to deal with the LLT being split across
      // multiple regs, so we have to record this information for later.
      //
      // If we have outgoing args, then we have the opposite case. We have a
      // vreg with an LLT which we want to assign to a physical location, and
      // we might have to record that the value has to be split later.
      if (Handler.isIncomingArgumentHandler()) {
        if (NumParts == 1) {
          // Try to use the register type if we couldn't assign the VT.
          if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full, Args[i],
                                Args[i].Flags[0], CCInfo))
            return false;
        } else {
          // We're handling an incoming arg which is split over multiple regs.
          // E.g. passing an s128 on AArch64.
          ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
          Args[i].OrigRegs.push_back(Args[i].Regs[0]);
          Args[i].Regs.clear();
          Args[i].Flags.clear();
          LLT NewLLT = getLLTForMVT(NewVT);
          // For each split register, create and assign a vreg that will store
          // the incoming component of the larger value. These will later be
          // merged to form the final vreg.
          for (unsigned Part = 0; Part < NumParts; ++Part) {
            Register Reg =
                MIRBuilder.getMRI()->createGenericVirtualRegister(NewLLT);
            ISD::ArgFlagsTy Flags = OrigFlags;
            if (Part == 0) {
              Flags.setSplit();
            } else {
              Flags.setOrigAlign(Align(1));
              if (Part == NumParts - 1)
                Flags.setSplitEnd();
            }
            Args[i].Regs.push_back(Reg);
            Args[i].Flags.push_back(Flags);
            if (Handler.assignArg(i + Part, NewVT, NewVT, CCValAssign::Full,
                                  Args[i], Args[i].Flags[Part], CCInfo)) {
              // Still couldn't assign this smaller part type for some reason.
              return false;
            }
          }
        }
      } else {
        // Handling an outgoing arg that might need to be split.
        if (NumParts < 2)
          return false; // Don't know how to deal with this type combination.

        // This type is passed via multiple registers in the calling
        // convention. We need to extract the individual parts.
        Register LargeReg = Args[i].Regs[0];
        LLT SmallTy = LLT::scalar(NewVT.getSizeInBits());
        auto Unmerge = MIRBuilder.buildUnmerge(SmallTy, LargeReg);
        assert(Unmerge->getNumOperands() == NumParts + 1);
        ISD::ArgFlagsTy OrigFlags = Args[i].Flags[0];
        // We're going to replace the regs and flags with the split ones.
        Args[i].Regs.clear();
        Args[i].Flags.clear();
        for (unsigned PartIdx = 0; PartIdx < NumParts; ++PartIdx) {
          ISD::ArgFlagsTy Flags = OrigFlags;
          if (PartIdx == 0) {
            Flags.setSplit();
          } else {
            Flags.setOrigAlign(Align(1));
            if (PartIdx == NumParts - 1)
              Flags.setSplitEnd();
          }
          Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
          Args[i].Flags.push_back(Flags);
          if (Handler.assignArg(i + PartIdx, NewVT, NewVT, CCValAssign::Full,
                                Args[i], Args[i].Flags[PartIdx], CCInfo))
            return false;
        }
      }
    }
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      unsigned NumArgRegs =
          Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      if (!NumArgRegs)
        return false;
      j += NumArgRegs;
      continue;
    }

    // FIXME: Pack registers if we have more than one.
    Register ArgReg = Args[i].Regs[0];

    EVT OrigVT = EVT::getEVT(Args[i].Ty);
    EVT VAVT = VA.getValVT();
    const LLT OrigTy = getLLTForType(*Args[i].Ty, DL);

    if (VA.isRegLoc()) {
      if (Handler.isIncomingArgumentHandler() && VAVT != OrigVT) {
        if (VAVT.getSizeInBits() < OrigVT.getSizeInBits()) {
          // Expected to be multiple regs for a single incoming arg.
          unsigned NumArgRegs = Args[i].Regs.size();
          if (NumArgRegs < 2)
            return false;

          assert((j + (NumArgRegs - 1)) < ArgLocs.size() &&
                 "Too many regs for number of args");
          for (unsigned Part = 0; Part < NumArgRegs; ++Part) {
            // There should be Regs.size() ArgLocs per argument.
            VA = ArgLocs[j + Part];
            Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
          }
          j += NumArgRegs - 1;
          // Merge the split registers into the expected larger result vreg
          // of the original call.
          MIRBuilder.buildMerge(Args[i].OrigRegs[0], Args[i].Regs);
          continue;
        }

        const LLT VATy(VAVT.getSimpleVT());
        Register NewReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(VATy);
        Handler.assignValueToReg(NewReg, VA.getLocReg(), VA);
        // If it's a vector type, we either need to truncate the elements
        // or do an unmerge to get the lower block of elements.
        if (VATy.isVector() &&
            VATy.getNumElements() > OrigVT.getVectorNumElements()) {
          // Just handle the case where the VA type is 2 * original type.
          if (VATy.getNumElements() != OrigVT.getVectorNumElements() * 2) {
            LLVM_DEBUG(dbgs()
                       << "Incoming promoted vector arg has too many elts");
            return false;
          }
          auto Unmerge = MIRBuilder.buildUnmerge({OrigTy, OrigTy}, {NewReg});
          MIRBuilder.buildCopy(ArgReg, Unmerge.getReg(0));
        } else {
          MIRBuilder.buildTrunc(ArgReg, {NewReg});
        }
      } else if (!Handler.isIncomingArgumentHandler()) {
        assert((j + (Args[i].Regs.size() - 1)) < ArgLocs.size() &&
               "Too many regs for number of args");
        // This is an outgoing argument that might have been split.
        for (unsigned Part = 0; Part < Args[i].Regs.size(); ++Part) {
          // There should be Regs.size() ArgLocs per argument.
          VA = ArgLocs[j + Part];
          Handler.assignValueToReg(Args[i].Regs[Part], VA.getLocReg(), VA);
        }
        j += Args[i].Regs.size() - 1;
      } else {
        Handler.assignValueToReg(ArgReg, VA.getLocReg(), VA);
      }
    } else if (VA.isMemLoc()) {
      // Don't currently support loading/storing a type that needs to be split
      // to the stack. Should be easy, just not implemented yet.
      if (Args[i].Regs.size() > 1) {
        LLVM_DEBUG(
            dbgs()
            << "Load/store a split arg to/from the stack not implemented yet");
        return false;
      }

      EVT LocVT = VA.getValVT();
      unsigned MemSize = LocVT == MVT::iPTR ? DL.getPointerSize()
                                            : LocVT.getStoreSize();
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      Register StackAddr = Handler.getStackAddress(MemSize, Offset, MPO);
      Handler.assignValueToAddress(Args[i], StackAddr, MemSize, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}
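
// Dry-run the fixed/vararg assignment functions over Args to check whether
// the calling convention can cope with every argument.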
bool CallLowering::analyzeArgInfo(CCState &CCState,
                                  SmallVectorImpl<ArgInfo> &Args,
                                  CCAssignFn &AssignFnFixed,
                                  CCAssignFn &AssignFnVarArg) const {
  for (unsigned i = 0, e = Args.size(); i < e; ++i) {
    MVT VT = MVT::getVT(Args[i].Ty);
    CCAssignFn &Fn = Args[i].IsFixed ? AssignFnFixed : AssignFnVarArg;
    if (Fn(i, VT, VT, CCValAssign::Full, Args[i].Flags[0], CCState)) {
      // Bail out on anything we can't handle.
      LLVM_DEBUG(dbgs() << "Cannot analyze " << EVT(VT).getEVTString()
                        << " (arg number = " << i << ")\n");
      return false;
    }
  }
  return true;
}
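
// Check that the callee's return-value locations match the caller's exactly;
// this is a prerequisite for tail calls between differing calling
// conventions.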
bool CallLowering::resultsCompatible(CallLoweringInfo &Info,
                                     MachineFunction &MF,
                                     SmallVectorImpl<ArgInfo> &InArgs,
                                     CCAssignFn &CalleeAssignFnFixed,
                                     CCAssignFn &CalleeAssignFnVarArg,
                                     CCAssignFn &CallerAssignFnFixed,
                                     CCAssignFn &CallerAssignFnVarArg) const {
  const Function &F = MF.getFunction();
  CallingConv::ID CalleeCC = Info.CallConv;
  CallingConv::ID CallerCC = F.getCallingConv();

  if (CallerCC == CalleeCC)
    return true;

  SmallVector<CCValAssign, 16> ArgLocs1;
  CCState CCInfo1(CalleeCC, false, MF, ArgLocs1, F.getContext());
  if (!analyzeArgInfo(CCInfo1, InArgs, CalleeAssignFnFixed,
                      CalleeAssignFnVarArg))
    return false;

  SmallVector<CCValAssign, 16> ArgLocs2;
  CCState CCInfo2(CallerCC, false, MF, ArgLocs2, F.getContext());
  if (!analyzeArgInfo(CCInfo2, InArgs, CallerAssignFnFixed,
                      CallerAssignFnVarArg))
    return false;

  // We need the argument locations to match up exactly. If there's more in
  // one than the other, then we are done.
  if (ArgLocs1.size() != ArgLocs2.size())
    return false;

  // Make sure that each location is passed in exactly the same way.
  for (unsigned i = 0, e = ArgLocs1.size(); i < e; ++i) {
    const CCValAssign &Loc1 = ArgLocs1[i];
    const CCValAssign &Loc2 = ArgLocs2[i];

    // We need both of them to be the same. So if one is a register and one
    // isn't, we're done.
    if (Loc1.isRegLoc() != Loc2.isRegLoc())
      return false;

    if (Loc1.isRegLoc()) {
      // If they don't have the same register location, we're done.
      if (Loc1.getLocReg() != Loc2.getLocReg())
        return false;

      // They matched, so we can move to the next ArgLoc.
      continue;
    }

    // Loc1 wasn't a RegLoc, so they both must be MemLocs. Check if they match.
    if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
      return false;
  }

  return true;
}
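
// Extend ValReg to the location type recorded in VA (any-, sign-, or
// zero-extend as the calling convention requires), optionally capping the
// extended width at MaxSizeBits.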
Register CallLowering::ValueHandler::extendRegister(Register ValReg,
                                                    CCValAssign &VA,
                                                    unsigned MaxSizeBits) {
  LLT LocTy{VA.getLocVT()};
  LLT ValTy = MRI.getType(ValReg);
  if (LocTy.getSizeInBits() == ValTy.getSizeInBits())
    return ValReg;

  if (LocTy.isScalar() && MaxSizeBits && MaxSizeBits < LocTy.getSizeInBits()) {
    if (MaxSizeBits <= ValTy.getSizeInBits())
      return ValReg;
    LocTy = LLT::scalar(MaxSizeBits);
  }

  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB.getReg(0);
  }
  case CCValAssign::SExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    Register NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}

void CallLowering::ValueHandler::anchor() {}