//===-- llvm/lib/Target/AMDGPU/AMDGPUCallLowering.cpp - Call lowering -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUCallLowering.h"
#include "AMDGPUISelLowering.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "SIISelLowering.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Support/LowLevelTypeImpl.h"

using namespace llvm;

namespace {

struct OutgoingValueHandler : public CallLowering::ValueHandler {
  OutgoingValueHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                       MachineInstrBuilder MIB, CCAssignFn *AssignFn)
      : ValueHandler(B, MRI, AssignFn), MIB(MIB) {}

  MachineInstrBuilder MIB;
  bool isIncomingArgumentHandler() const override { return false; }

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    llvm_unreachable("not implemented");
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    llvm_unreachable("not implemented");
  }
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    Register ExtReg;
    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to
      // extend and do a 32-bit copy to avoid the verifier complaining about it.
      ExtReg = MIRBuilder.buildAnyExt(LLT::scalar(32), ValVReg).getReg(0);
    } else
      ExtReg = extendRegister(ValVReg, VA);

    // If this is a scalar return, insert a readfirstlane just in case the value
    // ends up in a VGPR.
    // FIXME: Assert this is a shader return.
    const SIRegisterInfo *TRI
      = static_cast<const SIRegisterInfo *>(MRI.getTargetRegisterInfo());
    if (TRI->isSGPRReg(MRI, PhysReg)) {
      auto ToSGPR = MIRBuilder.buildIntrinsic(Intrinsic::amdgcn_readfirstlane,
                                              {MRI.getType(ExtReg)}, false)
        .addReg(ExtReg);
      ExtReg = ToSGPR.getReg(0);
    }

    MIRBuilder.buildCopy(PhysReg, ExtReg);
    MIB.addUse(PhysReg, RegState::Implicit);
  }

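  // Forward directly to the selected calling-convention assignment function.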
  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    return AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
  }
};

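// Value handler used when lowering incoming arguments: copies or loads each
// incoming value out of its assigned physical register or fixed stack slot
// into the virtual registers of the IR-level argument.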
struct IncomingArgHandler : public CallLowering::ValueHandler {
  uint64_t StackUsed = 0;

  IncomingArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
    : ValueHandler(B, MRI, AssignFn) {}
  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(
        LLT::pointer(AMDGPUAS::PRIVATE_ADDRESS, 32), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);

    if (VA.getLocVT().getSizeInBits() < 32) {
      // 16-bit types are reported as legal for 32-bit registers. We need to do
      // a 32-bit copy, and truncate to avoid the verifier complaining about it.
      auto Copy = MIRBuilder.buildCopy(LLT::scalar(32), PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      return;
    }

    switch (VA.getLocInfo()) {
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    }
  }
  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t MemSize,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    // The reported memory location may be wider than the value.
    const LLT RegTy = MRI.getType(ValVReg);
    MemSize = std::min(static_cast<uint64_t>(RegTy.getSizeInBytes()), MemSize);

    // FIXME: Get alignment
    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemSize,
        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }
  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(unsigned PhysReg) = 0;

  // FIXME: What is the point of this being a callback?
  bool isIncomingArgumentHandler() const override { return true; }
};

struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
    : IncomingArgHandler(B, MRI, AssignFn) {}

  void markPhysRegUsed(unsigned PhysReg) override {
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

} // end anonymous namespace

AMDGPUCallLowering::AMDGPUCallLowering(const AMDGPUTargetLowering &TLI)
  : CallLowering(&TLI) {
}

// FIXME: Compatibility shim
static ISD::NodeType extOpcodeToISDExtOpcode(unsigned MIOpc) {
  switch (MIOpc) {
  case TargetOpcode::G_SEXT:
    return ISD::SIGN_EXTEND;
  case TargetOpcode::G_ZEXT:
    return ISD::ZERO_EXTEND;
  case TargetOpcode::G_ANYEXT:
    return ISD::ANY_EXTEND;
  default:
    llvm_unreachable("not an extend opcode");
  }
}

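/// Break \p OrigArg into one ArgInfo per legal register piece, following the
/// register count and type the calling convention assigns to each value type.
/// \p PerformArgSplit is invoked for every value that actually needs to be
/// split, so the caller can emit the pack/unpack code between the original
/// virtual registers and the per-part registers.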
void AMDGPUCallLowering::splitToValueTypes(
    MachineIRBuilder &B,
    const ArgInfo &OrigArg, unsigned OrigArgIdx,
    SmallVectorImpl<ArgInfo> &SplitArgs,
    const DataLayout &DL, CallingConv::ID CallConv,
    SplitArgTy PerformArgSplit) const {
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  LLVMContext &Ctx = OrigArg.Ty->getContext();

  if (OrigArg.Ty->isVoidTy())
    return;

  SmallVector<EVT, 4> SplitVTs;
  ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs);

  assert(OrigArg.Regs.size() == SplitVTs.size());

  int SplitIdx = 0;
  for (EVT VT : SplitVTs) {
    Register Reg = OrigArg.Regs[SplitIdx];
    Type *Ty = VT.getTypeForEVT(Ctx);
    LLT LLTy = getLLTForType(*Ty, DL);

    if (OrigArgIdx == AttributeList::ReturnIndex && VT.isScalarInteger()) {
      unsigned ExtendOp = TargetOpcode::G_ANYEXT;
      if (OrigArg.Flags[0].isSExt()) {
        assert(OrigArg.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_SEXT;
      } else if (OrigArg.Flags[0].isZExt()) {
        assert(OrigArg.Regs.size() == 1 && "expect only simple return values");
        ExtendOp = TargetOpcode::G_ZEXT;
      }

      EVT ExtVT = TLI.getTypeForExtReturn(Ctx, VT,
                                          extOpcodeToISDExtOpcode(ExtendOp));
      if (ExtVT != VT) {
        VT = ExtVT;
        Ty = ExtVT.getTypeForEVT(Ctx);
        LLTy = getLLTForType(*Ty, DL);
        Reg = B.buildInstr(ExtendOp, {LLTy}, {Reg}).getReg(0);
      }
    }

    unsigned NumParts = TLI.getNumRegistersForCallingConv(Ctx, CallConv, VT);
    MVT RegVT = TLI.getRegisterTypeForCallingConv(Ctx, CallConv, VT);

    if (NumParts == 1) {
      // No splitting to do, but we want to replace the original type (e.g. [1 x
      // double] -> double).
      SplitArgs.emplace_back(Reg, Ty, OrigArg.Flags, OrigArg.IsFixed);

      ++SplitIdx;
      continue;
    }

    SmallVector<Register, 8> SplitRegs;
    Type *PartTy = EVT(RegVT).getTypeForEVT(Ctx);
    LLT PartLLT = getLLTForType(*PartTy, DL);
    MachineRegisterInfo &MRI = *B.getMRI();

    // FIXME: Should we be reporting all of the part registers for a single
    // argument, and let handleAssignments take care of the repacking?
    for (unsigned i = 0; i < NumParts; ++i) {
      Register PartReg = MRI.createGenericVirtualRegister(PartLLT);
      SplitRegs.push_back(PartReg);
      SplitArgs.emplace_back(ArrayRef<Register>(PartReg), PartTy, OrigArg.Flags);
    }

    PerformArgSplit(SplitRegs, Reg, LLTy, PartLLT, SplitIdx);

    ++SplitIdx;
  }
}

// Get the appropriate type to make \p OrigTy \p Factor times bigger.
static LLT getMultipleType(LLT OrigTy, int Factor) {
  if (OrigTy.isVector()) {
    return LLT::vector(OrigTy.getNumElements() * Factor,
                       OrigTy.getElementType());
  }

  return LLT::scalar(OrigTy.getSizeInBits() * Factor);
}

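// Unpack \p SrcReg into the smaller part-typed registers \p DstRegs, using a
// plain unmerge when the sizes divide evenly, and an insert into a padded
// wider type followed by extracts otherwise.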
// TODO: Move to generic code
static void unpackRegsToOrigType(MachineIRBuilder &B,
                                 ArrayRef<Register> DstRegs,
                                 Register SrcReg,
                                 const CallLowering::ArgInfo &Info,
                                 LLT SrcTy,
                                 LLT PartTy) {
  assert(DstRegs.size() > 1 && "Nothing to unpack");

  const unsigned SrcSize = SrcTy.getSizeInBits();
  const unsigned PartSize = PartTy.getSizeInBits();

  if (SrcTy.isVector() && !PartTy.isVector() &&
      PartSize > SrcTy.getElementType().getSizeInBits()) {
    // Vector was scalarized, and the elements extended.
    auto UnmergeToEltTy = B.buildUnmerge(SrcTy.getElementType(), SrcReg);
    for (int i = 0, e = DstRegs.size(); i != e; ++i)
      B.buildAnyExt(DstRegs[i], UnmergeToEltTy.getReg(i));
    return;
  }

  if (SrcSize % PartSize == 0) {
    B.buildUnmerge(DstRegs, SrcReg);
    return;
  }

  const int NumRoundedParts = (SrcSize + PartSize - 1) / PartSize;

  LLT BigTy = getMultipleType(PartTy, NumRoundedParts);
  auto ImpDef = B.buildUndef(BigTy);

  auto Big = B.buildInsert(BigTy, ImpDef.getReg(0), SrcReg, 0).getReg(0);

  int64_t Offset = 0;
  for (unsigned i = 0, e = DstRegs.size(); i != e; ++i, Offset += PartSize)
    B.buildExtract(DstRegs[i], Big, Offset);
}

/// Lower the return value for the already existing \p Ret. This assumes that
/// \p B's insertion point is correct.
bool AMDGPUCallLowering::lowerReturnVal(MachineIRBuilder &B,
                                        const Value *Val, ArrayRef<Register> VRegs,
                                        MachineInstrBuilder &Ret) const {
  if (!Val)
    return true;

  auto &MF = B.getMF();
  const auto &F = MF.getFunction();
  const DataLayout &DL = MF.getDataLayout();
  MachineRegisterInfo *MRI = B.getMRI();

  CallingConv::ID CC = F.getCallingConv();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  ArgInfo OrigRetInfo(VRegs, Val->getType());
  setArgFlags(OrigRetInfo, AttributeList::ReturnIndex, DL, F);
  SmallVector<ArgInfo, 4> SplitRetInfos;

  splitToValueTypes(
    B, OrigRetInfo, AttributeList::ReturnIndex, SplitRetInfos, DL, CC,
    [&](ArrayRef<Register> Regs, Register SrcReg, LLT LLTy, LLT PartLLT,
        int VTSplitIdx) {
      unpackRegsToOrigType(B, Regs, SrcReg,
                           SplitRetInfos[VTSplitIdx],
                           LLTy, PartLLT);
    });

  CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(CC, F.isVarArg());
  OutgoingValueHandler RetHandler(B, *MRI, Ret, AssignFn);
  return handleAssignments(B, SplitRetInfos, RetHandler);
}

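// Lower a return: wave-end returns (kernels and void shader returns) become
// S_ENDPGM, shader returns use SI_RETURN_TO_EPILOG, and ordinary functions
// return through S_SETPC_B64_return using the saved return address.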
bool AMDGPUCallLowering::lowerReturn(MachineIRBuilder &B,
                                     const Value *Val,
                                     ArrayRef<Register> VRegs) const {

  MachineFunction &MF = B.getMF();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MFI->setIfReturnsVoid(!Val);

  assert(!Val == VRegs.empty() && "Return value without a vreg");

  CallingConv::ID CC = B.getMF().getFunction().getCallingConv();
  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsWaveEnd = (IsShader && MFI->returnsVoid()) ||
                         AMDGPU::isKernel(CC);
  if (IsWaveEnd) {
    B.buildInstr(AMDGPU::S_ENDPGM)
      .addImm(0);
    return true;
  }

  auto const &ST = MF.getSubtarget<GCNSubtarget>();

  unsigned ReturnOpc =
      IsShader ? AMDGPU::SI_RETURN_TO_EPILOG : AMDGPU::S_SETPC_B64_return;

  auto Ret = B.buildInstrNoInsert(ReturnOpc);
  Register ReturnAddrVReg;
  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    ReturnAddrVReg = MRI.createVirtualRegister(&AMDGPU::CCR_SGPR_64RegClass);
    Ret.addUse(ReturnAddrVReg);
  }

  if (!lowerReturnVal(B, Val, VRegs, Ret))
    return false;

  if (ReturnOpc == AMDGPU::S_SETPC_B64_return) {
    const SIRegisterInfo *TRI = ST.getRegisterInfo();
    Register LiveInReturn = MF.addLiveIn(TRI->getReturnAddressReg(MF),
                                         &AMDGPU::SGPR_64RegClass);
    B.buildCopy(ReturnAddrVReg, LiveInReturn);
  }

  // TODO: Handle CalleeSavedRegsViaCopy.

  B.insertInstr(Ret);
  return true;
}

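/// Compute a pointer into the kernarg segment for the argument at byte
/// \p Offset, based on the preloaded kernarg segment pointer SGPR pair.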
Register AMDGPUCallLowering::lowerParameterPtr(MachineIRBuilder &B,
                                               Type *ParamTy,
                                               uint64_t Offset) const {

  MachineFunction &MF = B.getMF();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  PointerType *PtrTy = PointerType::get(ParamTy, AMDGPUAS::CONSTANT_ADDRESS);
  LLT PtrType = getLLTForType(*PtrTy, DL);
  Register KernArgSegmentPtr =
    MFI->getPreloadedReg(AMDGPUFunctionArgInfo::KERNARG_SEGMENT_PTR);
  Register KernArgSegmentVReg = MRI.getLiveInVirtReg(KernArgSegmentPtr);

  auto OffsetReg = B.buildConstant(LLT::scalar(64), Offset);

  return B.buildPtrAdd(PtrType, KernArgSegmentVReg, OffsetReg).getReg(0);
}

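/// Emit a load of the kernel argument of type \p ParamTy from the kernarg
/// segment at byte \p Offset into \p DstReg, marking the load invariant and
/// dereferenceable.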
void AMDGPUCallLowering::lowerParameter(MachineIRBuilder &B, Type *ParamTy,
                                        uint64_t Offset, Align Alignment,
                                        Register DstReg) const {
  MachineFunction &MF = B.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();
  MachinePointerInfo PtrInfo(AMDGPUAS::CONSTANT_ADDRESS);
  unsigned TypeSize = DL.getTypeStoreSize(ParamTy);
  Register PtrReg = lowerParameterPtr(B, ParamTy, Offset);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      PtrInfo,
      MachineMemOperand::MOLoad | MachineMemOperand::MODereferenceable |
          MachineMemOperand::MOInvariant,
      TypeSize, Alignment);

  B.buildLoad(DstReg, PtrReg, *MMO);
}

// Allocate special inputs passed in user SGPRs.
static void allocateHSAUserSGPRs(CCState &CCInfo,
                                 MachineIRBuilder &B,
                                 MachineFunction &MF,
                                 const SIRegisterInfo &TRI,
                                 SIMachineFunctionInfo &Info) {
  // FIXME: How should these inputs interact with inreg / custom SGPR inputs?
  if (Info.hasPrivateSegmentBuffer()) {
    Register PrivateSegmentBufferReg = Info.addPrivateSegmentBuffer(TRI);
    MF.addLiveIn(PrivateSegmentBufferReg, &AMDGPU::SGPR_128RegClass);
    CCInfo.AllocateReg(PrivateSegmentBufferReg);
  }

  if (Info.hasDispatchPtr()) {
    Register DispatchPtrReg = Info.addDispatchPtr(TRI);
    MF.addLiveIn(DispatchPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchPtrReg);
  }

  if (Info.hasQueuePtr()) {
    Register QueuePtrReg = Info.addQueuePtr(TRI);
    MF.addLiveIn(QueuePtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(QueuePtrReg);
  }

  if (Info.hasKernargSegmentPtr()) {
    MachineRegisterInfo &MRI = MF.getRegInfo();
    Register InputPtrReg = Info.addKernargSegmentPtr(TRI);
    const LLT P4 = LLT::pointer(AMDGPUAS::CONSTANT_ADDRESS, 64);
    Register VReg = MRI.createGenericVirtualRegister(P4);
    MRI.addLiveIn(InputPtrReg, VReg);
    B.getMBB().addLiveIn(InputPtrReg);
    B.buildCopy(VReg, InputPtrReg);
    CCInfo.AllocateReg(InputPtrReg);
  }

  if (Info.hasDispatchID()) {
    Register DispatchIDReg = Info.addDispatchID(TRI);
    MF.addLiveIn(DispatchIDReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(DispatchIDReg);
  }

  if (Info.hasFlatScratchInit()) {
    Register FlatScratchInitReg = Info.addFlatScratchInit(TRI);
    MF.addLiveIn(FlatScratchInitReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(FlatScratchInitReg);
  }

  // TODO: Add GridWorkGroupCount user SGPRs when used. For now with HSA we read
  // these from the dispatch pointer.
}

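// Kernel arguments do not go through normal calling-convention assignment;
// each argument is instead loaded from the kernarg segment at its ABI-aligned
// offset.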
bool AMDGPUCallLowering::lowerFormalArgumentsKernel(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  MachineFunction &MF = B.getMF();
  const GCNSubtarget *Subtarget = &MF.getSubtarget<GCNSubtarget>();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const SIRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const SITargetLowering &TLI = *getTLI<SITargetLowering>();

  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  allocateHSAUserSGPRs(CCInfo, B, MF, *TRI, *Info);

  unsigned i = 0;
  const Align KernArgBaseAlign(16);
  const unsigned BaseOffset = Subtarget->getExplicitKernelArgOffset(F);
  uint64_t ExplicitArgOffset = 0;

  // TODO: Align down to dword alignment and extract bits for extending loads.
  for (auto &Arg : F.args()) {
    Type *ArgTy = Arg.getType();
    unsigned AllocSize = DL.getTypeAllocSize(ArgTy);
    if (AllocSize == 0)
      continue;

    Align ABIAlign = DL.getABITypeAlign(ArgTy);

    uint64_t ArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + BaseOffset;
    ExplicitArgOffset = alignTo(ExplicitArgOffset, ABIAlign) + AllocSize;

    if (Arg.use_empty()) {
      ++i;
      continue;
    }

    ArrayRef<Register> OrigArgRegs = VRegs[i];
    Register ArgReg =
      OrigArgRegs.size() == 1
      ? OrigArgRegs[0]
      : MRI.createGenericVirtualRegister(getLLTForType(*ArgTy, DL));

    Align Alignment = commonAlignment(KernArgBaseAlign, ArgOffset);
    lowerParameter(B, ArgTy, ArgOffset, Alignment, ArgReg);
    if (OrigArgRegs.size() > 1)
      unpackRegs(OrigArgRegs, ArgReg, ArgTy, B);

    ++i;
  }

  TLI.allocateSpecialEntryInputVGPRs(CCInfo, MF, *TRI, *Info);
  TLI.allocateSystemSGPRs(CCInfo, MF, *Info, F.getCallingConv(), false);
  return true;
}

/// Pack values \p SrcRegs to cover the vector type result \p DstRegs.
static MachineInstrBuilder mergeVectorRegsToResultRegs(
  MachineIRBuilder &B, ArrayRef<Register> DstRegs, ArrayRef<Register> SrcRegs) {
  MachineRegisterInfo &MRI = *B.getMRI();
  LLT LLTy = MRI.getType(DstRegs[0]);
  LLT PartLLT = MRI.getType(SrcRegs[0]);

  // Deal with v3s16 split into v2s16
  LLT LCMTy = getLCMType(LLTy, PartLLT);
  if (LCMTy == LLTy) {
    // Common case where no padding is needed.
    assert(DstRegs.size() == 1);
    return B.buildConcatVectors(DstRegs[0], SrcRegs);
  }

  const int NumWide = LCMTy.getSizeInBits() / PartLLT.getSizeInBits();
  Register Undef = B.buildUndef(PartLLT).getReg(0);

  // Build vector of undefs.
  SmallVector<Register, 8> WidenedSrcs(NumWide, Undef);

  // Replace the first sources with the real registers.
  std::copy(SrcRegs.begin(), SrcRegs.end(), WidenedSrcs.begin());

  auto Widened = B.buildConcatVectors(LCMTy, WidenedSrcs);
  int NumDst = LCMTy.getSizeInBits() / LLTy.getSizeInBits();

  SmallVector<Register, 8> PadDstRegs(NumDst);
  std::copy(DstRegs.begin(), DstRegs.end(), PadDstRegs.begin());

  // Create the excess dead defs for the unmerge.
  for (int I = DstRegs.size(); I != NumDst; ++I)
    PadDstRegs[I] = MRI.createGenericVirtualRegister(LLTy);

  return B.buildUnmerge(PadDstRegs, Widened);
}

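// Reassemble the per-part registers \p Regs produced by argument splitting
// back into the original virtual registers \p OrigRegs, handling the scalar,
// vector-of-parts, and scalarized/promoted vector cases separately.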
// TODO: Move this to generic code
static void packSplitRegsToOrigType(MachineIRBuilder &B,
                                    ArrayRef<Register> OrigRegs,
                                    ArrayRef<Register> Regs,
                                    LLT LLTy,
                                    LLT PartLLT) {
  MachineRegisterInfo &MRI = *B.getMRI();

  if (!LLTy.isVector() && !PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    LLT OrigTy = MRI.getType(OrigRegs[0]);

    unsigned SrcSize = PartLLT.getSizeInBits() * Regs.size();
    if (SrcSize == OrigTy.getSizeInBits())
      B.buildMerge(OrigRegs[0], Regs);
    else {
      auto Widened = B.buildMerge(LLT::scalar(SrcSize), Regs);
      B.buildTrunc(OrigRegs[0], Widened);
    }

    return;
  }

  if (LLTy.isVector() && PartLLT.isVector()) {
    assert(OrigRegs.size() == 1);
    assert(LLTy.getElementType() == PartLLT.getElementType());
    mergeVectorRegsToResultRegs(B, OrigRegs, Regs);
    return;
  }

  assert(LLTy.isVector() && !PartLLT.isVector());

  LLT DstEltTy = LLTy.getElementType();

  // Pointer information was discarded. We'll need to coerce some register types
  // to avoid violating type constraints.
  LLT RealDstEltTy = MRI.getType(OrigRegs[0]).getElementType();

  assert(DstEltTy.getSizeInBits() == RealDstEltTy.getSizeInBits());

  if (DstEltTy == PartLLT) {
    // Vector was trivially scalarized.

    if (RealDstEltTy.isPointer()) {
      for (Register Reg : Regs)
        MRI.setType(Reg, RealDstEltTy);
    }

    B.buildBuildVector(OrigRegs[0], Regs);
  } else if (DstEltTy.getSizeInBits() > PartLLT.getSizeInBits()) {
    // Deal with vector with 64-bit elements decomposed to 32-bit
    // registers. Need to create intermediate 64-bit elements.
    SmallVector<Register, 8> EltMerges;
    int PartsPerElt = DstEltTy.getSizeInBits() / PartLLT.getSizeInBits();

    assert(DstEltTy.getSizeInBits() % PartLLT.getSizeInBits() == 0);

    for (int I = 0, NumElts = LLTy.getNumElements(); I != NumElts; ++I) {
      auto Merge = B.buildMerge(RealDstEltTy, Regs.take_front(PartsPerElt));
      // Fix the type in case this is really a vector of pointers.
      MRI.setType(Merge.getReg(0), RealDstEltTy);
      EltMerges.push_back(Merge.getReg(0));
      Regs = Regs.drop_front(PartsPerElt);
    }

    B.buildBuildVector(OrigRegs[0], EltMerges);
  } else {
    // Vector was split, and elements promoted to a wider type.
    LLT BVType = LLT::vector(LLTy.getNumElements(), PartLLT);
    auto BV = B.buildBuildVector(BVType, Regs);
    B.buildTrunc(OrigRegs[0], BV);
  }
}

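// Lower non-kernel formal arguments: shader and callable-function arguments
// go through the normal calling-convention assignment, with AMDGPU_PS input
// enable bookkeeping and the special SGPR/VGPR system inputs allocated around
// the user arguments.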
bool AMDGPUCallLowering::lowerFormalArguments(
    MachineIRBuilder &B, const Function &F,
    ArrayRef<ArrayRef<Register>> VRegs) const {
  CallingConv::ID CC = F.getCallingConv();

  // The infrastructure for normal calling convention lowering is essentially
  // useless for kernels. We want to avoid any kind of legalization or argument
  // splitting.
  if (CC == CallingConv::AMDGPU_KERNEL)
    return lowerFormalArgumentsKernel(B, F, VRegs);

  const bool IsShader = AMDGPU::isShader(CC);
  const bool IsEntryFunc = AMDGPU::isEntryFunctionCC(CC);

  MachineFunction &MF = B.getMF();
  MachineBasicBlock &MBB = B.getMBB();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
  const GCNSubtarget &Subtarget = MF.getSubtarget<GCNSubtarget>();
  const SIRegisterInfo *TRI = Subtarget.getRegisterInfo();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, F.isVarArg(), MF, ArgLocs, F.getContext());

  if (!IsEntryFunc) {
    Register ReturnAddrReg = TRI->getReturnAddressReg(MF);
    Register LiveInReturn = MF.addLiveIn(ReturnAddrReg,
                                         &AMDGPU::SGPR_64RegClass);
    MBB.addLiveIn(ReturnAddrReg);
    B.buildCopy(LiveInReturn, ReturnAddrReg);
  }

  if (Info->hasImplicitBufferPtr()) {
    Register ImplicitBufferPtrReg = Info->addImplicitBufferPtr(*TRI);
    MF.addLiveIn(ImplicitBufferPtrReg, &AMDGPU::SGPR_64RegClass);
    CCInfo.AllocateReg(ImplicitBufferPtrReg);
  }

  SmallVector<ArgInfo, 32> SplitArgs;
  unsigned Idx = 0;
  unsigned PSInputNum = 0;

  for (auto &Arg : F.args()) {
    if (DL.getTypeStoreSize(Arg.getType()) == 0)
      continue;

    const bool InReg = Arg.hasAttribute(Attribute::InReg);

    // SGPR arguments to functions not implemented.
    if (!IsShader && InReg)
      return false;

    if (Arg.hasAttribute(Attribute::SwiftSelf) ||
        Arg.hasAttribute(Attribute::SwiftError) ||
        Arg.hasAttribute(Attribute::Nest))
      return false;

    if (CC == CallingConv::AMDGPU_PS && !InReg && PSInputNum <= 15) {
      const bool ArgUsed = !Arg.use_empty();
      bool SkipArg = !ArgUsed && !Info->isPSInputAllocated(PSInputNum);

      if (!SkipArg) {
        Info->markPSInputAllocated(PSInputNum);
        if (ArgUsed)
          Info->markPSInputEnabled(PSInputNum);
      }

      ++PSInputNum;

      if (SkipArg) {
        for (int I = 0, E = VRegs[Idx].size(); I != E; ++I)
          B.buildUndef(VRegs[Idx][I]);

        ++Idx;
        continue;
      }
    }

    ArgInfo OrigArg(VRegs[Idx], Arg.getType());
    const unsigned OrigArgIdx = Idx + AttributeList::FirstArgIndex;
    setArgFlags(OrigArg, OrigArgIdx, DL, F);

    splitToValueTypes(
      B, OrigArg, OrigArgIdx, SplitArgs, DL, CC,
      // FIXME: We should probably be passing multiple registers to
      // handleAssignments to do this
      [&](ArrayRef<Register> Regs, Register DstReg,
          LLT LLTy, LLT PartLLT, int VTSplitIdx) {
        assert(DstReg == VRegs[Idx][VTSplitIdx]);
        packSplitRegsToOrigType(B, VRegs[Idx][VTSplitIdx], Regs,
                                LLTy, PartLLT);
      });

    ++Idx;
  }

  // At least one interpolation mode must be enabled or else the GPU will
  // hang.
  //
  // Check PSInputAddr instead of PSInputEnable. The idea is that if the user
  // set PSInputAddr, the user wants to enable some bits after the compilation
  // based on run-time states. Since we can't know what the final PSInputEna
  // will look like, we shouldn't do anything here and the user should take
  // responsibility for the correct programming.
  //
  // Otherwise, the following restrictions apply:
  // - At least one of PERSP_* (0xF) or LINEAR_* (0x70) must be enabled.
  // - If POS_W_FLOAT (11) is enabled, at least one of PERSP_* must be
  //   enabled too.
  if (CC == CallingConv::AMDGPU_PS) {
    if ((Info->getPSInputAddr() & 0x7F) == 0 ||
        ((Info->getPSInputAddr() & 0xF) == 0 &&
         Info->isPSInputAllocated(11))) {
      CCInfo.AllocateReg(AMDGPU::VGPR0);
      CCInfo.AllocateReg(AMDGPU::VGPR1);
      Info->markPSInputAllocated(0);
      Info->markPSInputEnabled(0);
    }

    if (Subtarget.isAmdPalOS()) {
      // For isAmdPalOS, the user does not enable some bits after compilation
      // based on run-time states; the register values being generated here are
      // the final ones set in hardware. Therefore we need to apply the
      // workaround to PSInputAddr and PSInputEnable together. (The case where
      // a bit is set in PSInputAddr but not PSInputEnable is where the frontend
      // set up an input arg for a particular interpolation mode, but nothing
      // uses that input arg. Really we should have an earlier pass that removes
      // such an arg.)
      unsigned PsInputBits = Info->getPSInputAddr() & Info->getPSInputEnable();
      if ((PsInputBits & 0x7F) == 0 ||
          ((PsInputBits & 0xF) == 0 &&
           (PsInputBits >> 11 & 1)))
        Info->markPSInputEnabled(
            countTrailingZeros(Info->getPSInputAddr(), ZB_Undefined));
    }
  }

  const SITargetLowering &TLI = *getTLI<SITargetLowering>();
  CCAssignFn *AssignFn = TLI.CCAssignFnForCall(CC, F.isVarArg());

  if (!MBB.empty())
    B.setInstr(*MBB.begin());

  if (!IsEntryFunc) {
    // For the fixed ABI, pass workitem IDs in the last argument register.
    if (AMDGPUTargetMachine::EnableFixedFunctionABI)
      TLI.allocateSpecialInputVGPRsFixed(CCInfo, MF, *TRI, *Info);
  }

  FormalArgHandler Handler(B, MRI, AssignFn);
  if (!handleAssignments(CCInfo, ArgLocs, B, SplitArgs, Handler))
    return false;

  if (!IsEntryFunc && !AMDGPUTargetMachine::EnableFixedFunctionABI) {
    // Special inputs come after user arguments.
    TLI.allocateSpecialInputVGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Start adding system SGPRs.
  if (IsEntryFunc) {
    TLI.allocateSystemSGPRs(CCInfo, MF, *Info, CC, IsShader);
  } else {
    CCInfo.AllocateReg(Info->getScratchRSrcReg());
    TLI.allocateSpecialInputSGPRs(CCInfo, MF, *TRI, *Info);
  }

  // Move back to the end of the basic block.
  B.setMBB(MBB);

  return true;
}