//===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
//
//===----------------------------------------------------------------------===//
#include "AArch64CallLowering.h"
#include "AArch64ISelLowering.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

#ifndef LLVM_BUILD_GLOBAL_ISEL
#error "This shouldn't be built without GISel"
#endif
33 AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
34 : CallLowering(&TLI) {
37 struct IncomingArgHandler : public CallLowering::ValueHandler {
38 IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
39 : ValueHandler(MIRBuilder, MRI) {}
41 unsigned getStackAddress(uint64_t Size, int64_t Offset,
42 MachinePointerInfo &MPO) override {
43 auto &MFI = MIRBuilder.getMF().getFrameInfo();
44 int FI = MFI.CreateFixedObject(Size, Offset, true);
45 MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
46 unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 64));
47 MIRBuilder.buildFrameIndex(AddrReg, FI);
51 void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
52 CCValAssign &VA) override {
53 markPhysRegUsed(PhysReg);
54 MIRBuilder.buildCopy(ValVReg, PhysReg);
55 // FIXME: assert extension
58 void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
59 MachinePointerInfo &MPO, CCValAssign &VA) override {
60 auto MMO = MIRBuilder.getMF().getMachineMemOperand(
61 MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, Size,
63 MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
66 /// How the physical register gets marked varies between formal
67 /// parameters (it's a basic-block live-in), and a call instruction
68 /// (it's an implicit-def of the BL).
69 virtual void markPhysRegUsed(unsigned PhysReg) = 0;
72 struct FormalArgHandler : public IncomingArgHandler {
73 FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI)
74 : IncomingArgHandler(MIRBuilder, MRI) {}
76 void markPhysRegUsed(unsigned PhysReg) override {
77 MIRBuilder.getMBB().addLiveIn(PhysReg);
81 struct CallReturnHandler : public IncomingArgHandler {
82 CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
83 MachineInstrBuilder MIB)
84 : IncomingArgHandler(MIRBuilder, MRI), MIB(MIB) {}
86 void markPhysRegUsed(unsigned PhysReg) override {
87 MIB.addDef(PhysReg, RegState::Implicit);
90 MachineInstrBuilder MIB;
93 struct OutgoingArgHandler : public CallLowering::ValueHandler {
94 OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
95 MachineInstrBuilder MIB)
96 : ValueHandler(MIRBuilder, MRI), MIB(MIB) {}
98 unsigned getStackAddress(uint64_t Size, int64_t Offset,
99 MachinePointerInfo &MPO) override {
100 LLT p0 = LLT::pointer(0, 64);
101 LLT s64 = LLT::scalar(64);
102 unsigned SPReg = MRI.createGenericVirtualRegister(p0);
103 MIRBuilder.buildCopy(SPReg, AArch64::SP);
105 unsigned OffsetReg = MRI.createGenericVirtualRegister(s64);
106 MIRBuilder.buildConstant(OffsetReg, Offset);
108 unsigned AddrReg = MRI.createGenericVirtualRegister(p0);
109 MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg);
111 MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset);
115 void assignValueToReg(unsigned ValVReg, unsigned PhysReg,
116 CCValAssign &VA) override {
117 MIB.addUse(PhysReg, RegState::Implicit);
118 unsigned ExtReg = extendRegister(ValVReg, VA);
119 MIRBuilder.buildCopy(PhysReg, ExtReg);
122 void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size,
123 MachinePointerInfo &MPO, CCValAssign &VA) override {
124 auto MMO = MIRBuilder.getMF().getMachineMemOperand(
125 MPO, MachineMemOperand::MOStore, Size, 0);
126 MIRBuilder.buildStore(ValVReg, Addr, *MMO);
129 MachineInstrBuilder MIB;
132 void AArch64CallLowering::splitToValueTypes(const ArgInfo &OrigArg,
133 SmallVectorImpl<ArgInfo> &SplitArgs,
134 const DataLayout &DL,
135 MachineRegisterInfo &MRI,
136 SplitArgTy PerformArgSplit) const {
137 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
138 LLVMContext &Ctx = OrigArg.Ty->getContext();
140 SmallVector<EVT, 4> SplitVTs;
141 SmallVector<uint64_t, 4> Offsets;
142 ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
144 if (SplitVTs.size() == 1) {
145 // No splitting to do, but we want to replace the original type (e.g. [1 x
146 // double] -> double).
147 SplitArgs.emplace_back(OrigArg.Reg, SplitVTs[0].getTypeForEVT(Ctx),
152 unsigned FirstRegIdx = SplitArgs.size();
153 for (auto SplitVT : SplitVTs) {
154 // FIXME: set split flags if they're actually used (e.g. i128 on AAPCS).
155 Type *SplitTy = SplitVT.getTypeForEVT(Ctx);
157 ArgInfo{MRI.createGenericVirtualRegister(LLT{*SplitTy, DL}), SplitTy,
161 SmallVector<uint64_t, 4> BitOffsets;
162 for (auto Offset : Offsets)
163 BitOffsets.push_back(Offset * 8);
165 SmallVector<unsigned, 8> SplitRegs;
166 for (auto I = &SplitArgs[FirstRegIdx]; I != SplitArgs.end(); ++I)
167 SplitRegs.push_back(I->Reg);
169 PerformArgSplit(SplitRegs, BitOffsets);
172 bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
173 const Value *Val, unsigned VReg) const {
174 MachineFunction &MF = MIRBuilder.getMF();
175 const Function &F = *MF.getFunction();
177 auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
178 assert(((Val && VReg) || (!Val && !VReg)) && "Return value without a vreg");
181 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
182 CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
183 MachineRegisterInfo &MRI = MF.getRegInfo();
184 auto &DL = F.getParent()->getDataLayout();
186 ArgInfo OrigArg{VReg, Val->getType()};
187 setArgFlags(OrigArg, AttributeSet::ReturnIndex, DL, F);
189 SmallVector<ArgInfo, 8> SplitArgs;
190 splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
191 [&](ArrayRef<unsigned> Regs, ArrayRef<uint64_t> Offsets) {
192 MIRBuilder.buildExtract(Regs, Offsets, VReg);
195 OutgoingArgHandler Handler(MIRBuilder, MRI, MIB);
196 Success = handleAssignments(MIRBuilder, AssignFn, SplitArgs, Handler);
199 MIRBuilder.insertInstr(MIB);
203 bool AArch64CallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
205 ArrayRef<unsigned> VRegs) const {
206 auto &Args = F.getArgumentList();
207 MachineFunction &MF = MIRBuilder.getMF();
208 MachineBasicBlock &MBB = MIRBuilder.getMBB();
209 MachineRegisterInfo &MRI = MF.getRegInfo();
210 auto &DL = F.getParent()->getDataLayout();
212 SmallVector<ArgInfo, 8> SplitArgs;
214 for (auto &Arg : Args) {
215 ArgInfo OrigArg{VRegs[i], Arg.getType()};
216 setArgFlags(OrigArg, i + 1, DL, F);
217 splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
218 [&](ArrayRef<unsigned> Regs, ArrayRef<uint64_t> Offsets) {
219 MIRBuilder.buildSequence(VRegs[i], Regs, Offsets);
225 MIRBuilder.setInstr(*MBB.begin());
227 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
228 CCAssignFn *AssignFn =
229 TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
231 FormalArgHandler Handler(MIRBuilder, MRI);
232 if (!handleAssignments(MIRBuilder, AssignFn, SplitArgs, Handler))
235 // Move back to the end of the basic block.
236 MIRBuilder.setMBB(MBB);
241 bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
242 const MachineOperand &Callee,
243 const ArgInfo &OrigRet,
244 ArrayRef<ArgInfo> OrigArgs) const {
245 MachineFunction &MF = MIRBuilder.getMF();
246 const Function &F = *MF.getFunction();
247 MachineRegisterInfo &MRI = MF.getRegInfo();
248 auto &DL = F.getParent()->getDataLayout();
250 SmallVector<ArgInfo, 8> SplitArgs;
251 for (auto &OrigArg : OrigArgs) {
252 splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
253 [&](ArrayRef<unsigned> Regs, ArrayRef<uint64_t> Offsets) {
254 MIRBuilder.buildExtract(Regs, Offsets, OrigArg.Reg);
258 // Find out which ABI gets to decide where things go.
259 const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
260 CCAssignFn *CallAssignFn =
261 TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
263 // Create a temporarily-floating call instruction so we can add the implicit
264 // uses of arg registers.
265 auto MIB = MIRBuilder.buildInstrNoInsert(Callee.isReg() ? AArch64::BLR
267 MIB.addOperand(Callee);
269 // Tell the call which registers are clobbered.
270 auto TRI = MF.getSubtarget().getRegisterInfo();
271 MIB.addRegMask(TRI->getCallPreservedMask(MF, F.getCallingConv()));
273 // Do the actual argument marshalling.
274 SmallVector<unsigned, 8> PhysRegs;
275 OutgoingArgHandler Handler(MIRBuilder, MRI, MIB);
276 if (!handleAssignments(MIRBuilder, CallAssignFn, SplitArgs, Handler))
279 // Now we can add the actual call instruction to the correct basic block.
280 MIRBuilder.insertInstr(MIB);
282 // If Callee is a reg, since it is used by a target specific
283 // instruction, it must have a register class matching the
284 // constraint of that instruction.
286 MIB->getOperand(0).setReg(constrainOperandRegClass(
287 MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
288 *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(),
289 Callee.getReg(), 0));
291 // Finally we can copy the returned value back into its virtual-register. In
292 // symmetry with the arugments, the physical register must be an
293 // implicit-define of the call instruction.
294 CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
298 SmallVector<uint64_t, 8> RegOffsets;
299 SmallVector<unsigned, 8> SplitRegs;
300 splitToValueTypes(OrigRet, SplitArgs, DL, MRI,
301 [&](ArrayRef<unsigned> Regs, ArrayRef<uint64_t> Offsets) {
302 std::copy(Offsets.begin(), Offsets.end(),
303 std::back_inserter(RegOffsets));
304 std::copy(Regs.begin(), Regs.end(),
305 std::back_inserter(SplitRegs));
308 CallReturnHandler Handler(MIRBuilder, MRI, MIB);
309 if (!handleAssignments(MIRBuilder, RetAssignFn, SplitArgs, Handler))
312 if (!RegOffsets.empty())
313 MIRBuilder.buildSequence(OrigRet.Reg, SplitRegs, RegOffsets);