1 //===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 #include "SystemZRegisterInfo.h"
11 #include "SystemZInstrInfo.h"
12 #include "SystemZSubtarget.h"
13 #include "llvm/CodeGen/LiveIntervals.h"
14 #include "llvm/ADT/SmallSet.h"
15 #include "llvm/CodeGen/MachineInstrBuilder.h"
16 #include "llvm/CodeGen/MachineRegisterInfo.h"
17 #include "llvm/CodeGen/TargetFrameLowering.h"
18 #include "llvm/CodeGen/VirtRegMap.h"
22 #define GET_REGINFO_TARGET_DESC
23 #include "SystemZGenRegisterInfo.inc"
// Construct the target register info. SystemZ::R14D is passed to the
// TableGen'erated base class as the return-address register.
SystemZRegisterInfo::SystemZRegisterInfo()
    : SystemZGenRegisterInfo(SystemZ::R14D) {}
28 // Given that MO is a GRX32 operand, return either GR32 or GRH32 if MO
29 // somehow belongs in it. Otherwise, return GRX32.
30 static const TargetRegisterClass *getRC32(MachineOperand &MO,
31 const VirtRegMap *VRM,
32 const MachineRegisterInfo *MRI) {
33 const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());
35 if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
36 MO.getSubReg() == SystemZ::subreg_l32 ||
37 MO.getSubReg() == SystemZ::subreg_hl32)
38 return &SystemZ::GR32BitRegClass;
39 if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
40 MO.getSubReg() == SystemZ::subreg_h32 ||
41 MO.getSubReg() == SystemZ::subreg_hh32)
42 return &SystemZ::GRH32BitRegClass;
44 if (VRM && VRM->hasPhys(MO.getReg())) {
45 unsigned PhysReg = VRM->getPhys(MO.getReg());
46 if (SystemZ::GR32BitRegClass.contains(PhysReg))
47 return &SystemZ::GR32BitRegClass;
48 assert (SystemZ::GRH32BitRegClass.contains(PhysReg) &&
49 "Phys reg not in GR32 or GRH32?");
50 return &SystemZ::GRH32BitRegClass;
53 assert (RC == &SystemZ::GRX32BitRegClass);
58 SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg,
59 ArrayRef<MCPhysReg> Order,
60 SmallVectorImpl<MCPhysReg> &Hints,
61 const MachineFunction &MF,
62 const VirtRegMap *VRM,
63 const LiveRegMatrix *Matrix) const {
64 const MachineRegisterInfo *MRI = &MF.getRegInfo();
65 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
67 bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
68 VirtReg, Order, Hints, MF, VRM, Matrix);
70 if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
71 SmallVector<unsigned, 8> Worklist;
72 SmallSet<unsigned, 4> DoneRegs;
73 Worklist.push_back(VirtReg);
74 while (Worklist.size()) {
75 unsigned Reg = Worklist.pop_back_val();
76 if (!DoneRegs.insert(Reg).second)
79 for (auto &Use : MRI->use_instructions(Reg))
80 // For LOCRMux, see if the other operand is already a high or low
81 // register, and in that case give the correpsonding hints for
82 // VirtReg. LOCR instructions need both operands in either high or
84 if (Use.getOpcode() == SystemZ::LOCRMux) {
85 MachineOperand &TrueMO = Use.getOperand(1);
86 MachineOperand &FalseMO = Use.getOperand(2);
87 const TargetRegisterClass *RC =
88 TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
89 getRC32(TrueMO, VRM, MRI));
90 if (RC && RC != &SystemZ::GRX32BitRegClass) {
91 // Pass the registers of RC as hints while making sure that if
92 // any of these registers are copy hints, hint them first.
93 SmallSet<unsigned, 4> CopyHints;
94 CopyHints.insert(Hints.begin(), Hints.end());
96 for (MCPhysReg Reg : Order)
97 if (CopyHints.count(Reg) &&
98 RC->contains(Reg) && !MRI->isReserved(Reg))
100 for (MCPhysReg Reg : Order)
101 if (!CopyHints.count(Reg) &&
102 RC->contains(Reg) && !MRI->isReserved(Reg))
103 Hints.push_back(Reg);
104 // Return true to make these hints the only regs available to
105 // RA. This may mean extra spilling but since the alternative is
106 // a jump sequence expansion of the LOCRMux, it is preferred.
110 // Add the other operand of the LOCRMux to the worklist.
112 (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
113 if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
114 Worklist.push_back(OtherReg);
119 return BaseImplRetVal;
123 SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
124 const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
125 if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
126 return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_SaveList
127 : CSR_SystemZ_AllRegs_SaveList;
128 if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
129 MF->getFunction().getAttributes().hasAttrSomewhere(
130 Attribute::SwiftError))
131 return CSR_SystemZ_SwiftError_SaveList;
132 return CSR_SystemZ_SaveList;
136 SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
137 CallingConv::ID CC) const {
138 const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
139 if (CC == CallingConv::AnyReg)
140 return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_RegMask
141 : CSR_SystemZ_AllRegs_RegMask;
142 if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
143 MF.getFunction().getAttributes().hasAttrSomewhere(
144 Attribute::SwiftError))
145 return CSR_SystemZ_SwiftError_RegMask;
146 return CSR_SystemZ_RegMask;
150 SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
151 BitVector Reserved(getNumRegs());
152 const SystemZFrameLowering *TFI = getFrameLowering(MF);
154 if (TFI->hasFP(MF)) {
155 // R11D is the frame pointer. Reserve all aliases.
156 Reserved.set(SystemZ::R11D);
157 Reserved.set(SystemZ::R11L);
158 Reserved.set(SystemZ::R11H);
159 Reserved.set(SystemZ::R10Q);
162 // R15D is the stack pointer. Reserve all aliases.
163 Reserved.set(SystemZ::R15D);
164 Reserved.set(SystemZ::R15L);
165 Reserved.set(SystemZ::R15H);
166 Reserved.set(SystemZ::R14Q);
168 // A0 and A1 hold the thread pointer.
169 Reserved.set(SystemZ::A0);
170 Reserved.set(SystemZ::A1);
176 SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
177 int SPAdj, unsigned FIOperandNum,
178 RegScavenger *RS) const {
179 assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");
181 MachineBasicBlock &MBB = *MI->getParent();
182 MachineFunction &MF = *MBB.getParent();
184 static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
185 const SystemZFrameLowering *TFI = getFrameLowering(MF);
186 DebugLoc DL = MI->getDebugLoc();
188 // Decompose the frame index into a base and offset.
189 int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
191 int64_t Offset = (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr) +
192 MI->getOperand(FIOperandNum + 1).getImm());
194 // Special handling of dbg_value instructions.
195 if (MI->isDebugValue()) {
196 MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
197 MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
201 // See if the offset is in range, or if an equivalent instruction that
202 // accepts the offset exists.
203 unsigned Opcode = MI->getOpcode();
204 unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
205 if (OpcodeForOffset) {
206 if (OpcodeForOffset == SystemZ::LE &&
207 MF.getSubtarget<SystemZSubtarget>().hasVector()) {
208 // If LE is ok for offset, use LDE instead on z13.
209 OpcodeForOffset = SystemZ::LDE32;
211 MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
214 // Create an anchor point that is in range. Start at 0xffff so that
215 // can use LLILH to load the immediate.
216 int64_t OldOffset = Offset;
217 int64_t Mask = 0xffff;
219 Offset = OldOffset & Mask;
220 OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
222 assert(Mask && "One offset must be OK");
223 } while (!OpcodeForOffset);
225 unsigned ScratchReg =
226 MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
227 int64_t HighOffset = OldOffset - Offset;
229 if (MI->getDesc().TSFlags & SystemZII::HasIndex
230 && MI->getOperand(FIOperandNum + 2).getReg() == 0) {
231 // Load the offset into the scratch register and use it as an index.
232 // The scratch register then dies here.
233 TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
234 MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
235 MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
238 // Load the anchor address into a scratch register.
239 unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
241 BuildMI(MBB, MI, DL, TII->get(LAOpcode),ScratchReg)
242 .addReg(BasePtr).addImm(HighOffset).addReg(0);
244 // Load the high offset into the scratch register and use it as
246 TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
247 BuildMI(MBB, MI, DL, TII->get(SystemZ::AGR),ScratchReg)
248 .addReg(ScratchReg, RegState::Kill).addReg(BasePtr);
251 // Use the scratch register as the base. It then dies here.
252 MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
256 MI->setDesc(TII->get(OpcodeForOffset));
257 MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
260 bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
261 const TargetRegisterClass *SrcRC,
263 const TargetRegisterClass *DstRC,
265 const TargetRegisterClass *NewRC,
266 LiveIntervals &LIS) const {
267 assert (MI->isCopy() && "Only expecting COPY instructions");
269 // Coalesce anything which is not a COPY involving a subreg to/from GR128.
270 if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
271 (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
274 // Allow coalescing of a GR128 subreg COPY only if the live ranges are small
275 // and local to one MBB with not too much interferring registers. Otherwise
276 // regalloc may run out of registers.
278 unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
279 unsigned GR128Reg = MI->getOperand(WideOpNo).getReg();
280 unsigned GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
281 LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
282 LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);
284 // Check that the two virtual registers are local to MBB.
285 MachineBasicBlock *MBB = MI->getParent();
286 MachineInstr *FirstMI_GR128 =
287 LIS.getInstructionFromIndex(IntGR128.beginIndex());
288 MachineInstr *FirstMI_GRNar =
289 LIS.getInstructionFromIndex(IntGRNar.beginIndex());
290 MachineInstr *LastMI_GR128 = LIS.getInstructionFromIndex(IntGR128.endIndex());
291 MachineInstr *LastMI_GRNar = LIS.getInstructionFromIndex(IntGRNar.endIndex());
292 if ((!FirstMI_GR128 || FirstMI_GR128->getParent() != MBB) ||
293 (!FirstMI_GRNar || FirstMI_GRNar->getParent() != MBB) ||
294 (!LastMI_GR128 || LastMI_GR128->getParent() != MBB) ||
295 (!LastMI_GRNar || LastMI_GRNar->getParent() != MBB))
298 MachineBasicBlock::iterator MII = nullptr, MEE = nullptr;
307 // Check if coalescing seems safe by finding the set of clobbered physreg
308 // pairs in the region.
309 BitVector PhysClobbered(getNumRegs());
311 for (; MII != MEE; ++MII) {
312 for (const MachineOperand &MO : MII->operands())
313 if (MO.isReg() && isPhysicalRegister(MO.getReg())) {
314 for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
316 if (NewRC->contains(*SI)) {
317 PhysClobbered.set(*SI);
323 // Demand an arbitrary margin of free regs.
324 unsigned const DemandedFreeGR128 = 3;
325 if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
332 SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
333 const SystemZFrameLowering *TFI = getFrameLowering(MF);
334 return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;
337 const TargetRegisterClass *
338 SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
339 if (RC == &SystemZ::CCRRegClass)
340 return &SystemZ::GR32BitRegClass;