//===-- SIFoldOperands.cpp - Fold operands ---------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// \file
//===----------------------------------------------------------------------===//
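//
// This pass folds the source operands of mov and copy instructions into
// their uses, removing the intermediate move when the target's operand
// rules allow it. An illustrative before/after sketch (not actual pass
// output):
//
//   %vreg0 = V_MOV_B32_e32 42
//   %vreg1 = V_ADD_I32_e32 %vreg0, %vreg2
// becomes
//   %vreg1 = V_ADD_I32_e32 42, %vreg2
//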

#include "AMDGPU.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

#define DEBUG_TYPE "si-fold-operands"

using namespace llvm;

namespace {

class SIFoldOperands : public MachineFunctionPass {
public:
  static char ID;

  SIFoldOperands() : MachineFunctionPass(ID) {
    initializeSIFoldOperandsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Fold Operands";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
};
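
// A pending fold of either an immediate or a register operand into a
// specific operand of a use instruction.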
struct FoldCandidate {
  MachineInstr *UseMI;
  unsigned UseOpNo;
  MachineOperand *OpToFold;
  uint64_t ImmToFold;

  FoldCandidate(MachineInstr *MI, unsigned OpNo, MachineOperand *FoldOp) :
                UseMI(MI), UseOpNo(OpNo) {
    if (FoldOp->isImm()) {
      // A null OpToFold marks this candidate as an immediate fold.
      OpToFold = nullptr;
      ImmToFold = FoldOp->getImm();
    } else {
      assert(FoldOp->isReg());
      OpToFold = FoldOp;
    }
  }

  bool isImm() const {
    return !OpToFold;
  }
};

} // End anonymous namespace.

INITIALIZE_PASS(SIFoldOperands, DEBUG_TYPE,
                "SI Fold Operands", false, false)

char SIFoldOperands::ID = 0;

char &llvm::SIFoldOperandsID = SIFoldOperands::ID;

FunctionPass *llvm::createSIFoldOperandsPass() {
  return new SIFoldOperands();
}
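
// Only the sources of simple mov/copy instructions are safe to propagate
// into their uses.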
static bool isSafeToFold(unsigned Opcode) {
  switch (Opcode) {
  case AMDGPU::V_MOV_B32_e32:
  case AMDGPU::V_MOV_B32_e64:
  case AMDGPU::V_MOV_B64_PSEUDO:
  case AMDGPU::S_MOV_B32:
  case AMDGPU::S_MOV_B64:
  case AMDGPU::COPY:
    return true;
  default:
    return false;
  }
}
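
// Apply a fold: rewrite the recorded use operand in place, either to the
// saved immediate or to the register (and subregister) being copied.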
static bool updateOperand(FoldCandidate &Fold,
                          const TargetRegisterInfo &TRI) {
  MachineInstr *MI = Fold.UseMI;
  MachineOperand &Old = MI->getOperand(Fold.UseOpNo);
  assert(Old.isReg());

  if (Fold.isImm()) {
    Old.ChangeToImmediate(Fold.ImmToFold);
    return true;
  }

  MachineOperand *New = Fold.OpToFold;
  if (TargetRegisterInfo::isVirtualRegister(Old.getReg()) &&
      TargetRegisterInfo::isVirtualRegister(New->getReg())) {
    Old.substVirtReg(New->getReg(), New->getSubReg(), TRI);
    return true;
  }

  // FIXME: Handle physical registers.
  return false;
}
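
// Return true if MI is already the target of a pending fold, in which case
// commuting it would risk invalidating that fold.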
static bool isUseMIInFoldList(const std::vector<FoldCandidate> &FoldList,
                              const MachineInstr *MI) {
  for (const FoldCandidate &Candidate : FoldList) {
    if (Candidate.UseMI == MI)
      return true;
  }
  return false;
}
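
// Try to legalize and record a fold of OpToFold into operand OpNo of MI,
// converting v_mac_f32 to v_mad_f32 or commuting MI if that is what it takes
// to make the operand legal.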
static bool tryAddToFoldList(std::vector<FoldCandidate> &FoldList,
                             MachineInstr *MI, unsigned OpNo,
                             MachineOperand *OpToFold,
                             const SIInstrInfo *TII) {
  if (!TII->isOperandLegal(*MI, OpNo, OpToFold)) {

    // Special case for v_mac_f32_e64 if we are trying to fold into src2.
    unsigned Opc = MI->getOpcode();
    if (Opc == AMDGPU::V_MAC_F32_e64 &&
        (int)OpNo == AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2)) {
      // Check if changing this to a v_mad_f32 instruction will allow us to
      // fold the operand.
      MI->setDesc(TII->get(AMDGPU::V_MAD_F32));
      bool FoldAsMAD = tryAddToFoldList(FoldList, MI, OpNo, OpToFold, TII);
      if (FoldAsMAD) {
        MI->untieRegOperand(OpNo);
        return true;
      }
      MI->setDesc(TII->get(Opc));
    }

    // If we are already folding into another operand of MI, then
    // we can't commute the instruction, otherwise we risk making the
    // other fold illegal.
    if (isUseMIInFoldList(FoldList, MI))
      return false;

    // Operand is not legal, so try to commute the instruction to
    // see if this makes it possible to fold.
    unsigned CommuteIdx0 = TargetInstrInfo::CommuteAnyOperandIndex;
    unsigned CommuteIdx1 = TargetInstrInfo::CommuteAnyOperandIndex;
    bool CanCommute = TII->findCommutedOpIndices(*MI, CommuteIdx0, CommuteIdx1);

    if (CanCommute) {
      if (CommuteIdx0 == OpNo)
        OpNo = CommuteIdx1;
      else if (CommuteIdx1 == OpNo)
        OpNo = CommuteIdx0;
    }

    // One of operands might be an Imm operand, and OpNo may refer to it after
    // the call of commuteInstruction() below. Such situations are avoided
    // here explicitly as OpNo must be a register operand to be a candidate
    // for memory folding.
    if (CanCommute && (!MI->getOperand(CommuteIdx0).isReg() ||
                       !MI->getOperand(CommuteIdx1).isReg()))
      return false;

    if (!CanCommute ||
        !TII->commuteInstruction(*MI, false, CommuteIdx0, CommuteIdx1))
      return false;

    if (!TII->isOperandLegal(*MI, OpNo, OpToFold))
      return false;
  }

  FoldList.push_back(FoldCandidate(MI, OpNo, OpToFold));
  return true;
}
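
// Gather fold candidates for a single use of the def being folded, recursing
// through REG_SEQUENCE uses and converting foldable COPYs into movs.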
static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,
                        unsigned UseOpIdx,
                        std::vector<FoldCandidate> &FoldList,
                        SmallVectorImpl<MachineInstr *> &CopiesToReplace,
                        const SIInstrInfo *TII, const SIRegisterInfo &TRI,
                        MachineRegisterInfo &MRI) {
  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);

  // FIXME: Fold operands with subregs.
  if (UseOp.isReg() && ((UseOp.getSubReg() && OpToFold.isReg()) ||
      UseOp.isImplicit())) {
    return;
  }
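
  // For an immediate fold, materialize the value in a 64-bit APInt so a
  // 64-bit constant can be split into 32-bit halves per subregister below.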
  bool FoldingImm = OpToFold.isImm();
  APInt Imm;

  if (FoldingImm) {
    unsigned UseReg = UseOp.getReg();
    const TargetRegisterClass *UseRC
      = TargetRegisterInfo::isVirtualRegister(UseReg) ?
      MRI.getRegClass(UseReg) :
      TRI.getPhysRegClass(UseReg);

    Imm = APInt(64, OpToFold.getImm());

    const MCInstrDesc &FoldDesc = TII->get(OpToFold.getParent()->getOpcode());
    const TargetRegisterClass *FoldRC =
      TRI.getRegClass(FoldDesc.OpInfo[0].RegClass);

    // Split 64-bit constants into 32-bits for folding.
    if (FoldRC->getSize() == 8 && UseOp.getSubReg()) {
      if (UseRC->getSize() != 8)
        return;

      if (UseOp.getSubReg() == AMDGPU::sub0) {
        Imm = Imm.getLoBits(32);
      } else {
        assert(UseOp.getSubReg() == AMDGPU::sub1);
        Imm = Imm.getHiBits(32);
      }
    }

    // In order to fold immediates into copies, we need to change the
    // copy to a MOV.
    if (UseMI->getOpcode() == AMDGPU::COPY) {
      unsigned DestReg = UseMI->getOperand(0).getReg();
      const TargetRegisterClass *DestRC
        = TargetRegisterInfo::isVirtualRegister(DestReg) ?
        MRI.getRegClass(DestReg) :
        TRI.getPhysRegClass(DestReg);

      unsigned MovOp = TII->getMovOpcode(DestRC);
      if (MovOp == AMDGPU::COPY)
        return;

      UseMI->setDesc(TII->get(MovOp));
      CopiesToReplace.push_back(UseMI);
    }
  }

  // Special case for REG_SEQUENCE: We can't fold literals into
  // REG_SEQUENCE instructions, so we have to fold them into the
  // uses of REG_SEQUENCE.
  if (UseMI->getOpcode() == AMDGPU::REG_SEQUENCE) {
    unsigned RegSeqDstReg = UseMI->getOperand(0).getReg();
    unsigned RegSeqDstSubReg = UseMI->getOperand(UseOpIdx + 1).getImm();

    for (MachineRegisterInfo::use_iterator
         RSUse = MRI.use_begin(RegSeqDstReg),
         RSE = MRI.use_end(); RSUse != RSE; ++RSUse) {

      MachineInstr *RSUseMI = RSUse->getParent();
      if (RSUse->getSubReg() != RegSeqDstSubReg)
        continue;

      foldOperand(OpToFold, RSUseMI, RSUse.getOperandNo(), FoldList,
                  CopiesToReplace, TII, TRI, MRI);
    }

    return;
  }

  const MCInstrDesc &UseDesc = UseMI->getDesc();

  // Don't fold into target independent nodes. Target independent opcodes
  // don't have defined register classes.
  if (UseDesc.isVariadic() ||
      UseDesc.OpInfo[UseOpIdx].RegClass == -1)
    return;

  if (FoldingImm) {
    // FoldCandidate copies the immediate, so the address of this stack-local
    // operand does not escape the call.
    MachineOperand ImmOp = MachineOperand::CreateImm(Imm.getSExtValue());
    tryAddToFoldList(FoldList, UseMI, UseOpIdx, &ImmOp, TII);
    return;
  }

  tryAddToFoldList(FoldList, UseMI, UseOpIdx, &OpToFold, TII);

  // FIXME: We could try to change the instruction from 64-bit to 32-bit
  // to enable more folding opportunities. The shrink operands pass
  // already does this.
}
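
// Scan each block for safe-to-fold movs/copies and try to fold their source
// operand into every use of the destination register.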
bool SIFoldOperands::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(*MF.getFunction()))
    return false;

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();

  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIInstrInfo *TII = ST.getInstrInfo();
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
       BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    MachineBasicBlock::iterator I, Next;
    for (I = MBB.begin(); I != MBB.end(); I = Next) {
      Next = std::next(I);
      MachineInstr &MI = *I;

      if (!isSafeToFold(MI.getOpcode()))
        continue;

      unsigned OpSize = TII->getOpSize(MI, 1);
      MachineOperand &OpToFold = MI.getOperand(1);
      bool FoldingImm = OpToFold.isImm();

      // FIXME: We could also be folding things like FrameIndexes and
      // TargetIndexes.
      if (!FoldingImm && !OpToFold.isReg())
        continue;

      // Folding immediates with more than one use will increase program size.
      // FIXME: This will also reduce register usage, which may be better
      // in some cases. A better heuristic is needed.
      if (FoldingImm && !TII->isInlineConstant(OpToFold, OpSize) &&
          !MRI.hasOneUse(MI.getOperand(0).getReg()))
        continue;

      if (OpToFold.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(OpToFold.getReg()))
        continue;

      // Prevent folding operands backwards in the function. For example,
      // the COPY opcode must not be replaced by 1 in this example:
      //
      //    %vreg3<def> = COPY %VGPR0; VGPR_32:%vreg3
      //    ...
      //    %VGPR0<def> = V_MOV_B32_e32 1, %EXEC<imp-use>
      MachineOperand &Dst = MI.getOperand(0);
      if (Dst.isReg() &&
          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
        continue;

      // We need to mutate the operands of new mov instructions to add implicit
      // uses of EXEC, but adding them invalidates the use_iterator, so defer
      // adding them until we know we are done with the function.
      SmallVector<MachineInstr *, 4> CopiesToReplace;

      std::vector<FoldCandidate> FoldList;
      for (MachineRegisterInfo::use_iterator
           Use = MRI.use_begin(MI.getOperand(0).getReg()), E = MRI.use_end();
           Use != E; ++Use) {

        MachineInstr *UseMI = Use->getParent();

        foldOperand(OpToFold, UseMI, Use.getOperandNo(), FoldList,
                    CopiesToReplace, TII, TRI, MRI);
      }

      // Make sure we add EXEC uses to any new v_mov instructions created.
      for (MachineInstr *Copy : CopiesToReplace)
        Copy->addImplicitDefUseOperands(MF);

      for (FoldCandidate &Fold : FoldList) {
        if (updateOperand(Fold, TRI)) {
          // Clear kill flags.
          if (!Fold.isImm()) {
            assert(Fold.OpToFold && Fold.OpToFold->isReg());
            // FIXME: Probably shouldn't bother trying to fold if not an
            // SGPR. PeepholeOptimizer can eliminate redundant VGPR->VGPR
            // copies.
            MRI.clearKillFlags(Fold.OpToFold->getReg());
          }
          DEBUG(dbgs() << "Folded source from " << MI << " into OpNo " <<
                Fold.UseOpNo << " of " << *Fold.UseMI << '\n');
        }
      }
    }
  }
  return false;
}