1 //===-- DelaySlotFiller.cpp - SPARC delay slot filler ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This is a simple local pass that attempts to fill delay slots with useful
// instructions. If no instructions can be moved into the delay slot, then a
// NOP is placed in the slot.
13 //===----------------------------------------------------------------------===//
16 #include "SparcSubtarget.h"
17 #include "llvm/ADT/SmallSet.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/CodeGen/MachineFunctionPass.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/CodeGen/TargetInstrInfo.h"
23 #include "llvm/CodeGen/TargetRegisterInfo.h"
24 #include "llvm/Support/CommandLine.h"
25 #include "llvm/Target/TargetMachine.h"
#define DEBUG_TYPE "delay-slot-filler"

// Reported under -stats: number of delay slots this pass filled (whether with
// a hoisted instruction or a NOP).
STATISTIC(FilledSlots, "Number of delay slots filled");

// Debugging knob: when set, the pass neither hoists instructions into delay
// slots nor combines RESTORE with the previous instruction; delay slots are
// still made explicit, but only with NOPs.
static cl::opt<bool> DisableDelaySlotFiller(
    "disable-sparc-delay-filler",
    cl::desc("Disable the Sparc delay slot filler."),
// Machine-function pass that makes SPARC delay slots explicit: for each
// instruction with a delay slot it either hoists a safe earlier instruction
// into the slot or inserts a NOP, and it performs a few peepholes (RESTORE
// combining, NOP after FCMP* on pre-V9).
struct Filler : public MachineFunctionPass {
  // Subtarget of the function currently being processed; queried for ISA
  // level (isV9), LEON errata workarounds, and instruction/register info.
  const SparcSubtarget *Subtarget;

  Filler() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "SPARC Delay Slot Filler"; }

  // Process a single basic block; returns true if it was modified.
  bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
  bool runOnMachineFunction(MachineFunction &F) override {
    Subtarget = &F.getSubtarget<SparcSubtarget>();

    // This pass invalidates liveness information when it reorders
    // instructions to fill delay slot.
    F.getRegInfo().invalidateLiveness();

    // Visit every block, accumulating whether anything changed.
    for (MachineFunction::iterator FI = F.begin(), FE = F.end();
      Changed |= runOnMachineBasicBlock(*FI);

  // Runs post-register-allocation: no virtual registers may remain.
  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);

  // Record the registers a call defines/uses (including implicit %o7) so a
  // delay-slot candidate cannot conflict with them.
  void insertCallDefsUses(MachineBasicBlock::iterator MI,
                          SmallSet<unsigned, 32>& RegDefs,
                          SmallSet<unsigned, 32>& RegUses);

  // Record the explicit defs/uses of MI into the given sets.
  void insertDefsUses(MachineBasicBlock::iterator MI,
                      SmallSet<unsigned, 32>& RegDefs,
                      SmallSet<unsigned, 32>& RegUses);

  // True if Reg (or an alias of it) is in RegSet.
  bool IsRegInSet(SmallSet<unsigned, 32>& RegSet,

  // True if moving `candidate` into the delay slot would create a hazard
  // against the recorded defs/uses or memory operations seen so far.
  bool delayHasHazard(MachineBasicBlock::iterator candidate,
                      bool &sawLoad, bool &sawStore,
                      SmallSet<unsigned, 32> &RegDefs,
                      SmallSet<unsigned, 32> &RegUses);

  // Scan backwards from `slot` for an instruction that may be hoisted into
  // its delay slot; MBB.end() means "no candidate".
  MachineBasicBlock::iterator
  findDelayInstr(MachineBasicBlock &MBB, MachineBasicBlock::iterator slot);

  // True if the call at I returns a struct, in which case an UNIMP word
  // carrying the struct size must follow the delay slot.
  bool needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize);

  // Try to fold a trivial "restore %g0, %g0, %g0" with the preceding
  // ADD/OR/SETHI into a single RESTORE.
  bool tryCombineRestoreWithPrevInst(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI);

} // end of anonymous namespace
96 /// createSparcDelaySlotFillerPass - Returns a pass that fills in delay
97 /// slots in Sparc MachineFunctions
99 FunctionPass *llvm::createSparcDelaySlotFillerPass() {
/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
/// We assume there is only one delay slot per delayed instruction.
bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
  bool Changed = false;
  Subtarget = &MBB.getParent()->getSubtarget<SparcSubtarget>();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();

  // MI is the instruction under inspection; I serves as the insertion point
  // just past MI (see the BuildMI(MBB, I, ...) calls below).
  for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
    MachineBasicBlock::iterator MI = I;

    // If MI is restore, try combining it with previous inst.
    if (!DisableDelaySlotFiller &&
        (MI->getOpcode() == SP::RESTORErr
         || MI->getOpcode() == SP::RESTOREri)) {
      Changed |= tryCombineRestoreWithPrevInst(MBB, MI);

    // TODO: If we ever want to support v7, this needs to be extended
    // to cover all floating point operations.
    if (!Subtarget->isV9() &&
        (MI->getOpcode() == SP::FCMPS || MI->getOpcode() == SP::FCMPD
         || MI->getOpcode() == SP::FCMPQ)) {
      // Pre-V9 FCMP needs a NOP between the compare and the branch.
      BuildMI(MBB, I, MI->getDebugLoc(), TII->get(SP::NOP));

    // If MI has no delay slot, skip.
    if (!MI->hasDelaySlot())

    // D is the candidate to move into the slot; MBB.end() means "none found".
    MachineBasicBlock::iterator D = MBB.end();

    if (!DisableDelaySlotFiller)
      D = findDelayInstr(MBB, MI);

      // No usable candidate: fill the slot with a NOP.
      BuildMI(MBB, I, MI->getDebugLoc(), TII->get(SP::NOP));

      // Move the chosen instruction D to directly follow MI.
      MBB.splice(I, &MBB, D);

    unsigned structSize = 0;
    if (needsUnimp(MI, structSize)) {
      MachineBasicBlock::iterator J = MI;
      ++J; // skip the delay filler.
      assert (J != MBB.end() && "MI needs a delay instruction.");
      // Struct-returning call: the UNIMP word after the delay slot encodes
      // the size of the returned struct.
      BuildMI(MBB, ++J, MI->getDebugLoc(),
              TII->get(SP::UNIMP)).addImm(structSize);
      // Bundle the delay filler and unimp with the instruction.
      MIBundleBuilder(MBB, MachineBasicBlock::iterator(MI), J);

      // Bundle just the delay filler with the instruction.
      MIBundleBuilder(MBB, MachineBasicBlock::iterator(MI), I);
// findDelayInstr - Scan backwards from `slot` for an instruction that can be
// legally and safely moved into its delay slot; MBB.end() means none found.
MachineBasicBlock::iterator
Filler::findDelayInstr(MachineBasicBlock &MBB,
                       MachineBasicBlock::iterator slot)
  // Registers defined/used between the candidate position and the slot; a
  // candidate must not conflict with either set (see delayHasHazard).
  SmallSet<unsigned, 32> RegDefs;
  SmallSet<unsigned, 32> RegUses;
  bool sawLoad = false;
  bool sawStore = false;

  // Nothing precedes the slot instruction, so nothing can be hoisted.
  if (slot == MBB.begin())

  // RET and TLS_CALL keep their slots as emitted (TLS call sequences are
  // matched literally by the linker).
  if (slot->getOpcode() == SP::RET || slot->getOpcode() == SP::TLS_CALL)

  if (slot->getOpcode() == SP::RETL) {
    MachineBasicBlock::iterator J = slot;

    if (J->getOpcode() == SP::RESTORErr
        || J->getOpcode() == SP::RESTOREri) {
      // change retl to ret.
      slot->setDesc(Subtarget->getInstrInfo()->get(SP::RET));

  // Call's delay filler can def some of call's uses.
    insertCallDefsUses(slot, RegDefs, RegUses);
    insertDefsUses(slot, RegDefs, RegUses);

  // Walk backwards from the slot looking for a hazard-free candidate.
  MachineBasicBlock::iterator I = slot;

    done = (I == MBB.begin());

    // Debug values are transparent to the search.
    if (I->isDebugValue())

    // Instructions with unknown side effects, inline asm, labels, their own
    // delay slots, or existing bundles stop the search cold.
    if (I->hasUnmodeledSideEffects() || I->isInlineAsm() || I->isPosition() ||
        I->hasDelaySlot() || I->isBundledWithSucc())

    if (delayHasHazard(I, sawLoad, sawStore, RegDefs, RegUses)) {
      // Hazardous candidate: fold its defs/uses into the sets and keep
      // scanning past it.
      insertDefsUses(I, RegDefs, RegUses);
// delayHasHazard - Returns true if `candidate` cannot be placed in the delay
// slot, given the memory ops and register defs/uses seen between the
// candidate's position and the slot.
bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
                            SmallSet<unsigned, 32> &RegDefs,
                            SmallSet<unsigned, 32> &RegUses)
  // IMPLICIT_DEF and KILL markers carry no machine code; never hoist them.
  if (candidate->isImplicitDef() || candidate->isKill())

  // Memory ordering: loads/stores interact with the sawLoad/sawStore flags.
  if (candidate->mayLoad()) {

  if (candidate->mayStore()) {

  // Check each register operand of the candidate against the recorded sets.
  for (unsigned i = 0, e = candidate->getNumOperands(); i!= e; ++i) {
    const MachineOperand &MO = candidate->getOperand(i);

    unsigned Reg = MO.getReg();

      // check whether Reg is defined or used before delay slot.
      if (IsRegInSet(RegDefs, Reg) || IsRegInSet(RegUses, Reg))

      // check whether Reg is defined before delay slot.
      if (IsRegInSet(RegDefs, Reg))

  unsigned Opcode = candidate->getOpcode();
  // LD and LDD may have NOPs inserted afterwards in the case of some LEON
  // processors, so we can't use the delay slot if this feature is switched-on.
  if (Subtarget->insertNOPLoad()
      Opcode >= SP::LDDArr && Opcode <= SP::LDrr)

  // Same as above for FDIV and FSQRT on some LEON processors.
  if (Subtarget->fixAllFDIVSQRT()
      Opcode >= SP::FDIVD && Opcode <= SP::FSQRTD)
// insertCallDefsUses - Record the registers defined and used by the call MI
// so that the delay-slot candidate search can avoid conflicting with them.
void Filler::insertCallDefsUses(MachineBasicBlock::iterator MI,
                                SmallSet<unsigned, 32>& RegDefs,
                                SmallSet<unsigned, 32>& RegUses)
  // Call defines o7, which is visible to the instruction in delay slot.
  RegDefs.insert(SP::O7);

  switch(MI->getOpcode()) {
  default: llvm_unreachable("Unknown opcode.");
  case SP::CALL: break;

    // Indirect call: operand 0 is the callee address register, a use.
    assert(MI->getNumOperands() >= 2);
    const MachineOperand &Reg = MI->getOperand(0);
    assert(Reg.isReg() && "CALL first operand is not a register.");
    assert(Reg.isUse() && "CALL first operand is not a use.");
    RegUses.insert(Reg.getReg());

    // Operand 1 is either an immediate/global (nothing to record) or a
    // second register use.
    const MachineOperand &Operand1 = MI->getOperand(1);
    if (Operand1.isImm() || Operand1.isGlobal())
    assert(Operand1.isReg() && "CALLrr second operand is not a register.");
    assert(Operand1.isUse() && "CALLrr second operand is not a use.");
    RegUses.insert(Operand1.getReg());
// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
                            SmallSet<unsigned, 32>& RegDefs,
                            SmallSet<unsigned, 32>& RegUses)
  // Classify every register operand of MI as a def or a use.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);

    unsigned Reg = MO.getReg();

    // Implicit register uses of retl are return values and
    // retl does not use them.
    if (MO.isImplicit() && MI->getOpcode() == SP::RETL)
343 // returns true if the Reg or its alias is in the RegSet.
344 bool Filler::IsRegInSet(SmallSet<unsigned, 32>& RegSet, unsigned Reg)
346 // Check Reg and all aliased Registers.
347 for (MCRegAliasIterator AI(Reg, Subtarget->getRegisterInfo(), true);
349 if (RegSet.count(*AI))
// needsUnimp - For a struct-returning call, SPARC requires an UNIMP word
// after the delay slot encoding the returned struct's size. Returns true and
// sets StructSize when I is such a call.
bool Filler::needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize)
  // Index of the operand that (if an immediate) holds the struct size; it
  // follows the callee operand(s), so its position varies by call form.
  unsigned structSizeOpNum = 0;
  switch (I->getOpcode()) {
  default: llvm_unreachable("Unknown call opcode.");
  case SP::CALL: structSizeOpNum = 1; break;
  case SP::CALLri: structSizeOpNum = 2; break;
  case SP::TLS_CALL: return false;

  const MachineOperand &MO = I->getOperand(structSizeOpNum);

  StructSize = MO.getImm();
// combineRestoreADD - Fold an ADD that writes an %i register into the
// following trivial RESTORE, turning the ADD itself into the RESTORE.
static bool combineRestoreADD(MachineBasicBlock::iterator RestoreMI,
                              MachineBasicBlock::iterator AddMI,
                              const TargetInstrInfo *TII)
  // Before: add <op0>, <op1>, %i[0-7]
  //         restore %g0, %g0, %i[0-7]
  // After : restore <op0>, <op1>, %o[0-7]

  // Only profitable/legal when the ADD targets an in-register: after the
  // register window rotates, %i[n] of the callee is %o[n] of the caller.
  unsigned reg = AddMI->getOperand(0).getReg();
  if (reg < SP::I0 || reg > SP::I7)

  // The trivial RESTORE is subsumed; delete it.
  RestoreMI->eraseFromParent();

  // Change ADD to RESTORE.
  AddMI->setDesc(TII->get((AddMI->getOpcode() == SP::ADDrr)

  // Map the destination register.
  AddMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);
// combineRestoreOR - Fold an OR used as a register copy (one operand is
// %g0/zero) into the following trivial RESTORE.
static bool combineRestoreOR(MachineBasicBlock::iterator RestoreMI,
                             MachineBasicBlock::iterator OrMI,
                             const TargetInstrInfo *TII)
  // Before: or <op0>, <op1>, %i[0-7]
  //         restore %g0, %g0, %i[0-7]
  //         (and <op0> or <op1> is zero)
  // After : restore <op0>, <op1>, %o[0-7]

  // Destination must be an in-register so the window rotation maps it to
  // the equivalent out-register.
  unsigned reg = OrMI->getOperand(0).getReg();
  if (reg < SP::I0 || reg > SP::I7)

  // check whether it is a copy: for ORrr, one source must be %g0.
  if (OrMI->getOpcode() == SP::ORrr
      && OrMI->getOperand(1).getReg() != SP::G0
      && OrMI->getOperand(2).getReg() != SP::G0)

  // For ORri, it is a copy only if the register source is %g0 or the
  // immediate is zero.
  if (OrMI->getOpcode() == SP::ORri
      && OrMI->getOperand(1).getReg() != SP::G0
      && (!OrMI->getOperand(2).isImm() || OrMI->getOperand(2).getImm() != 0))

  // The trivial RESTORE is subsumed; delete it.
  RestoreMI->eraseFromParent();

  // Change OR to RESTORE.
  OrMI->setDesc(TII->get((OrMI->getOpcode() == SP::ORrr)

  // Map the destination register.
  OrMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);
// combineRestoreSETHIi - Fold a small SETHI into the following trivial
// RESTORE by rewriting the RESTORE's immediate (and deleting the SETHI).
static bool combineRestoreSETHIi(MachineBasicBlock::iterator RestoreMI,
                                 MachineBasicBlock::iterator SetHiMI,
                                 const TargetInstrInfo *TII)
  // Before: sethi imm3, %i[0-7]
  //         restore %g0, %g0, %g0
  // After : restore %g0, (imm3<<10), %o[0-7]

  // Destination must be an in-register so it maps to an out-register after
  // the window rotation.
  unsigned reg = SetHiMI->getOperand(0).getReg();
  if (reg < SP::I0 || reg > SP::I7)

  if (!SetHiMI->getOperand(1).isImm())

  int64_t imm = SetHiMI->getOperand(1).getImm();

  // Is it a 3 bit immediate? Only then does (imm << 10) still fit in
  // RESTORE's signed 13-bit simm13 field.

  // Make it a 13 bit immediate.
  imm = (imm << 10) & 0x1FFF;

  assert(RestoreMI->getOpcode() == SP::RESTORErr);

  // Rewrite the RESTORE in place: ri form, out-register destination,
  // %g0 source, shifted immediate.
  RestoreMI->setDesc(TII->get(SP::RESTOREri));

  RestoreMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);
  RestoreMI->getOperand(1).setReg(SP::G0);
  RestoreMI->getOperand(2).ChangeToImmediate(imm);

  // Erase the original SETHI.
  SetHiMI->eraseFromParent();
// tryCombineRestoreWithPrevInst - Given a trivial "restore %g0, %g0, %g0" at
// MBBI, attempt to merge it with the immediately preceding ADD/OR/SETHI.
// Returns true if a combine happened.
bool Filler::tryCombineRestoreWithPrevInst(MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator MBBI)
  // No previous instruction.
  if (MBBI == MBB.begin())

  // assert that MBBI is a "restore %g0, %g0, %g0".
  assert(MBBI->getOpcode() == SP::RESTORErr
         && MBBI->getOperand(0).getReg() == SP::G0
         && MBBI->getOperand(1).getReg() == SP::G0
         && MBBI->getOperand(2).getReg() == SP::G0);

  MachineBasicBlock::iterator PrevInst = std::prev(MBBI);

  // It cannot be combined with a bundled instruction.
  if (PrevInst->isBundledWithSucc())

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();

  // Dispatch on the previous instruction's opcode; each helper performs the
  // legality checks and the rewrite itself.
  switch (PrevInst->getOpcode()) {
  case SP::ADDri: return combineRestoreADD(MBBI, PrevInst, TII); break;
  case SP::ORri: return combineRestoreOR(MBBI, PrevInst, TII); break;
  case SP::SETHIi: return combineRestoreSETHIi(MBBI, PrevInst, TII); break;
  // It cannot combine with the previous instruction.