//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MAI.getMaxInstLength();
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

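// For illustration (hypothetical target values, not from this file): with
// MaxInstLength = 4, SeparatorString = ";", and CommentString = "#",
//   "movl %eax, %ebx; addl $1, %ebx  # note\n.space 8"
// counts two ordinary instructions (2 * 4 bytes) plus 8 bytes for the
// successfully parsed .space directive, so getInlineAsmLength returns 16.
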
/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If NewDest isn't immediately after MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::commuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
  unsigned Reg1 = MI.getOperand(Idx1).getReg();
  unsigned Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // If the destination is tied to either of the commuted source registers,
  // it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  return CommutedMI;
}

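// For illustration (sketch, not from this file): commuting a two-address
// instruction whose def is tied to the first source, e.g.
//   %a = ADD %a(tied), %b
// swaps the sources and, because of the tie, rewrites the def as well:
//   %b = ADD %b(tied), %a
// The kill flag on the register that becomes the def is cleared.
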
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

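// For illustration (hypothetical indices): with CommutableOpIdx1 = 1 and
// CommutableOpIdx2 = 2, a caller passing ResultIdx1 = CommuteAnyOperandIndex
// and ResultIdx2 = 2 gets ResultIdx1 fixed to 1, while a caller passing
// ResultIdx1 = 1 and ResultIdx2 = 3 gets false, because operand 3 is not
// one of the commutable operands.
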
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  // Conservatively refuse to commute non-register operands.
  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr &MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad()) {
      if (const FixedStackPseudoSourceValue *Value =
              dyn_cast_or_null<FixedStackPseudoSourceValue>(
                  (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr &MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore()) {
      if (const FixedStackPseudoSourceValue *Value =
              dyn_cast_or_null<FixedStackPseudoSourceValue>(
                  (*o)->getPseudoValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
    }
  }
  return false;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

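// For illustration (hypothetical sizes): for a 16-byte spill slot and a
// sub-register index covering bits [32, 64), Size becomes 4 and Offset
// becomes 4 on a little-endian target; on a big-endian target Offset is
// flipped to 16 - (4 + 4) = 8 so that the same bytes of the slot are
// addressed.
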
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoop(MCInst &NopInst) const {
  llvm_unreachable("Not implemented");
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP: {
    // StackMapLiveValues are foldable.
    StartIdx = StackMapOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    StartIdx = PatchPointOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::STATEPOINT: {
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    StartIdx = StatepointOpers(&MI).getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (is_contained(Ops, i)) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.add(MO);
  }
  return NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(LoadMI.memoperands_begin(), LoadMI.memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I)
      NewMI->addMemOperand(MF, *I);
  }
  return NewMI;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.
//
// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    the pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  unsigned RegA = OpA.getReg();
  unsigned RegB = OpB.getReg();
  unsigned RegX = OpX.getReg();
  unsigned RegY = OpY.getReg();
  unsigned RegC = OpC.getReg();

  if (TargetRegisterInfo::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  unsigned NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}

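// For illustration (sketch, not from this file): with the REASSOC_AX_BY
// pattern on integer adds,
//   %b = ADD %a, %x    (Prev)
//   %c = ADD %b, %y    (Root)
// is rewritten as
//   %t = ADD %x, %y    (MIB1, defining the new virtual register NewVR)
//   %c = ADD %a, %t    (MIB2)
// so Prev's result no longer feeds Root's critical path.
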
void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  unsigned DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // rematerialized.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

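// For illustration (hypothetical sizes): on a target whose stack grows down,
// a call-frame setup instruction with a frame size of 16 yields +16 here and
// the matching call-frame destroy yields -16, so summing getSPAdjust() over
// a block tracks the running call-frame adjustment in the way the
// prologue/epilogue inserter consumes it.
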
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 1;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // An operand-by-operand itinerary lookup is required.
  return -1;
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

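// For illustration (sketch, not from this file): for
//   %d = REG_SEQUENCE %v0, sub0, %v1, sub1
// InputRegs receives (%v0, sub0) and (%v1, sub1), i.e. each input register
// paired with the sub-register index it occupies in %d.
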
bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}