//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cstddef>

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
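///
/// For example (illustrative, assuming the target's comment string is "#"
/// and MaxInstLength is 4): the string "add r0, r1\n.space 512\n# done"
/// measures 4 + 512 + 0 = 516 bytes.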
unsigned TargetInstrInfo::getInlineAsmLength(
  const char *Str,
  const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->isCall())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::commuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable = Register::isPhysicalRegister(Reg1)
                             ? MI.getOperand(Idx1).isRenamable()
                             : false;
  bool Reg2IsRenamable = Register::isPhysicalRegister(Reg2)
                             ? MI.getOperand(Idx2).isRenamable()
                             : false;
  // If destination is tied to either of the commuted source registers, then
  // it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Register::isPhysicalRegister(Reg1))
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Register::isPhysicalRegister(Reg2))
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done by the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

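// Illustrative (hypothetical) use: with the default CommuteAnyOperandIndex
// arguments the target picks the operand pair itself, e.g.
//   if (MachineInstr *Commuted = TII->commuteInstruction(MI))
//     ...; // MI was commuted in place, so Commuted == &MI here.
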
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

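// For example, with CommutableOpIdx1 = 1 and CommutableOpIdx2 = 2: a caller
// passing (CommuteAnyOperandIndex, 2) gets ResultIdx1 = 1, and a caller
// passing the fixed pair (2, 1) succeeds because it matches the commutable
// pair in the opposite order.
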
bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

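// For example (illustrative): spilling a 64-bit subregister at bit offset 0
// of a 128-bit register class gives Size = 8 and Offset = 0 on a
// little-endian target; on a big-endian target the offset is flipped to
// 16 - (0 + 8) = 8 so the range still covers the subregister's bytes.
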
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (Register::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoop(MCInst &NopInst) const {
  llvm_unreachable("Not implemented");
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP: {
    // StackMapLiveValues are foldable
    StartIdx = StackMapOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    StartIdx = PatchPointOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::STATEPOINT: {
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    StartIdx = StatepointOpers(&MI).getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold return, the meta data, and function arguments
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (is_contained(Ops, i)) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.add(MO);
  }
  return NewMI;
}

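// For example (illustrative), folding a STACKMAP live value spilled to frame
// index 2 with SpillSize = 8 and SpillOffset = 0 replaces the register
// operand with the quadruple <IndirectMemRefOp, 8, fi#2, 0>, which StackMaps
// later lowers to an indirect location entry in the stack map.
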
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I)
      NewMI->addMemOperand(MF, *I);
  }
  return NewMI;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Register::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source operand,
  // the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

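// For example, with an associative "add": for Root "C = B add Y" the sibling
// Prev may define Root's first source ("B = A add X"), or the matching def
// may feed the second source instead ("C = Y add B"); in the latter case
// Commuted is set and MI1/MI2 are swapped so MI1 denotes the candidate Prev.
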
// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.

// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (Register::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (Register::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (Register::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (Register::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (Register::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}

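// Worked example for REASSOC_AX_BY (Row 0: A = Prev op1, B = Root op1,
// X = Prev op2, Y = Root op2), using a generic associative "add":
//   Prev: B = A add X        Root: C = B add Y
// becomes
//   MIB1: NewVR = X add Y    MIB2: C = A add NewVR
// so C no longer waits on the A -> B -> C chain; X add Y can execute in
// parallel with the computation of A.
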
void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AAResults *AA) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // rematerialized.
  if (Register::isVirtualRegister(DefReg) && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Register::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

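// For example, on a target whose stack grows down, a call-frame setup
// instruction (e.g. X86's ADJCALLSTACKDOWN) with a frame size of 16 yields
// SPAdj = +16 here, and the matching call-frame destroy yields -16, both
// after TFI->alignSPAdjust has rounded the size to the stack alignment.
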
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 1;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

Optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  int64_t Offset;

  // To simplify the sub-register handling, verify that we only need to
  // consider physical registers.
  assert(MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  if (auto DestSrc = isCopyInstr(MI)) {
    Register DestReg = DestSrc->Destination->getReg();

    if (Reg == DestReg)
      return ParamLoadedValue(*DestSrc->Source, Expr);

    // Cases where super- or sub-registers need to be described should
    // be handled by the target's hook implementation.
    assert(!TRI->isSuperOrSubRegisterEq(Reg, DestReg) &&
           "TargetInstrInfo::describeLoadedValue can't describe super- or "
           "sub-regs for copy instructions");
    return None;
  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
    Register SrcReg = RegImm->Reg;
    Offset = RegImm->Imm;
    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
  } else if (MI.hasOneMemOperand()) {
    // Only describe memory which provably does not escape the function. As
    // described in llvm.org/PR43343, escaped memory may be clobbered by the
    // callee (or by another thread).
    const auto &TII = MF->getSubtarget().getInstrInfo();
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    const MachineMemOperand *MMO = MI.memoperands()[0];
    const PseudoSourceValue *PSV = MMO->getPseudoValue();

    // If the address points to "special" memory (e.g. a spill slot), it's
    // sufficient to check that it isn't aliased by any high-level IR value.
    if (!PSV || PSV->mayAlias(&MFI))
      return None;

    const MachineOperand *BaseOp;
    if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, TRI))
      return None;

    assert(MI.getNumExplicitDefs() == 1 &&
           "Can currently only handle mem instructions with a single define");

    // TODO: In what way do we need to take Reg into consideration here?

    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    Ops.push_back(dwarf::DW_OP_deref_size);
    Ops.push_back(MMO->getSize());
    Expr = DIExpression::prependOpcodes(Expr, Ops);
    return ParamLoadedValue(*BaseOp, Expr);
  }

  return None;
}

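// For example, a parameter forwarded via "Reg = load [BaseReg + 8]" with a
// 4-byte memory operand is described as the BaseReg operand plus the
// expression (DW_OP_plus_uconst 8, DW_OP_deref_size 4), provided the memory
// is function-local (e.g. a spill slot) and therefore cannot escape.
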
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without itinerary
/// lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

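// For example, for "%dst = REG_SEQUENCE %v0, sub0, %v1, sub1" this collects
// the pairs (%v0, sub0) and (%v1, sub1); undef inputs are skipped as above
// (the sub-register index names here are illustrative).
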
bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() {}