1 //===-- HexagonInstrInfo.cpp - Hexagon Instruction Information ------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the Hexagon implementation of the TargetInstrInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "HexagonInstrInfo.h"
16 #include "HexagonRegisterInfo.h"
17 #include "HexagonSubtarget.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/DFAPacketizer.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineMemOperand.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/PseudoSourceValue.h"
26 #include "llvm/MC/MCAsmInfo.h"
27 #include "llvm/Support/CommandLine.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/MathExtras.h"
30 #include "llvm/Support/raw_ostream.h"
35 #define DEBUG_TYPE "hexagon-instrinfo"
37 #define GET_INSTRINFO_CTOR_DTOR
38 #define GET_INSTRMAP_INFO
39 #include "HexagonGenInstrInfo.inc"
40 #include "HexagonGenDFAPacketizer.inc"
// Command-line knobs for Hexagon instruction-info heuristics.
// NOTE(review): ScheduleInlineAsm is the only non-static flag here --
// presumably referenced from another Hexagon source file; confirm before
// internalizing it.
cl::opt<bool> ScheduleInlineAsm("hexagon-sched-inline-asm", cl::Hidden,
  cl::init(false), cl::desc("Do not consider inline-asm a scheduling/"
  "packetization boundary."));

static cl::opt<bool> EnableBranchPrediction("hexagon-enable-branch-prediction",
  cl::Hidden, cl::init(true), cl::desc("Enable branch prediction"));

// When set, skips the schedule adjustment made for new-value stores.
static cl::opt<bool> DisableNVSchedule("disable-hexagon-nv-schedule",
  cl::Hidden, cl::ZeroOrMore, cl::init(false),
  cl::desc("Disable schedule adjustment for new value stores."));

static cl::opt<bool> EnableTimingClassLatency(
  "enable-timing-class-latency", cl::Hidden, cl::init(false),
  cl::desc("Enable timing class latency"));

// "vec alu"/"vec acc" forwarding flags -- both default to on.
static cl::opt<bool> EnableALUForwarding(
  "enable-alu-forwarding", cl::Hidden, cl::init(true),
  cl::desc("Enable vec alu forwarding"));

static cl::opt<bool> EnableACCForwarding(
  "enable-acc-forwarding", cl::Hidden, cl::init(true),
  cl::desc("Enable vec acc forwarding"));

static cl::opt<bool> BranchRelaxAsmLarge("branch-relax-asm-large",
  cl::init(true), cl::Hidden, cl::ZeroOrMore, cl::desc("branch relax asm"));
/// Constants for Hexagon instructions.
// Signed-immediate offset ranges (OFFSET_*) and post-increment ranges
// (AUTOINC_*) per access size; the _128B variants are for 128-byte HVX mode.
// NOTE(review): none of these are referenced in this portion of the file --
// presumably used by offset-validity checks further down; confirm.
const int Hexagon_MEMV_OFFSET_MAX_128B = 896;   // #s4: -8*128...7*128
const int Hexagon_MEMV_OFFSET_MIN_128B = -1024; // #s4
const int Hexagon_MEMV_OFFSET_MAX = 448;  // #s4: -8*64...7*64
const int Hexagon_MEMV_OFFSET_MIN = -512; // #s4
const int Hexagon_MEMW_OFFSET_MAX = 4095;
const int Hexagon_MEMW_OFFSET_MIN = -4096;
const int Hexagon_MEMD_OFFSET_MAX = 8191;
const int Hexagon_MEMD_OFFSET_MIN = -8192;
const int Hexagon_MEMH_OFFSET_MAX = 2047;
const int Hexagon_MEMH_OFFSET_MIN = -2048;
const int Hexagon_MEMB_OFFSET_MAX = 1023;
const int Hexagon_MEMB_OFFSET_MIN = -1024;
const int Hexagon_ADDI_OFFSET_MAX = 32767;
const int Hexagon_ADDI_OFFSET_MIN = -32768;
const int Hexagon_MEMD_AUTOINC_MAX = 56;
const int Hexagon_MEMD_AUTOINC_MIN = -64;
const int Hexagon_MEMW_AUTOINC_MAX = 28;
const int Hexagon_MEMW_AUTOINC_MIN = -32;
const int Hexagon_MEMH_AUTOINC_MAX = 14;
const int Hexagon_MEMH_AUTOINC_MIN = -16;
const int Hexagon_MEMB_AUTOINC_MAX = 7;
const int Hexagon_MEMB_AUTOINC_MIN = -8;
const int Hexagon_MEMV_AUTOINC_MAX = 192;      // #s3
const int Hexagon_MEMV_AUTOINC_MIN = -256;     // #s3
const int Hexagon_MEMV_AUTOINC_MAX_128B = 384; // #s3
const int Hexagon_MEMV_AUTOINC_MIN_128B = -512; // #s3
// Pin the vtable to this file.
// Out-of-line definition of the virtual anchor so the vtable and RTTI are
// emitted in this translation unit only.
void HexagonInstrInfo::anchor() {}
// Construct the target instruction info, registering the call-frame
// setup/destroy pseudo opcodes with the generated base class.
HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
    : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
// Returns true if Reg is one of the integer registers encodable in a
// sub-instruction (duplex slot): R0-R7 or R16-R23.
static bool isIntRegForSubInst(unsigned Reg) {
  return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) ||
         (Reg >= Hexagon::R16 && Reg <= Hexagon::R23);
// Returns true if the double register's low and high halves are both
// sub-instruction-encodable integer registers.
static bool isDblRegForSubInst(unsigned Reg, const HexagonRegisterInfo &HRI) {
  return isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::subreg_loreg)) &&
         isIntRegForSubInst(HRI.getSubReg(Reg, Hexagon::subreg_hireg));
/// Calculate number of instructions excluding the debug instructions.
// Walks [MIB, MIE) and counts only instructions for which isDebugValue()
// is false.
static unsigned nonDbgMICount(MachineBasicBlock::const_instr_iterator MIB,
                      MachineBasicBlock::const_instr_iterator MIE) {
  for (; MIB != MIE; ++MIB) {
    if (!MIB->isDebugValue())
/// Find the hardware loop instruction used to set-up the specified loop.
/// On Hexagon, we have two instructions used to set-up the hardware loop
/// (LOOP0, LOOP1) with corresponding endloop (ENDLOOP0, ENDLOOP1) instructions
/// to indicate the end of a loop.
// Searches predecessor blocks (recursively, tracking Visited to avoid
// cycles) for the J2_loopNi/J2_loopNr matching EndLoopOp.
static MachineInstr *findLoopInstr(MachineBasicBlock *BB, int EndLoopOp,
      SmallPtrSet<MachineBasicBlock *, 8> &Visited) {
  // Select the immediate/register forms of the LOOP matching this ENDLOOP.
  if (EndLoopOp == Hexagon::ENDLOOP0) {
    LOOPi = Hexagon::J2_loop0i;
    LOOPr = Hexagon::J2_loop0r;
  } else { // EndLoopOp == Hexagon::ENDLOOP1
    LOOPi = Hexagon::J2_loop1i;
    LOOPr = Hexagon::J2_loop1r;

  // The loop set-up instruction will be in a predecessor block
  for (MachineBasicBlock::pred_iterator PB = BB->pred_begin(),
       PE = BB->pred_end(); PB != PE; ++PB) {
    // If this has already been visited, skip it.
    if (!Visited.insert(*PB).second)

    // Scan the predecessor bottom-up so the nearest set-up wins.
    for (MachineBasicBlock::reverse_instr_iterator I = (*PB)->instr_rbegin(),
         E = (*PB)->instr_rend(); I != E; ++I) {
      int Opc = I->getOpcode();
      if (Opc == LOOPi || Opc == LOOPr)
      // We've reached a different loop, which means the loop0 has been removed.
      if (Opc == EndLoopOp)
    // Check the predecessors for the LOOP instruction.
    MachineInstr *loop = findLoopInstr(*PB, EndLoopOp, Visited);
/// Gather register def/uses from MI.
/// This treats possible (predicated) defs as actually happening ones
/// (conservatively).
// Appends used register numbers to Uses and defined ones to Defs.
static inline void parseOperands(const MachineInstr *MI,
      SmallVector<unsigned, 4> &Defs, SmallVector<unsigned, 8> &Uses) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);

    unsigned Reg = MO.getReg();

      Uses.push_back(MO.getReg());

      Defs.push_back(MO.getReg());
// Position dependent, so check twice for swap.
// Returns true when sub-instruction group Ga (slot 1) can be paired with
// group Gb (slot 0) in a duplex. The relation is not symmetric, hence the
// caller checks both orders.
static bool isDuplexPairMatch(unsigned Ga, unsigned Gb) {
  case HexagonII::HSIG_None:
  case HexagonII::HSIG_L1:
    return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_A);
  case HexagonII::HSIG_L2:
    return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
            Gb == HexagonII::HSIG_A);
  case HexagonII::HSIG_S1:
    return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
            Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_A);
  case HexagonII::HSIG_S2:
    return (Gb == HexagonII::HSIG_L1 || Gb == HexagonII::HSIG_L2 ||
            Gb == HexagonII::HSIG_S1 || Gb == HexagonII::HSIG_S2 ||
            Gb == HexagonII::HSIG_A);
  case HexagonII::HSIG_A:
    return (Gb == HexagonII::HSIG_A);
  case HexagonII::HSIG_Compound:
    return (Gb == HexagonII::HSIG_Compound);
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  switch (MI.getOpcode()) {
    // Unpredicated loads: operand 0 = dest, 1 = frame index, 2 = offset.
    case Hexagon::L2_loadrb_io:
    case Hexagon::L2_loadrub_io:
    case Hexagon::L2_loadrh_io:
    case Hexagon::L2_loadruh_io:
    case Hexagon::L2_loadri_io:
    case Hexagon::L2_loadrd_io:
    case Hexagon::V6_vL32b_ai:
    case Hexagon::V6_vL32b_ai_128B:
    case Hexagon::V6_vL32Ub_ai:
    case Hexagon::V6_vL32Ub_ai_128B:
    case Hexagon::LDriw_pred:
    case Hexagon::LDriw_mod:
    case Hexagon::LDriq_pred_V6:
    case Hexagon::LDriq_pred_vec_V6:
    case Hexagon::LDriv_pseudo_V6:
    case Hexagon::LDrivv_pseudo_V6:
    case Hexagon::LDriq_pred_V6_128B:
    case Hexagon::LDriq_pred_vec_V6_128B:
    case Hexagon::LDriv_pseudo_V6_128B:
    case Hexagon::LDrivv_pseudo_V6_128B: {
      // NOTE(review): OpFI/OpOff are copied by value here, while the store
      // counterpart binds const references -- harmless but inconsistent.
      const MachineOperand OpFI = MI.getOperand(1);
      const MachineOperand OpOff = MI.getOperand(2);
      // Only a zero offset qualifies as a direct stack-slot load.
      if (!OpOff.isImm() || OpOff.getImm() != 0)
      FrameIndex = OpFI.getIndex();
      return MI.getOperand(0).getReg();

    // Predicated loads: operand 2 = frame index, 3 = offset.
    case Hexagon::L2_ploadrbt_io:
    case Hexagon::L2_ploadrbf_io:
    case Hexagon::L2_ploadrubt_io:
    case Hexagon::L2_ploadrubf_io:
    case Hexagon::L2_ploadrht_io:
    case Hexagon::L2_ploadrhf_io:
    case Hexagon::L2_ploadruht_io:
    case Hexagon::L2_ploadruhf_io:
    case Hexagon::L2_ploadrit_io:
    case Hexagon::L2_ploadrif_io:
    case Hexagon::L2_ploadrdt_io:
    case Hexagon::L2_ploadrdf_io: {
      const MachineOperand OpFI = MI.getOperand(2);
      const MachineOperand OpOff = MI.getOperand(3);
      if (!OpOff.isImm() || OpOff.getImm() != 0)
      FrameIndex = OpFI.getIndex();
      return MI.getOperand(0).getReg();
/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the stored stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  switch (MI.getOpcode()) {
    // Unpredicated stores: operand 0 = frame index, 1 = offset, 2 = source.
    case Hexagon::S2_storerb_io:
    case Hexagon::S2_storerh_io:
    case Hexagon::S2_storeri_io:
    case Hexagon::S2_storerd_io:
    case Hexagon::V6_vS32b_ai:
    case Hexagon::V6_vS32b_ai_128B:
    case Hexagon::V6_vS32Ub_ai:
    case Hexagon::V6_vS32Ub_ai_128B:
    case Hexagon::STriw_pred:
    case Hexagon::STriw_mod:
    case Hexagon::STriq_pred_V6:
    case Hexagon::STriq_pred_vec_V6:
    case Hexagon::STriv_pseudo_V6:
    case Hexagon::STrivv_pseudo_V6:
    case Hexagon::STriq_pred_V6_128B:
    case Hexagon::STriq_pred_vec_V6_128B:
    case Hexagon::STriv_pseudo_V6_128B:
    case Hexagon::STrivv_pseudo_V6_128B: {
      const MachineOperand &OpFI = MI.getOperand(0);
      const MachineOperand &OpOff = MI.getOperand(1);
      // Only a zero offset qualifies as a direct stack-slot store.
      if (!OpOff.isImm() || OpOff.getImm() != 0)
      FrameIndex = OpFI.getIndex();
      return MI.getOperand(2).getReg();

    // Predicated stores: operand 1 = frame index, 2 = offset, 3 = source.
    case Hexagon::S2_pstorerbt_io:
    case Hexagon::S2_pstorerbf_io:
    case Hexagon::S2_pstorerht_io:
    case Hexagon::S2_pstorerhf_io:
    case Hexagon::S2_pstorerit_io:
    case Hexagon::S2_pstorerif_io:
    case Hexagon::S2_pstorerdt_io:
    case Hexagon::S2_pstorerdf_io: {
      const MachineOperand &OpFI = MI.getOperand(1);
      const MachineOperand &OpOff = MI.getOperand(2);
      if (!OpOff.isImm() || OpOff.getImm() != 0)
      FrameIndex = OpFI.getIndex();
      return MI.getOperand(3).getReg();
/// This function can analyze one/two way branching only and should (mostly) be
/// called by target independent side.
/// First entry is always the opcode of the branching instruction, except when
/// the Cond vector is supposed to be empty, e.g., when AnalyzeBranch fails, a
/// BB with only unconditional jump. Subsequent entries depend upon the opcode,
/// e.g. Jump_c p will have
/// Cond[0] = ENDLOOP
/// Cond[0] = Hexagon::CMPEQri_f_Jumpnv_t_V4 -- specific opcode
// Returns false on success (TBB/FBB/Cond filled in), true if the terminator
// sequence cannot be analyzed.
bool HexagonInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::instr_iterator I = MBB.instr_end();
  if (I == MBB.instr_begin())

  // A basic block may look like this:
  // It has two succs but does not have a terminator
  // Don't know how to handle it.
  // Don't analyze EH branches.
  } while (I != MBB.instr_begin());

  // Skip over trailing debug values when looking for the last terminator.
  while (I->isDebugValue()) {
    if (I == MBB.instr_begin())

  bool JumpToBlock = I->getOpcode() == Hexagon::J2_jump &&
                     I->getOperand(0).isMBB();
  // Delete the J2_jump if it's equivalent to a fall-through.
  if (AllowModify && JumpToBlock &&
      MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
    DEBUG(dbgs()<< "\nErasing the jump to successor block\n";);
    I->eraseFromParent();
    if (I == MBB.instr_begin())
  if (!isUnpredicatedTerminator(*I))

  // Get the last instruction in the block.
  MachineInstr *LastInst = &*I;
  MachineInstr *SecondLastInst = nullptr;
  // Find one more terminator if present.
  if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
      SecondLastInst = &*I;
    // This is a third branch.
    if (I == MBB.instr_begin())

  int LastOpcode = LastInst->getOpcode();
  int SecLastOpcode = SecondLastInst ? SecondLastInst->getOpcode() : 0;
  // If the branch target is not a basic block, it could be a tail call.
  // (It is, if the target is a function.)
  if (LastOpcode == Hexagon::J2_jump && !LastInst->getOperand(0).isMBB())
  if (SecLastOpcode == Hexagon::J2_jump &&
      !SecondLastInst->getOperand(0).isMBB())

  bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
  bool LastOpcodeHasNVJump = isNewValueJump(LastInst);

  if (LastOpcodeHasJMP_c && !LastInst->getOperand(1).isMBB())

  // If there is only one terminator instruction, process it.
  if (LastInst && !SecondLastInst) {
    if (LastOpcode == Hexagon::J2_jump) {
      TBB = LastInst->getOperand(0).getMBB();
    if (isEndLoopN(LastOpcode)) {
      // Hardware-loop end: Cond = [opcode, loop-target MBB].
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
      Cond.push_back(LastInst->getOperand(0));
    if (LastOpcodeHasJMP_c) {
      // Conditional jump: Cond = [opcode, predicate operand].
      TBB = LastInst->getOperand(1).getMBB();
      Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
      Cond.push_back(LastInst->getOperand(0));
    // Only supporting rr/ri versions of new-value jumps.
    if (LastOpcodeHasNVJump && (LastInst->getNumExplicitOperands() == 3)) {
      // New-value jump: Cond = [opcode, src1, src2].
      TBB = LastInst->getOperand(2).getMBB();
      Cond.push_back(MachineOperand::CreateImm(LastInst->getOpcode()));
      Cond.push_back(LastInst->getOperand(0));
      Cond.push_back(LastInst->getOperand(1));
    DEBUG(dbgs() << "\nCant analyze BB#" << MBB.getNumber()
                 << " with one jump\n";);
    // Otherwise, don't know what this is.

  // Two-terminator cases: conditional (or ENDLOOP/NV jump) followed by an
  // unconditional J2_jump.
  bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
  bool SecLastOpcodeHasNVJump = isNewValueJump(SecondLastInst);
  if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
    if (!SecondLastInst->getOperand(1).isMBB())
    TBB =  SecondLastInst->getOperand(1).getMBB();
    Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();

  // Only supporting rr/ri versions of new-value jumps.
  if (SecLastOpcodeHasNVJump &&
      (SecondLastInst->getNumExplicitOperands() == 3) &&
      (LastOpcode == Hexagon::J2_jump)) {
    TBB = SecondLastInst->getOperand(2).getMBB();
    Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
    Cond.push_back(SecondLastInst->getOperand(0));
    Cond.push_back(SecondLastInst->getOperand(1));
    FBB = LastInst->getOperand(0).getMBB();

  // If the block ends with two Hexagon:JMPs, handle it. The second one is not
  // executed, so remove it.
  if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst->getIterator();
      I->eraseFromParent();

  // If the block ends with an ENDLOOP, and J2_jump, handle it.
  if (isEndLoopN(SecLastOpcode) && LastOpcode == Hexagon::J2_jump) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(SecondLastInst->getOpcode()));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
  DEBUG(dbgs() << "\nCant analyze BB#" << MBB.getNumber()
               << " with two jumps";);
  // Otherwise, can't handle this.
// Remove the branch instructions at the end of MBB, returning the number
// of instructions removed. Walks backwards so only trailing branches go.
unsigned HexagonInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  DEBUG(dbgs() << "\nRemoving branches out of BB#" << MBB.getNumber());
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    if (I->isDebugValue())
    // Only removing branches from end of MBB.
    // An unconditional branch after another removed branch means the
    // unconditional one was not last -- the block is malformed.
    if (Count && (I->getOpcode() == Hexagon::J2_jump))
      llvm_unreachable("Malformed basic block: unconditional branch not last");
    MBB.erase(&MBB.back());
// Insert branch code at the end of MBB toward TBB (and optionally FBB),
// using the condition encoded in Cond by analyzeBranch. Returns the number
// of instructions inserted.
unsigned HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL) const {
  unsigned BOpc   = Hexagon::J2_jump;  // unconditional
  unsigned BccOpc = Hexagon::J2_jumpt; // default conditional
  assert(validateBranchCond(Cond) && "Invalid branching condition");
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");

  // Check if ReverseBranchCondition has asked to reverse this branch
  // If we want to reverse the branch an odd number of times, we want
  // Cond[0] carries the concrete (possibly reversed) branch opcode.
  if (!Cond.empty() && Cond[0].isImm())
    BccOpc = Cond[0].getImm();

    // Due to a bug in TailMerging/CFG Optimization, we need to add a
    // special case handling of a predicated jump followed by an
    // unconditional jump. If not, Tail Merging and CFG Optimization go
    // into an infinite loop.
    MachineBasicBlock *NewTBB, *NewFBB;
    SmallVector<MachineOperand, 4> Cond;
    auto Term = MBB.getFirstTerminator();
    if (Term != MBB.end() && isPredicated(*Term) &&
        !analyzeBranch(MBB, NewTBB, NewFBB, Cond, false)) {
      MachineBasicBlock *NextBB = &*++MBB.getIterator();
      if (NewTBB == NextBB) {
        // Conditional target falls through: invert and retry as a
        // single-target insert.
        ReverseBranchCondition(Cond);
        return InsertBranch(MBB, TBB, nullptr, Cond, DL);
      BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
    } else if (isEndLoopN(Cond[0].getImm())) {
      int EndLoopOp = Cond[0].getImm();
      assert(Cond[1].isMBB());
      // Since we're adding an ENDLOOP, there better be a LOOP instruction.
      // Check for it, and change the BB target if needed.
      SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
      MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, VisitedBBs);
      assert(Loop != 0 && "Inserting an ENDLOOP without a LOOP");
      Loop->getOperand(0).setMBB(TBB);
      // Add the ENDLOOP after the finding the LOOP0.
      BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
    } else if (isNewValueJump(Cond[0].getImm())) {
      assert((Cond.size() == 3) && "Only supporting rr/ri version of nvjump");
      // New-value jump operand layouts:
      // (ins IntRegs:$src1, IntRegs:$src2, brtarget:$offset)
      // (ins IntRegs:$src1, u5Imm:$src2, brtarget:$offset)
      unsigned Flags1 = getUndefRegState(Cond[1].isUndef());
      DEBUG(dbgs() << "\nInserting NVJump for BB#" << MBB.getNumber(););
      if (Cond[2].isReg()) {
        unsigned Flags2 = getUndefRegState(Cond[2].isUndef());
        BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
          addReg(Cond[2].getReg(), Flags2).addMBB(TBB);
      } else if(Cond[2].isImm()) {
        BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[1].getReg(), Flags1).
          addImm(Cond[2].getImm()).addMBB(TBB);
        llvm_unreachable("Invalid condition for branching");
      // Ordinary predicated jump: Cond = [opcode, predicate register].
      assert((Cond.size() == 2) && "Malformed cond vector");
      const MachineOperand &RO = Cond[1];
      unsigned Flags = getUndefRegState(RO.isUndef());
      BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);

  // Two-way branch: conditional to TBB plus unconditional to FBB.
  assert((!Cond.empty()) &&
         "Cond. cannot be empty when multiple branchings are required");
  assert((!isNewValueJump(Cond[0].getImm())) &&
         "NV-jump cannot be inserted with another branch");
  // Special case for hardware loops. The condition is a basic block.
  if (isEndLoopN(Cond[0].getImm())) {
    int EndLoopOp = Cond[0].getImm();
    assert(Cond[1].isMBB());
    // Since we're adding an ENDLOOP, there better be a LOOP instruction.
    // Check for it, and change the BB target if needed.
    SmallPtrSet<MachineBasicBlock *, 8> VisitedBBs;
    MachineInstr *Loop = findLoopInstr(TBB, EndLoopOp, VisitedBBs);
    assert(Loop != 0 && "Inserting an ENDLOOP without a LOOP");
    Loop->getOperand(0).setMBB(TBB);
    // Add the ENDLOOP after the finding the LOOP0.
    BuildMI(&MBB, DL, get(EndLoopOp)).addMBB(TBB);
    const MachineOperand &RO = Cond[1];
    unsigned Flags = getUndefRegState(RO.isUndef());
    BuildMI(&MBB, DL, get(BccOpc)).addReg(RO.getReg(), Flags).addMBB(TBB);
  BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
// If-conversion profitability for a single block: convert only very small
// blocks (at most 3 non-debug instructions), regardless of cycle counts.
bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
      unsigned NumCycles, unsigned ExtraPredCycles,
      BranchProbability Probability) const {
  return nonDbgBBSize(&MBB) <= 3;
// If-conversion profitability for a diamond (true/false blocks): both sides
// must be at most 3 non-debug instructions.
bool HexagonInstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
      unsigned NumTCycles, unsigned ExtraTCycles, MachineBasicBlock &FMBB,
      unsigned NumFCycles, unsigned ExtraFCycles, BranchProbability Probability)
  return nonDbgBBSize(&TMBB) <= 3 && nonDbgBBSize(&FMBB) <= 3;
// Duplicating a block for if-conversion is considered profitable only when
// it contains at most 4 instructions.
bool HexagonInstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
      unsigned NumInstrs, BranchProbability Probability) const {
  return NumInstrs <= 4;
// Emit a physical-register copy, dispatching on the register classes of
// SrcReg/DestReg to the appropriate Hexagon transfer instruction.
// Falls through to llvm_unreachable for unsupported class pairs.
void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
      MachineBasicBlock::iterator I, const DebugLoc &DL, unsigned DestReg,
      unsigned SrcReg, bool KillSrc) const {
  auto &HRI = getRegisterInfo();
  unsigned KillFlag = getKillRegState(KillSrc);

  if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::A2_tfr), DestReg)
      .addReg(SrcReg, KillFlag);
  if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::A2_tfrp), DestReg)
      .addReg(SrcReg, KillFlag);
  if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
    // Map Pd = Ps to Pd = or(Ps, Ps).
    BuildMI(MBB, I, DL, get(Hexagon::C2_or), DestReg)
      .addReg(SrcReg).addReg(SrcReg, KillFlag);
  if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
      Hexagon::IntRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
      .addReg(SrcReg, KillFlag);
  if (Hexagon::IntRegsRegClass.contains(DestReg) &&
      Hexagon::CtrRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::A2_tfrcrr), DestReg)
      .addReg(SrcReg, KillFlag);
  if (Hexagon::ModRegsRegClass.contains(DestReg) &&
      Hexagon::IntRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::A2_tfrrcr), DestReg)
      .addReg(SrcReg, KillFlag);
  if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
      Hexagon::IntRegsRegClass.contains(DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
      .addReg(SrcReg, KillFlag);
  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
      Hexagon::PredRegsRegClass.contains(DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::C2_tfrrp), DestReg)
      .addReg(SrcReg, KillFlag);
  // NOTE(review): this PredRegs->IntRegs case duplicates the one above and
  // can never be reached after it -- dead code; consider removing.
  if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
      Hexagon::IntRegsRegClass.contains(DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::C2_tfrpr), DestReg)
      .addReg(SrcReg, KillFlag);
  if (Hexagon::VectorRegsRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::V6_vassign), DestReg).
      addReg(SrcReg, KillFlag);
  if (Hexagon::VecDblRegsRegClass.contains(SrcReg, DestReg)) {
    // Double vector copy is done as a combine of the two halves.
    BuildMI(MBB, I, DL, get(Hexagon::V6_vcombine), DestReg)
      .addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_hireg), KillFlag)
      .addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_loreg), KillFlag);
  if (Hexagon::VecPredRegsRegClass.contains(SrcReg, DestReg)) {
    // Vector predicate copy via and(Qs, Qs).
    BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DestReg)
      .addReg(SrcReg, KillFlag);
  if (Hexagon::VecPredRegsRegClass.contains(SrcReg) &&
      Hexagon::VectorRegsRegClass.contains(DestReg)) {
    llvm_unreachable("Unimplemented pred to vec");
  if (Hexagon::VecPredRegsRegClass.contains(DestReg) &&
      Hexagon::VectorRegsRegClass.contains(SrcReg)) {
    llvm_unreachable("Unimplemented vec to pred");
  if (Hexagon::VecPredRegs128BRegClass.contains(SrcReg, DestReg)) {
    // 128B vector predicate: copy high and low halves separately.
    unsigned DstHi = HRI.getSubReg(DestReg, Hexagon::subreg_hireg);
    BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DstHi)
      .addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_hireg), KillFlag);
    unsigned DstLo = HRI.getSubReg(DestReg, Hexagon::subreg_loreg);
    BuildMI(MBB, I, DL, get(Hexagon::V6_pred_and), DstLo)
      .addReg(HRI.getSubReg(SrcReg, Hexagon::subreg_loreg), KillFlag);

  // Show the invalid registers to ease debugging.
  dbgs() << "Invalid registers for copy in BB#" << MBB.getNumber()
         << ": " << PrintReg(DestReg, &HRI)
         << " = " << PrintReg(SrcReg, &HRI) << '\n';
  llvm_unreachable("Unimplemented");
// Spill SrcReg to stack slot FI, selecting the store opcode by register
// class. A MachineMemOperand is attached so later passes can reason about
// the memory access.
void HexagonInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
      MachineBasicBlock::iterator I, unsigned SrcReg, bool isKill, int FI,
      const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(I);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  unsigned KillFlag = getKillRegState(isKill);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), Align);

  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::S2_storeri_io))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::S2_storerd_io))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::STriw_mod))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::VecPredRegs128BRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::STriq_pred_V6_128B))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::VecPredRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::STriq_pred_V6))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) {
    DEBUG(dbgs() << "++Generating 128B vector spill");
    BuildMI(MBB, I, DL, get(Hexagon::STriv_pseudo_V6_128B))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::VectorRegsRegClass.hasSubClassEq(RC)) {
    DEBUG(dbgs() << "++Generating vector spill");
    BuildMI(MBB, I, DL, get(Hexagon::STriv_pseudo_V6))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::VecDblRegsRegClass.hasSubClassEq(RC)) {
    DEBUG(dbgs() << "++Generating double vector spill");
    BuildMI(MBB, I, DL, get(Hexagon::STrivv_pseudo_V6))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
  } else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) {
    DEBUG(dbgs() << "++Generating 128B double vector spill");
    BuildMI(MBB, I, DL, get(Hexagon::STrivv_pseudo_V6_128B))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, KillFlag).addMemOperand(MMO);
    llvm_unreachable("Unimplemented");
// Reload DestReg from stack slot FI, selecting the load opcode by register
// class. Mirrors storeRegToStackSlot.
void HexagonInstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg,
    int FI, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(I);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);

  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), Align);

  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::L2_loadri_io), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::L2_loadrd_io), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::LDriw_mod), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::VecPredRegs128BRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::LDriq_pred_V6_128B), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::VecPredRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::LDriq_pred_V6), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::VecDblRegs128BRegClass.hasSubClassEq(RC)) {
    DEBUG(dbgs() << "++Generating 128B double vector restore");
    BuildMI(MBB, I, DL, get(Hexagon::LDrivv_pseudo_V6_128B), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::VectorRegs128BRegClass.hasSubClassEq(RC)) {
    DEBUG(dbgs() << "++Generating 128B vector restore");
    BuildMI(MBB, I, DL, get(Hexagon::LDriv_pseudo_V6_128B), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::VectorRegsRegClass.hasSubClassEq(RC)) {
    DEBUG(dbgs() << "++Generating vector restore");
    BuildMI(MBB, I, DL, get(Hexagon::LDriv_pseudo_V6), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (Hexagon::VecDblRegsRegClass.hasSubClassEq(RC)) {
    DEBUG(dbgs() << "++Generating double vector restore");
    BuildMI(MBB, I, DL, get(Hexagon::LDrivv_pseudo_V6), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
    // NOTE(review): message says "store" but this is the load/restore path --
    // should read "Can't load this register from stack slot".
    llvm_unreachable("Can't store this register to stack slot");
902 /// expandPostRAPseudo - This function is called for all pseudo instructions
903 /// that remain after register allocation. Many pseudo instructions are
904 /// created to help register allocation. This is the place to convert them
905 /// into real instructions. The target can edit MI in place, or it can insert
906 /// new instructions and erase MI. The function should return true if
907 /// anything was changed.
908 bool HexagonInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
909 const HexagonRegisterInfo &HRI = getRegisterInfo();
910 MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
911 MachineBasicBlock &MBB = *MI.getParent();
912 DebugLoc DL = MI.getDebugLoc();
913 unsigned Opc = MI.getOpcode();
914 const unsigned VecOffset = 1;
918 case TargetOpcode::COPY: {
919 MachineOperand &MD = MI.getOperand(0);
920 MachineOperand &MS = MI.getOperand(1);
921 MachineBasicBlock::iterator MBBI = MI.getIterator();
922 if (MD.getReg() != MS.getReg() && !MS.isUndef()) {
923 copyPhysReg(MBB, MI, DL, MD.getReg(), MS.getReg(), MS.isKill());
924 std::prev(MBBI)->copyImplicitOps(*MBB.getParent(), MI);
929 case Hexagon::ALIGNA:
930 BuildMI(MBB, MI, DL, get(Hexagon::A2_andir), MI.getOperand(0).getReg())
931 .addReg(HRI.getFrameRegister())
932 .addImm(-MI.getOperand(1).getImm());
935 case Hexagon::HEXAGON_V6_vassignp_128B:
936 case Hexagon::HEXAGON_V6_vassignp: {
937 unsigned SrcReg = MI.getOperand(1).getReg();
938 unsigned DstReg = MI.getOperand(0).getReg();
939 if (SrcReg != DstReg)
940 copyPhysReg(MBB, MI, DL, DstReg, SrcReg, MI.getOperand(1).isKill());
944 case Hexagon::HEXAGON_V6_lo_128B:
945 case Hexagon::HEXAGON_V6_lo: {
946 unsigned SrcReg = MI.getOperand(1).getReg();
947 unsigned DstReg = MI.getOperand(0).getReg();
948 unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::subreg_loreg);
949 copyPhysReg(MBB, MI, DL, DstReg, SrcSubLo, MI.getOperand(1).isKill());
951 MRI.clearKillFlags(SrcSubLo);
954 case Hexagon::HEXAGON_V6_hi_128B:
955 case Hexagon::HEXAGON_V6_hi: {
956 unsigned SrcReg = MI.getOperand(1).getReg();
957 unsigned DstReg = MI.getOperand(0).getReg();
958 unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::subreg_hireg);
959 copyPhysReg(MBB, MI, DL, DstReg, SrcSubHi, MI.getOperand(1).isKill());
961 MRI.clearKillFlags(SrcSubHi);
964 case Hexagon::STrivv_indexed_128B:
966 case Hexagon::STrivv_indexed: {
967 unsigned SrcReg = MI.getOperand(2).getReg();
968 unsigned SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::subreg_hireg);
969 unsigned SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::subreg_loreg);
970 unsigned NewOpcd = Is128B ? Hexagon::V6_vS32b_ai_128B
971 : Hexagon::V6_vS32b_ai;
972 unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6;
973 MachineInstr *MI1New =
974 BuildMI(MBB, MI, DL, get(NewOpcd))
975 .addOperand(MI.getOperand(0))
976 .addImm(MI.getOperand(1).getImm())
978 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
979 MI1New->getOperand(0).setIsKill(false);
980 BuildMI(MBB, MI, DL, get(NewOpcd))
981 .addOperand(MI.getOperand(0))
982 // The Vectors are indexed in multiples of vector size.
983 .addImm(MI.getOperand(1).getImm() + Offset)
985 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
989 case Hexagon::LDrivv_pseudo_V6_128B:
990 case Hexagon::LDrivv_indexed_128B:
992 case Hexagon::LDrivv_pseudo_V6:
993 case Hexagon::LDrivv_indexed: {
994 unsigned NewOpcd = Is128B ? Hexagon::V6_vL32b_ai_128B
995 : Hexagon::V6_vL32b_ai;
996 unsigned DstReg = MI.getOperand(0).getReg();
997 unsigned Offset = Is128B ? VecOffset << 7 : VecOffset << 6;
998 MachineInstr *MI1New =
999 BuildMI(MBB, MI, DL, get(NewOpcd),
1000 HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
1001 .addOperand(MI.getOperand(1))
1002 .addImm(MI.getOperand(2).getImm());
1003 MI1New->getOperand(1).setIsKill(false);
1004 BuildMI(MBB, MI, DL, get(NewOpcd),
1005 HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
1006 .addOperand(MI.getOperand(1))
1007 // The Vectors are indexed in multiples of vector size.
1008 .addImm(MI.getOperand(2).getImm() + Offset)
1009 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1013 case Hexagon::LDriv_pseudo_V6_128B:
1015 case Hexagon::LDriv_pseudo_V6: {
1016 unsigned DstReg = MI.getOperand(0).getReg();
1017 unsigned NewOpc = Is128B ? Hexagon::V6_vL32b_ai_128B
1018 : Hexagon::V6_vL32b_ai;
1019 int32_t Off = MI.getOperand(2).getImm();
1020 BuildMI(MBB, MI, DL, get(NewOpc), DstReg)
1021 .addOperand(MI.getOperand(1))
1023 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1027 case Hexagon::STriv_pseudo_V6_128B:
1029 case Hexagon::STriv_pseudo_V6: {
1030 unsigned NewOpc = Is128B ? Hexagon::V6_vS32b_ai_128B
1031 : Hexagon::V6_vS32b_ai;
1032 int32_t Off = MI.getOperand(1).getImm();
1033 BuildMI(MBB, MI, DL, get(NewOpc))
1034 .addOperand(MI.getOperand(0))
1036 .addOperand(MI.getOperand(2))
1037 .setMemRefs(MI.memoperands_begin(), MI.memoperands_end());
1041 case Hexagon::TFR_PdTrue: {
1042 unsigned Reg = MI.getOperand(0).getReg();
1043 BuildMI(MBB, MI, DL, get(Hexagon::C2_orn), Reg)
1044 .addReg(Reg, RegState::Undef)
1045 .addReg(Reg, RegState::Undef);
1049 case Hexagon::TFR_PdFalse: {
1050 unsigned Reg = MI.getOperand(0).getReg();
1051 BuildMI(MBB, MI, DL, get(Hexagon::C2_andn), Reg)
1052 .addReg(Reg, RegState::Undef)
1053 .addReg(Reg, RegState::Undef);
1057 case Hexagon::VMULW: {
1058 // Expand a 64-bit vector multiply into 2 32-bit scalar multiplies.
1059 unsigned DstReg = MI.getOperand(0).getReg();
1060 unsigned Src1Reg = MI.getOperand(1).getReg();
1061 unsigned Src2Reg = MI.getOperand(2).getReg();
1062 unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::subreg_hireg);
1063 unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::subreg_loreg);
1064 unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::subreg_hireg);
1065 unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::subreg_loreg);
1066 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1067 HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
1070 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_mpyi),
1071 HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
1075 MRI.clearKillFlags(Src1SubHi);
1076 MRI.clearKillFlags(Src1SubLo);
1077 MRI.clearKillFlags(Src2SubHi);
1078 MRI.clearKillFlags(Src2SubLo);
1081 case Hexagon::VMULW_ACC: {
1082 // Expand 64-bit vector multiply with addition into 2 scalar multiplies.
1083 unsigned DstReg = MI.getOperand(0).getReg();
1084 unsigned Src1Reg = MI.getOperand(1).getReg();
1085 unsigned Src2Reg = MI.getOperand(2).getReg();
1086 unsigned Src3Reg = MI.getOperand(3).getReg();
1087 unsigned Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::subreg_hireg);
1088 unsigned Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::subreg_loreg);
1089 unsigned Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::subreg_hireg);
1090 unsigned Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::subreg_loreg);
1091 unsigned Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::subreg_hireg);
1092 unsigned Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::subreg_loreg);
1093 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1094 HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
1098 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::M2_maci),
1099 HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
1104 MRI.clearKillFlags(Src1SubHi);
1105 MRI.clearKillFlags(Src1SubLo);
1106 MRI.clearKillFlags(Src2SubHi);
1107 MRI.clearKillFlags(Src2SubLo);
1108 MRI.clearKillFlags(Src3SubHi);
1109 MRI.clearKillFlags(Src3SubLo);
1112 case Hexagon::Insert4: {
1113 unsigned DstReg = MI.getOperand(0).getReg();
1114 unsigned Src1Reg = MI.getOperand(1).getReg();
1115 unsigned Src2Reg = MI.getOperand(2).getReg();
1116 unsigned Src3Reg = MI.getOperand(3).getReg();
1117 unsigned Src4Reg = MI.getOperand(4).getReg();
1118 unsigned Src1RegIsKill = getKillRegState(MI.getOperand(1).isKill());
1119 unsigned Src2RegIsKill = getKillRegState(MI.getOperand(2).isKill());
1120 unsigned Src3RegIsKill = getKillRegState(MI.getOperand(3).isKill());
1121 unsigned Src4RegIsKill = getKillRegState(MI.getOperand(4).isKill());
1122 unsigned DstSubHi = HRI.getSubReg(DstReg, Hexagon::subreg_hireg);
1123 unsigned DstSubLo = HRI.getSubReg(DstReg, Hexagon::subreg_loreg);
1124 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::S2_insert),
1125 HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
1127 .addReg(Src1Reg, Src1RegIsKill)
1130 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::S2_insert),
1131 HRI.getSubReg(DstReg, Hexagon::subreg_loreg))
1133 .addReg(Src2Reg, Src2RegIsKill)
1136 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::S2_insert),
1137 HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
1139 .addReg(Src3Reg, Src3RegIsKill)
1142 BuildMI(MBB, MI, MI.getDebugLoc(), get(Hexagon::S2_insert),
1143 HRI.getSubReg(DstReg, Hexagon::subreg_hireg))
1145 .addReg(Src4Reg, Src4RegIsKill)
1149 MRI.clearKillFlags(DstReg);
1150 MRI.clearKillFlags(DstSubHi);
1151 MRI.clearKillFlags(DstSubLo);
1154 case Hexagon::MUX64_rr: {
1155 const MachineOperand &Op0 = MI.getOperand(0);
1156 const MachineOperand &Op1 = MI.getOperand(1);
1157 const MachineOperand &Op2 = MI.getOperand(2);
1158 const MachineOperand &Op3 = MI.getOperand(3);
1159 unsigned Rd = Op0.getReg();
1160 unsigned Pu = Op1.getReg();
1161 unsigned Rs = Op2.getReg();
1162 unsigned Rt = Op3.getReg();
1163 DebugLoc DL = MI.getDebugLoc();
1164 unsigned K1 = getKillRegState(Op1.isKill());
1165 unsigned K2 = getKillRegState(Op2.isKill());
1166 unsigned K3 = getKillRegState(Op3.isKill());
1168 BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpt), Rd)
1169 .addReg(Pu, (Rd == Rt) ? K1 : 0)
1172 BuildMI(MBB, MI, DL, get(Hexagon::A2_tfrpf), Rd)
1178 case Hexagon::VSelectPseudo_V6: {
1179 const MachineOperand &Op0 = MI.getOperand(0);
1180 const MachineOperand &Op1 = MI.getOperand(1);
1181 const MachineOperand &Op2 = MI.getOperand(2);
1182 const MachineOperand &Op3 = MI.getOperand(3);
1183 BuildMI(MBB, MI, DL, get(Hexagon::V6_vcmov))
1187 BuildMI(MBB, MI, DL, get(Hexagon::V6_vncmov))
1194 case Hexagon::VSelectDblPseudo_V6: {
1195 MachineOperand &Op0 = MI.getOperand(0);
1196 MachineOperand &Op1 = MI.getOperand(1);
1197 MachineOperand &Op2 = MI.getOperand(2);
1198 MachineOperand &Op3 = MI.getOperand(3);
1199 unsigned SrcLo = HRI.getSubReg(Op2.getReg(), Hexagon::subreg_loreg);
1200 unsigned SrcHi = HRI.getSubReg(Op2.getReg(), Hexagon::subreg_hireg);
1201 BuildMI(MBB, MI, DL, get(Hexagon::V6_vccombine))
1206 SrcLo = HRI.getSubReg(Op3.getReg(), Hexagon::subreg_loreg);
1207 SrcHi = HRI.getSubReg(Op3.getReg(), Hexagon::subreg_hireg);
1208 BuildMI(MBB, MI, DL, get(Hexagon::V6_vnccombine))
1216 case Hexagon::TCRETURNi:
1217 MI.setDesc(get(Hexagon::J2_jump));
1219 case Hexagon::TCRETURNr:
1220 MI.setDesc(get(Hexagon::J2_jumpr));
1222 case Hexagon::TFRI_f:
1223 case Hexagon::TFRI_cPt_f:
1224 case Hexagon::TFRI_cNotPt_f: {
1225 unsigned Opx = (Opc == Hexagon::TFRI_f) ? 1 : 2;
1226 APFloat FVal = MI.getOperand(Opx).getFPImm()->getValueAPF();
1227 APInt IVal = FVal.bitcastToAPInt();
1228 MI.RemoveOperand(Opx);
1229 unsigned NewOpc = (Opc == Hexagon::TFRI_f) ? Hexagon::A2_tfrsi :
1230 (Opc == Hexagon::TFRI_cPt_f) ? Hexagon::C2_cmoveit :
1231 Hexagon::C2_cmoveif;
1232 MI.setDesc(get(NewOpc));
1233 MI.addOperand(MachineOperand::CreateImm(IVal.getZExtValue()));
1242 // We indicate that we want to reverse the branch by
1243 // inserting the reversed branching opcode.
// Invert the branch condition in place by swapping in the opcode of the
// inverted predicated branch. Cond[0] holds the branch opcode as an
// immediate. ENDLOOPn branches cannot be reversed.
1244 bool HexagonInstrInfo::ReverseBranchCondition(
1245 SmallVectorImpl<MachineOperand> &Cond) const {
1248 assert(Cond[0].isImm() && "First entry in the cond vector not imm-val");
1249 unsigned opcode = Cond[0].getImm();
1251 assert(get(opcode).isBranch() && "Should be a branching condition.");
1252 if (isEndLoopN(opcode))
1254 unsigned NewOpcode = getInvertedPredicatedOpcode(opcode);
1255 Cond[0].setImm(NewOpcode);
// Insert an A2_nop before the given iterator position.
1260 void HexagonInstrInfo::insertNoop(MachineBasicBlock &MBB,
1261 MachineBasicBlock::iterator MI) const {
1263 BuildMI(MBB, MI, DL, get(Hexagon::A2_nop));
1267 // Returns true if an instruction is predicated irrespective of the predicate
1268 // sense. For example, all of the following will return true.
1269 // if (p0) R1 = add(R2, R3)
1270 // if (!p0) R1 = add(R2, R3)
1271 // if (p0.new) R1 = add(R2, R3)
1272 // if (!p0.new) R1 = add(R2, R3)
1273 // Note: New-value stores are not included here as in the current
1274 // implementation, we don't need to check their predicate sense.
1275 bool HexagonInstrInfo::isPredicated(const MachineInstr &MI) const {
// Predication status is a bitfield encoded in the instruction's TSFlags.
1276 const uint64_t F = MI.getDesc().TSFlags;
1277 return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
// Convert MI into its predicated form under condition Cond, in place.
// Returns false for conditions that cannot be used for predication
// (new-value jumps, ENDLOOPn).
1281 bool HexagonInstrInfo::PredicateInstruction(
1282 MachineInstr &MI, ArrayRef<MachineOperand> Cond) const {
1283 if (Cond.empty() || isNewValueJump(Cond[0].getImm()) ||
1284 isEndLoopN(Cond[0].getImm())) {
1285 DEBUG(dbgs() << "\nCannot predicate:"; MI.dump(););
1288 int Opc = MI.getOpcode();
1289 assert (isPredicable(MI) && "Expected predicable instruction");
1290 bool invertJump = predOpcodeHasNot(Cond);
1292 // We have to predicate MI "in place", i.e. after this function returns,
1293 // MI will need to be transformed into a predicated form. To avoid com-
1294 // plicated manipulations with the operands (handling tied operands,
1295 // etc.), build a new temporary instruction, then overwrite MI with it.
1297 MachineBasicBlock &B = *MI.getParent();
1298 DebugLoc DL = MI.getDebugLoc();
1299 unsigned PredOpc = getCondOpcode(Opc, invertJump);
1300 MachineInstrBuilder T = BuildMI(B, MI, DL, get(PredOpc));
1301 unsigned NOp = 0, NumOps = MI.getNumOperands();
// First copy the leading explicit defs onto the temporary instruction.
1302 while (NOp < NumOps) {
1303 MachineOperand &Op = MI.getOperand(NOp);
1304 if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
// The predicate operand goes after the defs, followed by the remaining
// original operands.
1310 unsigned PredReg, PredRegPos, PredRegFlags;
1311 bool GotPredReg = getPredReg(Cond, PredReg, PredRegPos, PredRegFlags);
1314 T.addReg(PredReg, PredRegFlags);
1315 while (NOp < NumOps)
1316 T.addOperand(MI.getOperand(NOp++));
// Overwrite MI with the temporary: swap the descriptor, then replace
// all operands with the temporary's operands.
1318 MI.setDesc(get(PredOpc));
1319 while (unsigned n = MI.getNumOperands())
1320 MI.RemoveOperand(n-1);
1321 for (unsigned i = 0, n = T->getNumOperands(); i < n; ++i)
1322 MI.addOperand(T->getOperand(i));
1324 MachineBasicBlock::instr_iterator TI = T->getIterator();
// The predicate register may be used by other predicated instructions;
// stale kill flags would be wrong after this transformation.
1327 MachineRegisterInfo &MRI = B.getParent()->getRegInfo();
1328 MRI.clearKillFlags(PredReg);
// TargetInstrInfo hook: whether Pred1 implies Pred2.
1333 bool HexagonInstrInfo::SubsumesPredicate(ArrayRef<MachineOperand> Pred1,
1334 ArrayRef<MachineOperand> Pred2) const {
// Scan MI's operands for defs of predicate registers and report them in
// Pred. Returns whether any predicate register is defined.
1340 bool HexagonInstrInfo::DefinesPredicate(
1341 MachineInstr &MI, std::vector<MachineOperand> &Pred) const {
1342 auto &HRI = getRegisterInfo();
1343 for (unsigned oper = 0; oper < MI.getNumOperands(); ++oper) {
1344 MachineOperand MO = MI.getOperand(oper);
1345 if (MO.isReg() && MO.isDef()) {
// A def is a predicate def iff its register belongs to PredRegs.
1346 const TargetRegisterClass* RC = HRI.getMinimalPhysRegClass(MO.getReg());
1347 if (RC == &Hexagon::PredRegsRegClass) {
// Delegates directly to the MCInstrDesc's predicable flag.
1357 bool HexagonInstrInfo::isPredicable(MachineInstr &MI) const {
1358 return MI.getDesc().isPredicable();
// Decide whether MI must act as a scheduling region boundary: throwing
// calls into landing pads, no-return calls, terminators/labels, and
// (optionally) inline asm.
1361 bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
1362 const MachineBasicBlock *MBB,
1363 const MachineFunction &MF) const {
1364 // Debug info is never a scheduling boundary. It's necessary to be explicit
1365 // due to the special treatment of IT instructions below, otherwise a
1366 // dbg_value followed by an IT will result in the IT instruction being
1367 // considered a scheduling hazard, which is wrong. It should be the actual
1368 // instruction preceding the dbg_value instruction(s), just like it is
1369 // when debug info is not present.
1370 if (MI.isDebugValue())
1373 // Throwing call is a boundary.
1375 // If any of the block's successors is a landing pad, this could be a
1377 for (auto I : MBB->successors())
1382 // Don't mess around with no return calls.
1383 if (MI.getOpcode() == Hexagon::CALLv3nr)
1386 // Terminators and labels can't be scheduled around.
1387 if (MI.getDesc().isTerminator() || MI.isPosition())
// Inline asm is a boundary unless overridden by -hexagon-sched-inline-asm.
1390 if (MI.isInlineAsm() && !ScheduleInlineAsm)
1397 /// Measure the specified inline asm to determine an approximation of its
1399 /// Comments (which run till the next SeparatorString or newline) do not
1400 /// count as an instruction.
1401 /// Any other non-whitespace text is considered an instruction, with
1402 /// multiple instructions separated by SeparatorString or newlines.
1403 /// Variable-length instructions are not handled here; this function
1404 /// may be overloaded in the target code to do that.
1405 /// Hexagon counts the number of ##'s and adjusts for that many
1406 /// constant extenders.
1407 unsigned HexagonInstrInfo::getInlineAsmLength(const char *Str,
1408 const MCAsmInfo &MAI) const {
1409 StringRef AStr(Str);
1410 // Count the number of instructions in the asm.
1411 bool atInsnStart = true;
1412 unsigned Length = 0;
1413 for (; *Str; ++Str) {
// Separator string or newline starts a new statement.
1414 if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
1415 strlen(MAI.getSeparatorString())) == 0)
// First non-whitespace char of a statement counts as one instruction
// of maximal length.
1417 if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
1418 Length += MAI.getMaxInstLength();
1419 atInsnStart = false;
// Comments run to the next separator/newline and count as nothing.
1421 if (atInsnStart && strncmp(Str, MAI.getCommentString(),
1422 strlen(MAI.getCommentString())) == 0)
1423 atInsnStart = false;
1426 // Add to size number of constant extenders seen * 4.
// Each "##" in Hexagon asm marks a constant extender, which occupies an
// extra instruction word (4 bytes).
1427 StringRef Occ("##");
1428 Length += AStr.count(Occ)*4;
// No Hexagon-specific post-RA hazard recognizer; use the generic one.
1433 ScheduleHazardRecognizer*
1434 HexagonInstrInfo::CreateTargetPostRAHazardRecognizer(
1435 const InstrItineraryData *II, const ScheduleDAG *DAG) const {
1436 return TargetInstrInfo::CreateTargetPostRAHazardRecognizer(II, DAG);
1440 /// \brief For a comparison instruction, return the source registers in
1441 /// \p SrcReg and \p SrcReg2 if having two register operands, and the value it
1442 /// compares against in CmpValue. Return true if the comparison instruction
1443 /// can be analyzed.
// Decompose a Hexagon compare instruction: SrcReg is always operand 1;
// the second switch distinguishes register-register compares (SrcReg2)
// from compare-with-immediate forms (Value).
1444 bool HexagonInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
1445 unsigned &SrcReg2, int &Mask,
1447 unsigned Opc = MI.getOpcode();
1449 // Set mask and the first source register.
// Word compares.
1451 case Hexagon::C2_cmpeq:
1452 case Hexagon::C2_cmpeqp:
1453 case Hexagon::C2_cmpgt:
1454 case Hexagon::C2_cmpgtp:
1455 case Hexagon::C2_cmpgtu:
1456 case Hexagon::C2_cmpgtup:
1457 case Hexagon::C4_cmpneq:
1458 case Hexagon::C4_cmplte:
1459 case Hexagon::C4_cmplteu:
1460 case Hexagon::C2_cmpeqi:
1461 case Hexagon::C2_cmpgti:
1462 case Hexagon::C2_cmpgtui:
1463 case Hexagon::C4_cmpneqi:
1464 case Hexagon::C4_cmplteui:
1465 case Hexagon::C4_cmpltei:
1466 SrcReg = MI.getOperand(1).getReg();
// Byte compares.
1469 case Hexagon::A4_cmpbeq:
1470 case Hexagon::A4_cmpbgt:
1471 case Hexagon::A4_cmpbgtu:
1472 case Hexagon::A4_cmpbeqi:
1473 case Hexagon::A4_cmpbgti:
1474 case Hexagon::A4_cmpbgtui:
1475 SrcReg = MI.getOperand(1).getReg();
// Halfword compares.
1478 case Hexagon::A4_cmpheq:
1479 case Hexagon::A4_cmphgt:
1480 case Hexagon::A4_cmphgtu:
1481 case Hexagon::A4_cmpheqi:
1482 case Hexagon::A4_cmphgti:
1483 case Hexagon::A4_cmphgtui:
1484 SrcReg = MI.getOperand(1).getReg();
1489 // Set the value/second source register.
// Register-register forms: operand 2 is the second source register.
1491 case Hexagon::C2_cmpeq:
1492 case Hexagon::C2_cmpeqp:
1493 case Hexagon::C2_cmpgt:
1494 case Hexagon::C2_cmpgtp:
1495 case Hexagon::C2_cmpgtu:
1496 case Hexagon::C2_cmpgtup:
1497 case Hexagon::A4_cmpbeq:
1498 case Hexagon::A4_cmpbgt:
1499 case Hexagon::A4_cmpbgtu:
1500 case Hexagon::A4_cmpheq:
1501 case Hexagon::A4_cmphgt:
1502 case Hexagon::A4_cmphgtu:
1503 case Hexagon::C4_cmpneq:
1504 case Hexagon::C4_cmplte:
1505 case Hexagon::C4_cmplteu:
1506 SrcReg2 = MI.getOperand(2).getReg();
// Register-immediate forms: operand 2 is the compared-against value.
1509 case Hexagon::C2_cmpeqi:
1510 case Hexagon::C2_cmpgtui:
1511 case Hexagon::C2_cmpgti:
1512 case Hexagon::C4_cmpneqi:
1513 case Hexagon::C4_cmplteui:
1514 case Hexagon::C4_cmpltei:
1515 case Hexagon::A4_cmpbeqi:
1516 case Hexagon::A4_cmpbgti:
1517 case Hexagon::A4_cmpbgtui:
1518 case Hexagon::A4_cmpheqi:
1519 case Hexagon::A4_cmphgti:
1520 case Hexagon::A4_cmphgtui:
1522 Value = MI.getOperand(2).getImm();
// Latency is derived from the instruction's timing class.
1529 unsigned HexagonInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
1530 const MachineInstr &MI,
1531 unsigned *PredCost) const {
1532 return getInstrTimingClassLatency(ItinData, &MI);
// Build a DFA packetizer from the subtarget's itinerary data.
1536 DFAPacketizer *HexagonInstrInfo::CreateTargetScheduleState(
1537 const TargetSubtargetInfo &STI) const {
1538 const InstrItineraryData *II = STI.getInstrItineraryData();
1539 return static_cast<const HexagonSubtarget&>(STI).createDFAPacketizer(II);
1543 // Inspired by this pair:
1544 // %R13<def> = L2_loadri_io %R29, 136; mem:LD4[FixedStack0]
1545 // S2_storeri_io %R29, 132, %R1<kill>; flags: mem:ST4[FixedStack1]
1546 // Currently AA considers the addresses in these instructions to be aliasing.
// Prove two memory accesses disjoint when both use the same base register
// and their [offset, offset+size) ranges do not overlap.
1547 bool HexagonInstrInfo::areMemAccessesTriviallyDisjoint(
1548 MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA) const {
1549 int OffsetA = 0, OffsetB = 0;
1550 unsigned SizeA = 0, SizeB = 0;
// Side effects or ordered (volatile/atomic) accesses: can't reason here.
1552 if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
1553 MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
1556 // Instructions that are pure loads, not loads and stores like memops are not
1558 if (MIa.mayLoad() && !isMemOp(&MIa) && MIb.mayLoad() && !isMemOp(&MIb))
1561 // Get base, offset, and access size in MIa.
1562 unsigned BaseRegA = getBaseAndOffset(&MIa, OffsetA, SizeA);
1563 if (!BaseRegA || !SizeA)
1566 // Get base, offset, and access size in MIb.
1567 unsigned BaseRegB = getBaseAndOffset(&MIb, OffsetB, SizeB);
1568 if (!BaseRegB || !SizeB)
// Different base registers: cannot prove disjointness statically.
1571 if (BaseRegA != BaseRegB)
1574 // This is a mem access with the same base register and known offsets from it.
// Disjoint iff the lower access ends at or before the higher one starts.
1576 if (OffsetA > OffsetB) {
1577 uint64_t offDiff = (uint64_t)((int64_t)OffsetA - (int64_t)OffsetB);
1578 return (SizeB <= offDiff);
1579 } else if (OffsetA < OffsetB) {
1580 uint64_t offDiff = (uint64_t)((int64_t)OffsetB - (int64_t)OffsetA);
1581 return (SizeA <= offDiff);
// Create a virtual register of the register class matching the given
// value type (i1 -> predicate, i32/f32 -> 32-bit, i64/f64 -> 64-bit).
1588 unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
1589 MachineRegisterInfo &MRI = MF->getRegInfo();
1590 const TargetRegisterClass *TRC;
1591 if (VT == MVT::i1) {
1592 TRC = &Hexagon::PredRegsRegClass;
1593 } else if (VT == MVT::i32 || VT == MVT::f32) {
1594 TRC = &Hexagon::IntRegsRegClass;
1595 } else if (VT == MVT::i64 || VT == MVT::f64) {
1596 TRC = &Hexagon::DoubleRegsRegClass;
1598 llvm_unreachable("Cannot handle this register class");
1601 unsigned NewReg = MRI.createVirtualRegister(TRC);
// True when MI's addressing mode is absolute-set.
1606 bool HexagonInstrInfo::isAbsoluteSet(const MachineInstr* MI) const {
1607 return (getAddrMode(MI) == HexagonII::AbsoluteSet);
// Accumulator property is encoded in the TSFlags bitfield.
1611 bool HexagonInstrInfo::isAccumulator(const MachineInstr *MI) const {
1612 const uint64_t F = MI->getDesc().TSFlags;
1613 return((F >> HexagonII::AccumulatorPos) & HexagonII::AccumulatorMask);
// "Complex" here excludes memory operations, frame alloc/dealloc,
// branches, returns, and TC2-early instructions.
1617 bool HexagonInstrInfo::isComplex(const MachineInstr *MI) const {
1618 const MachineFunction *MF = MI->getParent()->getParent();
1619 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1620 const HexagonInstrInfo *QII = (const HexagonInstrInfo *) TII;
1623 && !(QII->isTC2Early(MI))
1624 && !(MI->getDesc().mayLoad())
1625 && !(MI->getDesc().mayStore())
1626 && (MI->getDesc().getOpcode() != Hexagon::S2_allocframe)
1627 && (MI->getDesc().getOpcode() != Hexagon::L2_deallocframe)
1628 && !(QII->isMemOp(MI))
1629 && !(MI->isBranch())
1630 && !(MI->isReturn())
1638 // Return true if the instruction is a compound branch instruction.
// Compound branch: COMPOUND-type encoding that is also a branch.
1639 bool HexagonInstrInfo::isCompoundBranchInstr(const MachineInstr *MI) const {
1640 return (getType(MI) == HexagonII::TypeCOMPOUND && MI->isBranch());
// True for any conditional instruction: predicated branches, conditional
// transfers/ALU32/loads, and predicated (non-new-value, non-.new) stores.
1644 bool HexagonInstrInfo::isCondInst(const MachineInstr *MI) const {
1645 return (MI->isBranch() && isPredicated(*MI)) ||
1646 isConditionalTransfer(MI) ||
1647 isConditionalALU32(MI) ||
1648 isConditionalLoad(MI) ||
1649 // Predicated stores which don't have a .new on any operands.
1650 (MI->mayStore() && isPredicated(*MI) && !isNewValueStore(MI) &&
1651 !isPredicatedNew(*MI));
// Opcode whitelist of the predicated ALU32 instructions (t/f and their
// .new variants of add, and, or, sub, xor, shifts, extends, combine).
1655 bool HexagonInstrInfo::isConditionalALU32(const MachineInstr* MI) const {
1656 switch (MI->getOpcode()) {
1657 case Hexagon::A2_paddf:
1658 case Hexagon::A2_paddfnew:
1659 case Hexagon::A2_paddif:
1660 case Hexagon::A2_paddifnew:
1661 case Hexagon::A2_paddit:
1662 case Hexagon::A2_padditnew:
1663 case Hexagon::A2_paddt:
1664 case Hexagon::A2_paddtnew:
1665 case Hexagon::A2_pandf:
1666 case Hexagon::A2_pandfnew:
1667 case Hexagon::A2_pandt:
1668 case Hexagon::A2_pandtnew:
1669 case Hexagon::A2_porf:
1670 case Hexagon::A2_porfnew:
1671 case Hexagon::A2_port:
1672 case Hexagon::A2_portnew:
1673 case Hexagon::A2_psubf:
1674 case Hexagon::A2_psubfnew:
1675 case Hexagon::A2_psubt:
1676 case Hexagon::A2_psubtnew:
1677 case Hexagon::A2_pxorf:
1678 case Hexagon::A2_pxorfnew:
1679 case Hexagon::A2_pxort:
1680 case Hexagon::A2_pxortnew:
1681 case Hexagon::A4_paslhf:
1682 case Hexagon::A4_paslhfnew:
1683 case Hexagon::A4_paslht:
1684 case Hexagon::A4_paslhtnew:
1685 case Hexagon::A4_pasrhf:
1686 case Hexagon::A4_pasrhfnew:
1687 case Hexagon::A4_pasrht:
1688 case Hexagon::A4_pasrhtnew:
1689 case Hexagon::A4_psxtbf:
1690 case Hexagon::A4_psxtbfnew:
1691 case Hexagon::A4_psxtbt:
1692 case Hexagon::A4_psxtbtnew:
1693 case Hexagon::A4_psxthf:
1694 case Hexagon::A4_psxthfnew:
1695 case Hexagon::A4_psxtht:
1696 case Hexagon::A4_psxthtnew:
1697 case Hexagon::A4_pzxtbf:
1698 case Hexagon::A4_pzxtbfnew:
1699 case Hexagon::A4_pzxtbt:
1700 case Hexagon::A4_pzxtbtnew:
1701 case Hexagon::A4_pzxthf:
1702 case Hexagon::A4_pzxthfnew:
1703 case Hexagon::A4_pzxtht:
1704 case Hexagon::A4_pzxthtnew:
1705 case Hexagon::C2_ccombinewf:
1706 case Hexagon::C2_ccombinewt:
1713 // FIXME - Function name and its functionality don't match.
1714 // It should be renamed to hasPredNewOpcode()
// A predicated load that has a valid .new (predicated-new) counterpart.
1715 bool HexagonInstrInfo::isConditionalLoad(const MachineInstr* MI) const {
1716 if (!MI->getDesc().mayLoad() || !isPredicated(*MI))
1719 int PNewOpcode = Hexagon::getPredNewOpcode(MI->getOpcode());
1720 // Instruction with valid predicated-new opcode can be promoted to .new.
1721 return PNewOpcode >= 0;
1725 // Returns true if an instruction is a conditional store.
1727 // Note: It doesn't include conditional new-value stores as they can't be
1728 // converted to .new predicate.
// Opcode whitelist of predicated stores eligible for .new promotion
// (byte/half/word/double, io/rr/pi/abs addressing, t/f senses).
1729 bool HexagonInstrInfo::isConditionalStore(const MachineInstr* MI) const {
1730 switch (MI->getOpcode()) {
1731 default: return false;
1732 case Hexagon::S4_storeirbt_io:
1733 case Hexagon::S4_storeirbf_io:
1734 case Hexagon::S4_pstorerbt_rr:
1735 case Hexagon::S4_pstorerbf_rr:
1736 case Hexagon::S2_pstorerbt_io:
1737 case Hexagon::S2_pstorerbf_io:
1738 case Hexagon::S2_pstorerbt_pi:
1739 case Hexagon::S2_pstorerbf_pi:
1740 case Hexagon::S2_pstorerdt_io:
1741 case Hexagon::S2_pstorerdf_io:
1742 case Hexagon::S4_pstorerdt_rr:
1743 case Hexagon::S4_pstorerdf_rr:
1744 case Hexagon::S2_pstorerdt_pi:
1745 case Hexagon::S2_pstorerdf_pi:
1746 case Hexagon::S2_pstorerht_io:
1747 case Hexagon::S2_pstorerhf_io:
1748 case Hexagon::S4_storeirht_io:
1749 case Hexagon::S4_storeirhf_io:
1750 case Hexagon::S4_pstorerht_rr:
1751 case Hexagon::S4_pstorerhf_rr:
1752 case Hexagon::S2_pstorerht_pi:
1753 case Hexagon::S2_pstorerhf_pi:
1754 case Hexagon::S2_pstorerit_io:
1755 case Hexagon::S2_pstorerif_io:
1756 case Hexagon::S4_storeirit_io:
1757 case Hexagon::S4_storeirif_io:
1758 case Hexagon::S4_pstorerit_rr:
1759 case Hexagon::S4_pstorerif_rr:
1760 case Hexagon::S2_pstorerit_pi:
1761 case Hexagon::S2_pstorerif_pi:
1763 // V4 global address store before promoting to dot new.
1764 case Hexagon::S4_pstorerdt_abs:
1765 case Hexagon::S4_pstorerdf_abs:
1766 case Hexagon::S4_pstorerbt_abs:
1767 case Hexagon::S4_pstorerbf_abs:
1768 case Hexagon::S4_pstorerht_abs:
1769 case Hexagon::S4_pstorerhf_abs:
1770 case Hexagon::S4_pstorerit_abs:
1771 case Hexagon::S4_pstorerif_abs:
1774 // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
1775 // from the "Conditional Store" list. Because a predicated new value store
1776 // would NOT be promoted to a double dot new store.
1777 // This function returns yes for those stores that are predicated but not
1778 // yet promoted to predicate dot new instructions.
// Opcode whitelist of conditional register/immediate transfers
// (tfr, cmove, their .new variants, and 64-bit tfrp forms).
1783 bool HexagonInstrInfo::isConditionalTransfer(const MachineInstr *MI) const {
1784 switch (MI->getOpcode()) {
1785 case Hexagon::A2_tfrt:
1786 case Hexagon::A2_tfrf:
1787 case Hexagon::C2_cmoveit:
1788 case Hexagon::C2_cmoveif:
1789 case Hexagon::A2_tfrtnew:
1790 case Hexagon::A2_tfrfnew:
1791 case Hexagon::C2_cmovenewit:
1792 case Hexagon::C2_cmovenewif:
1793 case Hexagon::A2_tfrpt:
1794 case Hexagon::A2_tfrpf:
1804 // TODO: In order to have isExtendable for fpimm/f32Ext, we need to handle
1805 // isFPImm and later getFPImm as well.
// Determine whether MI requires a constant extender: either the opcode is
// permanently extended, or its extendable operand's value/kind forces one.
1806 bool HexagonInstrInfo::isConstExtended(const MachineInstr *MI) const {
1807 const uint64_t F = MI->getDesc().TSFlags;
1808 unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
1809 if (isExtended) // Instruction must be extended.
1812 unsigned isExtendable =
1813 (F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask;
1820 short ExtOpNum = getCExtOpNum(MI);
1821 const MachineOperand &MO = MI->getOperand(ExtOpNum);
1822 // Use MO operand flags to determine if MO
1823 // has the HMOTF_ConstExtended flag set.
// NOTE(review): `&&` below looks like it should be bitwise `&` — as
// written, any nonzero target flag satisfies the test, since
// HexagonII::HMOTF_ConstExtended is a constant. Confirm against the
// flag definition before changing.
1824 if (MO.getTargetFlags() && HexagonII::HMOTF_ConstExtended)
1826 // If this is a Machine BB address we are talking about, and it is
1827 // not marked as extended, say so.
1831 // We could be using an instruction with an extendable immediate and shoehorn
1832 // a global address into it. If it is a global address it will be constant
1833 // extended. We do this for COMBINE.
1834 // We currently only handle isGlobal() because it is the only kind of
1835 // object we are going to end up with here for now.
1836 // In the future we probably should add isSymbol(), etc.
1837 if (MO.isGlobal() || MO.isSymbol() || MO.isBlockAddress() ||
1838 MO.isJTI() || MO.isCPI())
1841 // If the extendable operand is not 'Immediate' type, the instruction should
1842 // have 'isExtended' flag set.
1843 assert(MO.isImm() && "Extendable operand must be Immediate type");
// An immediate outside the instruction's encodable range needs extension.
1845 int MinValue = getMinValue(MI);
1846 int MaxValue = getMaxValue(MI);
1847 int ImmValue = MO.getImm();
1849 return (ImmValue < MinValue || ImmValue > MaxValue);
// Opcode whitelist of dealloc_return instructions (all predication forms).
1853 bool HexagonInstrInfo::isDeallocRet(const MachineInstr *MI) const {
1854 switch (MI->getOpcode()) {
1855 case Hexagon::L4_return :
1856 case Hexagon::L4_return_t :
1857 case Hexagon::L4_return_f :
1858 case Hexagon::L4_return_tnew_pnt :
1859 case Hexagon::L4_return_fnew_pnt :
1860 case Hexagon::L4_return_tnew_pt :
1861 case Hexagon::L4_return_fnew_pt :
1868 // Return true when ConsMI uses a register defined by ProdMI.
1869 bool HexagonInstrInfo::isDependent(const MachineInstr *ProdMI,
1870 const MachineInstr *ConsMI) const {
1871 const MCInstrDesc &ProdMCID = ProdMI->getDesc();
1872 if (!ProdMCID.getNumDefs())
1875 auto &HRI = getRegisterInfo();
1877 SmallVector<unsigned, 4> DefsA;
1878 SmallVector<unsigned, 4> DefsB;
1879 SmallVector<unsigned, 8> UsesA;
1880 SmallVector<unsigned, 8> UsesB;
1882 parseOperands(ProdMI, DefsA, UsesA);
1883 parseOperands(ConsMI, DefsB, UsesB);
1885 for (auto &RegA : DefsA)
1886 for (auto &RegB : UsesB) {
1887 // True data dependency.
1891 if (Hexagon::DoubleRegsRegClass.contains(RegA))
1892 for (MCSubRegIterator SubRegs(RegA, &HRI); SubRegs.isValid(); ++SubRegs)
1893 if (RegB == *SubRegs)
1896 if (Hexagon::DoubleRegsRegClass.contains(RegB))
1897 for (MCSubRegIterator SubRegs(RegB, &HRI); SubRegs.isValid(); ++SubRegs)
1898 if (RegA == *SubRegs)
1906 // Returns true if the instruction is already a .cur.
// Opcode whitelist of the .cur vector loads (64B and 128B modes).
1907 bool HexagonInstrInfo::isDotCurInst(const MachineInstr* MI) const {
1908 switch (MI->getOpcode()) {
1909 case Hexagon::V6_vL32b_cur_pi:
1910 case Hexagon::V6_vL32b_cur_ai:
1911 case Hexagon::V6_vL32b_cur_pi_128B:
1912 case Hexagon::V6_vL32b_cur_ai_128B:
1919 // Returns true, if any one of the operands is a dot new
1920 // insn, whether it is predicated dot new or register dot new.
// .new if it is a new-value instruction or a predicated .new form.
1921 bool HexagonInstrInfo::isDotNewInst(const MachineInstr* MI) const {
1922 if (isNewValueInst(MI) || (isPredicated(*MI) && isPredicatedNew(*MI)))
1929 /// Symmetrical. See if these two instructions are fit for duplex pair.
// Checked in both orders, so the relation is symmetric in MIa/MIb.
1930 bool HexagonInstrInfo::isDuplexPair(const MachineInstr *MIa,
1931 const MachineInstr *MIb) const {
1932 HexagonII::SubInstructionGroup MIaG = getDuplexCandidateGroup(MIa);
1933 HexagonII::SubInstructionGroup MIbG = getDuplexCandidateGroup(MIb);
1934 return (isDuplexPairMatch(MIaG, MIbG) || isDuplexPairMatch(MIbG, MIaG));
1938 bool HexagonInstrInfo::isEarlySourceInstr(const MachineInstr *MI) const {
// Memory accesses and compares are excluded outright.
1942 if (MI->mayLoad() || MI->mayStore() || MI->isCompare())
// Multiply-like scheduling class qualifies.
1946 unsigned SchedClass = MI->getDesc().getSchedClass();
1947 if (SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23)
// True for the hardware-loop terminators ENDLOOP0/ENDLOOP1.
1953 bool HexagonInstrInfo::isEndLoopN(unsigned Opcode) const {
1954 return (Opcode == Hexagon::ENDLOOP0 ||
1955 Opcode == Hexagon::ENDLOOP1);
// Operand kinds that are symbolic expressions (relocatable references).
1959 bool HexagonInstrInfo::isExpr(unsigned OpType) const {
1961 case MachineOperand::MO_MachineBasicBlock:
1962 case MachineOperand::MO_GlobalAddress:
1963 case MachineOperand::MO_ExternalSymbol:
1964 case MachineOperand::MO_JumpTableIndex:
1965 case MachineOperand::MO_ConstantPoolIndex:
1966 case MachineOperand::MO_BlockAddress:
// Extendability is encoded in TSFlags, with TFR_FI kept as a legacy
// special case.
1974 bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const {
1975 const MCInstrDesc &MID = MI->getDesc();
1976 const uint64_t F = MID.TSFlags;
1977 if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
1980 // TODO: This is largely obsolete now. Will need to be removed
1981 // in consecutive patches.
1982 switch(MI->getOpcode()) {
1983 // TFR_FI Remains a special case.
1984 case Hexagon::TFR_FI:
1993 // This returns true in two cases:
1994 // - The OP code itself indicates that this is an extended instruction.
1995 // - One of MOs has been marked with HMOTF_ConstExtended flag.
1996 bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const {
1997 // First check if this is permanently extended op code.
1998 const uint64_t F = MI->getDesc().TSFlags;
1999 if ((F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask)
2001 // Use MO operand flags to determine if one of MI's operands
2002 // has HMOTF_ConstExtended flag set.
2003 for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
2004 E = MI->operands_end(); I != E; ++I) {
2005 if (I->getTargetFlags() && HexagonII::HMOTF_ConstExtended)
2012 bool HexagonInstrInfo::isFloat(const MachineInstr *MI) const {
2013 unsigned Opcode = MI->getOpcode();
2014 const uint64_t F = get(Opcode).TSFlags;
2015 return (F >> HexagonII::FPPos) & HexagonII::FPMask;
2019 // No V60 HVX VMEM with A_INDIRECT.
2020 bool HexagonInstrInfo::isHVXMemWithAIndirect(const MachineInstr *I,
2021 const MachineInstr *J) const {
2022 if (!isV60VectorInstruction(I))
2024 if (!I->mayLoad() && !I->mayStore())
2026 return J->isIndirectBranch() || isIndirectCall(J) || isIndirectL4Return(J);
2030 bool HexagonInstrInfo::isIndirectCall(const MachineInstr *MI) const {
2031 switch (MI->getOpcode()) {
2032 case Hexagon::J2_callr :
2033 case Hexagon::J2_callrf :
2034 case Hexagon::J2_callrt :
2041 bool HexagonInstrInfo::isIndirectL4Return(const MachineInstr *MI) const {
2042 switch (MI->getOpcode()) {
2043 case Hexagon::L4_return :
2044 case Hexagon::L4_return_t :
2045 case Hexagon::L4_return_f :
2046 case Hexagon::L4_return_fnew_pnt :
2047 case Hexagon::L4_return_fnew_pt :
2048 case Hexagon::L4_return_tnew_pnt :
2049 case Hexagon::L4_return_tnew_pt :
2056 bool HexagonInstrInfo::isJumpR(const MachineInstr *MI) const {
2057 switch (MI->getOpcode()) {
2058 case Hexagon::J2_jumpr :
2059 case Hexagon::J2_jumprt :
2060 case Hexagon::J2_jumprf :
2061 case Hexagon::J2_jumprtnewpt :
2062 case Hexagon::J2_jumprfnewpt :
2063 case Hexagon::J2_jumprtnew :
2064 case Hexagon::J2_jumprfnew :
2071 // Return true if a given MI can accomodate given offset.
2072 // Use abs estimate as oppose to the exact number.
2073 // TODO: This will need to be changed to use MC level
2074 // definition of instruction extendable field size.
2075 bool HexagonInstrInfo::isJumpWithinBranchRange(const MachineInstr *MI,
2076 unsigned offset) const {
2077 // This selection of jump instructions matches to that what
2078 // AnalyzeBranch can parse, plus NVJ.
2079 if (isNewValueJump(MI)) // r9:2
2080 return isInt<11>(offset);
2082 switch (MI->getOpcode()) {
2083 // Still missing Jump to address condition on register value.
2086 case Hexagon::J2_jump: // bits<24> dst; // r22:2
2087 case Hexagon::J2_call:
2088 case Hexagon::CALLv3nr:
2089 return isInt<24>(offset);
2090 case Hexagon::J2_jumpt: //bits<17> dst; // r15:2
2091 case Hexagon::J2_jumpf:
2092 case Hexagon::J2_jumptnew:
2093 case Hexagon::J2_jumptnewpt:
2094 case Hexagon::J2_jumpfnew:
2095 case Hexagon::J2_jumpfnewpt:
2096 case Hexagon::J2_callt:
2097 case Hexagon::J2_callf:
2098 return isInt<17>(offset);
2099 case Hexagon::J2_loop0i:
2100 case Hexagon::J2_loop0iext:
2101 case Hexagon::J2_loop0r:
2102 case Hexagon::J2_loop0rext:
2103 case Hexagon::J2_loop1i:
2104 case Hexagon::J2_loop1iext:
2105 case Hexagon::J2_loop1r:
2106 case Hexagon::J2_loop1rext:
2107 return isInt<9>(offset);
2108 // TODO: Add all the compound branches here. Can we do this in Relation model?
2109 case Hexagon::J4_cmpeqi_tp0_jump_nt:
2110 case Hexagon::J4_cmpeqi_tp1_jump_nt:
2111 return isInt<11>(offset);
2116 bool HexagonInstrInfo::isLateInstrFeedsEarlyInstr(const MachineInstr *LRMI,
2117 const MachineInstr *ESMI) const {
2121 bool isLate = isLateResultInstr(LRMI);
2122 bool isEarly = isEarlySourceInstr(ESMI);
2124 DEBUG(dbgs() << "V60" << (isLate ? "-LR " : " -- "));
2125 DEBUG(LRMI->dump());
2126 DEBUG(dbgs() << "V60" << (isEarly ? "-ES " : " -- "));
2127 DEBUG(ESMI->dump());
2129 if (isLate && isEarly) {
2130 DEBUG(dbgs() << "++Is Late Result feeding Early Source\n");
2138 bool HexagonInstrInfo::isLateResultInstr(const MachineInstr *MI) const {
2142 switch (MI->getOpcode()) {
2143 case TargetOpcode::EXTRACT_SUBREG:
2144 case TargetOpcode::INSERT_SUBREG:
2145 case TargetOpcode::SUBREG_TO_REG:
2146 case TargetOpcode::REG_SEQUENCE:
2147 case TargetOpcode::IMPLICIT_DEF:
2148 case TargetOpcode::COPY:
2149 case TargetOpcode::INLINEASM:
2150 case TargetOpcode::PHI:
2156 unsigned SchedClass = MI->getDesc().getSchedClass();
2158 switch (SchedClass) {
2159 case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
2160 case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
2161 case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
2162 case Hexagon::Sched::ALU64_tc_1_SLOT23:
2163 case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
2164 case Hexagon::Sched::S_2op_tc_1_SLOT23:
2165 case Hexagon::Sched::S_3op_tc_1_SLOT23:
2166 case Hexagon::Sched::V2LDST_tc_ld_SLOT01:
2167 case Hexagon::Sched::V2LDST_tc_st_SLOT0:
2168 case Hexagon::Sched::V2LDST_tc_st_SLOT01:
2169 case Hexagon::Sched::V4LDST_tc_ld_SLOT01:
2170 case Hexagon::Sched::V4LDST_tc_st_SLOT0:
2171 case Hexagon::Sched::V4LDST_tc_st_SLOT01:
2178 bool HexagonInstrInfo::isLateSourceInstr(const MachineInstr *MI) const {
2182 // Instructions with iclass A_CVI_VX and attribute A_CVI_LATE uses a multiply
2183 // resource, but all operands can be received late like an ALU instruction.
2184 return MI->getDesc().getSchedClass() == Hexagon::Sched::CVI_VX_LATE;
2188 bool HexagonInstrInfo::isLoopN(const MachineInstr *MI) const {
2189 unsigned Opcode = MI->getOpcode();
2190 return Opcode == Hexagon::J2_loop0i ||
2191 Opcode == Hexagon::J2_loop0r ||
2192 Opcode == Hexagon::J2_loop0iext ||
2193 Opcode == Hexagon::J2_loop0rext ||
2194 Opcode == Hexagon::J2_loop1i ||
2195 Opcode == Hexagon::J2_loop1r ||
2196 Opcode == Hexagon::J2_loop1iext ||
2197 Opcode == Hexagon::J2_loop1rext;
2201 bool HexagonInstrInfo::isMemOp(const MachineInstr *MI) const {
2202 switch (MI->getOpcode()) {
2203 default: return false;
2204 case Hexagon::L4_iadd_memopw_io :
2205 case Hexagon::L4_isub_memopw_io :
2206 case Hexagon::L4_add_memopw_io :
2207 case Hexagon::L4_sub_memopw_io :
2208 case Hexagon::L4_and_memopw_io :
2209 case Hexagon::L4_or_memopw_io :
2210 case Hexagon::L4_iadd_memoph_io :
2211 case Hexagon::L4_isub_memoph_io :
2212 case Hexagon::L4_add_memoph_io :
2213 case Hexagon::L4_sub_memoph_io :
2214 case Hexagon::L4_and_memoph_io :
2215 case Hexagon::L4_or_memoph_io :
2216 case Hexagon::L4_iadd_memopb_io :
2217 case Hexagon::L4_isub_memopb_io :
2218 case Hexagon::L4_add_memopb_io :
2219 case Hexagon::L4_sub_memopb_io :
2220 case Hexagon::L4_and_memopb_io :
2221 case Hexagon::L4_or_memopb_io :
2222 case Hexagon::L4_ior_memopb_io:
2223 case Hexagon::L4_ior_memoph_io:
2224 case Hexagon::L4_ior_memopw_io:
2225 case Hexagon::L4_iand_memopb_io:
2226 case Hexagon::L4_iand_memoph_io:
2227 case Hexagon::L4_iand_memopw_io:
2234 bool HexagonInstrInfo::isNewValue(const MachineInstr* MI) const {
2235 const uint64_t F = MI->getDesc().TSFlags;
2236 return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
2240 bool HexagonInstrInfo::isNewValue(unsigned Opcode) const {
2241 const uint64_t F = get(Opcode).TSFlags;
2242 return (F >> HexagonII::NewValuePos) & HexagonII::NewValueMask;
2246 bool HexagonInstrInfo::isNewValueInst(const MachineInstr *MI) const {
2247 return isNewValueJump(MI) || isNewValueStore(MI);
2251 bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const {
2252 return isNewValue(MI) && MI->isBranch();
2256 bool HexagonInstrInfo::isNewValueJump(unsigned Opcode) const {
2257 return isNewValue(Opcode) && get(Opcode).isBranch() && isPredicated(Opcode);
2261 bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
2262 const uint64_t F = MI->getDesc().TSFlags;
2263 return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
2267 bool HexagonInstrInfo::isNewValueStore(unsigned Opcode) const {
2268 const uint64_t F = get(Opcode).TSFlags;
2269 return (F >> HexagonII::NVStorePos) & HexagonII::NVStoreMask;
2273 // Returns true if a particular operand is extendable for an instruction.
2274 bool HexagonInstrInfo::isOperandExtended(const MachineInstr *MI,
2275 unsigned OperandNum) const {
2276 const uint64_t F = MI->getDesc().TSFlags;
2277 return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
2282 bool HexagonInstrInfo::isPostIncrement(const MachineInstr* MI) const {
2283 return getAddrMode(MI) == HexagonII::PostInc;
2287 bool HexagonInstrInfo::isPredicatedNew(const MachineInstr &MI) const {
2288 const uint64_t F = MI.getDesc().TSFlags;
2289 assert(isPredicated(MI));
2290 return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
2294 bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
2295 const uint64_t F = get(Opcode).TSFlags;
2296 assert(isPredicated(Opcode));
2297 return (F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask;
2301 bool HexagonInstrInfo::isPredicatedTrue(const MachineInstr &MI) const {
2302 const uint64_t F = MI.getDesc().TSFlags;
2303 return !((F >> HexagonII::PredicatedFalsePos) &
2304 HexagonII::PredicatedFalseMask);
2308 bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
2309 const uint64_t F = get(Opcode).TSFlags;
2310 // Make sure that the instruction is predicated.
2311 assert((F>> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
2312 return !((F >> HexagonII::PredicatedFalsePos) &
2313 HexagonII::PredicatedFalseMask);
2317 bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
2318 const uint64_t F = get(Opcode).TSFlags;
2319 return (F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask;
2323 bool HexagonInstrInfo::isPredicateLate(unsigned Opcode) const {
2324 const uint64_t F = get(Opcode).TSFlags;
2325 return ~(F >> HexagonII::PredicateLatePos) & HexagonII::PredicateLateMask;
2329 bool HexagonInstrInfo::isPredictedTaken(unsigned Opcode) const {
2330 const uint64_t F = get(Opcode).TSFlags;
2331 assert(get(Opcode).isBranch() &&
2332 (isPredicatedNew(Opcode) || isNewValue(Opcode)));
2333 return (F >> HexagonII::TakenPos) & HexagonII::TakenMask;
2337 bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const {
2338 return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
2339 MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT ||
2340 MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_PIC ||
2341 MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC;
2344 bool HexagonInstrInfo::isSignExtendingLoad(const MachineInstr &MI) const {
2345 switch (MI.getOpcode()) {
2347 case Hexagon::L2_loadrb_io:
2348 case Hexagon::L4_loadrb_ur:
2349 case Hexagon::L4_loadrb_ap:
2350 case Hexagon::L2_loadrb_pr:
2351 case Hexagon::L2_loadrb_pbr:
2352 case Hexagon::L2_loadrb_pi:
2353 case Hexagon::L2_loadrb_pci:
2354 case Hexagon::L2_loadrb_pcr:
2355 case Hexagon::L2_loadbsw2_io:
2356 case Hexagon::L4_loadbsw2_ur:
2357 case Hexagon::L4_loadbsw2_ap:
2358 case Hexagon::L2_loadbsw2_pr:
2359 case Hexagon::L2_loadbsw2_pbr:
2360 case Hexagon::L2_loadbsw2_pi:
2361 case Hexagon::L2_loadbsw2_pci:
2362 case Hexagon::L2_loadbsw2_pcr:
2363 case Hexagon::L2_loadbsw4_io:
2364 case Hexagon::L4_loadbsw4_ur:
2365 case Hexagon::L4_loadbsw4_ap:
2366 case Hexagon::L2_loadbsw4_pr:
2367 case Hexagon::L2_loadbsw4_pbr:
2368 case Hexagon::L2_loadbsw4_pi:
2369 case Hexagon::L2_loadbsw4_pci:
2370 case Hexagon::L2_loadbsw4_pcr:
2371 case Hexagon::L4_loadrb_rr:
2372 case Hexagon::L2_ploadrbt_io:
2373 case Hexagon::L2_ploadrbt_pi:
2374 case Hexagon::L2_ploadrbf_io:
2375 case Hexagon::L2_ploadrbf_pi:
2376 case Hexagon::L2_ploadrbtnew_io:
2377 case Hexagon::L2_ploadrbfnew_io:
2378 case Hexagon::L4_ploadrbt_rr:
2379 case Hexagon::L4_ploadrbf_rr:
2380 case Hexagon::L4_ploadrbtnew_rr:
2381 case Hexagon::L4_ploadrbfnew_rr:
2382 case Hexagon::L2_ploadrbtnew_pi:
2383 case Hexagon::L2_ploadrbfnew_pi:
2384 case Hexagon::L4_ploadrbt_abs:
2385 case Hexagon::L4_ploadrbf_abs:
2386 case Hexagon::L4_ploadrbtnew_abs:
2387 case Hexagon::L4_ploadrbfnew_abs:
2388 case Hexagon::L2_loadrbgp:
2390 case Hexagon::L2_loadrh_io:
2391 case Hexagon::L4_loadrh_ur:
2392 case Hexagon::L4_loadrh_ap:
2393 case Hexagon::L2_loadrh_pr:
2394 case Hexagon::L2_loadrh_pbr:
2395 case Hexagon::L2_loadrh_pi:
2396 case Hexagon::L2_loadrh_pci:
2397 case Hexagon::L2_loadrh_pcr:
2398 case Hexagon::L4_loadrh_rr:
2399 case Hexagon::L2_ploadrht_io:
2400 case Hexagon::L2_ploadrht_pi:
2401 case Hexagon::L2_ploadrhf_io:
2402 case Hexagon::L2_ploadrhf_pi:
2403 case Hexagon::L2_ploadrhtnew_io:
2404 case Hexagon::L2_ploadrhfnew_io:
2405 case Hexagon::L4_ploadrht_rr:
2406 case Hexagon::L4_ploadrhf_rr:
2407 case Hexagon::L4_ploadrhtnew_rr:
2408 case Hexagon::L4_ploadrhfnew_rr:
2409 case Hexagon::L2_ploadrhtnew_pi:
2410 case Hexagon::L2_ploadrhfnew_pi:
2411 case Hexagon::L4_ploadrht_abs:
2412 case Hexagon::L4_ploadrhf_abs:
2413 case Hexagon::L4_ploadrhtnew_abs:
2414 case Hexagon::L4_ploadrhfnew_abs:
2415 case Hexagon::L2_loadrhgp:
2423 bool HexagonInstrInfo::isSolo(const MachineInstr* MI) const {
2424 const uint64_t F = MI->getDesc().TSFlags;
2425 return (F >> HexagonII::SoloPos) & HexagonII::SoloMask;
2429 bool HexagonInstrInfo::isSpillPredRegOp(const MachineInstr *MI) const {
2430 switch (MI->getOpcode()) {
2431 case Hexagon::STriw_pred :
2432 case Hexagon::LDriw_pred :
2440 bool HexagonInstrInfo::isTailCall(const MachineInstr *MI) const {
2441 if (!MI->isBranch())
2444 for (auto &Op : MI->operands())
2445 if (Op.isGlobal() || Op.isSymbol())
2451 // Returns true when SU has a timing class TC1.
2452 bool HexagonInstrInfo::isTC1(const MachineInstr *MI) const {
2453 unsigned SchedClass = MI->getDesc().getSchedClass();
2454 switch (SchedClass) {
2455 case Hexagon::Sched::ALU32_2op_tc_1_SLOT0123:
2456 case Hexagon::Sched::ALU32_3op_tc_1_SLOT0123:
2457 case Hexagon::Sched::ALU32_ADDI_tc_1_SLOT0123:
2458 case Hexagon::Sched::ALU64_tc_1_SLOT23:
2459 case Hexagon::Sched::EXTENDER_tc_1_SLOT0123:
2460 //case Hexagon::Sched::M_tc_1_SLOT23:
2461 case Hexagon::Sched::S_2op_tc_1_SLOT23:
2462 case Hexagon::Sched::S_3op_tc_1_SLOT23:
2471 bool HexagonInstrInfo::isTC2(const MachineInstr *MI) const {
2472 unsigned SchedClass = MI->getDesc().getSchedClass();
2473 switch (SchedClass) {
2474 case Hexagon::Sched::ALU32_3op_tc_2_SLOT0123:
2475 case Hexagon::Sched::ALU64_tc_2_SLOT23:
2476 case Hexagon::Sched::CR_tc_2_SLOT3:
2477 case Hexagon::Sched::M_tc_2_SLOT23:
2478 case Hexagon::Sched::S_2op_tc_2_SLOT23:
2479 case Hexagon::Sched::S_3op_tc_2_SLOT23:
2488 bool HexagonInstrInfo::isTC2Early(const MachineInstr *MI) const {
2489 unsigned SchedClass = MI->getDesc().getSchedClass();
2490 switch (SchedClass) {
2491 case Hexagon::Sched::ALU32_2op_tc_2early_SLOT0123:
2492 case Hexagon::Sched::ALU32_3op_tc_2early_SLOT0123:
2493 case Hexagon::Sched::ALU64_tc_2early_SLOT23:
2494 case Hexagon::Sched::CR_tc_2early_SLOT23:
2495 case Hexagon::Sched::CR_tc_2early_SLOT3:
2496 case Hexagon::Sched::J_tc_2early_SLOT0123:
2497 case Hexagon::Sched::J_tc_2early_SLOT2:
2498 case Hexagon::Sched::J_tc_2early_SLOT23:
2499 case Hexagon::Sched::S_2op_tc_2early_SLOT23:
2500 case Hexagon::Sched::S_3op_tc_2early_SLOT23:
2509 bool HexagonInstrInfo::isTC4x(const MachineInstr *MI) const {
2513 unsigned SchedClass = MI->getDesc().getSchedClass();
2514 return SchedClass == Hexagon::Sched::M_tc_3or4x_SLOT23;
2518 // Schedule this ASAP.
2519 bool HexagonInstrInfo::isToBeScheduledASAP(const MachineInstr *MI1,
2520 const MachineInstr *MI2) const {
2523 if (mayBeCurLoad(MI1)) {
2524 // if (result of SU is used in Next) return true;
2525 unsigned DstReg = MI1->getOperand(0).getReg();
2526 int N = MI2->getNumOperands();
2527 for (int I = 0; I < N; I++)
2528 if (MI2->getOperand(I).isReg() && DstReg == MI2->getOperand(I).getReg())
2531 if (mayBeNewStore(MI2))
2532 if (MI2->getOpcode() == Hexagon::V6_vS32b_pi)
2533 if (MI1->getOperand(0).isReg() && MI2->getOperand(3).isReg() &&
2534 MI1->getOperand(0).getReg() == MI2->getOperand(3).getReg())
2540 bool HexagonInstrInfo::isV60VectorInstruction(const MachineInstr *MI) const {
2544 const uint64_t V = getType(MI);
2545 return HexagonII::TypeCVI_FIRST <= V && V <= HexagonII::TypeCVI_LAST;
2549 // Check if the Offset is a valid auto-inc imm by Load/Store Type.
2551 bool HexagonInstrInfo::isValidAutoIncImm(const EVT VT, const int Offset) const {
2552 if (VT == MVT::v16i32 || VT == MVT::v8i64 ||
2553 VT == MVT::v32i16 || VT == MVT::v64i8) {
2554 return (Offset >= Hexagon_MEMV_AUTOINC_MIN &&
2555 Offset <= Hexagon_MEMV_AUTOINC_MAX &&
2556 (Offset & 0x3f) == 0);
2559 if (VT == MVT::v32i32 || VT == MVT::v16i64 ||
2560 VT == MVT::v64i16 || VT == MVT::v128i8) {
2561 return (Offset >= Hexagon_MEMV_AUTOINC_MIN_128B &&
2562 Offset <= Hexagon_MEMV_AUTOINC_MAX_128B &&
2563 (Offset & 0x7f) == 0);
2565 if (VT == MVT::i64) {
2566 return (Offset >= Hexagon_MEMD_AUTOINC_MIN &&
2567 Offset <= Hexagon_MEMD_AUTOINC_MAX &&
2568 (Offset & 0x7) == 0);
2570 if (VT == MVT::i32) {
2571 return (Offset >= Hexagon_MEMW_AUTOINC_MIN &&
2572 Offset <= Hexagon_MEMW_AUTOINC_MAX &&
2573 (Offset & 0x3) == 0);
2575 if (VT == MVT::i16) {
2576 return (Offset >= Hexagon_MEMH_AUTOINC_MIN &&
2577 Offset <= Hexagon_MEMH_AUTOINC_MAX &&
2578 (Offset & 0x1) == 0);
2580 if (VT == MVT::i8) {
2581 return (Offset >= Hexagon_MEMB_AUTOINC_MIN &&
2582 Offset <= Hexagon_MEMB_AUTOINC_MAX);
2584 llvm_unreachable("Not an auto-inc opc!");
2588 bool HexagonInstrInfo::isValidOffset(unsigned Opcode, int Offset,
2589 bool Extend) const {
2590 // This function is to check whether the "Offset" is in the correct range of
2591 // the given "Opcode". If "Offset" is not in the correct range, "A2_addi" is
2592 // inserted to calculate the final address. Due to this reason, the function
2593 // assumes that the "Offset" has correct alignment.
2594 // We used to assert if the offset was not properly aligned, however,
2595 // there are cases where a misaligned pointer recast can cause this
2596 // problem, and we need to allow for it. The front end warns of such
2597 // misaligns with respect to load size.
2600 case Hexagon::STriq_pred_V6:
2601 case Hexagon::STriq_pred_vec_V6:
2602 case Hexagon::STriv_pseudo_V6:
2603 case Hexagon::STrivv_pseudo_V6:
2604 case Hexagon::LDriq_pred_V6:
2605 case Hexagon::LDriq_pred_vec_V6:
2606 case Hexagon::LDriv_pseudo_V6:
2607 case Hexagon::LDrivv_pseudo_V6:
2608 case Hexagon::LDrivv_indexed:
2609 case Hexagon::STrivv_indexed:
2610 case Hexagon::V6_vL32b_ai:
2611 case Hexagon::V6_vS32b_ai:
2612 case Hexagon::V6_vL32Ub_ai:
2613 case Hexagon::V6_vS32Ub_ai:
2614 return (Offset >= Hexagon_MEMV_OFFSET_MIN) &&
2615 (Offset <= Hexagon_MEMV_OFFSET_MAX);
2617 case Hexagon::STriq_pred_V6_128B:
2618 case Hexagon::STriq_pred_vec_V6_128B:
2619 case Hexagon::STriv_pseudo_V6_128B:
2620 case Hexagon::STrivv_pseudo_V6_128B:
2621 case Hexagon::LDriq_pred_V6_128B:
2622 case Hexagon::LDriq_pred_vec_V6_128B:
2623 case Hexagon::LDriv_pseudo_V6_128B:
2624 case Hexagon::LDrivv_pseudo_V6_128B:
2625 case Hexagon::LDrivv_indexed_128B:
2626 case Hexagon::STrivv_indexed_128B:
2627 case Hexagon::V6_vL32b_ai_128B:
2628 case Hexagon::V6_vS32b_ai_128B:
2629 case Hexagon::V6_vL32Ub_ai_128B:
2630 case Hexagon::V6_vS32Ub_ai_128B:
2631 return (Offset >= Hexagon_MEMV_OFFSET_MIN_128B) &&
2632 (Offset <= Hexagon_MEMV_OFFSET_MAX_128B);
2634 case Hexagon::J2_loop0i:
2635 case Hexagon::J2_loop1i:
2636 return isUInt<10>(Offset);
2638 case Hexagon::S4_storeirb_io:
2639 case Hexagon::S4_storeirbt_io:
2640 case Hexagon::S4_storeirbf_io:
2641 return isUInt<6>(Offset);
2643 case Hexagon::S4_storeirh_io:
2644 case Hexagon::S4_storeirht_io:
2645 case Hexagon::S4_storeirhf_io:
2646 return isShiftedUInt<6,1>(Offset);
2648 case Hexagon::S4_storeiri_io:
2649 case Hexagon::S4_storeirit_io:
2650 case Hexagon::S4_storeirif_io:
2651 return isShiftedUInt<6,2>(Offset);
2658 case Hexagon::L2_loadri_io:
2659 case Hexagon::S2_storeri_io:
2660 return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
2661 (Offset <= Hexagon_MEMW_OFFSET_MAX);
2663 case Hexagon::L2_loadrd_io:
2664 case Hexagon::S2_storerd_io:
2665 return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
2666 (Offset <= Hexagon_MEMD_OFFSET_MAX);
2668 case Hexagon::L2_loadrh_io:
2669 case Hexagon::L2_loadruh_io:
2670 case Hexagon::S2_storerh_io:
2671 return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
2672 (Offset <= Hexagon_MEMH_OFFSET_MAX);
2674 case Hexagon::L2_loadrb_io:
2675 case Hexagon::L2_loadrub_io:
2676 case Hexagon::S2_storerb_io:
2677 return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
2678 (Offset <= Hexagon_MEMB_OFFSET_MAX);
2680 case Hexagon::A2_addi:
2681 return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
2682 (Offset <= Hexagon_ADDI_OFFSET_MAX);
2684 case Hexagon::L4_iadd_memopw_io :
2685 case Hexagon::L4_isub_memopw_io :
2686 case Hexagon::L4_add_memopw_io :
2687 case Hexagon::L4_sub_memopw_io :
2688 case Hexagon::L4_and_memopw_io :
2689 case Hexagon::L4_or_memopw_io :
2690 return (0 <= Offset && Offset <= 255);
2692 case Hexagon::L4_iadd_memoph_io :
2693 case Hexagon::L4_isub_memoph_io :
2694 case Hexagon::L4_add_memoph_io :
2695 case Hexagon::L4_sub_memoph_io :
2696 case Hexagon::L4_and_memoph_io :
2697 case Hexagon::L4_or_memoph_io :
2698 return (0 <= Offset && Offset <= 127);
2700 case Hexagon::L4_iadd_memopb_io :
2701 case Hexagon::L4_isub_memopb_io :
2702 case Hexagon::L4_add_memopb_io :
2703 case Hexagon::L4_sub_memopb_io :
2704 case Hexagon::L4_and_memopb_io :
2705 case Hexagon::L4_or_memopb_io :
2706 return (0 <= Offset && Offset <= 63);
2708 // LDriw_xxx and STriw_xxx are pseudo operations, so it has to take offset of
2709 // any size. Later pass knows how to handle it.
2710 case Hexagon::STriw_pred:
2711 case Hexagon::LDriw_pred:
2712 case Hexagon::STriw_mod:
2713 case Hexagon::LDriw_mod:
2716 case Hexagon::TFR_FI:
2717 case Hexagon::TFR_FIA:
2718 case Hexagon::INLINEASM:
2721 case Hexagon::L2_ploadrbt_io:
2722 case Hexagon::L2_ploadrbf_io:
2723 case Hexagon::L2_ploadrubt_io:
2724 case Hexagon::L2_ploadrubf_io:
2725 case Hexagon::S2_pstorerbt_io:
2726 case Hexagon::S2_pstorerbf_io:
2727 return isUInt<6>(Offset);
2729 case Hexagon::L2_ploadrht_io:
2730 case Hexagon::L2_ploadrhf_io:
2731 case Hexagon::L2_ploadruht_io:
2732 case Hexagon::L2_ploadruhf_io:
2733 case Hexagon::S2_pstorerht_io:
2734 case Hexagon::S2_pstorerhf_io:
2735 return isShiftedUInt<6,1>(Offset);
2737 case Hexagon::L2_ploadrit_io:
2738 case Hexagon::L2_ploadrif_io:
2739 case Hexagon::S2_pstorerit_io:
2740 case Hexagon::S2_pstorerif_io:
2741 return isShiftedUInt<6,2>(Offset);
2743 case Hexagon::L2_ploadrdt_io:
2744 case Hexagon::L2_ploadrdf_io:
2745 case Hexagon::S2_pstorerdt_io:
2746 case Hexagon::S2_pstorerdf_io:
2747 return isShiftedUInt<6,3>(Offset);
2750 llvm_unreachable("No offset range is defined for this opcode. "
2751 "Please define it in the above switch statement!");
2755 bool HexagonInstrInfo::isVecAcc(const MachineInstr *MI) const {
2756 return MI && isV60VectorInstruction(MI) && isAccumulator(MI);
2760 bool HexagonInstrInfo::isVecALU(const MachineInstr *MI) const {
2763 const uint64_t F = get(MI->getOpcode()).TSFlags;
2764 const uint64_t V = ((F >> HexagonII::TypePos) & HexagonII::TypeMask);
2766 V == HexagonII::TypeCVI_VA ||
2767 V == HexagonII::TypeCVI_VA_DV;
2771 bool HexagonInstrInfo::isVecUsableNextPacket(const MachineInstr *ProdMI,
2772 const MachineInstr *ConsMI) const {
2773 if (EnableACCForwarding && isVecAcc(ProdMI) && isVecAcc(ConsMI))
2776 if (EnableALUForwarding && (isVecALU(ConsMI) || isLateSourceInstr(ConsMI)))
2779 if (mayBeNewStore(ConsMI))
2785 bool HexagonInstrInfo::isZeroExtendingLoad(const MachineInstr &MI) const {
2786 switch (MI.getOpcode()) {
2788 case Hexagon::L2_loadrub_io:
2789 case Hexagon::L4_loadrub_ur:
2790 case Hexagon::L4_loadrub_ap:
2791 case Hexagon::L2_loadrub_pr:
2792 case Hexagon::L2_loadrub_pbr:
2793 case Hexagon::L2_loadrub_pi:
2794 case Hexagon::L2_loadrub_pci:
2795 case Hexagon::L2_loadrub_pcr:
2796 case Hexagon::L2_loadbzw2_io:
2797 case Hexagon::L4_loadbzw2_ur:
2798 case Hexagon::L4_loadbzw2_ap:
2799 case Hexagon::L2_loadbzw2_pr:
2800 case Hexagon::L2_loadbzw2_pbr:
2801 case Hexagon::L2_loadbzw2_pi:
2802 case Hexagon::L2_loadbzw2_pci:
2803 case Hexagon::L2_loadbzw2_pcr:
2804 case Hexagon::L2_loadbzw4_io:
2805 case Hexagon::L4_loadbzw4_ur:
2806 case Hexagon::L4_loadbzw4_ap:
2807 case Hexagon::L2_loadbzw4_pr:
2808 case Hexagon::L2_loadbzw4_pbr:
2809 case Hexagon::L2_loadbzw4_pi:
2810 case Hexagon::L2_loadbzw4_pci:
2811 case Hexagon::L2_loadbzw4_pcr:
2812 case Hexagon::L4_loadrub_rr:
2813 case Hexagon::L2_ploadrubt_io:
2814 case Hexagon::L2_ploadrubt_pi:
2815 case Hexagon::L2_ploadrubf_io:
2816 case Hexagon::L2_ploadrubf_pi:
2817 case Hexagon::L2_ploadrubtnew_io:
2818 case Hexagon::L2_ploadrubfnew_io:
2819 case Hexagon::L4_ploadrubt_rr:
2820 case Hexagon::L4_ploadrubf_rr:
2821 case Hexagon::L4_ploadrubtnew_rr:
2822 case Hexagon::L4_ploadrubfnew_rr:
2823 case Hexagon::L2_ploadrubtnew_pi:
2824 case Hexagon::L2_ploadrubfnew_pi:
2825 case Hexagon::L4_ploadrubt_abs:
2826 case Hexagon::L4_ploadrubf_abs:
2827 case Hexagon::L4_ploadrubtnew_abs:
2828 case Hexagon::L4_ploadrubfnew_abs:
2829 case Hexagon::L2_loadrubgp:
2831 case Hexagon::L2_loadruh_io:
2832 case Hexagon::L4_loadruh_ur:
2833 case Hexagon::L4_loadruh_ap:
2834 case Hexagon::L2_loadruh_pr:
2835 case Hexagon::L2_loadruh_pbr:
2836 case Hexagon::L2_loadruh_pi:
2837 case Hexagon::L2_loadruh_pci:
2838 case Hexagon::L2_loadruh_pcr:
2839 case Hexagon::L4_loadruh_rr:
2840 case Hexagon::L2_ploadruht_io:
2841 case Hexagon::L2_ploadruht_pi:
2842 case Hexagon::L2_ploadruhf_io:
2843 case Hexagon::L2_ploadruhf_pi:
2844 case Hexagon::L2_ploadruhtnew_io:
2845 case Hexagon::L2_ploadruhfnew_io:
2846 case Hexagon::L4_ploadruht_rr:
2847 case Hexagon::L4_ploadruhf_rr:
2848 case Hexagon::L4_ploadruhtnew_rr:
2849 case Hexagon::L4_ploadruhfnew_rr:
2850 case Hexagon::L2_ploadruhtnew_pi:
2851 case Hexagon::L2_ploadruhfnew_pi:
2852 case Hexagon::L4_ploadruht_abs:
2853 case Hexagon::L4_ploadruhf_abs:
2854 case Hexagon::L4_ploadruhtnew_abs:
2855 case Hexagon::L4_ploadruhfnew_abs:
2856 case Hexagon::L2_loadruhgp:
2864 // Add latency to instruction.
2865 bool HexagonInstrInfo::addLatencyToSchedule(const MachineInstr *MI1,
2866 const MachineInstr *MI2) const {
2867 if (isV60VectorInstruction(MI1) && isV60VectorInstruction(MI2))
2868 if (!isVecUsableNextPacket(MI1, MI2))
2874 /// \brief Can these instructions execute at the same time in a bundle.
2875 bool HexagonInstrInfo::canExecuteInBundle(const MachineInstr *First,
2876 const MachineInstr *Second) const {
2877 if (DisableNVSchedule)
2879 if (mayBeNewStore(Second)) {
2880 // Make sure the definition of the first instruction is the value being
2882 const MachineOperand &Stored =
2883 Second->getOperand(Second->getNumOperands() - 1);
2884 if (!Stored.isReg())
2886 for (unsigned i = 0, e = First->getNumOperands(); i < e; ++i) {
2887 const MachineOperand &Op = First->getOperand(i);
2888 if (Op.isReg() && Op.isDef() && Op.getReg() == Stored.getReg())
2896 bool HexagonInstrInfo::hasEHLabel(const MachineBasicBlock *B) const {
2904 // Returns true if an instruction can be converted into a non-extended
2905 // equivalent instruction.
2906 bool HexagonInstrInfo::hasNonExtEquivalent(const MachineInstr *MI) const {
2908 // Check if the instruction has a register form that uses register in place
2909 // of the extended operand, if so return that as the non-extended form.
2910 if (Hexagon::getRegForm(MI->getOpcode()) >= 0)
2913 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
2914 // Check addressing mode and retrieve non-ext equivalent instruction.
2916 switch (getAddrMode(MI)) {
2917 case HexagonII::Absolute :
2918 // Load/store with absolute addressing mode can be converted into
2919 // base+offset mode.
2920 NonExtOpcode = Hexagon::getBaseWithImmOffset(MI->getOpcode());
2922 case HexagonII::BaseImmOffset :
2923 // Load/store with base+offset addressing mode can be converted into
2924 // base+register offset addressing mode. However left shift operand should
2926 NonExtOpcode = Hexagon::getBaseWithRegOffset(MI->getOpcode());
2928 case HexagonII::BaseLongOffset:
2929 NonExtOpcode = Hexagon::getRegShlForm(MI->getOpcode());
2934 if (NonExtOpcode < 0)
2942 bool HexagonInstrInfo::hasPseudoInstrPair(const MachineInstr *MI) const {
2943 return Hexagon::getRealHWInstr(MI->getOpcode(),
2944 Hexagon::InstrType_Pseudo) >= 0;
2948 bool HexagonInstrInfo::hasUncondBranch(const MachineBasicBlock *B)
2950 MachineBasicBlock::const_iterator I = B->getFirstTerminator(), E = B->end();
2960 // Returns true, if a LD insn can be promoted to a cur load.
2961 bool HexagonInstrInfo::mayBeCurLoad(const MachineInstr *MI) const {
2962 auto &HST = MI->getParent()->getParent()->getSubtarget<HexagonSubtarget>();
2963 const uint64_t F = MI->getDesc().TSFlags;
2964 return ((F >> HexagonII::mayCVLoadPos) & HexagonII::mayCVLoadMask) &&
2969 // Returns true, if a ST insn can be promoted to a new-value store.
2970 bool HexagonInstrInfo::mayBeNewStore(const MachineInstr *MI) const {
2971 const uint64_t F = MI->getDesc().TSFlags;
2972 return (F >> HexagonII::mayNVStorePos) & HexagonII::mayNVStoreMask;
2976 bool HexagonInstrInfo::producesStall(const MachineInstr *ProdMI,
2977 const MachineInstr *ConsMI) const {
2978 // There is no stall when ProdMI is not a V60 vector.
2979 if (!isV60VectorInstruction(ProdMI))
2982 // There is no stall when ProdMI and ConsMI are not dependent.
2983 if (!isDependent(ProdMI, ConsMI))
2986 // When Forward Scheduling is enabled, there is no stall if ProdMI and ConsMI
2987 // are scheduled in consecutive packets.
2988 if (isVecUsableNextPacket(ProdMI, ConsMI))
2995 bool HexagonInstrInfo::producesStall(const MachineInstr *MI,
2996 MachineBasicBlock::const_instr_iterator BII) const {
2997 // There is no stall when I is not a V60 vector.
2998 if (!isV60VectorInstruction(MI))
3001 MachineBasicBlock::const_instr_iterator MII = BII;
3002 MachineBasicBlock::const_instr_iterator MIE = MII->getParent()->instr_end();
3004 if (!(*MII).isBundle()) {
3005 const MachineInstr *J = &*MII;
3006 if (!isV60VectorInstruction(J))
3008 else if (isVecUsableNextPacket(J, MI))
3013 for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) {
3014 const MachineInstr *J = &*MII;
3015 if (producesStall(J, MI))
3022 bool HexagonInstrInfo::predCanBeUsedAsDotNew(const MachineInstr *MI,
3023 unsigned PredReg) const {
3024 for (unsigned opNum = 0; opNum < MI->getNumOperands(); opNum++) {
3025 const MachineOperand &MO = MI->getOperand(opNum);
3026 if (MO.isReg() && MO.isDef() && MO.isImplicit() && (MO.getReg() == PredReg))
3027 return false; // Predicate register must be explicitly defined.
3030 // Hexagon Programmer's Reference says that decbin, memw_locked, and
3031 // memd_locked cannot be used as .new as well,
3032 // but we don't seem to have these instructions defined.
3033 return MI->getOpcode() != Hexagon::A4_tlbmatch;
3037 bool HexagonInstrInfo::PredOpcodeHasJMP_c(unsigned Opcode) const {
3038 return (Opcode == Hexagon::J2_jumpt) ||
3039 (Opcode == Hexagon::J2_jumpf) ||
3040 (Opcode == Hexagon::J2_jumptnew) ||
3041 (Opcode == Hexagon::J2_jumpfnew) ||
3042 (Opcode == Hexagon::J2_jumptnewpt) ||
3043 (Opcode == Hexagon::J2_jumpfnewpt);
3047 bool HexagonInstrInfo::predOpcodeHasNot(ArrayRef<MachineOperand> Cond) const {
3048 if (Cond.empty() || !isPredicated(Cond[0].getImm()))
3050 return !isPredicatedTrue(Cond[0].getImm());
3054 short HexagonInstrInfo::getAbsoluteForm(const MachineInstr *MI) const {
3055 return Hexagon::getAbsoluteForm(MI->getOpcode());
3059 unsigned HexagonInstrInfo::getAddrMode(const MachineInstr* MI) const {
3060 const uint64_t F = MI->getDesc().TSFlags;
3061 return (F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask;
3065 // Returns the base register in a memory access (load/store). The offset is
3066 // returned in Offset and the access size is returned in AccessSize.
3067 unsigned HexagonInstrInfo::getBaseAndOffset(const MachineInstr *MI,
3068 int &Offset, unsigned &AccessSize) const {
3069 // Return if it is not a base+offset type instruction or a MemOp.
3070 if (getAddrMode(MI) != HexagonII::BaseImmOffset &&
3071 getAddrMode(MI) != HexagonII::BaseLongOffset &&
3072 !isMemOp(MI) && !isPostIncrement(MI))
3075 // Since it is a memory access instruction, getMemAccessSize() should never
3077 assert (getMemAccessSize(MI) &&
3078 "BaseImmOffset or BaseLongOffset or MemOp without accessSize");
3080 // Return Values of getMemAccessSize() are
3081 // 0 - Checked in the assert above.
3082 // 1, 2, 3, 4 & 7, 8 - The statement below is correct for all these.
3083 // MemAccessSize is represented as 1+log2(N) where N is size in bits.
3084 AccessSize = (1U << (getMemAccessSize(MI) - 1));
3086 unsigned basePos = 0, offsetPos = 0;
3087 if (!getBaseAndOffsetPosition(MI, basePos, offsetPos))
3090 // Post increment updates its EA after the mem access,
3091 // so we need to treat its offset as zero.
3092 if (isPostIncrement(MI))
3095 Offset = MI->getOperand(offsetPos).getImm();
3098 return MI->getOperand(basePos).getReg();
3102 /// Return the position of the base and offset operands for this instruction.
3103 bool HexagonInstrInfo::getBaseAndOffsetPosition(const MachineInstr *MI,
3104 unsigned &BasePos, unsigned &OffsetPos) const {
3105 // Deal with memops first.
3109 } else if (MI->mayStore()) {
3112 } else if (MI->mayLoad()) {
3118 if (isPredicated(*MI)) {
3122 if (isPostIncrement(MI)) {
3127 if (!MI->getOperand(BasePos).isReg() || !MI->getOperand(OffsetPos).isImm())
3134 // Inserts branching instructions in reverse order of their occurence.
3135 // e.g. jump_t t1 (i1)
3137 // Jumpers = {i2, i1}
3138 SmallVector<MachineInstr*, 2> HexagonInstrInfo::getBranchingInstrs(
3139 MachineBasicBlock& MBB) const {
3140 SmallVector<MachineInstr*, 2> Jumpers;
3141 // If the block has no terminators, it just falls into the block after it.
3142 MachineBasicBlock::instr_iterator I = MBB.instr_end();
3143 if (I == MBB.instr_begin())
3146 // A basic block may looks like this:
3156 // It has two succs but does not have a terminator
3157 // Don't know how to handle it.
3162 } while (I != MBB.instr_begin());
3164 I = MBB.instr_end();
3167 while (I->isDebugValue()) {
3168 if (I == MBB.instr_begin())
3172 if (!isUnpredicatedTerminator(*I))
3175 // Get the last instruction in the block.
3176 MachineInstr *LastInst = &*I;
3177 Jumpers.push_back(LastInst);
3178 MachineInstr *SecondLastInst = nullptr;
3179 // Find one more terminator if present.
3181 if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
3182 if (!SecondLastInst) {
3183 SecondLastInst = &*I;
3184 Jumpers.push_back(SecondLastInst);
3185 } else // This is a third branch.
3188 if (I == MBB.instr_begin())
3196 short HexagonInstrInfo::getBaseWithLongOffset(short Opcode) const {
3199 return Hexagon::getBaseWithLongOffset(Opcode);
3203 short HexagonInstrInfo::getBaseWithLongOffset(const MachineInstr *MI) const {
3204 return Hexagon::getBaseWithLongOffset(MI->getOpcode());
3208 short HexagonInstrInfo::getBaseWithRegOffset(const MachineInstr *MI) const {
3209 return Hexagon::getBaseWithRegOffset(MI->getOpcode());
3213 // Returns Operand Index for the constant extended instruction.
3214 unsigned HexagonInstrInfo::getCExtOpNum(const MachineInstr *MI) const {
3215 const uint64_t F = MI->getDesc().TSFlags;
3216 return (F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask;
3219 // See if instruction could potentially be a duplex candidate.
3220 // If so, return its group. Zero otherwise.
3221 HexagonII::CompoundGroup HexagonInstrInfo::getCompoundCandidateGroup(
3222 const MachineInstr *MI) const {
3223 unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
3225 switch (MI->getOpcode()) {
3227 return HexagonII::HCG_None;
3230 // "p0=cmp.eq(Rs16,Rt16); if (p0.new) jump:nt #r9:2"
3231 // "Rd16=#U6 ; jump #r9:2"
3232 // "Rd16=Rs16 ; jump #r9:2"
3234 case Hexagon::C2_cmpeq:
3235 case Hexagon::C2_cmpgt:
3236 case Hexagon::C2_cmpgtu:
3237 DstReg = MI->getOperand(0).getReg();
3238 Src1Reg = MI->getOperand(1).getReg();
3239 Src2Reg = MI->getOperand(2).getReg();
3240 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3241 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3242 isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg))
3243 return HexagonII::HCG_A;
3245 case Hexagon::C2_cmpeqi:
3246 case Hexagon::C2_cmpgti:
3247 case Hexagon::C2_cmpgtui:
3248 // P0 = cmp.eq(Rs,#u2)
3249 DstReg = MI->getOperand(0).getReg();
3250 SrcReg = MI->getOperand(1).getReg();
3251 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3252 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3253 isIntRegForSubInst(SrcReg) && MI->getOperand(2).isImm() &&
3254 ((isUInt<5>(MI->getOperand(2).getImm())) ||
3255 (MI->getOperand(2).getImm() == -1)))
3256 return HexagonII::HCG_A;
3258 case Hexagon::A2_tfr:
3260 DstReg = MI->getOperand(0).getReg();
3261 SrcReg = MI->getOperand(1).getReg();
3262 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3263 return HexagonII::HCG_A;
3265 case Hexagon::A2_tfrsi:
3267 // Do not test for #u6 size since the const is getting extended
3268 // regardless and compound could be formed.
3269 DstReg = MI->getOperand(0).getReg();
3270 if (isIntRegForSubInst(DstReg))
3271 return HexagonII::HCG_A;
3273 case Hexagon::S2_tstbit_i:
3274 DstReg = MI->getOperand(0).getReg();
3275 Src1Reg = MI->getOperand(1).getReg();
3276 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3277 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3278 MI->getOperand(2).isImm() &&
3279 isIntRegForSubInst(Src1Reg) && (MI->getOperand(2).getImm() == 0))
3280 return HexagonII::HCG_A;
3282 // The fact that .new form is used pretty much guarantees
3283 // that predicate register will match. Nevertheless,
3284 // there could be some false positives without additional
3286 case Hexagon::J2_jumptnew:
3287 case Hexagon::J2_jumpfnew:
3288 case Hexagon::J2_jumptnewpt:
3289 case Hexagon::J2_jumpfnewpt:
3290 Src1Reg = MI->getOperand(0).getReg();
3291 if (Hexagon::PredRegsRegClass.contains(Src1Reg) &&
3292 (Hexagon::P0 == Src1Reg || Hexagon::P1 == Src1Reg))
3293 return HexagonII::HCG_B;
3295 // Transfer and jump:
3296 // Rd=#U6 ; jump #r9:2
3297 // Rd=Rs ; jump #r9:2
3298 // Do not test for jump range here.
3299 case Hexagon::J2_jump:
3300 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3301 return HexagonII::HCG_C;
3305 return HexagonII::HCG_None;
3309 // Returns -1 when there is no opcode found.
3310 unsigned HexagonInstrInfo::getCompoundOpcode(const MachineInstr *GA,
3311 const MachineInstr *GB) const {
3312 assert(getCompoundCandidateGroup(GA) == HexagonII::HCG_A);
3313 assert(getCompoundCandidateGroup(GB) == HexagonII::HCG_B);
3314 if ((GA->getOpcode() != Hexagon::C2_cmpeqi) ||
3315 (GB->getOpcode() != Hexagon::J2_jumptnew))
3317 unsigned DestReg = GA->getOperand(0).getReg();
3318 if (!GB->readsRegister(DestReg))
3320 if (DestReg == Hexagon::P0)
3321 return Hexagon::J4_cmpeqi_tp0_jump_nt;
3322 if (DestReg == Hexagon::P1)
3323 return Hexagon::J4_cmpeqi_tp1_jump_nt;
3328 int HexagonInstrInfo::getCondOpcode(int Opc, bool invertPredicate) const {
3329 enum Hexagon::PredSense inPredSense;
3330 inPredSense = invertPredicate ? Hexagon::PredSense_false :
3331 Hexagon::PredSense_true;
3332 int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
3333 if (CondOpcode >= 0) // Valid Conditional opcode/instruction
3336 // This switch case will be removed once all the instructions have been
3337 // modified to use relation maps.
3339 case Hexagon::TFRI_f:
3340 return !invertPredicate ? Hexagon::TFRI_cPt_f :
3341 Hexagon::TFRI_cNotPt_f;
3344 llvm_unreachable("Unexpected predicable instruction");
3348 // Return the cur value instruction for a given store.
3349 int HexagonInstrInfo::getDotCurOp(const MachineInstr* MI) const {
3350 switch (MI->getOpcode()) {
3351 default: llvm_unreachable("Unknown .cur type");
3352 case Hexagon::V6_vL32b_pi:
3353 return Hexagon::V6_vL32b_cur_pi;
3354 case Hexagon::V6_vL32b_ai:
3355 return Hexagon::V6_vL32b_cur_ai;
3357 case Hexagon::V6_vL32b_pi_128B:
3358 return Hexagon::V6_vL32b_cur_pi_128B;
3359 case Hexagon::V6_vL32b_ai_128B:
3360 return Hexagon::V6_vL32b_cur_ai_128B;
3367 // The diagram below shows the steps involved in the conversion of a predicated
3368 // store instruction to its .new predicated new-value form.
3370 // p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
3372 // / \ (not OK. it will cause new-value store to be
3373 // / X conditional on p0.new while R2 producer is
3376 // p.new store p.old NV store
3377 // [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new]
3383 // [if (p0)memw(R0+#0)=R2]
3386 // The following set of instructions further explains the scenario where
3387 // conditional new-value store becomes invalid when promoted to .new predicate
3390 // { 1) if (p0) r0 = add(r1, r2)
3391 // 2) p0 = cmp.eq(r3, #0) }
3393 // 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
3394 // the first two instructions because in instr 1, r0 is conditional on old value
3395 // of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
3396 // is not valid for new-value stores.
3397 // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
3398 // from the "Conditional Store" list. Because a predicated new value store
3399 // would NOT be promoted to a double dot new store. See diagram below:
3400 // This function returns yes for those stores that are predicated but not
3401 // yet promoted to predicate dot new instructions.
3403 // +---------------------+
3404 // /-----| if (p0) memw(..)=r0 |---------\~
3405 // || +---------------------+ ||
3406 // promote || /\ /\ || promote
3408 // \||/ demote || \||/
3410 // +-------------------------+ || +-------------------------+
3411 // | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new |
3412 // +-------------------------+ || +-------------------------+
3415 // promote || \/ NOT possible
3419 // +-----------------------------+
3420 // | if (p0.new) memw(..)=r0.new |
3421 // +-----------------------------+
3422 // Double Dot New Store
3424 // Returns the most basic instruction for the .new predicated instructions and
3425 // new-value stores.
3426 // For example, all of the following instructions will be converted back to the
3427 // same instruction:
3428 // 1) if (p0.new) memw(R0+#0) = R1.new --->
3429 // 2) if (p0) memw(R0+#0)= R1.new -------> if (p0) memw(R0+#0) = R1
3430 // 3) if (p0.new) memw(R0+#0) = R1 --->
3432 // To understand the translation of instruction 1 to its original form, consider
3433 // a packet with 3 instructions.
3434 // { p0 = cmp.eq(R0,R1)
3435 // if (p0.new) R2 = add(R3, R4)
3436 // R5 = add (R3, R1)
3438 // if (p0) memw(R5+#0) = R2 <--- trying to include it in the previous packet
3440 // This instruction can be part of the previous packet only if both p0 and R2
3441 // are promoted to .new values. This promotion happens in steps, first
3442 // predicate register is promoted to .new and in the next iteration R2 is
3443 // promoted. Therefore, in case of dependence check failure (due to R5) during
3444 // next iteration, it should be converted back to its most basic form.
3447 // Return the new value instruction for a given store.
3448 int HexagonInstrInfo::getDotNewOp(const MachineInstr* MI) const {
3449 int NVOpcode = Hexagon::getNewValueOpcode(MI->getOpcode());
3450 if (NVOpcode >= 0) // Valid new-value store instruction.
3453 switch (MI->getOpcode()) {
3454 default: llvm_unreachable("Unknown .new type");
3455 case Hexagon::S4_storerb_ur:
3456 return Hexagon::S4_storerbnew_ur;
3458 case Hexagon::S2_storerb_pci:
3459 return Hexagon::S2_storerb_pci;
3461 case Hexagon::S2_storeri_pci:
3462 return Hexagon::S2_storeri_pci;
3464 case Hexagon::S2_storerh_pci:
3465 return Hexagon::S2_storerh_pci;
3467 case Hexagon::S2_storerd_pci:
3468 return Hexagon::S2_storerd_pci;
3470 case Hexagon::S2_storerf_pci:
3471 return Hexagon::S2_storerf_pci;
3473 case Hexagon::V6_vS32b_ai:
3474 return Hexagon::V6_vS32b_new_ai;
3476 case Hexagon::V6_vS32b_pi:
3477 return Hexagon::V6_vS32b_new_pi;
3480 case Hexagon::V6_vS32b_ai_128B:
3481 return Hexagon::V6_vS32b_new_ai_128B;
3483 case Hexagon::V6_vS32b_pi_128B:
3484 return Hexagon::V6_vS32b_new_pi_128B;
3490 // Returns the opcode to use when converting MI, which is a conditional jump,
3491 // into a conditional instruction which uses the .new value of the predicate.
3492 // We also use branch probabilities to add a hint to the jump.
3493 int HexagonInstrInfo::getDotNewPredJumpOp(const MachineInstr *MI,
3494 const MachineBranchProbabilityInfo *MBPI) const {
3495 // We assume that block can have at most two successors.
3497 const MachineBasicBlock *Src = MI->getParent();
3498 const MachineOperand *BrTarget = &MI->getOperand(1);
3499 const MachineBasicBlock *Dst = BrTarget->getMBB();
3501 const BranchProbability Prediction = MBPI->getEdgeProbability(Src, Dst);
3502 if (Prediction >= BranchProbability(1,2))
3505 switch (MI->getOpcode()) {
3506 case Hexagon::J2_jumpt:
3507 return taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
3508 case Hexagon::J2_jumpf:
3509 return taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;
3512 llvm_unreachable("Unexpected jump instruction.");
3517 // Return .new predicate version for an instruction.
3518 int HexagonInstrInfo::getDotNewPredOp(const MachineInstr *MI,
3519 const MachineBranchProbabilityInfo *MBPI) const {
3520 int NewOpcode = Hexagon::getPredNewOpcode(MI->getOpcode());
3521 if (NewOpcode >= 0) // Valid predicate new instruction
3524 switch (MI->getOpcode()) {
3526 case Hexagon::J2_jumpt:
3527 case Hexagon::J2_jumpf:
3528 return getDotNewPredJumpOp(MI, MBPI);
3531 assert(0 && "Unknown .new type");
3537 int HexagonInstrInfo::getDotOldOp(const int opc) const {
3539 if (isPredicated(NewOp) && isPredicatedNew(NewOp)) { // Get predicate old form
3540 NewOp = Hexagon::getPredOldOpcode(NewOp);
3541 assert(NewOp >= 0 &&
3542 "Couldn't change predicate new instruction to its old form.");
3545 if (isNewValueStore(NewOp)) { // Convert into non-new-value format
3546 NewOp = Hexagon::getNonNVStore(NewOp);
3547 assert(NewOp >= 0 && "Couldn't change new-value store to its old form.");
3553 // See if instruction could potentially be a duplex candidate.
3554 // If so, return its group. Zero otherwise.
3555 HexagonII::SubInstructionGroup HexagonInstrInfo::getDuplexCandidateGroup(
3556 const MachineInstr *MI) const {
3557 unsigned DstReg, SrcReg, Src1Reg, Src2Reg;
3558 auto &HRI = getRegisterInfo();
3560 switch (MI->getOpcode()) {
3562 return HexagonII::HSIG_None;
3566 // Rd = memw(Rs+#u4:2)
3567 // Rd = memub(Rs+#u4:0)
3568 case Hexagon::L2_loadri_io:
3569 DstReg = MI->getOperand(0).getReg();
3570 SrcReg = MI->getOperand(1).getReg();
3571 // Special case this one from Group L2.
3572 // Rd = memw(r29+#u5:2)
3573 if (isIntRegForSubInst(DstReg)) {
3574 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3575 HRI.getStackRegister() == SrcReg &&
3576 MI->getOperand(2).isImm() &&
3577 isShiftedUInt<5,2>(MI->getOperand(2).getImm()))
3578 return HexagonII::HSIG_L2;
3579 // Rd = memw(Rs+#u4:2)
3580 if (isIntRegForSubInst(SrcReg) &&
3581 (MI->getOperand(2).isImm() &&
3582 isShiftedUInt<4,2>(MI->getOperand(2).getImm())))
3583 return HexagonII::HSIG_L1;
3586 case Hexagon::L2_loadrub_io:
3587 // Rd = memub(Rs+#u4:0)
3588 DstReg = MI->getOperand(0).getReg();
3589 SrcReg = MI->getOperand(1).getReg();
3590 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3591 MI->getOperand(2).isImm() && isUInt<4>(MI->getOperand(2).getImm()))
3592 return HexagonII::HSIG_L1;
3597 // Rd = memh/memuh(Rs+#u3:1)
3598 // Rd = memb(Rs+#u3:0)
3599 // Rd = memw(r29+#u5:2) - Handled above.
3600 // Rdd = memd(r29+#u5:3)
3602 // [if ([!]p0[.new])] dealloc_return
3603 // [if ([!]p0[.new])] jumpr r31
3604 case Hexagon::L2_loadrh_io:
3605 case Hexagon::L2_loadruh_io:
3606 // Rd = memh/memuh(Rs+#u3:1)
3607 DstReg = MI->getOperand(0).getReg();
3608 SrcReg = MI->getOperand(1).getReg();
3609 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3610 MI->getOperand(2).isImm() &&
3611 isShiftedUInt<3,1>(MI->getOperand(2).getImm()))
3612 return HexagonII::HSIG_L2;
3614 case Hexagon::L2_loadrb_io:
3615 // Rd = memb(Rs+#u3:0)
3616 DstReg = MI->getOperand(0).getReg();
3617 SrcReg = MI->getOperand(1).getReg();
3618 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3619 MI->getOperand(2).isImm() &&
3620 isUInt<3>(MI->getOperand(2).getImm()))
3621 return HexagonII::HSIG_L2;
3623 case Hexagon::L2_loadrd_io:
3624 // Rdd = memd(r29+#u5:3)
3625 DstReg = MI->getOperand(0).getReg();
3626 SrcReg = MI->getOperand(1).getReg();
3627 if (isDblRegForSubInst(DstReg, HRI) &&
3628 Hexagon::IntRegsRegClass.contains(SrcReg) &&
3629 HRI.getStackRegister() == SrcReg &&
3630 MI->getOperand(2).isImm() &&
3631 isShiftedUInt<5,3>(MI->getOperand(2).getImm()))
3632 return HexagonII::HSIG_L2;
3634 // dealloc_return is not documented in Hexagon Manual, but marked
3635 // with A_SUBINSN attribute in iset_v4classic.py.
3636 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3637 case Hexagon::L4_return:
3638 case Hexagon::L2_deallocframe:
3639 return HexagonII::HSIG_L2;
3640 case Hexagon::EH_RETURN_JMPR:
3641 case Hexagon::JMPret :
3643 // Actual form JMPR %PC<imp-def>, %R31<imp-use>, %R0<imp-use,internal>.
3644 DstReg = MI->getOperand(0).getReg();
3645 if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
3646 return HexagonII::HSIG_L2;
3648 case Hexagon::JMPrett:
3649 case Hexagon::JMPretf:
3650 case Hexagon::JMPrettnewpt:
3651 case Hexagon::JMPretfnewpt :
3652 case Hexagon::JMPrettnew :
3653 case Hexagon::JMPretfnew :
3654 DstReg = MI->getOperand(1).getReg();
3655 SrcReg = MI->getOperand(0).getReg();
3656 // [if ([!]p0[.new])] jumpr r31
3657 if ((Hexagon::PredRegsRegClass.contains(SrcReg) &&
3658 (Hexagon::P0 == SrcReg)) &&
3659 (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg)))
3660 return HexagonII::HSIG_L2;
3662 case Hexagon::L4_return_t :
3663 case Hexagon::L4_return_f :
3664 case Hexagon::L4_return_tnew_pnt :
3665 case Hexagon::L4_return_fnew_pnt :
3666 case Hexagon::L4_return_tnew_pt :
3667 case Hexagon::L4_return_fnew_pt :
3668 // [if ([!]p0[.new])] dealloc_return
3669 SrcReg = MI->getOperand(0).getReg();
3670 if (Hexagon::PredRegsRegClass.contains(SrcReg) && (Hexagon::P0 == SrcReg))
3671 return HexagonII::HSIG_L2;
3676 // memw(Rs+#u4:2) = Rt
3677 // memb(Rs+#u4:0) = Rt
3678 case Hexagon::S2_storeri_io:
3679 // Special case this one from Group S2.
3680 // memw(r29+#u5:2) = Rt
3681 Src1Reg = MI->getOperand(0).getReg();
3682 Src2Reg = MI->getOperand(2).getReg();
3683 if (Hexagon::IntRegsRegClass.contains(Src1Reg) &&
3684 isIntRegForSubInst(Src2Reg) &&
3685 HRI.getStackRegister() == Src1Reg && MI->getOperand(1).isImm() &&
3686 isShiftedUInt<5,2>(MI->getOperand(1).getImm()))
3687 return HexagonII::HSIG_S2;
3688 // memw(Rs+#u4:2) = Rt
3689 if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3690 MI->getOperand(1).isImm() &&
3691 isShiftedUInt<4,2>(MI->getOperand(1).getImm()))
3692 return HexagonII::HSIG_S1;
3694 case Hexagon::S2_storerb_io:
3695 // memb(Rs+#u4:0) = Rt
3696 Src1Reg = MI->getOperand(0).getReg();
3697 Src2Reg = MI->getOperand(2).getReg();
3698 if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3699 MI->getOperand(1).isImm() && isUInt<4>(MI->getOperand(1).getImm()))
3700 return HexagonII::HSIG_S1;
3705 // memh(Rs+#u3:1) = Rt
3706 // memw(r29+#u5:2) = Rt
3707 // memd(r29+#s6:3) = Rtt
3708 // memw(Rs+#u4:2) = #U1
3709 // memb(Rs+#u4) = #U1
3710 // allocframe(#u5:3)
3711 case Hexagon::S2_storerh_io:
3712 // memh(Rs+#u3:1) = Rt
3713 Src1Reg = MI->getOperand(0).getReg();
3714 Src2Reg = MI->getOperand(2).getReg();
3715 if (isIntRegForSubInst(Src1Reg) && isIntRegForSubInst(Src2Reg) &&
3716 MI->getOperand(1).isImm() &&
3717 isShiftedUInt<3,1>(MI->getOperand(1).getImm()))
3718 return HexagonII::HSIG_S1;
3720 case Hexagon::S2_storerd_io:
3721 // memd(r29+#s6:3) = Rtt
3722 Src1Reg = MI->getOperand(0).getReg();
3723 Src2Reg = MI->getOperand(2).getReg();
3724 if (isDblRegForSubInst(Src2Reg, HRI) &&
3725 Hexagon::IntRegsRegClass.contains(Src1Reg) &&
3726 HRI.getStackRegister() == Src1Reg && MI->getOperand(1).isImm() &&
3727 isShiftedInt<6,3>(MI->getOperand(1).getImm()))
3728 return HexagonII::HSIG_S2;
3730 case Hexagon::S4_storeiri_io:
3731 // memw(Rs+#u4:2) = #U1
3732 Src1Reg = MI->getOperand(0).getReg();
3733 if (isIntRegForSubInst(Src1Reg) && MI->getOperand(1).isImm() &&
3734 isShiftedUInt<4,2>(MI->getOperand(1).getImm()) &&
3735 MI->getOperand(2).isImm() && isUInt<1>(MI->getOperand(2).getImm()))
3736 return HexagonII::HSIG_S2;
3738 case Hexagon::S4_storeirb_io:
3739 // memb(Rs+#u4) = #U1
3740 Src1Reg = MI->getOperand(0).getReg();
3741 if (isIntRegForSubInst(Src1Reg) &&
3742 MI->getOperand(1).isImm() && isUInt<4>(MI->getOperand(1).getImm()) &&
3743 MI->getOperand(2).isImm() && isUInt<1>(MI->getOperand(2).getImm()))
3744 return HexagonII::HSIG_S2;
3746 case Hexagon::S2_allocframe:
3747 if (MI->getOperand(0).isImm() &&
3748 isShiftedUInt<5,3>(MI->getOperand(0).getImm()))
3749 return HexagonII::HSIG_S1;
3758 // if ([!]P0[.new]) Rd = #0
3759 // Rd = add(r29,#u6:2)
3761 // P0 = cmp.eq(Rs,#u2)
3762 // Rdd = combine(#0,Rs)
3763 // Rdd = combine(Rs,#0)
3764 // Rdd = combine(#u2,#U2)
3767 // Rd = sxth/sxtb/zxtb/zxth(Rs)
3769 case Hexagon::A2_addi:
3770 DstReg = MI->getOperand(0).getReg();
3771 SrcReg = MI->getOperand(1).getReg();
3772 if (isIntRegForSubInst(DstReg)) {
3773 // Rd = add(r29,#u6:2)
3774 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3775 HRI.getStackRegister() == SrcReg && MI->getOperand(2).isImm() &&
3776 isShiftedUInt<6,2>(MI->getOperand(2).getImm()))
3777 return HexagonII::HSIG_A;
3779 if ((DstReg == SrcReg) && MI->getOperand(2).isImm() &&
3780 isInt<7>(MI->getOperand(2).getImm()))
3781 return HexagonII::HSIG_A;
3784 if (isIntRegForSubInst(SrcReg) && MI->getOperand(2).isImm() &&
3785 ((MI->getOperand(2).getImm() == 1) ||
3786 (MI->getOperand(2).getImm() == -1)))
3787 return HexagonII::HSIG_A;
3790 case Hexagon::A2_add:
3792 DstReg = MI->getOperand(0).getReg();
3793 Src1Reg = MI->getOperand(1).getReg();
3794 Src2Reg = MI->getOperand(2).getReg();
3795 if (isIntRegForSubInst(DstReg) && (DstReg == Src1Reg) &&
3796 isIntRegForSubInst(Src2Reg))
3797 return HexagonII::HSIG_A;
3799 case Hexagon::A2_andir:
3801 // Rd16=and(Rs16,#255)
3802 // Rd16=and(Rs16,#1)
3803 DstReg = MI->getOperand(0).getReg();
3804 SrcReg = MI->getOperand(1).getReg();
3805 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg) &&
3806 MI->getOperand(2).isImm() &&
3807 ((MI->getOperand(2).getImm() == 1) ||
3808 (MI->getOperand(2).getImm() == 255)))
3809 return HexagonII::HSIG_A;
3811 case Hexagon::A2_tfr:
3813 DstReg = MI->getOperand(0).getReg();
3814 SrcReg = MI->getOperand(1).getReg();
3815 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3816 return HexagonII::HSIG_A;
3818 case Hexagon::A2_tfrsi:
3820 // Do not test for #u6 size since the const is getting extended
3821 // regardless and compound could be formed.
3823 DstReg = MI->getOperand(0).getReg();
3824 if (isIntRegForSubInst(DstReg))
3825 return HexagonII::HSIG_A;
3827 case Hexagon::C2_cmoveit:
3828 case Hexagon::C2_cmovenewit:
3829 case Hexagon::C2_cmoveif:
3830 case Hexagon::C2_cmovenewif:
3831 // if ([!]P0[.new]) Rd = #0
3833 // %R16<def> = C2_cmovenewit %P0<internal>, 0, %R16<imp-use,undef>;
3834 DstReg = MI->getOperand(0).getReg();
3835 SrcReg = MI->getOperand(1).getReg();
3836 if (isIntRegForSubInst(DstReg) &&
3837 Hexagon::PredRegsRegClass.contains(SrcReg) && Hexagon::P0 == SrcReg &&
3838 MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0)
3839 return HexagonII::HSIG_A;
3841 case Hexagon::C2_cmpeqi:
3842 // P0 = cmp.eq(Rs,#u2)
3843 DstReg = MI->getOperand(0).getReg();
3844 SrcReg = MI->getOperand(1).getReg();
3845 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3846 Hexagon::P0 == DstReg && isIntRegForSubInst(SrcReg) &&
3847 MI->getOperand(2).isImm() && isUInt<2>(MI->getOperand(2).getImm()))
3848 return HexagonII::HSIG_A;
3850 case Hexagon::A2_combineii:
3851 case Hexagon::A4_combineii:
3852 // Rdd = combine(#u2,#U2)
3853 DstReg = MI->getOperand(0).getReg();
3854 if (isDblRegForSubInst(DstReg, HRI) &&
3855 ((MI->getOperand(1).isImm() && isUInt<2>(MI->getOperand(1).getImm())) ||
3856 (MI->getOperand(1).isGlobal() &&
3857 isUInt<2>(MI->getOperand(1).getOffset()))) &&
3858 ((MI->getOperand(2).isImm() && isUInt<2>(MI->getOperand(2).getImm())) ||
3859 (MI->getOperand(2).isGlobal() &&
3860 isUInt<2>(MI->getOperand(2).getOffset()))))
3861 return HexagonII::HSIG_A;
3863 case Hexagon::A4_combineri:
3864 // Rdd = combine(Rs,#0)
3865 DstReg = MI->getOperand(0).getReg();
3866 SrcReg = MI->getOperand(1).getReg();
3867 if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
3868 ((MI->getOperand(2).isImm() && MI->getOperand(2).getImm() == 0) ||
3869 (MI->getOperand(2).isGlobal() && MI->getOperand(2).getOffset() == 0)))
3870 return HexagonII::HSIG_A;
3872 case Hexagon::A4_combineir:
3873 // Rdd = combine(#0,Rs)
3874 DstReg = MI->getOperand(0).getReg();
3875 SrcReg = MI->getOperand(2).getReg();
3876 if (isDblRegForSubInst(DstReg, HRI) && isIntRegForSubInst(SrcReg) &&
3877 ((MI->getOperand(1).isImm() && MI->getOperand(1).getImm() == 0) ||
3878 (MI->getOperand(1).isGlobal() && MI->getOperand(1).getOffset() == 0)))
3879 return HexagonII::HSIG_A;
3881 case Hexagon::A2_sxtb:
3882 case Hexagon::A2_sxth:
3883 case Hexagon::A2_zxtb:
3884 case Hexagon::A2_zxth:
3885 // Rd = sxth/sxtb/zxtb/zxth(Rs)
3886 DstReg = MI->getOperand(0).getReg();
3887 SrcReg = MI->getOperand(1).getReg();
3888 if (isIntRegForSubInst(DstReg) && isIntRegForSubInst(SrcReg))
3889 return HexagonII::HSIG_A;
3893 return HexagonII::HSIG_None;
3897 short HexagonInstrInfo::getEquivalentHWInstr(const MachineInstr *MI) const {
3898 return Hexagon::getRealHWInstr(MI->getOpcode(), Hexagon::InstrType_Real);
3902 // Return first non-debug instruction in the basic block.
3903 MachineInstr *HexagonInstrInfo::getFirstNonDbgInst(MachineBasicBlock *BB)
3905 for (auto MII = BB->instr_begin(), End = BB->instr_end(); MII != End; MII++) {
3906 MachineInstr *MI = &*MII;
3907 if (MI->isDebugValue())
3915 unsigned HexagonInstrInfo::getInstrTimingClassLatency(
3916 const InstrItineraryData *ItinData, const MachineInstr *MI) const {
3917 // Default to one cycle for no itinerary. However, an "empty" itinerary may
3918 // still have a MinLatency property, which getStageLatency checks.
3920 return getInstrLatency(ItinData, *MI);
3922 // Get the latency embedded in the itinerary. If we're not using timing class
3923 // latencies or if we using BSB scheduling, then restrict the maximum latency
3924 // to 1 (that is, either 0 or 1).
3925 if (MI->isTransient())
3927 unsigned Latency = ItinData->getStageLatency(MI->getDesc().getSchedClass());
3928 if (!EnableTimingClassLatency ||
3929 MI->getParent()->getParent()->getSubtarget<HexagonSubtarget>().
3937 // inverts the predication logic.
3940 bool HexagonInstrInfo::getInvertedPredSense(
3941 SmallVectorImpl<MachineOperand> &Cond) const {
3944 unsigned Opc = getInvertedPredicatedOpcode(Cond[0].getImm());
3945 Cond[0].setImm(Opc);
3950 unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
3952 InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
3953 : Hexagon::getTruePredOpcode(Opc);
3954 if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
3955 return InvPredOpcode;
3957 llvm_unreachable("Unexpected predicated instruction");
3961 // Returns the max value that doesn't need to be extended.
3962 int HexagonInstrInfo::getMaxValue(const MachineInstr *MI) const {
3963 const uint64_t F = MI->getDesc().TSFlags;
3964 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
3965 & HexagonII::ExtentSignedMask;
3966 unsigned bits = (F >> HexagonII::ExtentBitsPos)
3967 & HexagonII::ExtentBitsMask;
3969 if (isSigned) // if value is signed
3970 return ~(-1U << (bits - 1));
3972 return ~(-1U << bits);
3976 unsigned HexagonInstrInfo::getMemAccessSize(const MachineInstr* MI) const {
3977 const uint64_t F = MI->getDesc().TSFlags;
3978 return (F >> HexagonII::MemAccessSizePos) & HexagonII::MemAccesSizeMask;
3982 // Returns the min value that doesn't need to be extended.
3983 int HexagonInstrInfo::getMinValue(const MachineInstr *MI) const {
3984 const uint64_t F = MI->getDesc().TSFlags;
3985 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
3986 & HexagonII::ExtentSignedMask;
3987 unsigned bits = (F >> HexagonII::ExtentBitsPos)
3988 & HexagonII::ExtentBitsMask;
3990 if (isSigned) // if value is signed
3991 return -1U << (bits - 1);
3997 // Returns opcode of the non-extended equivalent instruction.
3998 short HexagonInstrInfo::getNonExtOpcode(const MachineInstr *MI) const {
3999 // Check if the instruction has a register form that uses register in place
4000 // of the extended operand, if so return that as the non-extended form.
4001 short NonExtOpcode = Hexagon::getRegForm(MI->getOpcode());
4002 if (NonExtOpcode >= 0)
4003 return NonExtOpcode;
4005 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
4006 // Check addressing mode and retrieve non-ext equivalent instruction.
4007 switch (getAddrMode(MI)) {
4008 case HexagonII::Absolute :
4009 return Hexagon::getBaseWithImmOffset(MI->getOpcode());
4010 case HexagonII::BaseImmOffset :
4011 return Hexagon::getBaseWithRegOffset(MI->getOpcode());
4012 case HexagonII::BaseLongOffset:
4013 return Hexagon::getRegShlForm(MI->getOpcode());
4023 bool HexagonInstrInfo::getPredReg(ArrayRef<MachineOperand> Cond,
4024 unsigned &PredReg, unsigned &PredRegPos, unsigned &PredRegFlags) const {
4027 assert(Cond.size() == 2);
4028 if (isNewValueJump(Cond[0].getImm()) || Cond[1].isMBB()) {
4029 DEBUG(dbgs() << "No predregs for new-value jumps/endloop");
4032 PredReg = Cond[1].getReg();
4034 // See IfConversion.cpp why we add RegState::Implicit | RegState::Undef
4036 if (Cond[1].isImplicit())
4037 PredRegFlags = RegState::Implicit;
4038 if (Cond[1].isUndef())
4039 PredRegFlags |= RegState::Undef;
4044 short HexagonInstrInfo::getPseudoInstrPair(const MachineInstr *MI) const {
4045 return Hexagon::getRealHWInstr(MI->getOpcode(), Hexagon::InstrType_Pseudo);
4049 short HexagonInstrInfo::getRegForm(const MachineInstr *MI) const {
4050 return Hexagon::getRegForm(MI->getOpcode());
4054 // Return the number of bytes required to encode the instruction.
4055 // Hexagon instructions are fixed length, 4 bytes, unless they
4056 // use a constant extender, which requires another 4 bytes.
4057 // For debug instructions and prolog labels, return 0.
4058 unsigned HexagonInstrInfo::getSize(const MachineInstr *MI) const {
4059 if (MI->isDebugValue() || MI->isPosition())
4062 unsigned Size = MI->getDesc().getSize();
4064 // Assume the default insn size in case it cannot be determined
4065 // for whatever reason.
4066 Size = HEXAGON_INSTR_SIZE;
4068 if (isConstExtended(MI) || isExtended(MI))
4069 Size += HEXAGON_INSTR_SIZE;
4071 // Try and compute number of instructions in asm.
4072 if (BranchRelaxAsmLarge && MI->getOpcode() == Hexagon::INLINEASM) {
4073 const MachineBasicBlock &MBB = *MI->getParent();
4074 const MachineFunction *MF = MBB.getParent();
4075 const MCAsmInfo *MAI = MF->getTarget().getMCAsmInfo();
4077 // Count the number of register definitions to find the asm string.
4078 unsigned NumDefs = 0;
4079 for (; MI->getOperand(NumDefs).isReg() && MI->getOperand(NumDefs).isDef();
4081 assert(NumDefs != MI->getNumOperands()-2 && "No asm string?");
4083 assert(MI->getOperand(NumDefs).isSymbol() && "No asm string?");
4084 // Disassemble the AsmStr and approximate number of instructions.
4085 const char *AsmStr = MI->getOperand(NumDefs).getSymbolName();
4086 Size = getInlineAsmLength(AsmStr, *MAI);
4093 uint64_t HexagonInstrInfo::getType(const MachineInstr* MI) const {
4094 const uint64_t F = MI->getDesc().TSFlags;
4095 return (F >> HexagonII::TypePos) & HexagonII::TypeMask;
4099 unsigned HexagonInstrInfo::getUnits(const MachineInstr* MI) const {
4100 const TargetSubtargetInfo &ST = MI->getParent()->getParent()->getSubtarget();
4101 const InstrItineraryData &II = *ST.getInstrItineraryData();
4102 const InstrStage &IS = *II.beginStage(MI->getDesc().getSchedClass());
4104 return IS.getUnits();
4108 unsigned HexagonInstrInfo::getValidSubTargets(const unsigned Opcode) const {
4109 const uint64_t F = get(Opcode).TSFlags;
4110 return (F >> HexagonII::validSubTargetPos) & HexagonII::validSubTargetMask;
4114 // Calculate size of the basic block without debug instructions.
4115 unsigned HexagonInstrInfo::nonDbgBBSize(const MachineBasicBlock *BB) const {
4116 return nonDbgMICount(BB->instr_begin(), BB->instr_end());
4120 unsigned HexagonInstrInfo::nonDbgBundleSize(
4121 MachineBasicBlock::const_iterator BundleHead) const {
4122 assert(BundleHead->isBundle() && "Not a bundle header");
4123 auto MII = BundleHead.getInstrIterator();
4124 // Skip the bundle header.
4125 return nonDbgMICount(++MII, getBundleEnd(*BundleHead));
4129 /// immediateExtend - Changes the instruction in place to one using an immediate
4131 void HexagonInstrInfo::immediateExtend(MachineInstr *MI) const {
4132 assert((isExtendable(MI)||isConstExtended(MI)) &&
4133 "Instruction must be extendable");
4134 // Find which operand is extendable.
4135 short ExtOpNum = getCExtOpNum(MI);
4136 MachineOperand &MO = MI->getOperand(ExtOpNum);
4137 // This needs to be something we understand.
4138 assert((MO.isMBB() || MO.isImm()) &&
4139 "Branch with unknown extendable field type");
4140 // Mark given operand as extended.
4141 MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
4145 bool HexagonInstrInfo::invertAndChangeJumpTarget(
4146 MachineInstr* MI, MachineBasicBlock* NewTarget) const {
4147 DEBUG(dbgs() << "\n[invertAndChangeJumpTarget] to BB#"
4148 << NewTarget->getNumber(); MI->dump(););
4149 assert(MI->isBranch());
4150 unsigned NewOpcode = getInvertedPredicatedOpcode(MI->getOpcode());
4151 int TargetPos = MI->getNumOperands() - 1;
4152 // In general branch target is the last operand,
4153 // but some implicit defs added at the end might change it.
4154 while ((TargetPos > -1) && !MI->getOperand(TargetPos).isMBB())
4156 assert((TargetPos >= 0) && MI->getOperand(TargetPos).isMBB());
4157 MI->getOperand(TargetPos).setMBB(NewTarget);
4158 if (EnableBranchPrediction && isPredicatedNew(*MI)) {
4159 NewOpcode = reversePrediction(NewOpcode);
4161 MI->setDesc(get(NewOpcode));
4166 void HexagonInstrInfo::genAllInsnTimingClasses(MachineFunction &MF) const {
4167 /* +++ The code below is used to generate complete set of Hexagon Insn +++ */
4168 MachineFunction::iterator A = MF.begin();
4169 MachineBasicBlock &B = *A;
4170 MachineBasicBlock::iterator I = B.begin();
4171 MachineInstr *MI = &*I;
4172 DebugLoc DL = MI->getDebugLoc();
4173 MachineInstr *NewMI;
4175 for (unsigned insn = TargetOpcode::GENERIC_OP_END+1;
4176 insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {
4177 NewMI = BuildMI(B, MI, DL, get(insn));
4178 DEBUG(dbgs() << "\n" << getName(NewMI->getOpcode()) <<
4179 " Class: " << NewMI->getDesc().getSchedClass());
4180 NewMI->eraseFromParent();
4182 /* --- The code above is used to generate complete set of Hexagon Insn --- */
4186 // inverts the predication logic.
4189 bool HexagonInstrInfo::reversePredSense(MachineInstr* MI) const {
4190 DEBUG(dbgs() << "\nTrying to reverse pred. sense of:"; MI->dump());
4191 MI->setDesc(get(getInvertedPredicatedOpcode(MI->getOpcode())));
4196 // Reverse the branch prediction.
4197 unsigned HexagonInstrInfo::reversePrediction(unsigned Opcode) const {
4198 int PredRevOpcode = -1;
4199 if (isPredictedTaken(Opcode))
4200 PredRevOpcode = Hexagon::notTakenBranchPrediction(Opcode);
4202 PredRevOpcode = Hexagon::takenBranchPrediction(Opcode);
4203 assert(PredRevOpcode > 0);
4204 return PredRevOpcode;
4208 // TODO: Add more rigorous validation.
4209 bool HexagonInstrInfo::validateBranchCond(const ArrayRef<MachineOperand> &Cond)
4211 return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1));
4215 short HexagonInstrInfo::xformRegToImmOffset(const MachineInstr *MI) const {
4216 return Hexagon::xformRegToImmOffset(MI->getOpcode());