1 //===-- HexagonInstrInfo.cpp - Hexagon Instruction Information ------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the Hexagon implementation of the TargetInstrInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "HexagonInstrInfo.h"
16 #include "HexagonRegisterInfo.h"
17 #include "HexagonSubtarget.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/CodeGen/DFAPacketizer.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineMemOperand.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/PseudoSourceValue.h"
26 #include "llvm/Support/Debug.h"
27 #include "llvm/Support/MathExtras.h"
28 #include "llvm/Support/raw_ostream.h"
29 #define GET_INSTRINFO_CTOR
30 #define GET_INSTRMAP_INFO
31 #include "HexagonGenInstrInfo.inc"
32 #include "HexagonGenDFAPacketizer.inc"
/// Constants for Hexagon instructions.
// Signed base+offset immediate ranges for memory accesses, by access size
// (W = word, D = doubleword, H = halfword, B = byte).
const int Hexagon_MEMW_OFFSET_MAX = 4095;
const int Hexagon_MEMW_OFFSET_MIN = -4096;
const int Hexagon_MEMD_OFFSET_MAX = 8191;
const int Hexagon_MEMD_OFFSET_MIN = -8192;
const int Hexagon_MEMH_OFFSET_MAX = 2047;
const int Hexagon_MEMH_OFFSET_MIN = -2048;
const int Hexagon_MEMB_OFFSET_MAX = 1023;
const int Hexagon_MEMB_OFFSET_MIN = -1024;
// Immediate range for add-immediate (A2_addi-style) instructions.
const int Hexagon_ADDI_OFFSET_MAX = 32767;
const int Hexagon_ADDI_OFFSET_MIN = -32768;
// Post-increment (auto-increment) amounts for memory accesses, by size.
const int Hexagon_MEMD_AUTOINC_MAX = 56;
const int Hexagon_MEMD_AUTOINC_MIN = -64;
const int Hexagon_MEMW_AUTOINC_MAX = 28;
const int Hexagon_MEMW_AUTOINC_MIN = -32;
const int Hexagon_MEMH_AUTOINC_MAX = 14;
const int Hexagon_MEMH_AUTOINC_MIN = -16;
const int Hexagon_MEMB_AUTOINC_MAX = 7;
const int Hexagon_MEMB_AUTOINC_MIN = -8;
// Constructor: register the call-frame setup/destroy pseudo opcodes with the
// TableGen'erated base class, and initialize the register info and subtarget
// references for this target.
HexagonInstrInfo::HexagonInstrInfo(HexagonSubtarget &ST)
  : HexagonGenInstrInfo(Hexagon::ADJCALLSTACKDOWN, Hexagon::ADJCALLSTACKUP),
    RI(ST, *this), Subtarget(ST) {
/// isLoadFromStackSlot - If the specified machine instruction is a direct
/// load from a stack slot, return the virtual or physical register number of
/// the destination along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than loading from the stack slot.
unsigned HexagonInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                               int &FrameIndex) const {
  switch (MI->getOpcode()) {
    // Pattern: Rd = load(FI + 0).  Operand 2 is the frame index, operand 1
    // the immediate offset (must be exactly 0), operand 0 the destination.
    if (MI->getOperand(2).isFI() &&
        MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
      FrameIndex = MI->getOperand(2).getIndex();
      return MI->getOperand(0).getReg();
/// isStoreToStackSlot - If the specified machine instruction is a direct
/// store to a stack slot, return the virtual or physical register number of
/// the source reg along with the FrameIndex of the loaded stack slot. If
/// not, return 0. This predicate must return 0 if the instruction has
/// any side effects other than storing to the stack slot.
unsigned HexagonInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  switch (MI->getOpcode()) {
    // NOTE(review): the guard tests operand 2 for being a frame index, but
    // the frame index is then read from operand 0 and the stored register
    // from operand 2.  Operand 2 cannot be both the FI and the source reg;
    // one of the positions looks wrong.  Confirm the store operand order
    // (FI, offset, source reg) — the check likely belongs on operand 0.
    if (MI->getOperand(2).isFI() &&
        MI->getOperand(1).isImm() && (MI->getOperand(1).getImm() == 0)) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(2).getReg();
// Insert an unconditional (JMP) and/or conditional (JMP_t / JMP_f) branch at
// the end of MBB.  TBB is the taken destination, FBB (may be null) the
// fall-through destination, Cond the predicate operands produced by
// AnalyzeBranch (a leading immediate 0 means "branch on false").
HexagonInstrInfo::InsertBranch(MachineBasicBlock &MBB,MachineBasicBlock *TBB,
                               MachineBasicBlock *FBB,
                               const SmallVectorImpl<MachineOperand> &Cond,
  int BOpc = Hexagon::JMP;     // Unconditional jump.
  int BccOpc = Hexagon::JMP_t; // Jump if predicate true.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  // Check if ReverseBranchCondition has asked to reverse this branch
  // If we want to reverse the branch an odd number of times, we want
  // the inverted (jump-if-false) form.
  if (!Cond.empty() && Cond[0].isImm() && Cond[0].getImm() == 0) {
    BccOpc = Hexagon::JMP_f;
  // Due to a bug in TailMerging/CFG Optimization, we need to add a
  // special case handling of a predicated jump followed by an
  // unconditional jump. If not, Tail Merging and CFG Optimization go
  // into an infinite loop.
  // NOTE: this local Cond deliberately shadows the parameter — it receives
  // the condition recovered by re-analyzing the block's terminators.
  MachineBasicBlock *NewTBB, *NewFBB;
  SmallVector<MachineOperand, 4> Cond;
  MachineInstr *Term = MBB.getFirstTerminator();
  if (isPredicated(Term) && !AnalyzeBranch(MBB, NewTBB, NewFBB, Cond,
    MachineBasicBlock *NextBB =
      llvm::next(MachineFunction::iterator(&MBB));
    // If the predicated jump targets the layout successor, invert it and
    // retry so only a single conditional branch remains.
    if (NewTBB == NextBB) {
      ReverseBranchCondition(Cond);
      return InsertBranch(MBB, TBB, 0, Cond, DL);
  // Unconditional branch only.
  BuildMI(&MBB, DL, get(BOpc)).addMBB(TBB);
  // Conditional branch only (predicate register at index regPos).
    get(BccOpc)).addReg(Cond[regPos].getReg()).addMBB(TBB);
  // Conditional branch to TBB followed by unconditional branch to FBB.
  BuildMI(&MBB, DL, get(BccOpc)).addReg(Cond[regPos].getReg()).addMBB(TBB);
  BuildMI(&MBB, DL, get(BOpc)).addMBB(FBB);
// Analyze the branching code at the end of MBB.  On success, fill in
// TBB/FBB/Cond as the generic branch-folding framework expects; return true
// if the terminators cannot be understood.  With AllowModify set, the
// routine may delete branches that are provably dead (e.g. a JMP to the
// layout successor).
bool HexagonInstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::instr_iterator I = MBB.instr_end();
  if (I == MBB.instr_begin())
  // A basic block may look like this:
  // It has two succs but does not have a terminator
  // Don't know how to handle it.
  } while (I != MBB.instr_begin());
  // Skip over debug values so they do not affect branch analysis.
  while (I->isDebugValue()) {
    if (I == MBB.instr_begin())
  // Delete the JMP if it's equivalent to a fall-through.
  if (AllowModify && I->getOpcode() == Hexagon::JMP &&
      MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
    DEBUG(dbgs()<< "\nErasing the jump to successor block\n";);
    I->eraseFromParent();
    if (I == MBB.instr_begin())
  if (!isUnpredicatedTerminator(I))
  // Get the last instruction in the block.
  MachineInstr *LastInst = I;
  MachineInstr *SecondLastInst = NULL;
  // Find one more terminator if present.
  if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(I)) {
    // This is a third branch.
    if (I == MBB.instr_begin())
  int LastOpcode = LastInst->getOpcode();
  // JMP_c: predicated conditional jump; "Not": jump-on-false variant.
  bool LastOpcodeHasJMP_c = PredOpcodeHasJMP_c(LastOpcode);
  bool LastOpcodeHasNot = PredOpcodeHasNot(LastOpcode);
  // If there is only one terminator instruction, process it.
  if (LastInst && !SecondLastInst) {
    if (LastOpcode == Hexagon::JMP) {
      TBB = LastInst->getOperand(0).getMBB();
    if (LastOpcode == Hexagon::ENDLOOP0) {
      TBB = LastInst->getOperand(0).getMBB();
      Cond.push_back(LastInst->getOperand(0));
    if (LastOpcodeHasJMP_c) {
      TBB = LastInst->getOperand(1).getMBB();
      // A leading immediate 0 in Cond encodes the inverted (false) sense.
      if (LastOpcodeHasNot) {
        Cond.push_back(MachineOperand::CreateImm(0));
      Cond.push_back(LastInst->getOperand(0));
    // Otherwise, don't know what this is.
  int SecLastOpcode = SecondLastInst->getOpcode();
  bool SecLastOpcodeHasJMP_c = PredOpcodeHasJMP_c(SecLastOpcode);
  bool SecLastOpcodeHasNot = PredOpcodeHasNot(SecLastOpcode);
  // Conditional jump followed by unconditional jump: classic two-way branch.
  if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::JMP)) {
    TBB = SecondLastInst->getOperand(1).getMBB();
    if (SecLastOpcodeHasNot)
      Cond.push_back(MachineOperand::CreateImm(0));
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
  // If the block ends with two Hexagon::JMPs, handle it. The second one is not
  // executed, so remove it.
  if (SecLastOpcode == Hexagon::JMP && LastOpcode == Hexagon::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I->eraseFromParent();
  // If the block ends with an ENDLOOP, and JMP, handle it.
  if (SecLastOpcode == Hexagon::ENDLOOP0 &&
      LastOpcode == Hexagon::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(SecondLastInst->getOperand(0));
    FBB = LastInst->getOperand(0).getMBB();
  // Otherwise, can't handle this.
// Remove up to two branch instructions (unconditional JMP and/or the
// conditional JMP_t / JMP_f forms) from the end of MBB and return how many
// were removed, per the TargetInstrInfo contract.
unsigned HexagonInstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  int BOpc = Hexagon::JMP;       // Unconditional jump.
  int BccOpc = Hexagon::JMP_t;   // Jump if predicate true.
  int BccOpcNot = Hexagon::JMP_f; // Jump if predicate false.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  // Last instruction is not a recognized branch: nothing to remove.
  if (I->getOpcode() != BOpc && I->getOpcode() != BccOpc &&
      I->getOpcode() != BccOpcNot)
  // Remove the branch.
  I->eraseFromParent();
  // Check for a preceding conditional branch and remove that too.
  if (I == MBB.begin()) return 1;
  if (I->getOpcode() != BccOpc && I->getOpcode() != BccOpcNot)
  // Remove the branch.
  I->eraseFromParent();
/// \brief For a comparison instruction, return the source registers in
/// \p SrcReg and \p SrcReg2 if having two register operands, and the value it
/// compares against in CmpValue. Return true if the comparison instruction
/// can be analyzed.
bool HexagonInstrInfo::analyzeCompare(const MachineInstr *MI,
                                      unsigned &SrcReg, unsigned &SrcReg2,
                                      int &Mask, int &Value) const {
  unsigned Opc = MI->getOpcode();
  // Set mask and the first source register.
  // Full-width (word/doubleword) compares: first source is operand 1.
  case Hexagon::CMPEHexagon4rr:
  case Hexagon::CMPEQri:
  case Hexagon::CMPEQrr:
  case Hexagon::CMPGT64rr:
  case Hexagon::CMPGTU64rr:
  case Hexagon::CMPGTUri:
  case Hexagon::CMPGTUrr:
  case Hexagon::CMPGTri:
  case Hexagon::CMPGTrr:
    SrcReg = MI->getOperand(1).getReg();
  // Byte compares.
  case Hexagon::CMPbEQri_V4:
  case Hexagon::CMPbEQrr_sbsb_V4:
  case Hexagon::CMPbEQrr_ubub_V4:
  case Hexagon::CMPbGTUri_V4:
  case Hexagon::CMPbGTUrr_V4:
  case Hexagon::CMPbGTrr_V4:
    SrcReg = MI->getOperand(1).getReg();
  // Halfword compares.
  case Hexagon::CMPhEQri_V4:
  case Hexagon::CMPhEQrr_shl_V4:
  case Hexagon::CMPhEQrr_xor_V4:
  case Hexagon::CMPhGTUri_V4:
  case Hexagon::CMPhGTUrr_V4:
  case Hexagon::CMPhGTrr_shl_V4:
    SrcReg = MI->getOperand(1).getReg();
  // Set the value/second source register.
  // Register-register forms: second source is operand 2.
  case Hexagon::CMPEHexagon4rr:
  case Hexagon::CMPEQrr:
  case Hexagon::CMPGT64rr:
  case Hexagon::CMPGTU64rr:
  case Hexagon::CMPGTUrr:
  case Hexagon::CMPGTrr:
  case Hexagon::CMPbEQrr_sbsb_V4:
  case Hexagon::CMPbEQrr_ubub_V4:
  case Hexagon::CMPbGTUrr_V4:
  case Hexagon::CMPbGTrr_V4:
  case Hexagon::CMPhEQrr_shl_V4:
  case Hexagon::CMPhEQrr_xor_V4:
  case Hexagon::CMPhGTUrr_V4:
  case Hexagon::CMPhGTrr_shl_V4:
    SrcReg2 = MI->getOperand(2).getReg();
  // Register-immediate forms: the compared value is operand 2.
  case Hexagon::CMPEQri:
  case Hexagon::CMPGTUri:
  case Hexagon::CMPGTri:
  case Hexagon::CMPbEQri_V4:
  case Hexagon::CMPbGTUri_V4:
  case Hexagon::CMPhEQri_V4:
  case Hexagon::CMPhGTUri_V4:
    Value = MI->getOperand(2).getImm();
// Emit a register-to-register copy between physical registers, selecting the
// transfer opcode appropriate to the register classes involved.  Falls through
// to llvm_unreachable for class combinations with no known copy sequence.
void HexagonInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator I, DebugLoc DL,
                                   unsigned DestReg, unsigned SrcReg,
                                   bool KillSrc) const {
  // 32-bit integer register to 32-bit integer register.
  if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::TFR), DestReg).addReg(SrcReg);
  // 64-bit register pair to 64-bit register pair.
  if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::TFR64), DestReg).addReg(SrcReg);
  if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
    // Map Pd = Ps to Pd = or(Ps, Ps).
    BuildMI(MBB, I, DL, get(Hexagon::OR_pp),
            DestReg).addReg(SrcReg).addReg(SrcReg);
  // 32-bit register into a 64-bit pair: write the low half, zero the high.
  if (Hexagon::DoubleRegsRegClass.contains(DestReg) &&
      Hexagon::IntRegsRegClass.contains(SrcReg)) {
    // We can have an overlap between single and double reg: r1:0 = r0.
    if(SrcReg == RI.getSubReg(DestReg, Hexagon::subreg_loreg)) {
      // Source already is the low subregister; only clear the high half.
      BuildMI(MBB, I, DL, get(Hexagon::TFRI), (RI.getSubReg(DestReg,
              Hexagon::subreg_hireg))).addImm(0);
      // r1:0 = r1 or no overlap.
      BuildMI(MBB, I, DL, get(Hexagon::TFR), (RI.getSubReg(DestReg,
              Hexagon::subreg_loreg))).addReg(SrcReg);
      BuildMI(MBB, I, DL, get(Hexagon::TFRI), (RI.getSubReg(DestReg,
              Hexagon::subreg_hireg))).addImm(0);
  // Integer register into a control register.
  if (Hexagon::CRRegsRegClass.contains(DestReg) &&
      Hexagon::IntRegsRegClass.contains(SrcReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::TFCR), DestReg).addReg(SrcReg);
  // Predicate register into an integer register.
  if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
      Hexagon::IntRegsRegClass.contains(DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::TFR_RsPd), DestReg).
      addReg(SrcReg, getKillRegState(KillSrc));
  // Integer register into a predicate register.
  if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
      Hexagon::PredRegsRegClass.contains(DestReg)) {
    BuildMI(MBB, I, DL, get(Hexagon::TFR_PdRs), DestReg).
      addReg(SrcReg, getKillRegState(KillSrc));
  // No copy sequence known for this pair of register classes.
  llvm_unreachable("Unimplemented");
// Spill SrcReg to stack slot FI, choosing the store opcode by register class
// (word, doubleword, or predicate) and attaching a memory operand so later
// passes can reason about the access.
void HexagonInstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    unsigned SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(I);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  // Describe the stack access for the scheduler/AA.
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(
      MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
      MachineMemOperand::MOStore,
      MFI.getObjectSize(FI),
  if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::STriw))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::STrid))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(Hexagon::STriw_pred))
      .addFrameIndex(FI).addImm(0)
      .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
  // Register class with no spill recipe.
  llvm_unreachable("Unimplemented");
// Store to an arbitrary address (non-stack-slot form).  Not supported on
// Hexagon; intentionally aborts if ever reached.
void HexagonInstrInfo::storeRegToAddr(
                                 MachineFunction &MF, unsigned SrcReg,
                                 SmallVectorImpl<MachineOperand> &Addr,
                                 const TargetRegisterClass *RC,
                                 SmallVectorImpl<MachineInstr*> &NewMIs) const
  llvm_unreachable("Unimplemented");
// Reload DestReg from stack slot FI, choosing the load opcode by register
// class and attaching a memory operand describing the stack access.
void HexagonInstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     unsigned DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBB.findDebugLoc(I);
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = *MF.getFrameInfo();
  unsigned Align = MFI.getObjectAlignment(FI);
  // Describe the stack access for the scheduler/AA.
  MachineMemOperand *MMO =
    MF.getMachineMemOperand(
      MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
      MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI),
  if (RC == &Hexagon::IntRegsRegClass) {
    BuildMI(MBB, I, DL, get(Hexagon::LDriw), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (RC == &Hexagon::DoubleRegsRegClass) {
    BuildMI(MBB, I, DL, get(Hexagon::LDrid), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  } else if (RC == &Hexagon::PredRegsRegClass) {
    BuildMI(MBB, I, DL, get(Hexagon::LDriw_pred), DestReg)
      .addFrameIndex(FI).addImm(0).addMemOperand(MMO);
  // NOTE(review): this is the *load* path, but the message says "store" —
  // looks like a copy/paste from storeRegToStackSlot; confirm and reword.
  llvm_unreachable("Can't store this register to stack slot");
// Load from an arbitrary address (non-stack-slot form).  Not supported on
// Hexagon; intentionally aborts if ever reached.
void HexagonInstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                        SmallVectorImpl<MachineOperand> &Addr,
                                        const TargetRegisterClass *RC,
                                        SmallVectorImpl<MachineInstr*> &NewMIs) const {
  llvm_unreachable("Unimplemented");
// Fold a memory operand into an instruction.  Not implemented for Hexagon
// yet, so no folding happens.
MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                    const SmallVectorImpl<unsigned> &Ops,
  // Hexagon_TODO: Implement.
// Build a DBG_VALUE describing a variable that lives in a stack slot.
HexagonInstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
                                           int FrameIx, uint64_t Offset,
  // NOTE(review): the first operand is a literal 0 and FrameIx does not
  // visibly appear in the built instruction — confirm whether the frame
  // index should be encoded here (e.g. via addFrameIndex(FrameIx)).
  MachineInstrBuilder MIB = BuildMI(MF, DL, get(Hexagon::DBG_VALUE))
                            .addImm(0).addImm(Offset).addMetadata(MDPtr);
// Create a new virtual register whose register class matches the given value
// type (predicate, 32-bit int/float, or 64-bit int/float) and return it.
unsigned HexagonInstrInfo::createVR(MachineFunction* MF, MVT VT) const {
  MachineRegisterInfo &RegInfo = MF->getRegInfo();
  const TargetRegisterClass *TRC;
    // Predicate values live in predicate registers.
    TRC = &Hexagon::PredRegsRegClass;
  } else if (VT == MVT::i32 || VT == MVT::f32) {
    TRC = &Hexagon::IntRegsRegClass;
  } else if (VT == MVT::i64 || VT == MVT::f64) {
    TRC = &Hexagon::DoubleRegsRegClass;
    llvm_unreachable("Cannot handle this register class");
  unsigned NewReg = RegInfo.createVirtualRegister(TRC);
// Return true if this instruction's immediate can be constant-extended.
// Keyed off the Extendable bit in the TableGen'erated TSFlags; a few pseudos
// are still special-cased below.
bool HexagonInstrInfo::isExtendable(const MachineInstr *MI) const {
  // Constant extenders are allowed only for V4 and above.
  if (!Subtarget.hasV4TOps())
  const MCInstrDesc &MID = MI->getDesc();
  const uint64_t F = MID.TSFlags;
  if ((F >> HexagonII::ExtendablePos) & HexagonII::ExtendableMask)
  // TODO: This is largely obsolete now. Will need to be removed
  // in consecutive patches.
  switch(MI->getOpcode()) {
    // TFR_FI Remains a special case.
    case Hexagon::TFR_FI:
611 // This returns true in two cases:
612 // - The OP code itself indicates that this is an extended instruction.
613 // - One of MOs has been marked with HMOTF_ConstExtended flag.
614 bool HexagonInstrInfo::isExtended(const MachineInstr *MI) const {
615 // First check if this is permanently extended op code.
616 const uint64_t F = MI->getDesc().TSFlags;
617 if ((F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask)
619 // Use MO operand flags to determine if one of MI's operands
620 // has HMOTF_ConstExtended flag set.
621 for (MachineInstr::const_mop_iterator I = MI->operands_begin(),
622 E = MI->operands_end(); I != E; ++I) {
623 if (I->getTargetFlags() && HexagonII::HMOTF_ConstExtended)
// Thin wrapper: defer to the TableGen'erated instruction description.
bool HexagonInstrInfo::isBranch (const MachineInstr *MI) const {
  return MI->getDesc().isBranch();
// Return true if MI is a V4 "new-value" store, i.e. a store whose source is
// the value produced by another instruction in the same packet.  Enumerated
// by opcode, grouped by access size.
bool HexagonInstrInfo::isNewValueStore(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
    default: return false;
    // Byte new-value stores (plain, indexed, scaled, GP-relative,
    // post-increment, predicated, and absolute-address forms).
    case Hexagon::STrib_nv_V4:
    case Hexagon::STrib_indexed_nv_V4:
    case Hexagon::STrib_indexed_shl_nv_V4:
    case Hexagon::STrib_shl_nv_V4:
    case Hexagon::STb_GP_nv_V4:
    case Hexagon::POST_STbri_nv_V4:
    case Hexagon::STrib_cPt_nv_V4:
    case Hexagon::STrib_cdnPt_nv_V4:
    case Hexagon::STrib_cNotPt_nv_V4:
    case Hexagon::STrib_cdnNotPt_nv_V4:
    case Hexagon::STrib_indexed_cPt_nv_V4:
    case Hexagon::STrib_indexed_cdnPt_nv_V4:
    case Hexagon::STrib_indexed_cNotPt_nv_V4:
    case Hexagon::STrib_indexed_cdnNotPt_nv_V4:
    case Hexagon::STrib_indexed_shl_cPt_nv_V4:
    case Hexagon::STrib_indexed_shl_cdnPt_nv_V4:
    case Hexagon::STrib_indexed_shl_cNotPt_nv_V4:
    case Hexagon::STrib_indexed_shl_cdnNotPt_nv_V4:
    case Hexagon::POST_STbri_cPt_nv_V4:
    case Hexagon::POST_STbri_cdnPt_nv_V4:
    case Hexagon::POST_STbri_cNotPt_nv_V4:
    case Hexagon::POST_STbri_cdnNotPt_nv_V4:
    case Hexagon::STb_GP_cPt_nv_V4:
    case Hexagon::STb_GP_cNotPt_nv_V4:
    case Hexagon::STb_GP_cdnPt_nv_V4:
    case Hexagon::STb_GP_cdnNotPt_nv_V4:
    case Hexagon::STrib_abs_nv_V4:
    case Hexagon::STrib_abs_cPt_nv_V4:
    case Hexagon::STrib_abs_cdnPt_nv_V4:
    case Hexagon::STrib_abs_cNotPt_nv_V4:
    case Hexagon::STrib_abs_cdnNotPt_nv_V4:
    // Halfword new-value stores (same addressing-mode variants as above).
    case Hexagon::STrih_nv_V4:
    case Hexagon::STrih_indexed_nv_V4:
    case Hexagon::STrih_indexed_shl_nv_V4:
    case Hexagon::STrih_shl_nv_V4:
    case Hexagon::STh_GP_nv_V4:
    case Hexagon::POST_SThri_nv_V4:
    case Hexagon::STrih_cPt_nv_V4:
    case Hexagon::STrih_cdnPt_nv_V4:
    case Hexagon::STrih_cNotPt_nv_V4:
    case Hexagon::STrih_cdnNotPt_nv_V4:
    case Hexagon::STrih_indexed_cPt_nv_V4:
    case Hexagon::STrih_indexed_cdnPt_nv_V4:
    case Hexagon::STrih_indexed_cNotPt_nv_V4:
    case Hexagon::STrih_indexed_cdnNotPt_nv_V4:
    case Hexagon::STrih_indexed_shl_cPt_nv_V4:
    case Hexagon::STrih_indexed_shl_cdnPt_nv_V4:
    case Hexagon::STrih_indexed_shl_cNotPt_nv_V4:
    case Hexagon::STrih_indexed_shl_cdnNotPt_nv_V4:
    case Hexagon::POST_SThri_cPt_nv_V4:
    case Hexagon::POST_SThri_cdnPt_nv_V4:
    case Hexagon::POST_SThri_cNotPt_nv_V4:
    case Hexagon::POST_SThri_cdnNotPt_nv_V4:
    case Hexagon::STh_GP_cPt_nv_V4:
    case Hexagon::STh_GP_cNotPt_nv_V4:
    case Hexagon::STh_GP_cdnPt_nv_V4:
    case Hexagon::STh_GP_cdnNotPt_nv_V4:
    case Hexagon::STrih_abs_nv_V4:
    case Hexagon::STrih_abs_cPt_nv_V4:
    case Hexagon::STrih_abs_cdnPt_nv_V4:
    case Hexagon::STrih_abs_cNotPt_nv_V4:
    case Hexagon::STrih_abs_cdnNotPt_nv_V4:
    // Word new-value stores (same addressing-mode variants as above).
    case Hexagon::STriw_nv_V4:
    case Hexagon::STriw_indexed_nv_V4:
    case Hexagon::STriw_indexed_shl_nv_V4:
    case Hexagon::STriw_shl_nv_V4:
    case Hexagon::STw_GP_nv_V4:
    case Hexagon::POST_STwri_nv_V4:
    case Hexagon::STriw_cPt_nv_V4:
    case Hexagon::STriw_cdnPt_nv_V4:
    case Hexagon::STriw_cNotPt_nv_V4:
    case Hexagon::STriw_cdnNotPt_nv_V4:
    case Hexagon::STriw_indexed_cPt_nv_V4:
    case Hexagon::STriw_indexed_cdnPt_nv_V4:
    case Hexagon::STriw_indexed_cNotPt_nv_V4:
    case Hexagon::STriw_indexed_cdnNotPt_nv_V4:
    case Hexagon::STriw_indexed_shl_cPt_nv_V4:
    case Hexagon::STriw_indexed_shl_cdnPt_nv_V4:
    case Hexagon::STriw_indexed_shl_cNotPt_nv_V4:
    case Hexagon::STriw_indexed_shl_cdnNotPt_nv_V4:
    case Hexagon::POST_STwri_cPt_nv_V4:
    case Hexagon::POST_STwri_cdnPt_nv_V4:
    case Hexagon::POST_STwri_cNotPt_nv_V4:
    case Hexagon::POST_STwri_cdnNotPt_nv_V4:
    case Hexagon::STw_GP_cPt_nv_V4:
    case Hexagon::STw_GP_cNotPt_nv_V4:
    case Hexagon::STw_GP_cdnPt_nv_V4:
    case Hexagon::STw_GP_cdnNotPt_nv_V4:
    case Hexagon::STriw_abs_nv_V4:
    case Hexagon::STriw_abs_cPt_nv_V4:
    case Hexagon::STriw_abs_cdnPt_nv_V4:
    case Hexagon::STriw_abs_cNotPt_nv_V4:
    case Hexagon::STriw_abs_cdnNotPt_nv_V4:
// Return true if MI is a post-increment load or store, i.e. an access that
// updates its base register after the memory operation.  Enumerated by
// opcode, grouped by access kind and size.
bool HexagonInstrInfo::isPostIncrement (const MachineInstr* MI) const {
  switch (MI->getOpcode())
    default: return false;
    // Load byte (plain and predicated variants).
    case Hexagon::POST_LDrib:
    case Hexagon::POST_LDrib_cPt:
    case Hexagon::POST_LDrib_cNotPt:
    case Hexagon::POST_LDrib_cdnPt_V4:
    case Hexagon::POST_LDrib_cdnNotPt_V4:
    // Load unsigned byte
    case Hexagon::POST_LDriub:
    case Hexagon::POST_LDriub_cPt:
    case Hexagon::POST_LDriub_cNotPt:
    case Hexagon::POST_LDriub_cdnPt_V4:
    case Hexagon::POST_LDriub_cdnNotPt_V4:
    // Load halfword.
    case Hexagon::POST_LDrih:
    case Hexagon::POST_LDrih_cPt:
    case Hexagon::POST_LDrih_cNotPt:
    case Hexagon::POST_LDrih_cdnPt_V4:
    case Hexagon::POST_LDrih_cdnNotPt_V4:
    // Load unsigned halfword
    case Hexagon::POST_LDriuh:
    case Hexagon::POST_LDriuh_cPt:
    case Hexagon::POST_LDriuh_cNotPt:
    case Hexagon::POST_LDriuh_cdnPt_V4:
    case Hexagon::POST_LDriuh_cdnNotPt_V4:
    // Load word.
    case Hexagon::POST_LDriw:
    case Hexagon::POST_LDriw_cPt:
    case Hexagon::POST_LDriw_cNotPt:
    case Hexagon::POST_LDriw_cdnPt_V4:
    case Hexagon::POST_LDriw_cdnNotPt_V4:
    // Load doubleword.
    case Hexagon::POST_LDrid:
    case Hexagon::POST_LDrid_cPt:
    case Hexagon::POST_LDrid_cNotPt:
    case Hexagon::POST_LDrid_cdnPt_V4:
    case Hexagon::POST_LDrid_cdnNotPt_V4:
    // Store byte.
    case Hexagon::POST_STbri:
    case Hexagon::POST_STbri_cPt:
    case Hexagon::POST_STbri_cNotPt:
    case Hexagon::POST_STbri_cdnPt_V4:
    case Hexagon::POST_STbri_cdnNotPt_V4:
    // Store halfword.
    case Hexagon::POST_SThri:
    case Hexagon::POST_SThri_cPt:
    case Hexagon::POST_SThri_cNotPt:
    case Hexagon::POST_SThri_cdnPt_V4:
    case Hexagon::POST_SThri_cdnNotPt_V4:
    // Store word.
    case Hexagon::POST_STwri:
    case Hexagon::POST_STwri_cPt:
    case Hexagon::POST_STwri_cNotPt:
    case Hexagon::POST_STwri_cdnPt_V4:
    case Hexagon::POST_STwri_cdnNotPt_V4:
    // Store doubleword.
    case Hexagon::POST_STdri:
    case Hexagon::POST_STdri_cPt:
    case Hexagon::POST_STdri_cNotPt:
    case Hexagon::POST_STdri_cdnPt_V4:
    case Hexagon::POST_STdri_cdnNotPt_V4:
// A "new-value" instruction is either a new-value jump or a new-value store.
bool HexagonInstrInfo::isNewValueInst(const MachineInstr *MI) const {
  if (isNewValueJump(MI))
  if (isNewValueStore(MI))
// Return true if MI is the V4 pseudo that calls the callee-saved-register
// save routine.
bool HexagonInstrInfo::isSaveCalleeSavedRegsCall(const MachineInstr *MI) const {
  return MI->getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4;
// Return true if MI may be predicated.  Beyond the generic Predicable bit,
// instructions with immediates are only predicable when the immediate fits
// the narrower field available in the predicated encoding, checked per
// opcode below.
bool HexagonInstrInfo::isPredicable(MachineInstr *MI) const {
  // Generic predicability from the instruction description.
  // NOTE(review): isPred is not used in the visible code — presumably
  // checked in an elided early return; confirm.
  bool isPred = MI->getDesc().isPredicable();
  const int Opc = MI->getOpcode();
    // Immediate must fit a signed 12-bit field.
    return isInt<12>(MI->getOperand(1).getImm());
  // Stores: offset must be an appropriately shifted unsigned 6-bit value
  // (scaled by the access size).
  case Hexagon::STrid_indexed:
    return isShiftedUInt<6,3>(MI->getOperand(1).getImm());
  case Hexagon::STriw_indexed:
  case Hexagon::STriw_nv_V4:
    return isShiftedUInt<6,2>(MI->getOperand(1).getImm());
  case Hexagon::STrih_indexed:
  case Hexagon::STrih_nv_V4:
    return isShiftedUInt<6,1>(MI->getOperand(1).getImm());
  case Hexagon::STrib_indexed:
  case Hexagon::STrib_nv_V4:
    return isUInt<6>(MI->getOperand(1).getImm());
  // Loads: same scaled unsigned 6-bit constraint, offset in operand 2.
  case Hexagon::LDrid_indexed:
    return isShiftedUInt<6,3>(MI->getOperand(2).getImm());
  case Hexagon::LDriw_indexed:
    return isShiftedUInt<6,2>(MI->getOperand(2).getImm());
  case Hexagon::LDriuh:
  case Hexagon::LDrih_indexed:
  case Hexagon::LDriuh_indexed:
    return isShiftedUInt<6,1>(MI->getOperand(2).getImm());
  case Hexagon::LDriub:
  case Hexagon::LDrib_indexed:
  case Hexagon::LDriub_indexed:
    return isUInt<6>(MI->getOperand(2).getImm());
  // Post-increment loads: increment is a scaled signed 4-bit value.
  case Hexagon::POST_LDrid:
    return isShiftedInt<4,3>(MI->getOperand(3).getImm());
  case Hexagon::POST_LDriw:
    return isShiftedInt<4,2>(MI->getOperand(3).getImm());
  case Hexagon::POST_LDrih:
  case Hexagon::POST_LDriuh:
    return isShiftedInt<4,1>(MI->getOperand(3).getImm());
  case Hexagon::POST_LDrib:
  case Hexagon::POST_LDriub:
    return isInt<4>(MI->getOperand(3).getImm());
  // Store-immediate: both the offset and the stored value must fit.
  case Hexagon::STrib_imm_V4:
  case Hexagon::STrih_imm_V4:
  case Hexagon::STriw_imm_V4:
    return (isUInt<6>(MI->getOperand(1).getImm()) &&
            isInt<6>(MI->getOperand(2).getImm()));
  case Hexagon::ADD_ri:
    return isInt<8>(MI->getOperand(2).getImm());
  // Remaining opcodes are predicable only on V4 and above.
  return Subtarget.hasV4TOps();
// This function performs the following inversions:
// however, these inversions are NOT included:
922 // cdnPt -X-> cdnNotPt
923 // cdnNotPt -X-> cdnPt
924 // cPt_nv -X-> cNotPt_nv (new value stores)
925 // cNotPt_nv -X-> cPt_nv (new value stores)
927 // because only the following transformations are allowed:
929 // cNotPt ---> cdnNotPt
931 // cNotPt ---> cNotPt_nv
933 unsigned HexagonInstrInfo::getInvertedPredicatedOpcode(const int Opc) const {
935 InvPredOpcode = isPredicatedTrue(Opc) ? Hexagon::getFalsePredOpcode(Opc)
936 : Hexagon::getTruePredOpcode(Opc);
937 if (InvPredOpcode >= 0) // Valid instruction with the inverted predicate.
938 return InvPredOpcode;
941 default: llvm_unreachable("Unexpected predicated instruction");
942 case Hexagon::TFR_cPt:
943 return Hexagon::TFR_cNotPt;
944 case Hexagon::TFR_cNotPt:
945 return Hexagon::TFR_cPt;
947 case Hexagon::TFRI_cPt:
948 return Hexagon::TFRI_cNotPt;
949 case Hexagon::TFRI_cNotPt:
950 return Hexagon::TFRI_cPt;
953 return Hexagon::JMP_f;
955 return Hexagon::JMP_t;
957 case Hexagon::ADD_ri_cPt:
958 return Hexagon::ADD_ri_cNotPt;
959 case Hexagon::ADD_ri_cNotPt:
960 return Hexagon::ADD_ri_cPt;
962 case Hexagon::ADD_rr_cPt:
963 return Hexagon::ADD_rr_cNotPt;
964 case Hexagon::ADD_rr_cNotPt:
965 return Hexagon::ADD_rr_cPt;
967 case Hexagon::XOR_rr_cPt:
968 return Hexagon::XOR_rr_cNotPt;
969 case Hexagon::XOR_rr_cNotPt:
970 return Hexagon::XOR_rr_cPt;
972 case Hexagon::AND_rr_cPt:
973 return Hexagon::AND_rr_cNotPt;
974 case Hexagon::AND_rr_cNotPt:
975 return Hexagon::AND_rr_cPt;
977 case Hexagon::OR_rr_cPt:
978 return Hexagon::OR_rr_cNotPt;
979 case Hexagon::OR_rr_cNotPt:
980 return Hexagon::OR_rr_cPt;
982 case Hexagon::SUB_rr_cPt:
983 return Hexagon::SUB_rr_cNotPt;
984 case Hexagon::SUB_rr_cNotPt:
985 return Hexagon::SUB_rr_cPt;
987 case Hexagon::COMBINE_rr_cPt:
988 return Hexagon::COMBINE_rr_cNotPt;
989 case Hexagon::COMBINE_rr_cNotPt:
990 return Hexagon::COMBINE_rr_cPt;
992 case Hexagon::ASLH_cPt_V4:
993 return Hexagon::ASLH_cNotPt_V4;
994 case Hexagon::ASLH_cNotPt_V4:
995 return Hexagon::ASLH_cPt_V4;
997 case Hexagon::ASRH_cPt_V4:
998 return Hexagon::ASRH_cNotPt_V4;
999 case Hexagon::ASRH_cNotPt_V4:
1000 return Hexagon::ASRH_cPt_V4;
1002 case Hexagon::SXTB_cPt_V4:
1003 return Hexagon::SXTB_cNotPt_V4;
1004 case Hexagon::SXTB_cNotPt_V4:
1005 return Hexagon::SXTB_cPt_V4;
1007 case Hexagon::SXTH_cPt_V4:
1008 return Hexagon::SXTH_cNotPt_V4;
1009 case Hexagon::SXTH_cNotPt_V4:
1010 return Hexagon::SXTH_cPt_V4;
1012 case Hexagon::ZXTB_cPt_V4:
1013 return Hexagon::ZXTB_cNotPt_V4;
1014 case Hexagon::ZXTB_cNotPt_V4:
1015 return Hexagon::ZXTB_cPt_V4;
1017 case Hexagon::ZXTH_cPt_V4:
1018 return Hexagon::ZXTH_cNotPt_V4;
1019 case Hexagon::ZXTH_cNotPt_V4:
1020 return Hexagon::ZXTH_cPt_V4;
1023 case Hexagon::JMPR_t:
1024 return Hexagon::JMPR_f;
1025 case Hexagon::JMPR_f:
1026 return Hexagon::JMPR_t;
1028 // V4 indexed+scaled load.
1029 case Hexagon::LDrid_indexed_shl_cPt_V4:
1030 return Hexagon::LDrid_indexed_shl_cNotPt_V4;
1031 case Hexagon::LDrid_indexed_shl_cNotPt_V4:
1032 return Hexagon::LDrid_indexed_shl_cPt_V4;
1034 case Hexagon::LDrib_indexed_shl_cPt_V4:
1035 return Hexagon::LDrib_indexed_shl_cNotPt_V4;
1036 case Hexagon::LDrib_indexed_shl_cNotPt_V4:
1037 return Hexagon::LDrib_indexed_shl_cPt_V4;
1039 case Hexagon::LDriub_indexed_shl_cPt_V4:
1040 return Hexagon::LDriub_indexed_shl_cNotPt_V4;
1041 case Hexagon::LDriub_indexed_shl_cNotPt_V4:
1042 return Hexagon::LDriub_indexed_shl_cPt_V4;
1044 case Hexagon::LDrih_indexed_shl_cPt_V4:
1045 return Hexagon::LDrih_indexed_shl_cNotPt_V4;
1046 case Hexagon::LDrih_indexed_shl_cNotPt_V4:
1047 return Hexagon::LDrih_indexed_shl_cPt_V4;
1049 case Hexagon::LDriuh_indexed_shl_cPt_V4:
1050 return Hexagon::LDriuh_indexed_shl_cNotPt_V4;
1051 case Hexagon::LDriuh_indexed_shl_cNotPt_V4:
1052 return Hexagon::LDriuh_indexed_shl_cPt_V4;
1054 case Hexagon::LDriw_indexed_shl_cPt_V4:
1055 return Hexagon::LDriw_indexed_shl_cNotPt_V4;
1056 case Hexagon::LDriw_indexed_shl_cNotPt_V4:
1057 return Hexagon::LDriw_indexed_shl_cPt_V4;
1060 case Hexagon::POST_STbri_cPt:
1061 return Hexagon::POST_STbri_cNotPt;
1062 case Hexagon::POST_STbri_cNotPt:
1063 return Hexagon::POST_STbri_cPt;
1065 case Hexagon::STrib_cPt:
1066 return Hexagon::STrib_cNotPt;
1067 case Hexagon::STrib_cNotPt:
1068 return Hexagon::STrib_cPt;
1070 case Hexagon::STrib_indexed_cPt:
1071 return Hexagon::STrib_indexed_cNotPt;
1072 case Hexagon::STrib_indexed_cNotPt:
1073 return Hexagon::STrib_indexed_cPt;
1075 case Hexagon::STrib_imm_cPt_V4:
1076 return Hexagon::STrib_imm_cNotPt_V4;
1077 case Hexagon::STrib_imm_cNotPt_V4:
1078 return Hexagon::STrib_imm_cPt_V4;
1080 case Hexagon::STrib_indexed_shl_cPt_V4:
1081 return Hexagon::STrib_indexed_shl_cNotPt_V4;
1082 case Hexagon::STrib_indexed_shl_cNotPt_V4:
1083 return Hexagon::STrib_indexed_shl_cPt_V4;
1086 case Hexagon::POST_SThri_cPt:
1087 return Hexagon::POST_SThri_cNotPt;
1088 case Hexagon::POST_SThri_cNotPt:
1089 return Hexagon::POST_SThri_cPt;
1091 case Hexagon::STrih_cPt:
1092 return Hexagon::STrih_cNotPt;
1093 case Hexagon::STrih_cNotPt:
1094 return Hexagon::STrih_cPt;
1096 case Hexagon::STrih_indexed_cPt:
1097 return Hexagon::STrih_indexed_cNotPt;
1098 case Hexagon::STrih_indexed_cNotPt:
1099 return Hexagon::STrih_indexed_cPt;
1101 case Hexagon::STrih_imm_cPt_V4:
1102 return Hexagon::STrih_imm_cNotPt_V4;
1103 case Hexagon::STrih_imm_cNotPt_V4:
1104 return Hexagon::STrih_imm_cPt_V4;
1106 case Hexagon::STrih_indexed_shl_cPt_V4:
1107 return Hexagon::STrih_indexed_shl_cNotPt_V4;
1108 case Hexagon::STrih_indexed_shl_cNotPt_V4:
1109 return Hexagon::STrih_indexed_shl_cPt_V4;
1112 case Hexagon::POST_STwri_cPt:
1113 return Hexagon::POST_STwri_cNotPt;
1114 case Hexagon::POST_STwri_cNotPt:
1115 return Hexagon::POST_STwri_cPt;
1117 case Hexagon::STriw_cPt:
1118 return Hexagon::STriw_cNotPt;
1119 case Hexagon::STriw_cNotPt:
1120 return Hexagon::STriw_cPt;
1122 case Hexagon::STriw_indexed_cPt:
1123 return Hexagon::STriw_indexed_cNotPt;
1124 case Hexagon::STriw_indexed_cNotPt:
1125 return Hexagon::STriw_indexed_cPt;
1127 case Hexagon::STriw_indexed_shl_cPt_V4:
1128 return Hexagon::STriw_indexed_shl_cNotPt_V4;
1129 case Hexagon::STriw_indexed_shl_cNotPt_V4:
1130 return Hexagon::STriw_indexed_shl_cPt_V4;
1132 case Hexagon::STriw_imm_cPt_V4:
1133 return Hexagon::STriw_imm_cNotPt_V4;
1134 case Hexagon::STriw_imm_cNotPt_V4:
1135 return Hexagon::STriw_imm_cPt_V4;
1138 case Hexagon::POST_STdri_cPt:
1139 return Hexagon::POST_STdri_cNotPt;
1140 case Hexagon::POST_STdri_cNotPt:
1141 return Hexagon::POST_STdri_cPt;
1143 case Hexagon::STrid_cPt:
1144 return Hexagon::STrid_cNotPt;
1145 case Hexagon::STrid_cNotPt:
1146 return Hexagon::STrid_cPt;
1148 case Hexagon::STrid_indexed_cPt:
1149 return Hexagon::STrid_indexed_cNotPt;
1150 case Hexagon::STrid_indexed_cNotPt:
1151 return Hexagon::STrid_indexed_cPt;
1153 case Hexagon::STrid_indexed_shl_cPt_V4:
1154 return Hexagon::STrid_indexed_shl_cNotPt_V4;
1155 case Hexagon::STrid_indexed_shl_cNotPt_V4:
1156 return Hexagon::STrid_indexed_shl_cPt_V4;
1158 // V4 Store to global address.
1159 case Hexagon::STd_GP_cPt_V4:
1160 return Hexagon::STd_GP_cNotPt_V4;
1161 case Hexagon::STd_GP_cNotPt_V4:
1162 return Hexagon::STd_GP_cPt_V4;
1164 case Hexagon::STb_GP_cPt_V4:
1165 return Hexagon::STb_GP_cNotPt_V4;
1166 case Hexagon::STb_GP_cNotPt_V4:
1167 return Hexagon::STb_GP_cPt_V4;
1169 case Hexagon::STh_GP_cPt_V4:
1170 return Hexagon::STh_GP_cNotPt_V4;
1171 case Hexagon::STh_GP_cNotPt_V4:
1172 return Hexagon::STh_GP_cPt_V4;
1174 case Hexagon::STw_GP_cPt_V4:
1175 return Hexagon::STw_GP_cNotPt_V4;
1176 case Hexagon::STw_GP_cNotPt_V4:
1177 return Hexagon::STw_GP_cPt_V4;
1180 case Hexagon::LDrid_cPt:
1181 return Hexagon::LDrid_cNotPt;
1182 case Hexagon::LDrid_cNotPt:
1183 return Hexagon::LDrid_cPt;
1185 case Hexagon::LDriw_cPt:
1186 return Hexagon::LDriw_cNotPt;
1187 case Hexagon::LDriw_cNotPt:
1188 return Hexagon::LDriw_cPt;
1190 case Hexagon::LDrih_cPt:
1191 return Hexagon::LDrih_cNotPt;
1192 case Hexagon::LDrih_cNotPt:
1193 return Hexagon::LDrih_cPt;
1195 case Hexagon::LDriuh_cPt:
1196 return Hexagon::LDriuh_cNotPt;
1197 case Hexagon::LDriuh_cNotPt:
1198 return Hexagon::LDriuh_cPt;
1200 case Hexagon::LDrib_cPt:
1201 return Hexagon::LDrib_cNotPt;
1202 case Hexagon::LDrib_cNotPt:
1203 return Hexagon::LDrib_cPt;
1205 case Hexagon::LDriub_cPt:
1206 return Hexagon::LDriub_cNotPt;
1207 case Hexagon::LDriub_cNotPt:
1208 return Hexagon::LDriub_cPt;
1211 case Hexagon::LDrid_indexed_cPt:
1212 return Hexagon::LDrid_indexed_cNotPt;
1213 case Hexagon::LDrid_indexed_cNotPt:
1214 return Hexagon::LDrid_indexed_cPt;
1216 case Hexagon::LDriw_indexed_cPt:
1217 return Hexagon::LDriw_indexed_cNotPt;
1218 case Hexagon::LDriw_indexed_cNotPt:
1219 return Hexagon::LDriw_indexed_cPt;
1221 case Hexagon::LDrih_indexed_cPt:
1222 return Hexagon::LDrih_indexed_cNotPt;
1223 case Hexagon::LDrih_indexed_cNotPt:
1224 return Hexagon::LDrih_indexed_cPt;
1226 case Hexagon::LDriuh_indexed_cPt:
1227 return Hexagon::LDriuh_indexed_cNotPt;
1228 case Hexagon::LDriuh_indexed_cNotPt:
1229 return Hexagon::LDriuh_indexed_cPt;
1231 case Hexagon::LDrib_indexed_cPt:
1232 return Hexagon::LDrib_indexed_cNotPt;
1233 case Hexagon::LDrib_indexed_cNotPt:
1234 return Hexagon::LDrib_indexed_cPt;
1236 case Hexagon::LDriub_indexed_cPt:
1237 return Hexagon::LDriub_indexed_cNotPt;
1238 case Hexagon::LDriub_indexed_cNotPt:
1239 return Hexagon::LDriub_indexed_cPt;
1242 case Hexagon::POST_LDrid_cPt:
1243 return Hexagon::POST_LDrid_cNotPt;
1244 case Hexagon::POST_LDriw_cNotPt:
1245 return Hexagon::POST_LDriw_cPt;
1247 case Hexagon::POST_LDrih_cPt:
1248 return Hexagon::POST_LDrih_cNotPt;
1249 case Hexagon::POST_LDrih_cNotPt:
1250 return Hexagon::POST_LDrih_cPt;
1252 case Hexagon::POST_LDriuh_cPt:
1253 return Hexagon::POST_LDriuh_cNotPt;
1254 case Hexagon::POST_LDriuh_cNotPt:
1255 return Hexagon::POST_LDriuh_cPt;
1257 case Hexagon::POST_LDrib_cPt:
1258 return Hexagon::POST_LDrib_cNotPt;
1259 case Hexagon::POST_LDrib_cNotPt:
1260 return Hexagon::POST_LDrib_cPt;
1262 case Hexagon::POST_LDriub_cPt:
1263 return Hexagon::POST_LDriub_cNotPt;
1264 case Hexagon::POST_LDriub_cNotPt:
1265 return Hexagon::POST_LDriub_cPt;
1268 case Hexagon::DEALLOC_RET_cPt_V4:
1269 return Hexagon::DEALLOC_RET_cNotPt_V4;
1270 case Hexagon::DEALLOC_RET_cNotPt_V4:
1271 return Hexagon::DEALLOC_RET_cPt_V4;
1276 int HexagonInstrInfo::
1277 getMatchingCondBranchOpcode(int Opc, bool invertPredicate) const {
1278 enum Hexagon::PredSense inPredSense;
1279 inPredSense = invertPredicate ? Hexagon::PredSense_false :
1280 Hexagon::PredSense_true;
1281 int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
1282 if (CondOpcode >= 0) // Valid Conditional opcode/instruction
1285 // This switch case will be removed once all the instructions have been
1286 // modified to use relation maps.
1289 return !invertPredicate ? Hexagon::TFR_cPt :
1290 Hexagon::TFR_cNotPt;
1291 case Hexagon::TFRI_f:
1292 return !invertPredicate ? Hexagon::TFRI_cPt_f :
1293 Hexagon::TFRI_cNotPt_f;
1295 return !invertPredicate ? Hexagon::TFRI_cPt :
1296 Hexagon::TFRI_cNotPt;
1298 return !invertPredicate ? Hexagon::JMP_t :
1301 case Hexagon::COMBINE_rr:
1302 return !invertPredicate ? Hexagon::COMBINE_rr_cPt :
1303 Hexagon::COMBINE_rr_cNotPt;
1305 return !invertPredicate ? Hexagon::ASLH_cPt_V4 :
1306 Hexagon::ASLH_cNotPt_V4;
1308 return !invertPredicate ? Hexagon::ASRH_cPt_V4 :
1309 Hexagon::ASRH_cNotPt_V4;
1311 return !invertPredicate ? Hexagon::SXTB_cPt_V4 :
1312 Hexagon::SXTB_cNotPt_V4;
1314 return !invertPredicate ? Hexagon::SXTH_cPt_V4 :
1315 Hexagon::SXTH_cNotPt_V4;
1317 return !invertPredicate ? Hexagon::ZXTB_cPt_V4 :
1318 Hexagon::ZXTB_cNotPt_V4;
1320 return !invertPredicate ? Hexagon::ZXTH_cPt_V4 :
1321 Hexagon::ZXTH_cNotPt_V4;
1324 return !invertPredicate ? Hexagon::JMPR_t :
1327 // V4 indexed+scaled load.
1328 case Hexagon::LDrid_indexed_shl_V4:
1329 return !invertPredicate ? Hexagon::LDrid_indexed_shl_cPt_V4 :
1330 Hexagon::LDrid_indexed_shl_cNotPt_V4;
1331 case Hexagon::LDrib_indexed_shl_V4:
1332 return !invertPredicate ? Hexagon::LDrib_indexed_shl_cPt_V4 :
1333 Hexagon::LDrib_indexed_shl_cNotPt_V4;
1334 case Hexagon::LDriub_indexed_shl_V4:
1335 return !invertPredicate ? Hexagon::LDriub_indexed_shl_cPt_V4 :
1336 Hexagon::LDriub_indexed_shl_cNotPt_V4;
1337 case Hexagon::LDrih_indexed_shl_V4:
1338 return !invertPredicate ? Hexagon::LDrih_indexed_shl_cPt_V4 :
1339 Hexagon::LDrih_indexed_shl_cNotPt_V4;
1340 case Hexagon::LDriuh_indexed_shl_V4:
1341 return !invertPredicate ? Hexagon::LDriuh_indexed_shl_cPt_V4 :
1342 Hexagon::LDriuh_indexed_shl_cNotPt_V4;
1343 case Hexagon::LDriw_indexed_shl_V4:
1344 return !invertPredicate ? Hexagon::LDriw_indexed_shl_cPt_V4 :
1345 Hexagon::LDriw_indexed_shl_cNotPt_V4;
1347 // V4 Load from global address
1348 case Hexagon::LDd_GP_V4:
1349 return !invertPredicate ? Hexagon::LDd_GP_cPt_V4 :
1350 Hexagon::LDd_GP_cNotPt_V4;
1351 case Hexagon::LDb_GP_V4:
1352 return !invertPredicate ? Hexagon::LDb_GP_cPt_V4 :
1353 Hexagon::LDb_GP_cNotPt_V4;
1354 case Hexagon::LDub_GP_V4:
1355 return !invertPredicate ? Hexagon::LDub_GP_cPt_V4 :
1356 Hexagon::LDub_GP_cNotPt_V4;
1357 case Hexagon::LDh_GP_V4:
1358 return !invertPredicate ? Hexagon::LDh_GP_cPt_V4 :
1359 Hexagon::LDh_GP_cNotPt_V4;
1360 case Hexagon::LDuh_GP_V4:
1361 return !invertPredicate ? Hexagon::LDuh_GP_cPt_V4 :
1362 Hexagon::LDuh_GP_cNotPt_V4;
1363 case Hexagon::LDw_GP_V4:
1364 return !invertPredicate ? Hexagon::LDw_GP_cPt_V4 :
1365 Hexagon::LDw_GP_cNotPt_V4;
1368 case Hexagon::POST_STbri:
1369 return !invertPredicate ? Hexagon::POST_STbri_cPt :
1370 Hexagon::POST_STbri_cNotPt;
1371 case Hexagon::STrib:
1372 return !invertPredicate ? Hexagon::STrib_cPt :
1373 Hexagon::STrib_cNotPt;
1374 case Hexagon::STrib_indexed:
1375 return !invertPredicate ? Hexagon::STrib_indexed_cPt :
1376 Hexagon::STrib_indexed_cNotPt;
1377 case Hexagon::STrib_imm_V4:
1378 return !invertPredicate ? Hexagon::STrib_imm_cPt_V4 :
1379 Hexagon::STrib_imm_cNotPt_V4;
1380 case Hexagon::STrib_indexed_shl_V4:
1381 return !invertPredicate ? Hexagon::STrib_indexed_shl_cPt_V4 :
1382 Hexagon::STrib_indexed_shl_cNotPt_V4;
1384 case Hexagon::POST_SThri:
1385 return !invertPredicate ? Hexagon::POST_SThri_cPt :
1386 Hexagon::POST_SThri_cNotPt;
1387 case Hexagon::STrih:
1388 return !invertPredicate ? Hexagon::STrih_cPt :
1389 Hexagon::STrih_cNotPt;
1390 case Hexagon::STrih_indexed:
1391 return !invertPredicate ? Hexagon::STrih_indexed_cPt :
1392 Hexagon::STrih_indexed_cNotPt;
1393 case Hexagon::STrih_imm_V4:
1394 return !invertPredicate ? Hexagon::STrih_imm_cPt_V4 :
1395 Hexagon::STrih_imm_cNotPt_V4;
1396 case Hexagon::STrih_indexed_shl_V4:
1397 return !invertPredicate ? Hexagon::STrih_indexed_shl_cPt_V4 :
1398 Hexagon::STrih_indexed_shl_cNotPt_V4;
1400 case Hexagon::POST_STwri:
1401 return !invertPredicate ? Hexagon::POST_STwri_cPt :
1402 Hexagon::POST_STwri_cNotPt;
1403 case Hexagon::STriw:
1404 return !invertPredicate ? Hexagon::STriw_cPt :
1405 Hexagon::STriw_cNotPt;
1406 case Hexagon::STriw_indexed:
1407 return !invertPredicate ? Hexagon::STriw_indexed_cPt :
1408 Hexagon::STriw_indexed_cNotPt;
1409 case Hexagon::STriw_indexed_shl_V4:
1410 return !invertPredicate ? Hexagon::STriw_indexed_shl_cPt_V4 :
1411 Hexagon::STriw_indexed_shl_cNotPt_V4;
1412 case Hexagon::STriw_imm_V4:
1413 return !invertPredicate ? Hexagon::STriw_imm_cPt_V4 :
1414 Hexagon::STriw_imm_cNotPt_V4;
1416 case Hexagon::POST_STdri:
1417 return !invertPredicate ? Hexagon::POST_STdri_cPt :
1418 Hexagon::POST_STdri_cNotPt;
1419 case Hexagon::STrid:
1420 return !invertPredicate ? Hexagon::STrid_cPt :
1421 Hexagon::STrid_cNotPt;
1422 case Hexagon::STrid_indexed:
1423 return !invertPredicate ? Hexagon::STrid_indexed_cPt :
1424 Hexagon::STrid_indexed_cNotPt;
1425 case Hexagon::STrid_indexed_shl_V4:
1426 return !invertPredicate ? Hexagon::STrid_indexed_shl_cPt_V4 :
1427 Hexagon::STrid_indexed_shl_cNotPt_V4;
1429 // V4 Store to global address
1430 case Hexagon::STd_GP_V4:
1431 return !invertPredicate ? Hexagon::STd_GP_cPt_V4 :
1432 Hexagon::STd_GP_cNotPt_V4;
1433 case Hexagon::STb_GP_V4:
1434 return !invertPredicate ? Hexagon::STb_GP_cPt_V4 :
1435 Hexagon::STb_GP_cNotPt_V4;
1436 case Hexagon::STh_GP_V4:
1437 return !invertPredicate ? Hexagon::STh_GP_cPt_V4 :
1438 Hexagon::STh_GP_cNotPt_V4;
1439 case Hexagon::STw_GP_V4:
1440 return !invertPredicate ? Hexagon::STw_GP_cPt_V4 :
1441 Hexagon::STw_GP_cNotPt_V4;
1444 case Hexagon::LDrid:
1445 return !invertPredicate ? Hexagon::LDrid_cPt :
1446 Hexagon::LDrid_cNotPt;
1447 case Hexagon::LDriw:
1448 return !invertPredicate ? Hexagon::LDriw_cPt :
1449 Hexagon::LDriw_cNotPt;
1450 case Hexagon::LDrih:
1451 return !invertPredicate ? Hexagon::LDrih_cPt :
1452 Hexagon::LDrih_cNotPt;
1453 case Hexagon::LDriuh:
1454 return !invertPredicate ? Hexagon::LDriuh_cPt :
1455 Hexagon::LDriuh_cNotPt;
1456 case Hexagon::LDrib:
1457 return !invertPredicate ? Hexagon::LDrib_cPt :
1458 Hexagon::LDrib_cNotPt;
1459 case Hexagon::LDriub:
1460 return !invertPredicate ? Hexagon::LDriub_cPt :
1461 Hexagon::LDriub_cNotPt;
1463 case Hexagon::LDrid_indexed:
1464 return !invertPredicate ? Hexagon::LDrid_indexed_cPt :
1465 Hexagon::LDrid_indexed_cNotPt;
1466 case Hexagon::LDriw_indexed:
1467 return !invertPredicate ? Hexagon::LDriw_indexed_cPt :
1468 Hexagon::LDriw_indexed_cNotPt;
1469 case Hexagon::LDrih_indexed:
1470 return !invertPredicate ? Hexagon::LDrih_indexed_cPt :
1471 Hexagon::LDrih_indexed_cNotPt;
1472 case Hexagon::LDriuh_indexed:
1473 return !invertPredicate ? Hexagon::LDriuh_indexed_cPt :
1474 Hexagon::LDriuh_indexed_cNotPt;
1475 case Hexagon::LDrib_indexed:
1476 return !invertPredicate ? Hexagon::LDrib_indexed_cPt :
1477 Hexagon::LDrib_indexed_cNotPt;
1478 case Hexagon::LDriub_indexed:
1479 return !invertPredicate ? Hexagon::LDriub_indexed_cPt :
1480 Hexagon::LDriub_indexed_cNotPt;
1481 // Post Increment Load.
1482 case Hexagon::POST_LDrid:
1483 return !invertPredicate ? Hexagon::POST_LDrid_cPt :
1484 Hexagon::POST_LDrid_cNotPt;
1485 case Hexagon::POST_LDriw:
1486 return !invertPredicate ? Hexagon::POST_LDriw_cPt :
1487 Hexagon::POST_LDriw_cNotPt;
1488 case Hexagon::POST_LDrih:
1489 return !invertPredicate ? Hexagon::POST_LDrih_cPt :
1490 Hexagon::POST_LDrih_cNotPt;
1491 case Hexagon::POST_LDriuh:
1492 return !invertPredicate ? Hexagon::POST_LDriuh_cPt :
1493 Hexagon::POST_LDriuh_cNotPt;
1494 case Hexagon::POST_LDrib:
1495 return !invertPredicate ? Hexagon::POST_LDrib_cPt :
1496 Hexagon::POST_LDrib_cNotPt;
1497 case Hexagon::POST_LDriub:
1498 return !invertPredicate ? Hexagon::POST_LDriub_cPt :
1499 Hexagon::POST_LDriub_cNotPt;
1501 case Hexagon::DEALLOC_RET_V4:
1502 return !invertPredicate ? Hexagon::DEALLOC_RET_cPt_V4 :
1503 Hexagon::DEALLOC_RET_cNotPt_V4;
1505 llvm_unreachable("Unexpected predicable instruction");
// Convert MI in place into its predicated form, guarded by the condition
// described in Cond. Cond[0] as an immediate 0 selects the inverted
// (predicate-false) variant. The opcode is swapped first; the rest of the
// function rewrites the operand list so the predicate register becomes the
// first input operand (immediately after the explicit defs).
// GlobalAddress operands get special handling because MachineOperand has
// no ChangeTo* method for them: they are buffered in tmpOpnds, removed,
// and re-appended after the predicate is inserted.
1509 bool HexagonInstrInfo::
1510 PredicateInstruction(MachineInstr *MI,
1511 const SmallVectorImpl<MachineOperand> &Cond) const {
1512 int Opc = MI->getOpcode();
1513 assert (isPredicable(MI) && "Expected predicable instruction");
// Immediate 0 at Cond[0] is the marker ReverseBranchCondition inserts.
1514 bool invertJump = (!Cond.empty() && Cond[0].isImm() &&
1515 (Cond[0].getImm() == 0));
1517 // This will change MI's opcode to its predicate version.
1518 // However, its operand list is still the old one, i.e. the
1519 // non-predicate one.
1520 MI->setDesc(get(getMatchingCondBranchOpcode(Opc, invertJump)));
// Index of the GlobalAddress operand, if one is found below.
1523 unsigned int GAIdx = 0;
1525 // Indicates whether the current MI has a GlobalAddress operand
1526 bool hasGAOpnd = false;
1527 std::vector<MachineOperand> tmpOpnds;
1529 // Indicates whether we need to shift operands to right.
1530 bool needShift = true;
1532 // The predicate is ALWAYS the FIRST input operand !!!
1533 if (MI->getNumOperands() == 0) {
1534 // The non-predicate version of MI does not take any operands,
1535 // i.e. no outs and no ins. In this condition, the predicate
1536 // operand will be directly placed at Operands[0]. No operand
1542 else if ( MI->getOperand(MI->getNumOperands()-1).isReg()
1543 && MI->getOperand(MI->getNumOperands()-1).isDef()
1544 && !MI->getOperand(MI->getNumOperands()-1).isImplicit()) {
1545 // The non-predicate version of MI does not have any input operands.
1546 // In this condition, we extend the length of Operands[] by one and
1547 // copy the original last operand to the newly allocated slot.
1548 // At this moment, it is just a place holder. Later, we will put
1549 // predicate operand directly into it. No operand shift is needed.
1550 // Example: r0=BARRIER (this is a faked insn used here for illustration)
1551 MI->addOperand(MI->getOperand(MI->getNumOperands()-1));
1553 oper = MI->getNumOperands() - 2;
1556 // We need to right shift all input operands by one. Duplicate the
1557 // last operand into the newly allocated slot.
1558 MI->addOperand(MI->getOperand(MI->getNumOperands()-1));
1563 // Operands[ MI->getNumOperands() - 2 ] has been copied into
1564 // Operands[ MI->getNumOperands() - 1 ], so we start from
1565 // Operands[ MI->getNumOperands() - 3 ].
1566 // oper is a signed int.
1567 // It is ok if "MI->getNumOperands()-3" is -3, -2, or -1.
1568 for (oper = MI->getNumOperands() - 3; oper >= 0; --oper)
1570 MachineOperand &MO = MI->getOperand(oper);
1572 // Opnd[0] Opnd[1] Opnd[2] Opnd[3] Opnd[4] Opnd[5] Opnd[6] Opnd[7]
1573 // <Def0> <Def1> <Use0> <Use1> <ImpDef0> <ImpDef1> <ImpUse0> <ImpUse1>
1577 // Predicate Operand here
// Stop shifting once we reach the explicit defs: they stay in place.
1578 if (MO.isReg() && !MO.isUse() && !MO.isImplicit()) {
// Copy each input operand one slot to the right, preserving flags.
1582 MI->getOperand(oper+1).ChangeToRegister(MO.getReg(), MO.isDef(),
1583 MO.isImplicit(), MO.isKill(),
1584 MO.isDead(), MO.isUndef(),
1587 else if (MO.isImm()) {
1588 MI->getOperand(oper+1).ChangeToImmediate(MO.getImm());
1590 else if (MO.isGlobal()) {
1591 // MI can not have more than one GlobalAddress operand.
1592 assert(hasGAOpnd == false && "MI can only have one GlobalAddress opnd");
1594 // There is no member function called "ChangeToGlobalAddress" in the
1595 // MachineOperand class (not like "ChangeToRegister" and
1596 // "ChangeToImmediate"). So we have to remove them from Operands[] list
1597 // first, and then add them back after we have inserted the predicate
1598 // operand. tmpOpnds[] is to remember these operands before we remove
1600 tmpOpnds.push_back(MO);
1602 // Operands[oper] is a GlobalAddress operand;
1603 // Operands[oper+1] has been copied into Operands[oper+2];
1609 assert(false && "Unexpected operand type");
// Which of Cond's registers to use depends on the predicate sense.
1614 int regPos = invertJump ? 1 : 0;
1615 MachineOperand PredMO = Cond[regPos];
1617 // [oper] now points to the last explicit Def. Predicate operand must be
1618 // located at [oper+1]. See diagram above.
1619 // This assumes that the predicate is always the first operand,
1620 // i.e. Operands[0+numResults], in the set of inputs
1621 // It is better to have an assert here to check this. But I don't know how
1622 // to write this assert because findFirstPredOperandIdx() would return -1
1623 if (oper < -1) oper = -1;
1625 MI->getOperand(oper+1).ChangeToRegister(PredMO.getReg(), PredMO.isDef(),
1626 PredMO.isImplicit(), false,
1627 PredMO.isDead(), PredMO.isUndef(),
// The predicate register now has an extra use; drop stale kill flags.
1630 MachineRegisterInfo &RegInfo = MI->getParent()->getParent()->getRegInfo();
1631 RegInfo.clearKillFlags(PredMO.getReg());
// Re-append the buffered GlobalAddress (and everything after it).
1637 // Operands[GAIdx] is the original GlobalAddress operand, which is
1638 // already copied into tmpOpnds[0].
1639 // Operands[GAIdx] now stores a copy of Operands[GAIdx-1]
1640 // Operands[GAIdx+1] has already been copied into Operands[GAIdx+2],
1641 // so we start from [GAIdx+2]
1642 for (i = GAIdx + 2; i < MI->getNumOperands(); ++i)
1643 tmpOpnds.push_back(MI->getOperand(i));
1645 // Remove all operands in range [ (GAIdx+1) ... (MI->getNumOperands()-1) ]
1646 // It is very important that we always remove from the end of Operands[]
1647 // MI->getNumOperands() is at least 2 if program goes to here.
1648 for (i = MI->getNumOperands() - 1; i > GAIdx; --i)
1649 MI->RemoveOperand(i);
1651 for (i = 0; i < tmpOpnds.size(); ++i)
1652 MI->addOperand(tmpOpnds[i]);
// If-conversion profitability hook for a single block (triangle case).
// NOTE(review): the return statement is not visible in this excerpt;
// presumably this unconditionally allows if-conversion — confirm in the
// full file.
1661 isProfitableToIfCvt(MachineBasicBlock &MBB,
1663 unsigned ExtraPredCycles,
1664 const BranchProbability &Probability) const {
// If-conversion profitability hook for a true/false block pair (diamond
// case). NOTE(review): the return statement is not visible in this
// excerpt — confirm the policy in the full file.
1671 isProfitableToIfCvt(MachineBasicBlock &TMBB,
1672 unsigned NumTCycles,
1673 unsigned ExtraTCycles,
1674 MachineBasicBlock &FMBB,
1675 unsigned NumFCycles,
1676 unsigned ExtraFCycles,
1677 const BranchProbability &Probability) const {
1681 // Returns true if an instruction is predicated irrespective of the predicate
1682 // sense. For example, all of the following will return true.
1683 // if (p0) R1 = add(R2, R3)
1684 // if (!p0) R1 = add(R2, R3)
1685 // if (p0.new) R1 = add(R2, R3)
1686 // if (!p0.new) R1 = add(R2, R3)
1687 bool HexagonInstrInfo::isPredicated(const MachineInstr *MI) const {
// Predicated-ness is a bit in the TableGen-encoded TSFlags field.
1688 const uint64_t F = MI->getDesc().TSFlags;
1690 return ((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
// Opcode-based variant of isPredicated: same TSFlags test, but looks up
// the descriptor from the opcode instead of an existing MachineInstr.
1693 bool HexagonInstrInfo::isPredicated(unsigned Opcode) const {
1694 const uint64_t F = get(Opcode).TSFlags;
1696 return ((F >> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
// Returns true if MI is predicated on the true sense of its predicate
// (i.e. "if (p0)" rather than "if (!p0)"). Only meaningful for
// predicated instructions, hence the assert.
1699 bool HexagonInstrInfo::isPredicatedTrue(const MachineInstr *MI) const {
1700 const uint64_t F = MI->getDesc().TSFlags;
1702 assert(isPredicated(MI));
// The TSFlags bit encodes the FALSE sense, so invert it.
1703 return (!((F >> HexagonII::PredicatedFalsePos) &
1704 HexagonII::PredicatedFalseMask));
// Opcode-based variant of isPredicatedTrue: true when the opcode's
// predicate sense is "true" (not the negated form).
1707 bool HexagonInstrInfo::isPredicatedTrue(unsigned Opcode) const {
1708 const uint64_t F = get(Opcode).TSFlags;
1710 // Make sure that the instruction is predicated.
1711 assert((F>> HexagonII::PredicatedPos) & HexagonII::PredicatedMask);
// The TSFlags bit encodes the FALSE sense, so invert it.
1712 return (!((F >> HexagonII::PredicatedFalsePos) &
1713 HexagonII::PredicatedFalseMask));
// Returns true if MI uses a .new predicate (the predicate value produced
// in the same packet). Only valid for predicated instructions.
1716 bool HexagonInstrInfo::isPredicatedNew(const MachineInstr *MI) const {
1717 const uint64_t F = MI->getDesc().TSFlags;
1719 assert(isPredicated(MI));
1720 return ((F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask);
// Opcode-based variant of isPredicatedNew.
1723 bool HexagonInstrInfo::isPredicatedNew(unsigned Opcode) const {
1724 const uint64_t F = get(Opcode).TSFlags;
1726 assert(isPredicated(Opcode));
1727 return ((F >> HexagonII::PredicatedNewPos) & HexagonII::PredicatedNewMask);
// Scan MI's operands for a definition of a predicate register (PredRegs
// class) and report it via Pred. NOTE(review): the operand-append and
// return statements are not visible in this excerpt — confirm against
// the full file.
1731 HexagonInstrInfo::DefinesPredicate(MachineInstr *MI,
1732 std::vector<MachineOperand> &Pred) const {
1733 for (unsigned oper = 0; oper < MI->getNumOperands(); ++oper) {
1734 MachineOperand MO = MI->getOperand(oper);
1735 if (MO.isReg() && MO.isDef()) {
1736 const TargetRegisterClass* RC = RI.getMinimalPhysRegClass(MO.getReg());
1737 if (RC == &Hexagon::PredRegsRegClass) {
// TargetInstrInfo hook: whether Pred1 subsumes Pred2. NOTE(review): the
// body is not visible in this excerpt — confirm the policy (likely a
// conservative constant) in the full file.
1749 SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
1750 const SmallVectorImpl<MachineOperand> &Pred2) const {
1757 // We indicate that we want to reverse the branch by
1758 // inserting a 0 at the beginning of the Cond vector.
1760 bool HexagonInstrInfo::
1761 ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
// If the sentinel 0 is already present, reversing means removing it;
// otherwise reversing means inserting it at the front.
1762 if (!Cond.empty() && Cond[0].isImm() && Cond[0].getImm() == 0) {
1763 Cond.erase(Cond.begin());
1765 Cond.insert(Cond.begin(), MachineOperand::CreateImm(0));
// Duplicating a block for if-conversion is considered profitable only
// for short blocks (at most 4 instructions).
1771 bool HexagonInstrInfo::
1772 isProfitableToDupForIfCvt(MachineBasicBlock &MBB,unsigned NumInstrs,
1773 const BranchProbability &Probability) const {
1774 return (NumInstrs <= 4);
// Returns true if MI is any flavor of the V4 dealloc_return instruction
// (unconditional, predicated, or predicated-.new, either sense).
1777 bool HexagonInstrInfo::isDeallocRet(const MachineInstr *MI) const {
1778 switch (MI->getOpcode()) {
1779 default: return false;
1780 case Hexagon::DEALLOC_RET_V4 :
1781 case Hexagon::DEALLOC_RET_cPt_V4 :
1782 case Hexagon::DEALLOC_RET_cNotPt_V4 :
1783 case Hexagon::DEALLOC_RET_cdnPnt_V4 :
1784 case Hexagon::DEALLOC_RET_cNotdnPnt_V4 :
1785 case Hexagon::DEALLOC_RET_cdnPt_V4 :
1786 case Hexagon::DEALLOC_RET_cNotdnPt_V4 :
// Returns true if Offset fits in the addressing-mode immediate range of
// the memory/ALU instruction identified by Opcode. Ranges come from the
// Hexagon_MEM*_OFFSET_{MIN,MAX} / Hexagon_ADDI_OFFSET_* constants at the
// top of this file; memop (read-modify-write) instructions use small
// unsigned ranges scaled by access size (word 255, half 127, byte 63).
1792 bool HexagonInstrInfo::
1793 isValidOffset(const int Opcode, const int Offset) const {
1794 // This function is to check whether the "Offset" is in the correct range of
1795 // the given "Opcode". If "Offset" is not in the correct range, "ADD_ri" is
1796 // inserted to calculate the final address. Due to this reason, the function
1797 // assumes that the "Offset" has correct alignment.
1798 // We used to assert if the offset was not properly aligned, however,
1799 // there are cases where a misaligned pointer recast can cause this
1800 // problem, and we need to allow for it. The front end warns of such
1801 // misaligns with respect to load size.
// Word (32-bit) loads/stores.
1805 case Hexagon::LDriw:
1806 case Hexagon::LDriw_indexed:
1807 case Hexagon::LDriw_f:
1808 case Hexagon::STriw_indexed:
1809 case Hexagon::STriw:
1810 case Hexagon::STriw_f:
1811 return (Offset >= Hexagon_MEMW_OFFSET_MIN) &&
1812 (Offset <= Hexagon_MEMW_OFFSET_MAX);
// Doubleword (64-bit) loads/stores.
1814 case Hexagon::LDrid:
1815 case Hexagon::LDrid_indexed:
1816 case Hexagon::LDrid_f:
1817 case Hexagon::STrid:
1818 case Hexagon::STrid_indexed:
1819 case Hexagon::STrid_f:
1820 return (Offset >= Hexagon_MEMD_OFFSET_MIN) &&
1821 (Offset <= Hexagon_MEMD_OFFSET_MAX);
// Halfword (16-bit) loads/stores.
1823 case Hexagon::LDrih:
1824 case Hexagon::LDriuh:
1825 case Hexagon::STrih:
1826 return (Offset >= Hexagon_MEMH_OFFSET_MIN) &&
1827 (Offset <= Hexagon_MEMH_OFFSET_MAX);
// Byte loads/stores.
1829 case Hexagon::LDrib:
1830 case Hexagon::STrib:
1831 case Hexagon::LDriub:
1832 return (Offset >= Hexagon_MEMB_OFFSET_MIN) &&
1833 (Offset <= Hexagon_MEMB_OFFSET_MAX);
// Add-immediate / frame-index transfer use the 16-bit signed range.
1835 case Hexagon::ADD_ri:
1836 case Hexagon::TFR_FI:
1837 return (Offset >= Hexagon_ADDI_OFFSET_MIN) &&
1838 (Offset <= Hexagon_ADDI_OFFSET_MAX);
1840 case Hexagon::MemOPw_ADDi_V4 :
1841 case Hexagon::MemOPw_SUBi_V4 :
1842 case Hexagon::MemOPw_ADDr_V4 :
1843 case Hexagon::MemOPw_SUBr_V4 :
1844 case Hexagon::MemOPw_ANDr_V4 :
1845 case Hexagon::MemOPw_ORr_V4 :
1846 return (0 <= Offset && Offset <= 255);
1848 case Hexagon::MemOPh_ADDi_V4 :
1849 case Hexagon::MemOPh_SUBi_V4 :
1850 case Hexagon::MemOPh_ADDr_V4 :
1851 case Hexagon::MemOPh_SUBr_V4 :
1852 case Hexagon::MemOPh_ANDr_V4 :
1853 case Hexagon::MemOPh_ORr_V4 :
1854 return (0 <= Offset && Offset <= 127);
1856 case Hexagon::MemOPb_ADDi_V4 :
1857 case Hexagon::MemOPb_SUBi_V4 :
1858 case Hexagon::MemOPb_ADDr_V4 :
1859 case Hexagon::MemOPb_SUBr_V4 :
1860 case Hexagon::MemOPb_ANDr_V4 :
1861 case Hexagon::MemOPb_ORr_V4 :
1862 return (0 <= Offset && Offset <= 63);
1864 // LDri_pred and STriw_pred are pseudo operations, so it has to take offset of
1865 // any size. Later pass knows how to handle it.
1866 case Hexagon::STriw_pred:
1867 case Hexagon::LDriw_pred:
// Hardware-loop setup takes an unsigned 10-bit count.
1870 case Hexagon::LOOP0_i:
1871 return isUInt<10>(Offset);
1873 // INLINEASM is very special.
1874 case Hexagon::INLINEASM:
// Reaching here means a new memory opcode was added without an entry.
1878 llvm_unreachable("No offset range is defined for this opcode. "
1879 "Please define it in the above switch statement!");
1884 // Check if the Offset is a valid auto-inc imm by Load/Store Type.
// Each access size has its own signed range (Hexagon_MEM*_AUTOINC_*) and
// must be a multiple of the access size (8/4/2 bytes; bytes need no
// alignment check).
1886 bool HexagonInstrInfo::
1887 isValidAutoIncImm(const EVT VT, const int Offset) const {
1889 if (VT == MVT::i64) {
1890 return (Offset >= Hexagon_MEMD_AUTOINC_MIN &&
1891 Offset <= Hexagon_MEMD_AUTOINC_MAX &&
1892 (Offset & 0x7) == 0);
1894 if (VT == MVT::i32) {
1895 return (Offset >= Hexagon_MEMW_AUTOINC_MIN &&
1896 Offset <= Hexagon_MEMW_AUTOINC_MAX &&
1897 (Offset & 0x3) == 0);
1899 if (VT == MVT::i16) {
1900 return (Offset >= Hexagon_MEMH_AUTOINC_MIN &&
1901 Offset <= Hexagon_MEMH_AUTOINC_MAX &&
1902 (Offset & 0x1) == 0);
1904 if (VT == MVT::i8) {
1905 return (Offset >= Hexagon_MEMB_AUTOINC_MIN &&
1906 Offset <= Hexagon_MEMB_AUTOINC_MAX);
// Any other value type is a caller error.
1908 llvm_unreachable("Not an auto-inc opc!");
// Returns true if MI is a V4 "memop": a read-modify-write memory
// instruction (add/sub/and/or/setbit/clrbit directly on a memory
// location), for byte, halfword, and word sizes.
1912 bool HexagonInstrInfo::
1913 isMemOp(const MachineInstr *MI) const {
1914 switch (MI->getOpcode())
1916 default: return false;
1917 case Hexagon::MemOPw_ADDi_V4 :
1918 case Hexagon::MemOPw_SUBi_V4 :
1919 case Hexagon::MemOPw_ADDr_V4 :
1920 case Hexagon::MemOPw_SUBr_V4 :
1921 case Hexagon::MemOPw_ANDr_V4 :
1922 case Hexagon::MemOPw_ORr_V4 :
1923 case Hexagon::MemOPh_ADDi_V4 :
1924 case Hexagon::MemOPh_SUBi_V4 :
1925 case Hexagon::MemOPh_ADDr_V4 :
1926 case Hexagon::MemOPh_SUBr_V4 :
1927 case Hexagon::MemOPh_ANDr_V4 :
1928 case Hexagon::MemOPh_ORr_V4 :
1929 case Hexagon::MemOPb_ADDi_V4 :
1930 case Hexagon::MemOPb_SUBi_V4 :
1931 case Hexagon::MemOPb_ADDr_V4 :
1932 case Hexagon::MemOPb_SUBr_V4 :
1933 case Hexagon::MemOPb_ANDr_V4 :
1934 case Hexagon::MemOPb_ORr_V4 :
1935 case Hexagon::MemOPb_SETBITi_V4:
1936 case Hexagon::MemOPh_SETBITi_V4:
1937 case Hexagon::MemOPw_SETBITi_V4:
1938 case Hexagon::MemOPb_CLRBITi_V4:
1939 case Hexagon::MemOPh_CLRBITi_V4:
1940 case Hexagon::MemOPw_CLRBITi_V4:
// Returns true if MI is one of the pseudo instructions used to spill or
// reload a predicate register through memory.
1947 bool HexagonInstrInfo::
1948 isSpillPredRegOp(const MachineInstr *MI) const {
1949 switch (MI->getOpcode()) {
1950 default: return false;
1951 case Hexagon::STriw_pred :
1952 case Hexagon::LDriw_pred :
// Returns true if MI is a compare whose result could feed a new-value
// jump (register/immediate eq, signed gt, unsigned gt compares).
1957 bool HexagonInstrInfo::isNewValueJumpCandidate(const MachineInstr *MI) const {
1958 switch (MI->getOpcode()) {
1959 default: return false;
1960 case Hexagon::CMPEQrr:
1961 case Hexagon::CMPEQri:
1962 case Hexagon::CMPGTrr:
1963 case Hexagon::CMPGTri:
1964 case Hexagon::CMPGTUrr:
1965 case Hexagon::CMPGTUri:
// Returns true if MI is a predicated register/immediate transfer (TFR /
// TFRI), including the .new-predicate (cdn) variants of either sense.
1970 bool HexagonInstrInfo::
1971 isConditionalTransfer (const MachineInstr *MI) const {
1972 switch (MI->getOpcode()) {
1973 default: return false;
1974 case Hexagon::TFR_cPt:
1975 case Hexagon::TFR_cNotPt:
1976 case Hexagon::TFRI_cPt:
1977 case Hexagon::TFRI_cNotPt:
1978 case Hexagon::TFR_cdnPt:
1979 case Hexagon::TFR_cdnNotPt:
1980 case Hexagon::TFRI_cdnPt:
1981 case Hexagon::TFRI_cdnNotPt:
// Returns true if MI is a predicated ALU32 instruction. The V4-only
// shift/extend forms additionally require a V4 (or later) subtarget.
1986 bool HexagonInstrInfo::isConditionalALU32 (const MachineInstr* MI) const {
1987 const HexagonRegisterInfo& QRI = getRegisterInfo();
1988 switch (MI->getOpcode())
1990 default: return false;
1991 case Hexagon::ADD_ri_cPt:
1992 case Hexagon::ADD_ri_cNotPt:
1993 case Hexagon::ADD_rr_cPt:
1994 case Hexagon::ADD_rr_cNotPt:
1995 case Hexagon::XOR_rr_cPt:
1996 case Hexagon::XOR_rr_cNotPt:
1997 case Hexagon::AND_rr_cPt:
1998 case Hexagon::AND_rr_cNotPt:
1999 case Hexagon::OR_rr_cPt:
2000 case Hexagon::OR_rr_cNotPt:
2001 case Hexagon::SUB_rr_cPt:
2002 case Hexagon::SUB_rr_cNotPt:
2003 case Hexagon::COMBINE_rr_cPt:
2004 case Hexagon::COMBINE_rr_cNotPt:
// V4-only predicated shift/extend forms.
2006 case Hexagon::ASLH_cPt_V4:
2007 case Hexagon::ASLH_cNotPt_V4:
2008 case Hexagon::ASRH_cPt_V4:
2009 case Hexagon::ASRH_cNotPt_V4:
2010 case Hexagon::SXTB_cPt_V4:
2011 case Hexagon::SXTB_cNotPt_V4:
2012 case Hexagon::SXTH_cPt_V4:
2013 case Hexagon::SXTH_cNotPt_V4:
2014 case Hexagon::ZXTB_cPt_V4:
2015 case Hexagon::ZXTB_cNotPt_V4:
2016 case Hexagon::ZXTH_cPt_V4:
2017 case Hexagon::ZXTH_cNotPt_V4:
2018 return QRI.Subtarget.hasV4TOps();
// Returns true if MI is a predicated load. Covers base+immediate and
// indexed loads of every size/signedness, post-increment loads (these
// are gated on a V4 subtarget here), and the V4 indexed+scaled forms.
2022 bool HexagonInstrInfo::
2023 isConditionalLoad (const MachineInstr* MI) const {
2024 const HexagonRegisterInfo& QRI = getRegisterInfo();
2025 switch (MI->getOpcode())
2027 default: return false;
2028 case Hexagon::LDrid_cPt :
2029 case Hexagon::LDrid_cNotPt :
2030 case Hexagon::LDrid_indexed_cPt :
2031 case Hexagon::LDrid_indexed_cNotPt :
2032 case Hexagon::LDriw_cPt :
2033 case Hexagon::LDriw_cNotPt :
2034 case Hexagon::LDriw_indexed_cPt :
2035 case Hexagon::LDriw_indexed_cNotPt :
2036 case Hexagon::LDrih_cPt :
2037 case Hexagon::LDrih_cNotPt :
2038 case Hexagon::LDrih_indexed_cPt :
2039 case Hexagon::LDrih_indexed_cNotPt :
2040 case Hexagon::LDrib_cPt :
2041 case Hexagon::LDrib_cNotPt :
2042 case Hexagon::LDrib_indexed_cPt :
2043 case Hexagon::LDrib_indexed_cNotPt :
2044 case Hexagon::LDriuh_cPt :
2045 case Hexagon::LDriuh_cNotPt :
2046 case Hexagon::LDriuh_indexed_cPt :
2047 case Hexagon::LDriuh_indexed_cNotPt :
2048 case Hexagon::LDriub_cPt :
2049 case Hexagon::LDriub_cNotPt :
2050 case Hexagon::LDriub_indexed_cPt :
2051 case Hexagon::LDriub_indexed_cNotPt :
// Post-increment predicated loads require V4.
2053 case Hexagon::POST_LDrid_cPt :
2054 case Hexagon::POST_LDrid_cNotPt :
2055 case Hexagon::POST_LDriw_cPt :
2056 case Hexagon::POST_LDriw_cNotPt :
2057 case Hexagon::POST_LDrih_cPt :
2058 case Hexagon::POST_LDrih_cNotPt :
2059 case Hexagon::POST_LDrib_cPt :
2060 case Hexagon::POST_LDrib_cNotPt :
2061 case Hexagon::POST_LDriuh_cPt :
2062 case Hexagon::POST_LDriuh_cNotPt :
2063 case Hexagon::POST_LDriub_cPt :
2064 case Hexagon::POST_LDriub_cNotPt :
2065 return QRI.Subtarget.hasV4TOps();
// V4 indexed+scaled (base + reg << shamt) predicated loads.
2066 case Hexagon::LDrid_indexed_shl_cPt_V4 :
2067 case Hexagon::LDrid_indexed_shl_cNotPt_V4 :
2068 case Hexagon::LDrib_indexed_shl_cPt_V4 :
2069 case Hexagon::LDrib_indexed_shl_cNotPt_V4 :
2070 case Hexagon::LDriub_indexed_shl_cPt_V4 :
2071 case Hexagon::LDriub_indexed_shl_cNotPt_V4 :
2072 case Hexagon::LDrih_indexed_shl_cPt_V4 :
2073 case Hexagon::LDrih_indexed_shl_cNotPt_V4 :
2074 case Hexagon::LDriuh_indexed_shl_cPt_V4 :
2075 case Hexagon::LDriuh_indexed_shl_cNotPt_V4 :
2076 case Hexagon::LDriw_indexed_shl_cPt_V4 :
2077 case Hexagon::LDriw_indexed_shl_cNotPt_V4 :
2078 return QRI.Subtarget.hasV4TOps();
2082 // Returns true if an instruction is a conditional store.
2084 // Note: It doesn't include conditional new-value stores as they can't be
2085 // converted to .new predicate.
2087 // p.new NV store [ if(p0.new)memw(R0+#0)=R2.new ]
2089 // / \ (not OK. it will cause new-value store to be
2090 // / X conditional on p0.new while R2 producer is
2093 // p.new store p.old NV store
2094 // [if(p0.new)memw(R0+#0)=R2] [if(p0)memw(R0+#0)=R2.new]
2100 // [if (p0)memw(R0+#0)=R2]
2102 // The above diagram shows the steps involoved in the conversion of a predicated
2103 // store instruction to its .new predicated new-value form.
2105 // The following set of instructions further explains the scenario where
2106 // conditional new-value store becomes invalid when promoted to .new predicate
2109 // { 1) if (p0) r0 = add(r1, r2)
2110 // 2) p0 = cmp.eq(r3, #0) }
2112 // 3) if (p0) memb(r1+#0) = r0 --> this instruction can't be grouped with
2113 // the first two instructions because in instr 1, r0 is conditional on old value
2114 // of p0 but its use in instr 3 is conditional on p0 modified by instr 2 which
2115 // is not valid for new-value stores.
2116 bool HexagonInstrInfo::
2117 isConditionalStore (const MachineInstr* MI) const {
2118 const HexagonRegisterInfo& QRI = getRegisterInfo();
2119 switch (MI->getOpcode())
2121 default: return false;
2122 case Hexagon::STrib_imm_cPt_V4 :
2123 case Hexagon::STrib_imm_cNotPt_V4 :
2124 case Hexagon::STrib_indexed_shl_cPt_V4 :
2125 case Hexagon::STrib_indexed_shl_cNotPt_V4 :
2126 case Hexagon::STrib_cPt :
2127 case Hexagon::STrib_cNotPt :
2128 case Hexagon::POST_STbri_cPt :
2129 case Hexagon::POST_STbri_cNotPt :
2130 case Hexagon::STrid_indexed_cPt :
2131 case Hexagon::STrid_indexed_cNotPt :
2132 case Hexagon::STrid_indexed_shl_cPt_V4 :
2133 case Hexagon::POST_STdri_cPt :
2134 case Hexagon::POST_STdri_cNotPt :
2135 case Hexagon::STrih_cPt :
2136 case Hexagon::STrih_cNotPt :
2137 case Hexagon::STrih_indexed_cPt :
2138 case Hexagon::STrih_indexed_cNotPt :
2139 case Hexagon::STrih_imm_cPt_V4 :
2140 case Hexagon::STrih_imm_cNotPt_V4 :
2141 case Hexagon::STrih_indexed_shl_cPt_V4 :
2142 case Hexagon::STrih_indexed_shl_cNotPt_V4 :
2143 case Hexagon::POST_SThri_cPt :
2144 case Hexagon::POST_SThri_cNotPt :
2145 case Hexagon::STriw_cPt :
2146 case Hexagon::STriw_cNotPt :
2147 case Hexagon::STriw_indexed_cPt :
2148 case Hexagon::STriw_indexed_cNotPt :
2149 case Hexagon::STriw_imm_cPt_V4 :
2150 case Hexagon::STriw_imm_cNotPt_V4 :
2151 case Hexagon::STriw_indexed_shl_cPt_V4 :
2152 case Hexagon::STriw_indexed_shl_cNotPt_V4 :
2153 case Hexagon::POST_STwri_cPt :
2154 case Hexagon::POST_STwri_cNotPt :
2155 return QRI.Subtarget.hasV4TOps();
2157 // V4 global address store before promoting to dot new.
2158 case Hexagon::STd_GP_cPt_V4 :
2159 case Hexagon::STd_GP_cNotPt_V4 :
2160 case Hexagon::STb_GP_cPt_V4 :
2161 case Hexagon::STb_GP_cNotPt_V4 :
2162 case Hexagon::STh_GP_cPt_V4 :
2163 case Hexagon::STh_GP_cNotPt_V4 :
2164 case Hexagon::STw_GP_cPt_V4 :
2165 case Hexagon::STw_GP_cNotPt_V4 :
2166 return QRI.Subtarget.hasV4TOps();
2168 // Predicated new value stores (i.e. if (p0) memw(..)=r0.new) are excluded
2169 // from the "Conditional Store" list. Because a predicated new value store
2170 // would NOT be promoted to a double dot new store. See diagram below:
2171 // This function returns yes for those stores that are predicated but not
2172 // yet promoted to predicate dot new instructions.
2174 // +---------------------+
2175 // /-----| if (p0) memw(..)=r0 |---------\~
2176 // || +---------------------+ ||
2177 // promote || /\ /\ || promote
2179 // \||/ demote || \||/
2181 // +-------------------------+ || +-------------------------+
2182 // | if (p0.new) memw(..)=r0 | || | if (p0) memw(..)=r0.new |
2183 // +-------------------------+ || +-------------------------+
2186 // promote || \/ NOT possible
2190 // +-----------------------------+
2191 // | if (p0.new) memw(..)=r0.new |
2192 // +-----------------------------+
2193 // Double Dot New Store
2199 bool HexagonInstrInfo::isNewValueJump(const MachineInstr *MI) const {
2200 if (isNewValue(MI) && isBranch(MI))
2205 bool HexagonInstrInfo::isNewValue(const MachineInstr* MI) const {
2206 const uint64_t F = MI->getDesc().TSFlags;
2207 return ((F >> HexagonII::NewValuePos) & HexagonII::NewValueMask);
2210 // Returns true, if any one of the operands is a dot new
2211 // insn, whether it is predicated dot new or register dot new.
2212 bool HexagonInstrInfo::isDotNewInst (const MachineInstr* MI) const {
2213 return (isNewValueInst(MI) ||
2214 (isPredicated(MI) && isPredicatedNew(MI)));
2217 unsigned HexagonInstrInfo::getAddrMode(const MachineInstr* MI) const {
2218 const uint64_t F = MI->getDesc().TSFlags;
2220 return((F >> HexagonII::AddrModePos) & HexagonII::AddrModeMask);
2223 /// immediateExtend - Changes the instruction in place to one using an immediate
2225 void HexagonInstrInfo::immediateExtend(MachineInstr *MI) const {
2226 assert((isExtendable(MI)||isConstExtended(MI)) &&
2227 "Instruction must be extendable");
2228 // Find which operand is extendable.
2229 short ExtOpNum = getCExtOpNum(MI);
2230 MachineOperand &MO = MI->getOperand(ExtOpNum);
2231 // This needs to be something we understand.
2232 assert((MO.isMBB() || MO.isImm()) &&
2233 "Branch with unknown extendable field type");
2234 // Mark given operand as extended.
2235 MO.addTargetFlag(HexagonII::HMOTF_ConstExtended);
2238 DFAPacketizer *HexagonInstrInfo::
2239 CreateTargetScheduleState(const TargetMachine *TM,
2240 const ScheduleDAG *DAG) const {
2241 const InstrItineraryData *II = TM->getInstrItineraryData();
2242 return TM->getSubtarget<HexagonGenSubtargetInfo>().createDFAPacketizer(II);
2245 bool HexagonInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
2246 const MachineBasicBlock *MBB,
2247 const MachineFunction &MF) const {
2248 // Debug info is never a scheduling boundary. It's necessary to be explicit
2249 // due to the special treatment of IT instructions below, otherwise a
2250 // dbg_value followed by an IT will result in the IT instruction being
2251 // considered a scheduling hazard, which is wrong. It should be the actual
2252 // instruction preceding the dbg_value instruction(s), just like it is
2253 // when debug info is not present.
2254 if (MI->isDebugValue())
2257 // Terminators and labels can't be scheduled around.
2258 if (MI->getDesc().isTerminator() || MI->isLabel() || MI->isInlineAsm())
2264 bool HexagonInstrInfo::isConstExtended(MachineInstr *MI) const {
2266 // Constant extenders are allowed only for V4 and above.
2267 if (!Subtarget.hasV4TOps())
2270 const uint64_t F = MI->getDesc().TSFlags;
2271 unsigned isExtended = (F >> HexagonII::ExtendedPos) & HexagonII::ExtendedMask;
2272 if (isExtended) // Instruction must be extended.
2275 unsigned isExtendable = (F >> HexagonII::ExtendablePos)
2276 & HexagonII::ExtendableMask;
2280 short ExtOpNum = getCExtOpNum(MI);
2281 const MachineOperand &MO = MI->getOperand(ExtOpNum);
2282 // Use MO operand flags to determine if MO
2283 // has the HMOTF_ConstExtended flag set.
2284 if (MO.getTargetFlags() && HexagonII::HMOTF_ConstExtended)
2286 // If this is a Machine BB address we are talking about, and it is
2287 // not marked as extended, say so.
2291 // We could be using an instruction with an extendable immediate and shoehorn
2292 // a global address into it. If it is a global address it will be constant
2293 // extended. We do this for COMBINE.
2294 // We currently only handle isGlobal() because it is the only kind of
2295 // object we are going to end up with here for now.
2296 // In the future we probably should add isSymbol(), etc.
2297 if (MO.isGlobal() || MO.isSymbol())
2300 // If the extendable operand is not 'Immediate' type, the instruction should
2301 // have 'isExtended' flag set.
2302 assert(MO.isImm() && "Extendable operand must be Immediate type");
2304 int MinValue = getMinValue(MI);
2305 int MaxValue = getMaxValue(MI);
2306 int ImmValue = MO.getImm();
2308 return (ImmValue < MinValue || ImmValue > MaxValue);
2311 // Returns the opcode to use when converting MI, which is a conditional jump,
2312 // into a conditional instruction which uses the .new value of the predicate.
2313 // We also use branch probabilities to add a hint to the jump.
2315 HexagonInstrInfo::getDotNewPredJumpOp(MachineInstr *MI,
2317 MachineBranchProbabilityInfo *MBPI) const {
2319 // We assume that block can have at most two successors.
2321 MachineBasicBlock *Src = MI->getParent();
2322 MachineOperand *BrTarget = &MI->getOperand(1);
2323 MachineBasicBlock *Dst = BrTarget->getMBB();
2325 const BranchProbability Prediction = MBPI->getEdgeProbability(Src, Dst);
2326 if (Prediction >= BranchProbability(1,2))
2329 switch (MI->getOpcode()) {
2330 case Hexagon::JMP_t:
2331 return taken ? Hexagon::JMP_tnew_t : Hexagon::JMP_tnew_nt;
2332 case Hexagon::JMP_f:
2333 return taken ? Hexagon::JMP_fnew_t : Hexagon::JMP_fnew_nt;
2336 llvm_unreachable("Unexpected jump instruction.");
2339 // Returns true if a particular operand is extendable for an instruction.
2340 bool HexagonInstrInfo::isOperandExtended(const MachineInstr *MI,
2341 unsigned short OperandNum) const {
2342 // Constant extenders are allowed only for V4 and above.
2343 if (!Subtarget.hasV4TOps())
2346 const uint64_t F = MI->getDesc().TSFlags;
2348 return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask)
2352 // Returns Operand Index for the constant extended instruction.
2353 unsigned short HexagonInstrInfo::getCExtOpNum(const MachineInstr *MI) const {
2354 const uint64_t F = MI->getDesc().TSFlags;
2355 return ((F >> HexagonII::ExtendableOpPos) & HexagonII::ExtendableOpMask);
2358 // Returns the min value that doesn't need to be extended.
2359 int HexagonInstrInfo::getMinValue(const MachineInstr *MI) const {
2360 const uint64_t F = MI->getDesc().TSFlags;
2361 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
2362 & HexagonII::ExtentSignedMask;
2363 unsigned bits = (F >> HexagonII::ExtentBitsPos)
2364 & HexagonII::ExtentBitsMask;
2366 if (isSigned) // if value is signed
2367 return -1 << (bits - 1);
2372 // Returns the max value that doesn't need to be extended.
2373 int HexagonInstrInfo::getMaxValue(const MachineInstr *MI) const {
2374 const uint64_t F = MI->getDesc().TSFlags;
2375 unsigned isSigned = (F >> HexagonII::ExtentSignedPos)
2376 & HexagonII::ExtentSignedMask;
2377 unsigned bits = (F >> HexagonII::ExtentBitsPos)
2378 & HexagonII::ExtentBitsMask;
2380 if (isSigned) // if value is signed
2381 return ~(-1 << (bits - 1));
2383 return ~(-1 << bits);
2386 // Returns true if an instruction can be converted into a non-extended
2387 // equivalent instruction.
2388 bool HexagonInstrInfo::NonExtEquivalentExists (const MachineInstr *MI) const {
2391 // Check if the instruction has a register form that uses register in place
2392 // of the extended operand, if so return that as the non-extended form.
2393 if (Hexagon::getRegForm(MI->getOpcode()) >= 0)
2396 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
2397 // Check addressing mode and retreive non-ext equivalent instruction.
2399 switch (getAddrMode(MI)) {
2400 case HexagonII::Absolute :
2401 // Load/store with absolute addressing mode can be converted into
2402 // base+offset mode.
2403 NonExtOpcode = Hexagon::getBasedWithImmOffset(MI->getOpcode());
2405 case HexagonII::BaseImmOffset :
2406 // Load/store with base+offset addressing mode can be converted into
2407 // base+register offset addressing mode. However left shift operand should
2409 NonExtOpcode = Hexagon::getBaseWithRegOffset(MI->getOpcode());
2414 if (NonExtOpcode < 0)
2421 // Returns opcode of the non-extended equivalent instruction.
2422 short HexagonInstrInfo::getNonExtOpcode (const MachineInstr *MI) const {
2424 // Check if the instruction has a register form that uses register in place
2425 // of the extended operand, if so return that as the non-extended form.
2426 short NonExtOpcode = Hexagon::getRegForm(MI->getOpcode());
2427 if (NonExtOpcode >= 0)
2428 return NonExtOpcode;
2430 if (MI->getDesc().mayLoad() || MI->getDesc().mayStore()) {
2431 // Check addressing mode and retreive non-ext equivalent instruction.
2432 switch (getAddrMode(MI)) {
2433 case HexagonII::Absolute :
2434 return Hexagon::getBasedWithImmOffset(MI->getOpcode());
2435 case HexagonII::BaseImmOffset :
2436 return Hexagon::getBaseWithRegOffset(MI->getOpcode());
2444 bool HexagonInstrInfo::PredOpcodeHasJMP_c(Opcode_t Opcode) const {
2445 return (Opcode == Hexagon::JMP_t) ||
2446 (Opcode == Hexagon::JMP_f) ||
2447 (Opcode == Hexagon::JMP_tnew_t) ||
2448 (Opcode == Hexagon::JMP_fnew_t) ||
2449 (Opcode == Hexagon::JMP_tnew_nt) ||
2450 (Opcode == Hexagon::JMP_fnew_nt);
2453 bool HexagonInstrInfo::PredOpcodeHasNot(Opcode_t Opcode) const {
2454 return (Opcode == Hexagon::JMP_f) ||
2455 (Opcode == Hexagon::JMP_fnew_t) ||
2456 (Opcode == Hexagon::JMP_fnew_nt);