1 //===-- MipsSEInstrInfo.cpp - Mips32/64 Instruction Information -----------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file contains the Mips32/64 implementation of the TargetInstrInfo class.
12 //===----------------------------------------------------------------------===//
14 #include "MipsSEInstrInfo.h"
15 #include "InstPrinter/MipsInstPrinter.h"
16 #include "MipsAnalyzeImmediate.h"
17 #include "MipsMachineFunction.h"
18 #include "MipsTargetMachine.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/CodeGen/MachineInstrBuilder.h"
21 #include "llvm/CodeGen/MachineRegisterInfo.h"
22 #include "llvm/Support/ErrorHandling.h"
23 #include "llvm/Support/MathExtras.h"
24 #include "llvm/Support/TargetRegistry.h"
// Select the unconditional-branch opcode for this subtarget: microMIPS vs.
// classic encodings, and PIC (B, PC-relative) vs. non-PIC (J, absolute).
// NOTE(review): this extract is missing lines (embedded line numbers skip);
// the function's closing brace is not visible here.
28 static unsigned getUnconditionalBranch(const MipsSubtarget &STI) {
29 if (STI.inMicroMipsMode())
30 return STI.isPositionIndependent() ? Mips::B_MM : Mips::J_MM;
31 return STI.isPositionIndependent() ? Mips::B : Mips::J;
// Constructor: forwards the subtarget and the subtarget-appropriate
// unconditional branch opcode to the MipsInstrInfo base class, and
// default-constructs the register-info member RI.
34 MipsSEInstrInfo::MipsSEInstrInfo(const MipsSubtarget &STI)
35 : MipsInstrInfo(STI, getUnconditionalBranch(STI)), RI() {}
37 const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const {
41 /// isLoadFromStackSlot - If the specified machine instruction is a direct
42 /// load from a stack slot, return the virtual or physical register number of
43 /// the destination along with the FrameIndex of the loaded stack slot. If
44 /// not, return 0. This predicate must return 0 if the instruction has
45 /// any side effects other than loading from the stack slot.
46 unsigned MipsSEInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
47 int &FrameIndex) const {
48 unsigned Opc = MI.getOpcode();
// Recognized plain load opcodes: 32/64-bit integer and FP loads.
50 if ((Opc == Mips::LW) || (Opc == Mips::LD) ||
51 (Opc == Mips::LWC1) || (Opc == Mips::LDC1) || (Opc == Mips::LDC164)) {
// A direct stack-slot load has operand 1 = frame index and operand 2 = 0.
52 if ((MI.getOperand(1).isFI()) && // is a stack slot
53 (MI.getOperand(2).isImm()) && // the imm is zero
54 (isZeroImm(MI.getOperand(2)))) {
55 FrameIndex = MI.getOperand(1).getIndex();
56 return MI.getOperand(0).getReg();
// NOTE(review): closing braces and the fall-through `return 0;` are
// missing from this extract (line numbers jump from 56 to 63).
63 /// isStoreToStackSlot - If the specified machine instruction is a direct
64 /// store to a stack slot, return the virtual or physical register number of
65 /// the source reg along with the FrameIndex of the loaded stack slot. If
66 /// not, return 0. This predicate must return 0 if the instruction has
67 /// any side effects other than storing to the stack slot.
68 unsigned MipsSEInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
69 int &FrameIndex) const {
70 unsigned Opc = MI.getOpcode();
// Recognized plain store opcodes: 32/64-bit integer and FP stores
// (mirrors the load list in isLoadFromStackSlot).
72 if ((Opc == Mips::SW) || (Opc == Mips::SD) ||
73 (Opc == Mips::SWC1) || (Opc == Mips::SDC1) || (Opc == Mips::SDC164)) {
// A direct stack-slot store has operand 1 = frame index and operand 2 = 0.
74 if ((MI.getOperand(1).isFI()) && // is a stack slot
75 (MI.getOperand(2).isImm()) && // the imm is zero
76 (isZeroImm(MI.getOperand(2)))) {
77 FrameIndex = MI.getOperand(1).getIndex();
78 return MI.getOperand(0).getReg();
// NOTE(review): closing braces and the fall-through `return 0;` are
// missing from this extract (line numbers jump from 78 to 84).
// Emit a register-to-register copy between physical registers, choosing the
// opcode by the register classes of DestReg/SrcReg (GPR32/GPR64, FGR,
// HI/LO, DSP accumulators and control, MSA). Some copies (RDDSP/WRDSP,
// CTCMSA) are emitted directly and return early instead of setting Opc.
// NOTE(review): this extract is missing many lines — several else-branches
// are shown without the Opc assignment that should follow them, and
// early-return statements after the BuildMI calls are not visible.
84 void MipsSEInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
85 MachineBasicBlock::iterator I,
86 const DebugLoc &DL, unsigned DestReg,
87 unsigned SrcReg, bool KillSrc) const {
// Opc stays 0 unless a case below matches; ZeroReg is a third operand used
// for OR-based GPR copies (e.g. `or $d, $s, $zero`).
88 unsigned Opc = 0, ZeroReg = 0;
89 bool isMicroMips = Subtarget.inMicroMipsMode();
91 if (Mips::GPR32RegClass.contains(DestReg)) { // Copy to CPU Reg.
92 if (Mips::GPR32RegClass.contains(SrcReg)) {
94 Opc = Mips::MOVE16_MM;
96 Opc = Mips::OR, ZeroReg = Mips::ZERO;
97 } else if (Mips::CCRRegClass.contains(SrcReg))
// NOTE(review): Opc assignment for the CCR/FGR32 source cases is missing
// from this extract.
99 else if (Mips::FGR32RegClass.contains(SrcReg))
101 else if (Mips::HI32RegClass.contains(SrcReg)) {
102 Opc = isMicroMips ? Mips::MFHI16_MM : Mips::MFHI;
104 } else if (Mips::LO32RegClass.contains(SrcReg)) {
105 Opc = isMicroMips ? Mips::MFLO16_MM : Mips::MFLO;
107 } else if (Mips::HI32DSPRegClass.contains(SrcReg))
108 Opc = Mips::MFHI_DSP;
109 else if (Mips::LO32DSPRegClass.contains(SrcReg))
110 Opc = Mips::MFLO_DSP;
111 else if (Mips::DSPCCRegClass.contains(SrcReg)) {
// DSP condition-code reads go through RDDSP with mask 1<<4 (ccond field);
// the source is modeled as an implicit use.
112 BuildMI(MBB, I, DL, get(Mips::RDDSP), DestReg).addImm(1 << 4)
113 .addReg(SrcReg, RegState::Implicit | getKillRegState(KillSrc));
116 else if (Mips::MSACtrlRegClass.contains(SrcReg))
119 else if (Mips::GPR32RegClass.contains(SrcReg)) { // Copy from CPU Reg.
120 if (Mips::CCRRegClass.contains(DestReg))
122 else if (Mips::FGR32RegClass.contains(DestReg))
// MTHI/MTLO have no explicit destination operand, so DestReg is cleared.
124 else if (Mips::HI32RegClass.contains(DestReg))
125 Opc = Mips::MTHI, DestReg = 0;
126 else if (Mips::LO32RegClass.contains(DestReg))
127 Opc = Mips::MTLO, DestReg = 0;
128 else if (Mips::HI32DSPRegClass.contains(DestReg))
129 Opc = Mips::MTHI_DSP;
130 else if (Mips::LO32DSPRegClass.contains(DestReg))
131 Opc = Mips::MTLO_DSP;
132 else if (Mips::DSPCCRegClass.contains(DestReg)) {
// DSP condition-code writes go through WRDSP with the same 1<<4 mask;
// the destination is modeled as an implicit def.
133 BuildMI(MBB, I, DL, get(Mips::WRDSP))
134 .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1 << 4)
135 .addReg(DestReg, RegState::ImplicitDefine);
137 } else if (Mips::MSACtrlRegClass.contains(DestReg)) {
138 BuildMI(MBB, I, DL, get(Mips::CTCMSA))
140 .addReg(SrcReg, getKillRegState(KillSrc));
144 else if (Mips::FGR32RegClass.contains(DestReg, SrcReg))
146 else if (Mips::AFGR64RegClass.contains(DestReg, SrcReg))
147 Opc = Mips::FMOV_D32;
148 else if (Mips::FGR64RegClass.contains(DestReg, SrcReg))
149 Opc = Mips::FMOV_D64;
150 else if (Mips::GPR64RegClass.contains(DestReg)) { // Copy to CPU64 Reg.
151 if (Mips::GPR64RegClass.contains(SrcReg))
152 Opc = Mips::OR64, ZeroReg = Mips::ZERO_64;
// MFHI64/MFLO64 have no explicit source operand, so SrcReg is cleared.
153 else if (Mips::HI64RegClass.contains(SrcReg))
154 Opc = Mips::MFHI64, SrcReg = 0;
155 else if (Mips::LO64RegClass.contains(SrcReg))
156 Opc = Mips::MFLO64, SrcReg = 0;
157 else if (Mips::FGR64RegClass.contains(SrcReg))
160 else if (Mips::GPR64RegClass.contains(SrcReg)) { // Copy from CPU64 Reg.
161 if (Mips::HI64RegClass.contains(DestReg))
162 Opc = Mips::MTHI64, DestReg = 0;
163 else if (Mips::LO64RegClass.contains(DestReg))
164 Opc = Mips::MTLO64, DestReg = 0;
165 else if (Mips::FGR64RegClass.contains(DestReg))
168 else if (Mips::MSA128BRegClass.contains(DestReg)) { // Copy to MSA reg
169 if (Mips::MSA128BRegClass.contains(SrcReg))
// Any register pair not matched above is a bug in the caller.
173 assert(Opc && "Cannot copy registers");
// Build the copy: explicit def only when DestReg survived the clearing
// above, then the source (with kill flag) and optional zero operand.
175 MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(Opc));
178 MIB.addReg(DestReg, RegState::Define);
181 MIB.addReg(SrcReg, getKillRegState(KillSrc));
// Return true when MI is an OR used as a register copy, i.e. an OR/OR64
// whose third operand is the zero register ($zero / $zero_64).
// NOTE(review): the case labels, returns, and closing braces of this switch
// are missing from this extract; only the zero-register checks survive.
187 static bool isORCopyInst(const MachineInstr &MI) {
188 switch (MI.getOpcode()) {
193 if (MI.getOperand(2).getReg() == Mips::ZERO)
197 if (MI.getOperand(2).getReg() == Mips::ZERO_64)
204 /// If @MI is WRDSP/RRDSP instruction return true with @isWrite set to true
205 /// if it is WRDSP instruction.
// NOTE(review): the switch body (case labels and returns) is missing from
// this extract; only the switch header is visible.
206 static bool isReadOrWriteToDSPReg(const MachineInstr &MI, bool &isWrite) {
207 switch (MI.getOpcode()) {
222 /// We check for the common case of 'or', as it's MIPS' preferred instruction
223 /// for GPRs but we have to check the operands to ensure that is the case.
224 /// Other move instructions for MIPS are directly identifiable.
225 bool MipsSEInstrInfo::isCopyInstr(const MachineInstr &MI,
226 const MachineOperand *&Src,
227 const MachineOperand *&Dest) const {
228 bool isDSPControlWrite = false;
229 // Condition is made to match the creation of WRDSP/RDDSP copy instruction
230 // from copyPhysReg function.
231 if (isReadOrWriteToDSPReg(MI, isDSPControlWrite)) {
// Only the 1<<4 (ccond-field) mask form emitted by copyPhysReg counts as
// a copy; any other mask is a genuine DSP-control access.
232 if (!MI.getOperand(1).isImm() || MI.getOperand(1).getImm() != (1<<4))
// For WRDSP (write) the GPR source is operand 0 and the implicit DSP
// destination is operand 2; for RDDSP (read) the roles are swapped.
234 else if (isDSPControlWrite) {
235 Src = &MI.getOperand(0);
236 Dest = &MI.getOperand(2);
238 Dest = &MI.getOperand(0);
239 Src = &MI.getOperand(2);
242 } else if (MI.isMoveReg() || isORCopyInst(MI)) {
243 Dest = &MI.getOperand(0);
244 Src = &MI.getOperand(1);
// NOTE(review): return statements and closing braces are missing from
// this extract (line numbers jump from 244 to 250).
// Spill SrcReg to stack slot FI (+Offset), selecting the store opcode from
// the register class: GPRs, accumulators, DSP condition/GPRs, FPU regs,
// and 128-bit MSA vectors (chosen by legal element type).
// NOTE(review): many `Opc = ...;` lines and the final BuildMI's surrounding
// lines are missing from this extract (embedded line numbers skip).
250 void MipsSEInstrInfo::
251 storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
252 unsigned SrcReg, bool isKill, int FI,
253 const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
254 int64_t Offset) const {
256 MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOStore);
260 if (Mips::GPR32RegClass.hasSubClassEq(RC))
262 else if (Mips::GPR64RegClass.hasSubClassEq(RC))
264 else if (Mips::ACC64RegClass.hasSubClassEq(RC))
265 Opc = Mips::STORE_ACC64;
266 else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
267 Opc = Mips::STORE_ACC64DSP;
268 else if (Mips::ACC128RegClass.hasSubClassEq(RC))
269 Opc = Mips::STORE_ACC128;
270 else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
271 Opc = Mips::STORE_CCOND_DSP;
272 else if (Mips::FGR32RegClass.hasSubClassEq(RC))
274 else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
276 else if (Mips::FGR64RegClass.hasSubClassEq(RC))
// MSA spills: pick the 128-bit store variant by whichever vector type is
// legal for this register class.
278 else if (TRI->isTypeLegalForClass(*RC, MVT::v16i8))
280 else if (TRI->isTypeLegalForClass(*RC, MVT::v8i16) ||
281 TRI->isTypeLegalForClass(*RC, MVT::v8f16))
283 else if (TRI->isTypeLegalForClass(*RC, MVT::v4i32) ||
284 TRI->isTypeLegalForClass(*RC, MVT::v4f32))
286 else if (TRI->isTypeLegalForClass(*RC, MVT::v2i64) ||
287 TRI->isTypeLegalForClass(*RC, MVT::v2f64))
289 else if (Mips::LO32RegClass.hasSubClassEq(RC))
291 else if (Mips::LO64RegClass.hasSubClassEq(RC))
293 else if (Mips::HI32RegClass.hasSubClassEq(RC))
295 else if (Mips::HI64RegClass.hasSubClassEq(RC))
297 else if (Mips::DSPRRegClass.hasSubClassEq(RC))
300 // Hi, Lo are normally caller save but they are callee save
301 // for interrupt handling.
302 const Function &Func = MBB.getParent()->getFunction();
303 if (Func.hasFnAttribute("interrupt")) {
// In interrupt handlers HI/LO cannot be stored directly: move them into
// K0/K0_64 (kernel scratch regs) first, then spill that GPR instead.
304 if (Mips::HI32RegClass.hasSubClassEq(RC)) {
305 BuildMI(MBB, I, DL, get(Mips::MFHI), Mips::K0);
307 } else if (Mips::HI64RegClass.hasSubClassEq(RC)) {
308 BuildMI(MBB, I, DL, get(Mips::MFHI64), Mips::K0_64);
309 SrcReg = Mips::K0_64;
310 } else if (Mips::LO32RegClass.hasSubClassEq(RC)) {
311 BuildMI(MBB, I, DL, get(Mips::MFLO), Mips::K0);
313 } else if (Mips::LO64RegClass.hasSubClassEq(RC)) {
314 BuildMI(MBB, I, DL, get(Mips::MFLO64), Mips::K0_64);
315 SrcReg = Mips::K0_64;
319 assert(Opc && "Register class not handled!");
// Emit: store SrcReg, Offset(FI) with the memory operand attached.
320 BuildMI(MBB, I, DL, get(Opc)).addReg(SrcReg, getKillRegState(isKill))
321 .addFrameIndex(FI).addImm(Offset).addMemOperand(MMO);
// Reload DestReg from stack slot FI (+Offset), mirroring storeRegToStack's
// opcode selection. In interrupt handlers, HI/LO destinations must be
// reloaded indirectly through K0 and then moved with MTHI/MTLO.
// NOTE(review): many `Opc = ...;` lines and several statements are missing
// from this extract (embedded line numbers skip).
324 void MipsSEInstrInfo::
325 loadRegFromStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
326 unsigned DestReg, int FI, const TargetRegisterClass *RC,
327 const TargetRegisterInfo *TRI, int64_t Offset) const {
329 if (I != MBB.end()) DL = I->getDebugLoc();
330 MachineMemOperand *MMO = GetMemOperand(MBB, FI, MachineMemOperand::MOLoad);
333 const Function &Func = MBB.getParent()->getFunction();
// HI/LO reloads in interrupt handlers cannot target HI/LO directly; see
// the ReqIndirectLoad path at the bottom.
334 bool ReqIndirectLoad = Func.hasFnAttribute("interrupt") &&
335 (DestReg == Mips::LO0 || DestReg == Mips::LO0_64 ||
336 DestReg == Mips::HI0 || DestReg == Mips::HI0_64);
338 if (Mips::GPR32RegClass.hasSubClassEq(RC))
340 else if (Mips::GPR64RegClass.hasSubClassEq(RC))
342 else if (Mips::ACC64RegClass.hasSubClassEq(RC))
343 Opc = Mips::LOAD_ACC64;
344 else if (Mips::ACC64DSPRegClass.hasSubClassEq(RC))
345 Opc = Mips::LOAD_ACC64DSP;
346 else if (Mips::ACC128RegClass.hasSubClassEq(RC))
347 Opc = Mips::LOAD_ACC128;
348 else if (Mips::DSPCCRegClass.hasSubClassEq(RC))
349 Opc = Mips::LOAD_CCOND_DSP;
350 else if (Mips::FGR32RegClass.hasSubClassEq(RC))
352 else if (Mips::AFGR64RegClass.hasSubClassEq(RC))
354 else if (Mips::FGR64RegClass.hasSubClassEq(RC))
// MSA reloads: 128-bit load variant chosen by the class's legal type.
356 else if (TRI->isTypeLegalForClass(*RC, MVT::v16i8))
358 else if (TRI->isTypeLegalForClass(*RC, MVT::v8i16) ||
359 TRI->isTypeLegalForClass(*RC, MVT::v8f16))
361 else if (TRI->isTypeLegalForClass(*RC, MVT::v4i32) ||
362 TRI->isTypeLegalForClass(*RC, MVT::v4f32))
364 else if (TRI->isTypeLegalForClass(*RC, MVT::v2i64) ||
365 TRI->isTypeLegalForClass(*RC, MVT::v2f64))
367 else if (Mips::HI32RegClass.hasSubClassEq(RC))
369 else if (Mips::HI64RegClass.hasSubClassEq(RC))
371 else if (Mips::LO32RegClass.hasSubClassEq(RC))
373 else if (Mips::LO64RegClass.hasSubClassEq(RC))
375 else if (Mips::DSPRRegClass.hasSubClassEq(RC))
378 assert(Opc && "Register class not handled!");
380 if (!ReqIndirectLoad)
381 BuildMI(MBB, I, DL, get(Opc), DestReg)
386 // Load HI/LO through K0. Notably the DestReg is encoded into the
387 // instruction itself.
388 unsigned Reg = Mips::K0;
389 unsigned LdOp = Mips::MTLO;
390 if (DestReg == Mips::HI0)
// 64-bit pointer ABIs use the 64-bit scratch register and MTLO64/MTHI64.
393 if (Subtarget.getABI().ArePtrs64bit()) {
395 if (DestReg == Mips::HI0_64)
// First reload the spilled value into the scratch GPR, then move it into
// the HI/LO destination.
401 BuildMI(MBB, I, DL, get(Opc), Reg)
405 BuildMI(MBB, I, DL, get(LdOp)).addReg(Reg);
// Expand post-register-allocation pseudo instructions (RetRA, MFHI/MFLO,
// MTLOHI, FP<->int conversion pseudos, 64-bit FP pair build/extract,
// eh_return) into real machine instructions.
// NOTE(review): `break;` statements, some case labels, and the epilogue
// (erasing MI / return) are missing from this extract.
409 bool MipsSEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
410 MachineBasicBlock &MBB = *MI.getParent();
411 bool isMicroMips = Subtarget.inMicroMipsMode();
414 switch (MI.getDesc().getOpcode()) {
418 expandRetRA(MBB, MI);
423 case Mips::PseudoMFHI:
424 Opc = isMicroMips ? Mips::MFHI16_MM : Mips::MFHI;
425 expandPseudoMFHiLo(MBB, MI, Opc);
427 case Mips::PseudoMFLO:
428 Opc = isMicroMips ? Mips::MFLO16_MM : Mips::MFLO;
429 expandPseudoMFHiLo(MBB, MI, Opc);
431 case Mips::PseudoMFHI64:
432 expandPseudoMFHiLo(MBB, MI, Mips::MFHI64);
434 case Mips::PseudoMFLO64:
435 expandPseudoMFHiLo(MBB, MI, Mips::MFLO64);
437 case Mips::PseudoMTLOHI:
438 expandPseudoMTLoHi(MBB, MI, Mips::MTLO, Mips::MTHI, false);
440 case Mips::PseudoMTLOHI64:
441 expandPseudoMTLoHi(MBB, MI, Mips::MTLO64, Mips::MTHI64, false);
443 case Mips::PseudoMTLOHI_DSP:
// DSP variant writes an explicit accumulator def (last arg = true).
444 expandPseudoMTLoHi(MBB, MI, Mips::MTLO_DSP, Mips::MTHI_DSP, true);
446 case Mips::PseudoCVT_S_W:
447 expandCvtFPInt(MBB, MI, Mips::CVT_S_W, Mips::MTC1, false);
449 case Mips::PseudoCVT_D32_W:
450 Opc = isMicroMips ? Mips::CVT_D32_W_MM : Mips::CVT_D32_W;
451 expandCvtFPInt(MBB, MI, Opc, Mips::MTC1, false);
453 case Mips::PseudoCVT_S_L:
454 expandCvtFPInt(MBB, MI, Mips::CVT_S_L, Mips::DMTC1, true);
456 case Mips::PseudoCVT_D64_W:
457 Opc = isMicroMips ? Mips::CVT_D64_W_MM : Mips::CVT_D64_W;
458 expandCvtFPInt(MBB, MI, Opc, Mips::MTC1, true);
460 case Mips::PseudoCVT_D64_L:
461 expandCvtFPInt(MBB, MI, Mips::CVT_D64_L, Mips::DMTC1, true);
463 case Mips::BuildPairF64:
464 expandBuildPairF64(MBB, MI, isMicroMips, false);
466 case Mips::BuildPairF64_64:
467 expandBuildPairF64(MBB, MI, isMicroMips, true);
469 case Mips::ExtractElementF64:
470 expandExtractElementF64(MBB, MI, isMicroMips, false);
472 case Mips::ExtractElementF64_64:
473 expandExtractElementF64(MBB, MI, isMicroMips, true);
475 case Mips::MIPSeh_return32:
476 case Mips::MIPSeh_return64:
477 expandEhReturn(MBB, MI);
485 /// getOppositeBranchOpc - Return the inverse of the specified
486 /// opcode, e.g. turning BEQ to BNE.
// Covers classic MIPS, microMIPS (_MM), MIPSR6 compact (…C), microMIPS R6
// (_MMR6), 64-bit, Octeon BBIT, and MSA BZ/BNZ branch families.
// NOTE(review): the `switch (Opc) {` line and closing braces are missing
// from this extract; only `default:` and the case list are visible.
487 unsigned MipsSEInstrInfo::getOppositeBranchOpc(unsigned Opc) const {
489 default: llvm_unreachable("Illegal opcode!");
490 case Mips::BEQ: return Mips::BNE;
491 case Mips::BEQ_MM: return Mips::BNE_MM;
492 case Mips::BNE: return Mips::BEQ;
493 case Mips::BNE_MM: return Mips::BEQ_MM;
494 case Mips::BGTZ: return Mips::BLEZ;
495 case Mips::BGEZ: return Mips::BLTZ;
496 case Mips::BLTZ: return Mips::BGEZ;
497 case Mips::BLEZ: return Mips::BGTZ;
498 case Mips::BGTZ_MM: return Mips::BLEZ_MM;
499 case Mips::BGEZ_MM: return Mips::BLTZ_MM;
500 case Mips::BLTZ_MM: return Mips::BGEZ_MM;
501 case Mips::BLEZ_MM: return Mips::BGTZ_MM;
502 case Mips::BEQ64: return Mips::BNE64;
503 case Mips::BNE64: return Mips::BEQ64;
504 case Mips::BGTZ64: return Mips::BLEZ64;
505 case Mips::BGEZ64: return Mips::BLTZ64;
506 case Mips::BLTZ64: return Mips::BGEZ64;
507 case Mips::BLEZ64: return Mips::BGTZ64;
508 case Mips::BC1T: return Mips::BC1F;
509 case Mips::BC1F: return Mips::BC1T;
510 case Mips::BC1T_MM: return Mips::BC1F_MM;
511 case Mips::BC1F_MM: return Mips::BC1T_MM;
512 case Mips::BEQZ16_MM: return Mips::BNEZ16_MM;
513 case Mips::BNEZ16_MM: return Mips::BEQZ16_MM;
514 case Mips::BEQZC_MM: return Mips::BNEZC_MM;
515 case Mips::BNEZC_MM: return Mips::BEQZC_MM;
516 case Mips::BEQZC: return Mips::BNEZC;
517 case Mips::BNEZC: return Mips::BEQZC;
518 case Mips::BLEZC: return Mips::BGTZC;
519 case Mips::BGEZC: return Mips::BLTZC;
520 case Mips::BGEC: return Mips::BLTC;
521 case Mips::BGTZC: return Mips::BLEZC;
522 case Mips::BLTZC: return Mips::BGEZC;
523 case Mips::BLTC: return Mips::BGEC;
524 case Mips::BGEUC: return Mips::BLTUC;
525 case Mips::BLTUC: return Mips::BGEUC;
526 case Mips::BEQC: return Mips::BNEC;
527 case Mips::BNEC: return Mips::BEQC;
528 case Mips::BC1EQZ: return Mips::BC1NEZ;
529 case Mips::BC1NEZ: return Mips::BC1EQZ;
530 case Mips::BEQZC_MMR6: return Mips::BNEZC_MMR6;
531 case Mips::BNEZC_MMR6: return Mips::BEQZC_MMR6;
532 case Mips::BLEZC_MMR6: return Mips::BGTZC_MMR6;
533 case Mips::BGEZC_MMR6: return Mips::BLTZC_MMR6;
534 case Mips::BGEC_MMR6: return Mips::BLTC_MMR6;
535 case Mips::BGTZC_MMR6: return Mips::BLEZC_MMR6;
536 case Mips::BLTZC_MMR6: return Mips::BGEZC_MMR6;
537 case Mips::BLTC_MMR6: return Mips::BGEC_MMR6;
538 case Mips::BGEUC_MMR6: return Mips::BLTUC_MMR6;
539 case Mips::BLTUC_MMR6: return Mips::BGEUC_MMR6;
540 case Mips::BEQC_MMR6: return Mips::BNEC_MMR6;
541 case Mips::BNEC_MMR6: return Mips::BEQC_MMR6;
542 case Mips::BC1EQZC_MMR6: return Mips::BC1NEZC_MMR6;
543 case Mips::BC1NEZC_MMR6: return Mips::BC1EQZC_MMR6;
544 case Mips::BEQZC64: return Mips::BNEZC64;
545 case Mips::BNEZC64: return Mips::BEQZC64;
546 case Mips::BEQC64: return Mips::BNEC64;
547 case Mips::BNEC64: return Mips::BEQC64;
548 case Mips::BGEC64: return Mips::BLTC64;
549 case Mips::BGEUC64: return Mips::BLTUC64;
550 case Mips::BLTC64: return Mips::BGEC64;
551 case Mips::BLTUC64: return Mips::BGEUC64;
552 case Mips::BGTZC64: return Mips::BLEZC64;
553 case Mips::BGEZC64: return Mips::BLTZC64;
554 case Mips::BLTZC64: return Mips::BGEZC64;
555 case Mips::BLEZC64: return Mips::BGTZC64;
556 case Mips::BBIT0: return Mips::BBIT1;
557 case Mips::BBIT1: return Mips::BBIT0;
558 case Mips::BBIT032: return Mips::BBIT132;
559 case Mips::BBIT132: return Mips::BBIT032;
560 case Mips::BZ_B: return Mips::BNZ_B;
561 case Mips::BZ_H: return Mips::BNZ_H;
562 case Mips::BZ_W: return Mips::BNZ_W;
563 case Mips::BZ_D: return Mips::BNZ_D;
564 case Mips::BZ_V: return Mips::BNZ_V;
565 case Mips::BNZ_B: return Mips::BZ_B;
566 case Mips::BNZ_H: return Mips::BZ_H;
567 case Mips::BNZ_W: return Mips::BZ_W;
568 case Mips::BNZ_D: return Mips::BZ_D;
569 case Mips::BNZ_V: return Mips::BZ_V;
573 /// Adjust SP by Amount bytes.
// Emits a single addiu when Amount fits in a signed 16-bit immediate;
// otherwise materializes |Amount| with loadImmediate and adds/subtracts it.
574 void MipsSEInstrInfo::adjustStackPtr(unsigned SP, int64_t Amount,
575 MachineBasicBlock &MBB,
576 MachineBasicBlock::iterator I) const {
// ABI-dependent pointer-width opcodes (32- vs 64-bit addiu/addu/subu).
577 MipsABIInfo ABI = Subtarget.getABI();
579 unsigned ADDiu = ABI.GetPtrAddiuOp();
584 if (isInt<16>(Amount)) {
585 // addi sp, sp, amount
586 BuildMI(MBB, I, DL, get(ADDiu), SP).addReg(SP).addImm(Amount);
588 // For numbers which are not 16bit integers we synthesize Amount inline
589 // then add or subtract it from sp.
590 unsigned Opc = ABI.GetPtrAdduOp();
// NOTE(review): the negative-Amount condition guarding this subtraction
// (and the Amount negation) is missing from this extract.
592 Opc = ABI.GetPtrSubuOp();
595 unsigned Reg = loadImmediate(Amount, MBB, I, DL, nullptr);
596 BuildMI(MBB, I, DL, get(Opc), SP).addReg(SP).addReg(Reg, RegState::Kill);
600 /// This function generates the sequence of instructions needed to get the
601 /// result of adding register REG and immediate IMM.
// Materializes Imm into a fresh virtual register using the instruction
// sequence computed by MipsAnalyzeImmediate (LUi/ADDiu/ORi/SLL). When
// NewImm is non-null the trailing ADDiu is left to the caller and its
// immediate is returned through *NewImm. Returns the result register.
602 unsigned MipsSEInstrInfo::loadImmediate(int64_t Imm, MachineBasicBlock &MBB,
603 MachineBasicBlock::iterator II,
605 unsigned *NewImm) const {
606 MipsAnalyzeImmediate AnalyzeImm;
607 const MipsSubtarget &STI = Subtarget;
608 MachineRegisterInfo &RegInfo = MBB.getParent()->getRegInfo();
// N64 uses 64-bit opcodes, zero register, and GPR64; otherwise 32-bit.
609 unsigned Size = STI.isABI_N64() ? 64 : 32;
610 unsigned LUi = STI.isABI_N64() ? Mips::LUi64 : Mips::LUi;
611 unsigned ZEROReg = STI.isABI_N64() ? Mips::ZERO_64 : Mips::ZERO;
612 const TargetRegisterClass *RC = STI.isABI_N64() ?
613 &Mips::GPR64RegClass : &Mips::GPR32RegClass;
// Non-null NewImm means the caller will fold the final ADDiu itself.
614 bool LastInstrIsADDiu = NewImm;
616 const MipsAnalyzeImmediate::InstSeq &Seq =
617 AnalyzeImm.Analyze(Imm, Size, LastInstrIsADDiu);
618 MipsAnalyzeImmediate::InstSeq::const_iterator Inst = Seq.begin();
620 assert(Seq.size() && (!LastInstrIsADDiu || (Seq.size() > 1)));
622 // The first instruction can be a LUi, which is different from other
623 // instructions (ADDiu, ORI and SLL) in that it does not have a register
625 unsigned Reg = RegInfo.createVirtualRegister(RC);
627 if (Inst->Opc == LUi)
628 BuildMI(MBB, II, DL, get(LUi), Reg).addImm(SignExtend64<16>(Inst->ImmOpnd));
630 BuildMI(MBB, II, DL, get(Inst->Opc), Reg).addReg(ZEROReg)
631 .addImm(SignExtend64<16>(Inst->ImmOpnd));
633 // Build the remaining instructions in Seq.
// The bool LastInstrIsADDiu (0/1) trims the final ADDiu off the loop.
634 for (++Inst; Inst != Seq.end() - LastInstrIsADDiu; ++Inst)
635 BuildMI(MBB, II, DL, get(Inst->Opc), Reg).addReg(Reg, RegState::Kill)
636 .addImm(SignExtend64<16>(Inst->ImmOpnd));
638 if (LastInstrIsADDiu)
639 *NewImm = Inst->ImmOpnd;
// NOTE(review): the `return Reg;` and closing brace are missing from this
// extract.
// Return Opc unchanged if it is a branch opcode this target knows how to
// analyze (classic, microMIPS, R6 compact, 64-bit, Octeon BBIT families);
// otherwise return 0 so generic branch analysis bails out.
644 unsigned MipsSEInstrInfo::getAnalyzableBrOpc(unsigned Opc) const {
645 return (Opc == Mips::BEQ || Opc == Mips::BEQ_MM || Opc == Mips::BNE ||
646 Opc == Mips::BNE_MM || Opc == Mips::BGTZ || Opc == Mips::BGEZ ||
647 Opc == Mips::BLTZ || Opc == Mips::BLEZ || Opc == Mips::BEQ64 ||
648 Opc == Mips::BNE64 || Opc == Mips::BGTZ64 || Opc == Mips::BGEZ64 ||
649 Opc == Mips::BLTZ64 || Opc == Mips::BLEZ64 || Opc == Mips::BC1T ||
650 Opc == Mips::BC1F || Opc == Mips::B || Opc == Mips::J ||
651 Opc == Mips::J_MM || Opc == Mips::B_MM || Opc == Mips::BEQZC_MM ||
652 Opc == Mips::BNEZC_MM || Opc == Mips::BEQC || Opc == Mips::BNEC ||
653 Opc == Mips::BLTC || Opc == Mips::BGEC || Opc == Mips::BLTUC ||
654 Opc == Mips::BGEUC || Opc == Mips::BGTZC || Opc == Mips::BLEZC ||
655 Opc == Mips::BGEZC || Opc == Mips::BLTZC || Opc == Mips::BEQZC ||
656 Opc == Mips::BNEZC || Opc == Mips::BEQZC64 || Opc == Mips::BNEZC64 ||
657 Opc == Mips::BEQC64 || Opc == Mips::BNEC64 || Opc == Mips::BGEC64 ||
658 Opc == Mips::BGEUC64 || Opc == Mips::BLTC64 || Opc == Mips::BLTUC64 ||
659 Opc == Mips::BGTZC64 || Opc == Mips::BGEZC64 ||
660 Opc == Mips::BLTZC64 || Opc == Mips::BLEZC64 || Opc == Mips::BC ||
661 Opc == Mips::BBIT0 || Opc == Mips::BBIT1 || Opc == Mips::BBIT032 ||
662 Opc == Mips::BBIT132 || Opc == Mips::BC_MMR6 ||
663 Opc == Mips::BEQC_MMR6 || Opc == Mips::BNEC_MMR6 ||
664 Opc == Mips::BLTC_MMR6 || Opc == Mips::BGEC_MMR6 ||
665 Opc == Mips::BLTUC_MMR6 || Opc == Mips::BGEUC_MMR6 ||
666 Opc == Mips::BGTZC_MMR6 || Opc == Mips::BLEZC_MMR6 ||
667 Opc == Mips::BGEZC_MMR6 || Opc == Mips::BLTZC_MMR6 ||
668 Opc == Mips::BEQZC_MMR6 || Opc == Mips::BNEZC_MMR6) ? Opc : 0;
// Expand the RetRA pseudo into PseudoReturn/PseudoReturn64 on $ra, copying
// any implicit-use operands from the original instruction onto the new one.
671 void MipsSEInstrInfo::expandRetRA(MachineBasicBlock &MBB,
672 MachineBasicBlock::iterator I) const {
674 MachineInstrBuilder MIB;
675 if (Subtarget.isGP64bit())
676 MIB = BuildMI(MBB, I, I->getDebugLoc(), get(Mips::PseudoReturn64))
677 .addReg(Mips::RA_64, RegState::Undef);
679 MIB = BuildMI(MBB, I, I->getDebugLoc(), get(Mips::PseudoReturn))
680 .addReg(Mips::RA, RegState::Undef);
682 // Retain any imp-use flags.
// NOTE(review): the loop body that copies implicit operands onto MIB is
// missing from this extract.
683 for (auto & MO : I->operands()) {
// Expand the ERet pseudo into a real ERET (exception return) instruction.
// NOTE(review): the closing brace is missing from this extract.
689 void MipsSEInstrInfo::expandERet(MachineBasicBlock &MBB,
690 MachineBasicBlock::iterator I) const {
691 BuildMI(MBB, I, I->getDebugLoc(), get(Mips::ERET));
// For a unary instruction, compare the register sizes of its destination
// (operand 0) and source (operand 1) classes. Returns {dst > src, dst < src}
// so callers can tell which side is the wider register.
694 std::pair<bool, bool>
695 MipsSEInstrInfo::compareOpndSize(unsigned Opc,
696 const MachineFunction &MF) const {
697 const MCInstrDesc &Desc = get(Opc);
698 assert(Desc.NumOperands == 2 && "Unary instruction expected.");
699 const MipsRegisterInfo *RI = &getRegisterInfo();
700 unsigned DstRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 0, RI, MF));
701 unsigned SrcRegSize = RI->getRegSizeInBits(*getRegClass(Desc, 1, RI, MF));
703 return std::make_pair(DstRegSize > SrcRegSize, DstRegSize < SrcRegSize);
// Replace a PseudoMFHI/PseudoMFLO with the real MFHI/MFLO variant (NewOpc),
// writing the pseudo's destination register.
// NOTE(review): closing brace is missing from this extract.
706 void MipsSEInstrInfo::expandPseudoMFHiLo(MachineBasicBlock &MBB,
707 MachineBasicBlock::iterator I,
708 unsigned NewOpc) const {
709 BuildMI(MBB, I, I->getDebugLoc(), get(NewOpc), I->getOperand(0).getReg());
// Expand a PseudoMTLOHI (GPR pair -> accumulator) into separate mtlo and
// mthi instructions. When HasExplicitDef is true (DSP accumulators), each
// instruction also gets an explicit sub_lo/sub_hi destination register.
712 void MipsSEInstrInfo::expandPseudoMTLoHi(MachineBasicBlock &MBB,
713 MachineBasicBlock::iterator I,
716 bool HasExplicitDef) const {
718 // lo_hi pseudomtlohi $gpr0, $gpr1
719 // to these two instructions:
723 DebugLoc DL = I->getDebugLoc();
// Operand 1 feeds LO, operand 2 feeds HI.
724 const MachineOperand &SrcLo = I->getOperand(1), &SrcHi = I->getOperand(2);
725 MachineInstrBuilder LoInst = BuildMI(MBB, I, DL, get(LoOpc));
726 MachineInstrBuilder HiInst = BuildMI(MBB, I, DL, get(HiOpc));
728 // Add lo/hi registers if the mtlo/hi instructions created have explicit
730 if (HasExplicitDef) {
731 unsigned DstReg = I->getOperand(0).getReg();
732 unsigned DstLo = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
733 unsigned DstHi = getRegisterInfo().getSubReg(DstReg, Mips::sub_hi);
734 LoInst.addReg(DstLo, RegState::Define);
735 HiInst.addReg(DstHi, RegState::Define);
// Source GPRs are appended last, preserving the pseudo's kill flags.
738 LoInst.addReg(SrcLo.getReg(), getKillRegState(SrcLo.isKill()));
739 HiInst.addReg(SrcHi.getReg(), getKillRegState(SrcHi.isKill()));
// Expand an FP<->int conversion pseudo into a GPR->FPR move (MovOpc)
// followed by the actual convert (CvtOpc). When the operand sizes differ,
// the narrower side is addressed through the sub_lo subregister of the
// wider FP register.
742 void MipsSEInstrInfo::expandCvtFPInt(MachineBasicBlock &MBB,
743 MachineBasicBlock::iterator I,
744 unsigned CvtOpc, unsigned MovOpc,
746 const MCInstrDesc &CvtDesc = get(CvtOpc), &MovDesc = get(MovOpc);
747 const MachineOperand &Dst = I->getOperand(0), &Src = I->getOperand(1);
748 unsigned DstReg = Dst.getReg(), SrcReg = Src.getReg(), TmpReg = DstReg;
749 unsigned KillSrc = getKillRegState(Src.isKill());
750 DebugLoc DL = I->getDebugLoc();
751 bool DstIsLarger, SrcIsLarger;
753 std::tie(DstIsLarger, SrcIsLarger) =
754 compareOpndSize(CvtOpc, *MBB.getParent());
// NOTE(review): the conditions selecting between these two sub_lo
// adjustments are missing from this extract (line numbers skip).
757 TmpReg = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
760 DstReg = getRegisterInfo().getSubReg(DstReg, Mips::sub_lo);
// Move the integer value into the FPU, then convert in place.
762 BuildMI(MBB, I, DL, MovDesc, TmpReg).addReg(SrcReg, KillSrc);
763 BuildMI(MBB, I, DL, CvtDesc, DstReg).addReg(TmpReg, RegState::Kill);
// Expand ExtractElementF64(_64): read half N (0 = lo, 1 = hi) of a 64-bit
// FP register pair into a GPR, via MFHC1 when available for the high half,
// otherwise MFC1 on the corresponding subregister.
766 void MipsSEInstrInfo::expandExtractElementF64(MachineBasicBlock &MBB,
767 MachineBasicBlock::iterator I,
770 unsigned DstReg = I->getOperand(0).getReg();
771 unsigned SrcReg = I->getOperand(1).getReg();
772 unsigned N = I->getOperand(2).getImm();
773 DebugLoc dl = I->getDebugLoc();
775 assert(N < 2 && "Invalid immediate");
776 unsigned SubIdx = N ? Mips::sub_hi : Mips::sub_lo;
777 unsigned SubReg = getRegisterInfo().getSubReg(SrcReg, SubIdx);
779 // FPXX on MIPS-II or MIPS32r1 should have been handled with a spill/reload
780 // in MipsSEFrameLowering.cpp.
781 assert(!(Subtarget.isABI_FPXX() && !Subtarget.hasMips32r2()));
783 // FP64A (FP64 with nooddspreg) should have been handled with a spill/reload
784 // in MipsSEFrameLowering.cpp.
785 assert(!(Subtarget.isFP64bit() && !Subtarget.useOddSPReg()));
787 if (SubIdx == Mips::sub_hi && Subtarget.hasMTHC1()) {
788 // FIXME: Strictly speaking MFHC1 only reads the top 32-bits however, we
789 // claim to read the whole 64-bits as part of a white lie used to
790 // temporarily work around a widespread bug in the -mfp64 support.
791 // The problem is that none of the 32-bit fpu ops mention the fact
792 // that they clobber the upper 32-bits of the 64-bit FPR. Fixing that
793 // requires a major overhaul of the FPU implementation which can't
794 // be done right now due to time constraints.
795 // MFHC1 is one of two instructions that are affected since they are
796 // the only instructions that don't read the lower 32-bits.
797 // We therefore pretend that it reads the bottom 32-bits to
798 // artificially create a dependency and prevent the scheduler
799 // changing the behaviour of the code.
// NOTE(review): the BuildMI line opening this call and its operand list
// are partially missing from this extract.
801 get(isMicroMips ? (FP64 ? Mips::MFHC1_D64_MM : Mips::MFHC1_D32_MM)
802 : (FP64 ? Mips::MFHC1_D64 : Mips::MFHC1_D32)),
// Fallback path: plain MFC1 from the selected 32-bit subregister.
806 BuildMI(MBB, I, dl, get(Mips::MFC1), DstReg).addReg(SubReg);
// Expand BuildPairF64(_64): combine two GPRs (lo, hi) into one 64-bit FP
// register — mtc1 into sub_lo, then mthc1 (when available) or mtc1 into
// sub_hi. The FPXX spill/reload case is handled in frame lowering.
809 void MipsSEInstrInfo::expandBuildPairF64(MachineBasicBlock &MBB,
810 MachineBasicBlock::iterator I,
811 bool isMicroMips, bool FP64) const {
812 unsigned DstReg = I->getOperand(0).getReg();
813 unsigned LoReg = I->getOperand(1).getReg(), HiReg = I->getOperand(2).getReg();
814 const MCInstrDesc& Mtc1Tdd = get(Mips::MTC1);
815 DebugLoc dl = I->getDebugLoc();
816 const TargetRegisterInfo &TRI = getRegisterInfo();
818 // When mthc1 is available, use:
822 // Otherwise, for O32 FPXX ABI:
823 // spill + reload via ldc1
824 // This case is handled by the frame lowering code.
826 // Otherwise, for FP32:
830 // The case where dmtc1 is available doesn't need to be handled here
831 // because it never creates a BuildPairF64 node.
833 // FPXX on MIPS-II or MIPS32r1 should have been handled with a spill/reload
834 // in MipsSEFrameLowering.cpp.
835 assert(!(Subtarget.isABI_FPXX() && !Subtarget.hasMips32r2()));
837 // FP64A (FP64 with nooddspreg) should have been handled with a spill/reload
838 // in MipsSEFrameLowering.cpp.
839 assert(!(Subtarget.isFP64bit() && !Subtarget.useOddSPReg()));
// Low half first: mtc1 LoReg into the sub_lo half of the destination.
841 BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_lo))
844 if (Subtarget.hasMTHC1()) {
845 // FIXME: The .addReg(DstReg) is a white lie used to temporarily work
846 // around a widespread bug in the -mfp64 support.
847 // The problem is that none of the 32-bit fpu ops mention the fact
848 // that they clobber the upper 32-bits of the 64-bit FPR. Fixing that
849 // requires a major overhaul of the FPU implementation which can't
850 // be done right now due to time constraints.
851 // MTHC1 is one of two instructions that are affected since they are
852 // the only instructions that don't read the lower 32-bits.
853 // We therefore pretend that it reads the bottom 32-bits to
854 // artificially create a dependency and prevent the scheduler
855 // changing the behaviour of the code.
// NOTE(review): the BuildMI line opening this MTHC1 call and its operand
// list are partially missing from this extract.
857 get(isMicroMips ? (FP64 ? Mips::MTHC1_D64_MM : Mips::MTHC1_D32_MM)
858 : (FP64 ? Mips::MTHC1_D64 : Mips::MTHC1_D32)),
862 } else if (Subtarget.isABI_FPXX())
863 llvm_unreachable("BuildPairF64 not expanded in frame lowering code!");
// FP32 fallback: a second mtc1 writes HiReg into the sub_hi half.
865 BuildMI(MBB, I, dl, Mtc1Tdd, TRI.getSubReg(DstReg, Mips::sub_hi))
869 void MipsSEInstrInfo::expandEhReturn(MachineBasicBlock &MBB,
870 MachineBasicBlock::iterator I) const {
871 // This pseudo instruction is generated as part of the lowering of
872 // ISD::EH_RETURN. We convert it to a stack increment by OffsetReg, and
873 // indirect jump to TargetReg
874 MipsABIInfo ABI = Subtarget.getABI();
875 unsigned ADDU = ABI.GetPtrAdduOp();
// Pick 32- or 64-bit variants of SP/RA/T9/ZERO for the current pointer width.
876 unsigned SP = Subtarget.isGP64bit() ? Mips::SP_64 : Mips::SP;
877 unsigned RA = Subtarget.isGP64bit() ? Mips::RA_64 : Mips::RA;
878 unsigned T9 = Subtarget.isGP64bit() ? Mips::T9_64 : Mips::T9;
879 unsigned ZERO = Subtarget.isGP64bit() ? Mips::ZERO_64 : Mips::ZERO;
880 unsigned OffsetReg = I->getOperand(0).getReg();
881 unsigned TargetReg = I->getOperand(1).getReg();
883 // addu $ra, $v0, $zero
884 // addu $sp, $sp, $v1
885 // jr $ra (via RetRA)
886 const TargetMachine &TM = MBB.getParent()->getTarget();
// Under PIC the call target also has to be placed in $t9 (PIC call reg).
887 if (TM.isPositionIndependent())
888 BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), T9)
// NOTE(review): operand lists of these ADDU builders and the trailing
// expandRetRA call are missing from this extract (line numbers skip).
891 BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), RA)
894 BuildMI(MBB, I, I->getDebugLoc(), get(ADDU), SP).addReg(SP).addReg(OffsetReg);
// Factory used by the Mips target to create the SE (standard encoding)
// instruction-info object. Caller takes ownership of the allocation.
// NOTE(review): the closing brace is missing from this extract.
898 const MipsInstrInfo *llvm::createMipsSEInstrInfo(const MipsSubtarget &STI) {
899 return new MipsSEInstrInfo(STI);