//===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// MIPS.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//
14 #include "MCTargetDesc/MipsInstPrinter.h"
15 #include "MipsMachineFunction.h"
16 #include "MipsRegisterBankInfo.h"
17 #include "MipsTargetMachine.h"
18 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
19 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
20 #include "llvm/CodeGen/MachineJumpTableInfo.h"
21 #include "llvm/IR/IntrinsicsMips.h"
23 #define DEBUG_TYPE "mips-isel"
29 #define GET_GLOBALISEL_PREDICATE_BITSET
30 #include "MipsGenGlobalISel.inc"
31 #undef GET_GLOBALISEL_PREDICATE_BITSET
33 class MipsInstructionSelector : public InstructionSelector {
35 MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
36 const MipsRegisterBankInfo &RBI);
38 bool select(MachineInstr &I) override;
39 static const char *getName() { return DEBUG_TYPE; }
42 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
43 bool isRegInGprb(Register Reg, MachineRegisterInfo &MRI) const;
44 bool isRegInFprb(Register Reg, MachineRegisterInfo &MRI) const;
45 bool materialize32BitImm(Register DestReg, APInt Imm,
46 MachineIRBuilder &B) const;
47 bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
48 const TargetRegisterClass *
49 getRegClassForTypeOnBank(Register Reg, MachineRegisterInfo &MRI) const;
50 unsigned selectLoadStoreOpCode(MachineInstr &I,
51 MachineRegisterInfo &MRI) const;
53 const MipsTargetMachine &TM;
54 const MipsSubtarget &STI;
55 const MipsInstrInfo &TII;
56 const MipsRegisterInfo &TRI;
57 const MipsRegisterBankInfo &RBI;
59 #define GET_GLOBALISEL_PREDICATES_DECL
60 #include "MipsGenGlobalISel.inc"
61 #undef GET_GLOBALISEL_PREDICATES_DECL
63 #define GET_GLOBALISEL_TEMPORARIES_DECL
64 #include "MipsGenGlobalISel.inc"
65 #undef GET_GLOBALISEL_TEMPORARIES_DECL
68 } // end anonymous namespace
70 #define GET_GLOBALISEL_IMPL
71 #include "MipsGenGlobalISel.inc"
72 #undef GET_GLOBALISEL_IMPL
74 MipsInstructionSelector::MipsInstructionSelector(
75 const MipsTargetMachine &TM, const MipsSubtarget &STI,
76 const MipsRegisterBankInfo &RBI)
77 : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
78 TRI(*STI.getRegisterInfo()), RBI(RBI),
80 #define GET_GLOBALISEL_PREDICATES_INIT
81 #include "MipsGenGlobalISel.inc"
82 #undef GET_GLOBALISEL_PREDICATES_INIT
83 #define GET_GLOBALISEL_TEMPORARIES_INIT
84 #include "MipsGenGlobalISel.inc"
85 #undef GET_GLOBALISEL_TEMPORARIES_INIT
89 bool MipsInstructionSelector::isRegInGprb(Register Reg,
90 MachineRegisterInfo &MRI) const {
91 return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::GPRBRegBankID;
94 bool MipsInstructionSelector::isRegInFprb(Register Reg,
95 MachineRegisterInfo &MRI) const {
96 return RBI.getRegBank(Reg, MRI, TRI)->getID() == Mips::FPRBRegBankID;
99 bool MipsInstructionSelector::selectCopy(MachineInstr &I,
100 MachineRegisterInfo &MRI) const {
101 Register DstReg = I.getOperand(0).getReg();
102 if (Register::isPhysicalRegister(DstReg))
105 const TargetRegisterClass *RC = getRegClassForTypeOnBank(DstReg, MRI);
106 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
107 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
114 const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
115 Register Reg, MachineRegisterInfo &MRI) const {
116 const LLT Ty = MRI.getType(Reg);
117 const unsigned TySize = Ty.getSizeInBits();
119 if (isRegInGprb(Reg, MRI)) {
120 assert((Ty.isScalar() || Ty.isPointer()) && TySize == 32 &&
121 "Register class not available for LLT, register bank combination");
122 return &Mips::GPR32RegClass;
125 if (isRegInFprb(Reg, MRI)) {
127 assert((TySize == 32 || TySize == 64) &&
128 "Register class not available for LLT, register bank combination");
130 return &Mips::FGR32RegClass;
131 return STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
135 llvm_unreachable("Unsupported register bank.");
138 bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
139 MachineIRBuilder &B) const {
140 assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
141 // Ori zero extends immediate. Used for values with zeros in high 16 bits.
142 if (Imm.getHiBits(16).isNullValue()) {
144 B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
145 .addImm(Imm.getLoBits(16).getLimitedValue());
146 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
148 // Lui places immediate in high 16 bits and sets low 16 bits to zero.
149 if (Imm.getLoBits(16).isNullValue()) {
150 MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
151 .addImm(Imm.getHiBits(16).getLimitedValue());
152 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
154 // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
155 if (Imm.isSignedIntN(16)) {
157 B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
158 .addImm(Imm.getLoBits(16).getLimitedValue());
159 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
161 // Values that cannot be materialized with single immediate instruction.
162 Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
163 MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
164 .addImm(Imm.getHiBits(16).getLimitedValue());
165 MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
166 .addImm(Imm.getLoBits(16).getLimitedValue());
167 if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
169 if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
174 /// When I.getOpcode() is returned, we failed to select MIPS instruction opcode.
176 MipsInstructionSelector::selectLoadStoreOpCode(MachineInstr &I,
177 MachineRegisterInfo &MRI) const {
178 const Register ValueReg = I.getOperand(0).getReg();
179 const LLT Ty = MRI.getType(ValueReg);
180 const unsigned TySize = Ty.getSizeInBits();
181 const unsigned MemSizeInBytes = (*I.memoperands_begin())->getSize();
182 unsigned Opc = I.getOpcode();
183 const bool isStore = Opc == TargetOpcode::G_STORE;
185 if (isRegInGprb(ValueReg, MRI)) {
186 assert(((Ty.isScalar() && TySize == 32) ||
187 (Ty.isPointer() && TySize == 32 && MemSizeInBytes == 4)) &&
188 "Unsupported register bank, LLT, MemSizeInBytes combination");
191 switch (MemSizeInBytes) {
202 // Unspecified extending load is selected into zeroExtending load.
203 switch (MemSizeInBytes) {
207 return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
209 return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
215 if (isRegInFprb(ValueReg, MRI)) {
217 assert(((TySize == 32 && MemSizeInBytes == 4) ||
218 (TySize == 64 && MemSizeInBytes == 8)) &&
219 "Unsupported register bank, LLT, MemSizeInBytes combination");
221 if (MemSizeInBytes == 4)
222 return isStore ? Mips::SWC1 : Mips::LWC1;
225 return isStore ? Mips::SDC164 : Mips::LDC164;
226 return isStore ? Mips::SDC1 : Mips::LDC1;
230 assert(STI.hasMSA() && "Vector instructions require target with MSA.");
231 assert((TySize == 128 && MemSizeInBytes == 16) &&
232 "Unsupported register bank, LLT, MemSizeInBytes combination");
233 switch (Ty.getElementType().getSizeInBits()) {
235 return isStore ? Mips::ST_B : Mips::LD_B;
237 return isStore ? Mips::ST_H : Mips::LD_H;
239 return isStore ? Mips::ST_W : Mips::LD_W;
241 return isStore ? Mips::ST_D : Mips::LD_D;
251 bool MipsInstructionSelector::select(MachineInstr &I) {
253 MachineBasicBlock &MBB = *I.getParent();
254 MachineFunction &MF = *MBB.getParent();
255 MachineRegisterInfo &MRI = MF.getRegInfo();
257 if (!isPreISelGenericOpcode(I.getOpcode())) {
259 return selectCopy(I, MRI);
264 if (I.getOpcode() == Mips::G_MUL &&
265 isRegInGprb(I.getOperand(0).getReg(), MRI)) {
266 MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
267 .add(I.getOperand(0))
268 .add(I.getOperand(1))
269 .add(I.getOperand(2));
270 if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
272 Mul->getOperand(3).setIsDead(true);
273 Mul->getOperand(4).setIsDead(true);
279 if (selectImpl(I, *CoverageInfo))
282 MachineInstr *MI = nullptr;
283 using namespace TargetOpcode;
285 switch (I.getOpcode()) {
287 Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
288 MachineInstr *PseudoMULTu, *PseudoMove;
290 PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
291 .addDef(PseudoMULTuReg)
292 .add(I.getOperand(1))
293 .add(I.getOperand(2));
294 if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
297 PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
298 .addDef(I.getOperand(0).getReg())
299 .addUse(PseudoMULTuReg);
300 if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
307 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
308 .add(I.getOperand(0))
309 .add(I.getOperand(1))
310 .add(I.getOperand(2));
315 I.setDesc(TII.get(COPY));
316 return selectCopy(I, MRI);
318 case G_FRAME_INDEX: {
319 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
320 .add(I.getOperand(0))
321 .add(I.getOperand(1))
326 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
327 .add(I.getOperand(0))
329 .add(I.getOperand(1));
334 MF.getJumpTableInfo()->getEntrySize(MF.getDataLayout());
335 assert(isPowerOf2_32(EntrySize) &&
336 "Non-power-of-two jump-table entry size not supported.");
338 Register JTIndex = MRI.createVirtualRegister(&Mips::GPR32RegClass);
339 MachineInstr *SLL = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SLL))
341 .addUse(I.getOperand(2).getReg())
342 .addImm(Log2_32(EntrySize));
343 if (!constrainSelectedInstRegOperands(*SLL, TII, TRI, RBI))
346 Register DestAddress = MRI.createVirtualRegister(&Mips::GPR32RegClass);
347 MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
349 .addUse(I.getOperand(0).getReg())
351 if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
354 Register Dest = MRI.createVirtualRegister(&Mips::GPR32RegClass);
356 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
359 .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_LO)
360 .addMemOperand(MF.getMachineMemOperand(
361 MachinePointerInfo(), MachineMemOperand::MOLoad, 4, 4));
362 if (!constrainSelectedInstRegOperands(*LW, TII, TRI, RBI))
365 if (MF.getTarget().isPositionIndependent()) {
366 Register DestTmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
367 LW->getOperand(0).setReg(DestTmp);
368 MachineInstr *ADDu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
371 .addUse(MF.getInfo<MipsFunctionInfo>()
372 ->getGlobalBaseRegForGlobalISel());
373 if (!constrainSelectedInstRegOperands(*ADDu, TII, TRI, RBI))
377 MachineInstr *Branch =
378 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
380 if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
387 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoIndirectBranch))
388 .add(I.getOperand(0));
392 const Register DestReg = I.getOperand(0).getReg();
394 const TargetRegisterClass *DefRC = nullptr;
395 if (Register::isPhysicalRegister(DestReg))
396 DefRC = TRI.getRegClass(DestReg);
398 DefRC = getRegClassForTypeOnBank(DestReg, MRI);
400 I.setDesc(TII.get(TargetOpcode::PHI));
401 return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
407 const unsigned NewOpc = selectLoadStoreOpCode(I, MRI);
408 if (NewOpc == I.getOpcode())
411 MachineOperand BaseAddr = I.getOperand(1);
412 int64_t SignedOffset = 0;
413 // Try to fold load/store + G_PTR_ADD + G_CONSTANT
414 // %SignedOffset:(s32) = G_CONSTANT i32 16_bit_signed_immediate
415 // %Addr:(p0) = G_PTR_ADD %BaseAddr, %SignedOffset
416 // %LoadResult/%StoreSrc = load/store %Addr(p0)
418 // %LoadResult/%StoreSrc = NewOpc %BaseAddr(p0), 16_bit_signed_immediate
420 MachineInstr *Addr = MRI.getVRegDef(I.getOperand(1).getReg());
421 if (Addr->getOpcode() == G_PTR_ADD) {
422 MachineInstr *Offset = MRI.getVRegDef(Addr->getOperand(2).getReg());
423 if (Offset->getOpcode() == G_CONSTANT) {
424 APInt OffsetValue = Offset->getOperand(1).getCImm()->getValue();
425 if (OffsetValue.isSignedIntN(16)) {
426 BaseAddr = Addr->getOperand(1);
427 SignedOffset = OffsetValue.getSExtValue();
432 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
433 .add(I.getOperand(0))
435 .addImm(SignedOffset)
436 .addMemOperand(*I.memoperands_begin());
443 Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
444 bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
445 bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;
447 MachineInstr *PseudoDIV, *PseudoMove;
448 PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
449 TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
451 .add(I.getOperand(1))
452 .add(I.getOperand(2));
453 if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
456 PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
457 TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
458 .addDef(I.getOperand(0).getReg())
460 if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
467 // Handle operands with pointer type.
468 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
469 .add(I.getOperand(0))
470 .add(I.getOperand(2))
471 .add(I.getOperand(1))
472 .add(I.getOperand(3));
475 case G_IMPLICIT_DEF: {
476 Register Dst = I.getOperand(0).getReg();
477 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::IMPLICIT_DEF))
480 // Set class based on register bank, there can be fpr and gpr implicit def.
481 MRI.setRegClass(Dst, getRegClassForTypeOnBank(Dst, MRI));
485 MachineIRBuilder B(I);
486 if (!materialize32BitImm(I.getOperand(0).getReg(),
487 I.getOperand(1).getCImm()->getValue(), B))
494 const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
495 APInt APImm = FPimm.bitcastToAPInt();
496 unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
499 Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
500 MachineIRBuilder B(I);
501 if (!materialize32BitImm(GPRReg, APImm, B))
504 MachineInstrBuilder MTC1 =
505 B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
506 if (!MTC1.constrainAllUses(TII, TRI, RBI))
510 Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
511 Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
512 MachineIRBuilder B(I);
513 if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
515 if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
518 MachineInstrBuilder PairF64 = B.buildInstr(
519 STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
520 {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
521 if (!PairF64.constrainAllUses(TII, TRI, RBI))
529 unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
530 unsigned FABSOpcode =
531 Size == 32 ? Mips::FABS_S
532 : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
533 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
534 .add(I.getOperand(0))
535 .add(I.getOperand(1));
539 unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
540 unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
542 assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
543 assert((FromSize == 32 || FromSize == 64) &&
544 "Unsupported floating point size for G_FPTOSI");
548 Opcode = Mips::TRUNC_W_S;
550 Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
551 Register ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
552 MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
554 .addUse(I.getOperand(1).getReg());
555 if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
558 MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
559 .addDef(I.getOperand(0).getReg())
560 .addUse(ResultInFPR);
561 if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
567 case G_GLOBAL_VALUE: {
568 const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
569 if (MF.getTarget().isPositionIndependent()) {
570 MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
571 .addDef(I.getOperand(0).getReg())
572 .addReg(MF.getInfo<MipsFunctionInfo>()
573 ->getGlobalBaseRegForGlobalISel())
574 .addGlobalAddress(GVal);
575 // Global Values that don't have local linkage are handled differently
576 // when they are part of call sequence. MipsCallLowering::lowerCall
577 // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
578 // MO_GOT_CALL flag when Callee doesn't have local linkage.
579 if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
580 LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
582 LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
583 LWGOT->addMemOperand(
584 MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
585 MachineMemOperand::MOLoad, 4, 4));
586 if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
589 if (GVal->hasLocalLinkage()) {
590 Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
591 LWGOT->getOperand(0).setReg(LWGOTDef);
593 MachineInstr *ADDiu =
594 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
595 .addDef(I.getOperand(0).getReg())
597 .addGlobalAddress(GVal);
598 ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
599 if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
603 Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
605 MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
607 .addGlobalAddress(GVal);
608 LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
609 if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
612 MachineInstr *ADDiu =
613 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
614 .addDef(I.getOperand(0).getReg())
616 .addGlobalAddress(GVal);
617 ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
618 if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
625 if (MF.getTarget().isPositionIndependent()) {
626 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
627 .addDef(I.getOperand(0).getReg())
628 .addReg(MF.getInfo<MipsFunctionInfo>()
629 ->getGlobalBaseRegForGlobalISel())
630 .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_GOT)
632 MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
633 MachineMemOperand::MOLoad, 4, 4));
636 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
637 .addDef(I.getOperand(0).getReg())
638 .addJumpTableIndex(I.getOperand(1).getIndex(), MipsII::MO_ABS_HI);
645 Register Def, LHS, RHS;
646 Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
647 : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};
649 bool hasImm() const {
650 if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
656 SmallVector<struct Instr, 2> Instructions;
657 Register ICMPReg = I.getOperand(0).getReg();
658 Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
659 Register LHS = I.getOperand(2).getReg();
660 Register RHS = I.getOperand(3).getReg();
661 CmpInst::Predicate Cond =
662 static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
665 case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
666 Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
667 Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
669 case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
670 Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
671 Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
673 case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS
674 Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
676 case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
677 Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
678 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
680 case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS
681 Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
683 case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
684 Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
685 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
687 case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS
688 Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
690 case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
691 Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
692 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
694 case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS
695 Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
697 case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
698 Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
699 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
705 MachineIRBuilder B(I);
706 for (const struct Instr &Instruction : Instructions) {
707 MachineInstrBuilder MIB = B.buildInstr(
708 Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});
710 if (Instruction.hasImm())
711 MIB.addImm(Instruction.RHS);
713 MIB.addUse(Instruction.RHS);
715 if (!MIB.constrainAllUses(TII, TRI, RBI))
723 unsigned MipsFCMPCondCode;
724 bool isLogicallyNegated;
725 switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
726 I.getOperand(1).getPredicate())) {
727 case CmpInst::FCMP_UNO: // Unordered
728 case CmpInst::FCMP_ORD: // Ordered (OR)
729 MipsFCMPCondCode = Mips::FCOND_UN;
730 isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
732 case CmpInst::FCMP_OEQ: // Equal
733 case CmpInst::FCMP_UNE: // Not Equal (NEQ)
734 MipsFCMPCondCode = Mips::FCOND_OEQ;
735 isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
737 case CmpInst::FCMP_UEQ: // Unordered or Equal
738 case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
739 MipsFCMPCondCode = Mips::FCOND_UEQ;
740 isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
742 case CmpInst::FCMP_OLT: // Ordered or Less Than
743 case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
744 MipsFCMPCondCode = Mips::FCOND_OLT;
745 isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
747 case CmpInst::FCMP_ULT: // Unordered or Less Than
748 case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
749 MipsFCMPCondCode = Mips::FCOND_ULT;
750 isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
752 case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
753 case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
754 MipsFCMPCondCode = Mips::FCOND_OLE;
755 isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
757 case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
758 case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
759 MipsFCMPCondCode = Mips::FCOND_ULE;
760 isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
766 // Default compare result in gpr register will be `true`.
767 // We will move `false` (MIPS::Zero) to gpr result when fcmp gives false
768 // using MOVF_I. When orignal predicate (Cond) is logically negated
769 // MipsFCMPCondCode, result is inverted i.e. MOVT_I is used.
770 unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;
772 Register TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
773 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
778 unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
779 unsigned FCMPOpcode =
780 Size == 32 ? Mips::FCMP_S32
781 : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
782 MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
783 .addUse(I.getOperand(2).getReg())
784 .addUse(I.getOperand(3).getReg())
785 .addImm(MipsFCMPCondCode);
786 if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
789 MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
790 .addDef(I.getOperand(0).getReg())
794 if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
801 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SYNC)).addImm(0);
805 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
806 int FI = FuncInfo->getVarArgsFrameIndex();
808 Register LeaReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
809 MachineInstr *LEA_ADDiu =
810 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LEA_ADDiu))
814 if (!constrainSelectedInstRegOperands(*LEA_ADDiu, TII, TRI, RBI))
817 MachineInstr *Store = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::SW))
819 .addUse(I.getOperand(0).getReg())
821 if (!constrainSelectedInstRegOperands(*Store, TII, TRI, RBI))
832 return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
836 InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &TM,
837 MipsSubtarget &Subtarget,
838 MipsRegisterBankInfo &RBI) {
839 return new MipsInstructionSelector(TM, Subtarget, RBI);
841 } // end namespace llvm