1 //===- MipsInstructionSelector.cpp ------------------------------*- C++ -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 /// This file implements the targeting of the InstructionSelector class for
11 /// \todo This should be generated by TableGen.
12 //===----------------------------------------------------------------------===//
14 #include "MCTargetDesc/MipsInstPrinter.h"
15 #include "MipsMachineFunction.h"
16 #include "MipsRegisterBankInfo.h"
17 #include "MipsTargetMachine.h"
18 #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
19 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
21 #define DEBUG_TYPE "mips-isel"
27 #define GET_GLOBALISEL_PREDICATE_BITSET
28 #include "MipsGenGlobalISel.inc"
29 #undef GET_GLOBALISEL_PREDICATE_BITSET
// Hand-written GlobalISel instruction selector for MIPS. Lives in an
// anonymous namespace (opening brace not visible in this listing; the
// closing brace is at the end of this block).
// NOTE(review): this listing is missing lines — access specifiers and the
// class's closing "};" are not visible; comments describe only what is shown.
31 class MipsInstructionSelector : public InstructionSelector {
// Constructed with the target machine, subtarget and register-bank info;
// see the out-of-line definition below.
33 MipsInstructionSelector(const MipsTargetMachine &TM, const MipsSubtarget &STI,
34 const MipsRegisterBankInfo &RBI);
// Main entry point: selects a generic MachineInstr into a target instruction.
36 bool select(MachineInstr &I, CodeGenCoverage &CoverageInfo) const override;
37 static const char *getName() { return DEBUG_TYPE; }
// TableGen-generated pattern selector (defined via MipsGenGlobalISel.inc).
40 bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
// Emits instruction(s) that place the 32-bit immediate Imm into DestReg.
41 bool materialize32BitImm(Register DestReg, APInt Imm,
42 MachineIRBuilder &B) const;
// Constrains the destination of a COPY-like instruction to a register class.
43 bool selectCopy(MachineInstr &I, MachineRegisterInfo &MRI) const;
// Maps (size-in-bits, register bank) to a MIPS register class.
44 const TargetRegisterClass *
45 getRegClassForTypeOnBank(unsigned OpSize, const RegisterBank &RB,
46 const RegisterBankInfo &RBI) const;
// Cached target objects, initialized in the constructor.
48 const MipsTargetMachine &TM;
49 const MipsSubtarget &STI;
50 const MipsInstrInfo &TII;
51 const MipsRegisterInfo &TRI;
52 const MipsRegisterBankInfo &RBI;
// TableGen-generated predicate and temporary declarations.
54 #define GET_GLOBALISEL_PREDICATES_DECL
55 #include "MipsGenGlobalISel.inc"
56 #undef GET_GLOBALISEL_PREDICATES_DECL
58 #define GET_GLOBALISEL_TEMPORARIES_DECL
59 #include "MipsGenGlobalISel.inc"
60 #undef GET_GLOBALISEL_TEMPORARIES_DECL
63 } // end anonymous namespace
65 #define GET_GLOBALISEL_IMPL
66 #include "MipsGenGlobalISel.inc"
67 #undef GET_GLOBALISEL_IMPL
// Constructor: caches the instruction/register info from the subtarget and
// initializes the TableGen-generated predicates and temporaries.
// NOTE(review): the constructor's (empty) body braces are not visible in
// this gappy listing.
69 MipsInstructionSelector::MipsInstructionSelector(
70 const MipsTargetMachine &TM, const MipsSubtarget &STI,
71 const MipsRegisterBankInfo &RBI)
72 : InstructionSelector(), TM(TM), STI(STI), TII(*STI.getInstrInfo()),
73 TRI(*STI.getRegisterInfo()), RBI(RBI),
// Generated member initializers for predicates/temporaries declared above.
75 #define GET_GLOBALISEL_PREDICATES_INIT
76 #include "MipsGenGlobalISel.inc"
77 #undef GET_GLOBALISEL_PREDICATES_INIT
78 #define GET_GLOBALISEL_TEMPORARIES_INIT
79 #include "MipsGenGlobalISel.inc"
80 #undef GET_GLOBALISEL_TEMPORARIES_INIT
// Constrains the destination of a copy-like instruction to a concrete MIPS
// register class chosen from its register bank and size, via
// constrainGenericRegister. Physical destinations are handled by the early
// check below (its taken-branch body is elided in this listing).
// NOTE(review): several lines (returns, closing braces, the 32-bit FPR size
// check) are missing from this listing; comments describe only visible code.
84 bool MipsInstructionSelector::selectCopy(MachineInstr &I,
85 MachineRegisterInfo &MRI) const {
86 Register DstReg = I.getOperand(0).getReg();
87 if (TargetRegisterInfo::isPhysicalRegister(DstReg))
90 const RegisterBank *RegBank = RBI.getRegBank(DstReg, MRI, TRI);
91 const unsigned DstSize = MRI.getType(DstReg).getSizeInBits();
// Default to the 32-bit GPR class; refine for the FPR bank below.
93 const TargetRegisterClass *RC = &Mips::GPR32RegClass;
94 if (RegBank->getID() == Mips::FPRBRegBankID) {
// (condition line elided) presumably DstSize == 32 — TODO confirm.
96 RC = &Mips::FGR32RegClass;
97 else if (DstSize == 64)
// 64-bit FP value: class depends on whether the FPU is in 64-bit mode.
98 RC = STI.isFP64bit() ? &Mips::FGR64RegClass : &Mips::AFGR64RegClass;
100 llvm_unreachable("Unsupported destination size");
102 if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
103 LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
// Maps (operand size in bits, register bank) to a MIPS register class:
// GPR bank -> GPR32; FPR bank -> FGR32 / FGR64 / AFGR64 depending on size
// and FPU mode. Unreachable for any other bank.
// NOTE(review): the FPR-bank size condition line is elided in this listing;
// from the ternary structure it presumably tests OpSize == 32 — TODO confirm.
110 const TargetRegisterClass *MipsInstructionSelector::getRegClassForTypeOnBank(
111 unsigned OpSize, const RegisterBank &RB,
112 const RegisterBankInfo &RBI) const {
113 if (RB.getID() == Mips::GPRBRegBankID)
114 return &Mips::GPR32RegClass;
116 if (RB.getID() == Mips::FPRBRegBankID)
118 ? &Mips::FGR32RegClass
// 64-bit FP: FGR64 when r6 or 64-bit FPU, otherwise the paired AFGR64 class.
119 : STI.hasMips32r6() || STI.isFP64bit() ? &Mips::FGR64RegClass
120 : &Mips::AFGR64RegClass;
122 llvm_unreachable("getRegClassForTypeOnBank can't find register class.");
// Materializes a 32-bit immediate into DestReg using the cheapest sequence:
//   - ORi  $zero, lo16   when the high 16 bits are zero (zero-extends),
//   - LUi  hi16          when the low 16 bits are zero,
//   - ADDiu $zero, lo16  when the value fits in a signed 16-bit immediate,
//   - otherwise LUi hi16 into a temp followed by ORi temp, lo16.
// Returns the result of constraining the emitted instruction(s).
// NOTE(review): closing braces / the final return are elided in this listing.
126 bool MipsInstructionSelector::materialize32BitImm(Register DestReg, APInt Imm,
127 MachineIRBuilder &B) const {
128 assert(Imm.getBitWidth() == 32 && "Unsupported immediate size.");
129 // Ori zero extends immediate. Used for values with zeros in high 16 bits.
130 if (Imm.getHiBits(16).isNullValue()) {
131 MachineInstr *Inst = B.buildInstr(Mips::ORi, {DestReg}, {Register(Mips::ZERO)})
132 .addImm(Imm.getLoBits(16).getLimitedValue());
133 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
135 // Lui places immediate in high 16 bits and sets low 16 bits to zero.
136 if (Imm.getLoBits(16).isNullValue()) {
137 MachineInstr *Inst = B.buildInstr(Mips::LUi, {DestReg}, {})
138 .addImm(Imm.getHiBits(16).getLimitedValue());
139 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
141 // ADDiu sign extends immediate. Used for values with 1s in high 17 bits.
142 if (Imm.isSignedIntN(16)) {
143 MachineInstr *Inst = B.buildInstr(Mips::ADDiu, {DestReg}, {Register(Mips::ZERO)})
144 .addImm(Imm.getLoBits(16).getLimitedValue());
145 return constrainSelectedInstRegOperands(*Inst, TII, TRI, RBI);
147 // Values that cannot be materialized with single immediate instruction.
// General case: LUi into a fresh GPR32 virtual register, then ORi the low
// half into DestReg; both instructions are constrained below.
148 Register LUiReg = B.getMRI()->createVirtualRegister(&Mips::GPR32RegClass);
149 MachineInstr *LUi = B.buildInstr(Mips::LUi, {LUiReg}, {})
150 .addImm(Imm.getHiBits(16).getLimitedValue());
151 MachineInstr *ORi = B.buildInstr(Mips::ORi, {DestReg}, {LUiReg})
152 .addImm(Imm.getLoBits(16).getLimitedValue());
153 if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
155 if (!constrainSelectedInstRegOperands(*ORi, TII, TRI, RBI))
// Maps a generic load/store opcode plus memory size and destination register
// bank to a concrete MIPS opcode. GPR bank handles 1/2/4-byte accesses
// (extending loads pick signed LH/LB vs zero-extending LHu/LBu); FPR bank
// handles 4-byte (SWC1/LWC1) and 8-byte (SDC1/LDC1, or the *64 variants).
// NOTE(review): most switch case labels and returns are elided in this
// listing; comments describe only the visible arms.
160 /// Returning Opc indicates that we failed to select MIPS instruction opcode.
161 static unsigned selectLoadStoreOpCode(unsigned Opc, unsigned MemSizeInBytes,
162 unsigned RegBank, bool isFP64) {
163 bool isStore = Opc == TargetOpcode::G_STORE;
164 if (RegBank == Mips::GPRBRegBankID) {
166 switch (MemSizeInBytes) {
177 // Unspecified extending load is selected into zeroExtending load.
178 switch (MemSizeInBytes) {
// 2-byte load: LH for sign-extending, LHu otherwise (incl. plain G_LOAD).
182 return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LH : Mips::LHu;
// 1-byte load: LB for sign-extending, LBu otherwise.
184 return Opc == TargetOpcode::G_SEXTLOAD ? Mips::LB : Mips::LBu;
190 if (RegBank == Mips::FPRBRegBankID) {
191 switch (MemSizeInBytes) {
// 4-byte FP access: single-precision coprocessor-1 load/store.
193 return isStore ? Mips::SWC1 : Mips::LWC1;
// 8-byte FP access: the *64 variants are gated on isFP64 (condition line
// elided) — TODO confirm.
196 return isStore ? Mips::SDC164 : Mips::LDC164;
198 return isStore ? Mips::SDC1 : Mips::LDC1;
// Top-level selection routine. Non-generic opcodes are routed to selectCopy;
// G_MUL gets a hand-written early path; everything else first tries the
// TableGen-generated selectImpl and then falls through to a manual switch
// over the remaining generic opcodes.
// NOTE(review): this listing is heavily gappy — many case labels, returns
// and closing braces are missing. Comments on regions whose case label is
// not visible are marked as assumptions.
206 bool MipsInstructionSelector::select(MachineInstr &I,
207 CodeGenCoverage &CoverageInfo) const {
209 MachineBasicBlock &MBB = *I.getParent();
210 MachineFunction &MF = *MBB.getParent();
211 MachineRegisterInfo &MRI = MF.getRegInfo();
// Already-selected / target instructions only need their register operands
// constrained; delegate to selectCopy.
213 if (!isPreISelGenericOpcode(I.getOpcode())) {
215 return selectCopy(I, MRI);
// Hand-select G_MUL into MUL; the extra implicit defs (operands 3 and 4)
// are marked dead below.
220 if (I.getOpcode() == Mips::G_MUL) {
221 MachineInstr *Mul = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MUL))
222 .add(I.getOperand(0))
223 .add(I.getOperand(1))
224 .add(I.getOperand(2));
225 if (!constrainSelectedInstRegOperands(*Mul, TII, TRI, RBI))
227 Mul->getOperand(3).setIsDead(true);
228 Mul->getOperand(4).setIsDead(true);
// Let the generated pattern matcher have first crack at the instruction.
234 if (selectImpl(I, CoverageInfo))
237 MachineInstr *MI = nullptr;
238 using namespace TargetOpcode;
240 switch (I.getOpcode()) {
// (case label elided) presumably G_UMULH: PseudoMULTu into an ACC64 pair,
// then PseudoMFHI to read the high half — TODO confirm.
242 Register PseudoMULTuReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
243 MachineInstr *PseudoMULTu, *PseudoMove;
245 PseudoMULTu = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMULTu))
246 .addDef(PseudoMULTuReg)
247 .add(I.getOperand(1))
248 .add(I.getOperand(2));
249 if (!constrainSelectedInstRegOperands(*PseudoMULTu, TII, TRI, RBI))
252 PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::PseudoMFHI))
253 .addDef(I.getOperand(0).getReg())
254 .addUse(PseudoMULTuReg)
255 if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
// (case label elided) a three-operand add selected as ADDu — presumably
// G_ADD and/or pointer add — TODO confirm.
262 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDu))
263 .add(I.getOperand(0))
264 .add(I.getOperand(1))
265 .add(I.getOperand(2));
268 case G_FRAME_INDEX: {
// Frame index materialized as ADDiu (the immediate operand line is elided).
269 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
270 .add(I.getOperand(0))
271 .add(I.getOperand(1))
// (case label elided) conditional branch selected as BNE against a register
// (middle operand line elided) — presumably G_BRCOND — TODO confirm.
276 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::BNE))
277 .add(I.getOperand(0))
279 .add(I.getOperand(1));
// (case label elided) presumably G_PHI: retarget to PHI and constrain the
// def to the class implied by its bank/size — TODO confirm.
283 const Register DestReg = I.getOperand(0).getReg();
284 const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
286 const TargetRegisterClass *DefRC = nullptr;
287 if (TargetRegisterInfo::isPhysicalRegister(DestReg))
288 DefRC = TRI.getRegClass(DestReg);
290 DefRC = getRegClassForTypeOnBank(OpSize,
291 *RBI.getRegBank(DestReg, MRI, TRI), RBI);
293 I.setDesc(TII.get(TargetOpcode::PHI));
294 return RBI.constrainGenericRegister(DestReg, *DefRC, MRI);
// (case labels elided) load/store family: validate bank/size combinations,
// then map to a concrete opcode via selectLoadStoreOpCode.
300 const Register DestReg = I.getOperand(0).getReg();
301 const unsigned DestRegBank = RBI.getRegBank(DestReg, MRI, TRI)->getID();
302 const unsigned OpSize = MRI.getType(DestReg).getSizeInBits();
303 const unsigned OpMemSizeInBytes = (*I.memoperands_begin())->getSize();
// GPR bank only handles 32-bit values; FPR bank 32- or 64-bit.
305 if (DestRegBank == Mips::GPRBRegBankID && OpSize != 32)
308 if (DestRegBank == Mips::FPRBRegBankID && OpSize != 32 && OpSize != 64)
311 const unsigned NewOpc = selectLoadStoreOpCode(
312 I.getOpcode(), OpMemSizeInBytes, DestRegBank, STI.isFP64bit());
// selectLoadStoreOpCode returns the input opcode on failure.
313 if (NewOpc == I.getOpcode())
316 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(NewOpc))
317 .add(I.getOperand(0))
318 .add(I.getOperand(1))
320 .addMemOperand(*I.memoperands_begin());
// (case labels elided) div/rem family (G_SDIV/G_UDIV/G_SREM/G_UREM per the
// opcode tests below): PseudoSDIV/PseudoUDIV into an ACC64 pair, then
// PseudoMFLO (quotient) or PseudoMFHI (remainder).
327 Register HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass);
328 bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV;
329 bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV;
331 MachineInstr *PseudoDIV, *PseudoMove;
332 PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(),
333 TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV))
335 .add(I.getOperand(1))
336 .add(I.getOperand(2));
337 if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI))
340 PseudoMove = BuildMI(MBB, I, I.getDebugLoc(),
341 TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI))
342 .addDef(I.getOperand(0).getReg())
344 if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI))
// (case label elided) presumably G_SELECT: MOVN_I_I with true/false
// operands swapped relative to the generic instruction — TODO confirm.
351 // Handle operands with pointer type.
352 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I))
353 .add(I.getOperand(0))
354 .add(I.getOperand(2))
355 .add(I.getOperand(1))
356 .add(I.getOperand(3));
// (case label elided) presumably G_CONSTANT: materialize the CImm value.
360 MachineIRBuilder B(I);
361 if (!materialize32BitImm(I.getOperand(0).getReg(),
362 I.getOperand(1).getCImm()->getValue(), B))
// (case label elided) presumably G_FCONSTANT: bitcast the FP immediate to
// an integer, materialize it in GPR(s), then move into FPR(s).
369 const APFloat &FPimm = I.getOperand(1).getFPImm()->getValueAPF();
370 APInt APImm = FPimm.bitcastToAPInt();
371 unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
// 32-bit path (size test line elided): one GPR then MTC1 into the FPR.
374 Register GPRReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
375 MachineIRBuilder B(I);
376 if (!materialize32BitImm(GPRReg, APImm, B))
379 MachineInstrBuilder MTC1 =
380 B.buildInstr(Mips::MTC1, {I.getOperand(0).getReg()}, {GPRReg});
381 if (!MTC1.constrainAllUses(TII, TRI, RBI))
// 64-bit path: materialize both halves and combine with BuildPairF64.
385 Register GPRRegHigh = MRI.createVirtualRegister(&Mips::GPR32RegClass);
386 Register GPRRegLow = MRI.createVirtualRegister(&Mips::GPR32RegClass);
387 MachineIRBuilder B(I);
388 if (!materialize32BitImm(GPRRegHigh, APImm.getHiBits(32).trunc(32), B))
390 if (!materialize32BitImm(GPRRegLow, APImm.getLoBits(32).trunc(32), B))
393 MachineInstrBuilder PairF64 = B.buildInstr(
394 STI.isFP64bit() ? Mips::BuildPairF64_64 : Mips::BuildPairF64,
395 {I.getOperand(0).getReg()}, {GPRRegLow, GPRRegHigh});
396 if (!PairF64.constrainAllUses(TII, TRI, RBI))
// (case label elided) presumably G_FABS: pick the FABS variant by size and
// FPU mode — TODO confirm.
404 unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
405 unsigned FABSOpcode =
406 Size == 32 ? Mips::FABS_S
407 : STI.isFP64bit() ? Mips::FABS_D64 : Mips::FABS_D32;
408 MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FABSOpcode))
409 .add(I.getOperand(0))
410 .add(I.getOperand(1));
// G_FPTOSI (per the assert text): truncate in an FPR then move to a GPR.
414 unsigned FromSize = MRI.getType(I.getOperand(1).getReg()).getSizeInBits();
415 unsigned ToSize = MRI.getType(I.getOperand(0).getReg()).getSizeInBits();
417 assert((ToSize == 32) && "Unsupported integer size for G_FPTOSI");
418 assert((FromSize == 32 || FromSize == 64) &&
419 "Unsupported floating point size for G_FPTOSI");
// Opcode declaration and size test lines elided here.
423 Opcode = Mips::TRUNC_W_S;
425 Opcode = STI.isFP64bit() ? Mips::TRUNC_W_D64 : Mips::TRUNC_W_D32;
426 unsigned ResultInFPR = MRI.createVirtualRegister(&Mips::FGR32RegClass);
427 MachineInstr *Trunc = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Opcode))
429 .addUse(I.getOperand(1).getReg());
430 if (!constrainSelectedInstRegOperands(*Trunc, TII, TRI, RBI))
433 MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MFC1))
434 .addDef(I.getOperand(0).getReg())
435 .addUse(ResultInFPR);
436 if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
442 case G_GLOBAL_VALUE: {
443 const llvm::GlobalValue *GVal = I.getOperand(1).getGlobal();
// PIC: load the address from the GOT via the GlobalISel global base reg.
444 if (MF.getTarget().isPositionIndependent()) {
445 MachineInstr *LWGOT = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LW))
446 .addDef(I.getOperand(0).getReg())
447 .addReg(MF.getInfo<MipsFunctionInfo>()
448 ->getGlobalBaseRegForGlobalISel())
449 .addGlobalAddress(GVal);
450 // Global Values that don't have local linkage are handled differently
451 // when they are part of call sequence. MipsCallLowering::lowerCall
452 // creates G_GLOBAL_VALUE instruction as part of call sequence and adds
453 // MO_GOT_CALL flag when Callee doesn't have local linkage.
454 if (I.getOperand(1).getTargetFlags() == MipsII::MO_GOT_CALL)
455 LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT_CALL);
457 LWGOT->getOperand(2).setTargetFlags(MipsII::MO_GOT);
// Mark the GOT access as a 4-byte load for the scheduler/AA.
458 LWGOT->addMemOperand(
459 MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
460 MachineMemOperand::MOLoad, 4, 4));
461 if (!constrainSelectedInstRegOperands(*LWGOT, TII, TRI, RBI))
// Locally-linked globals additionally need an ADDiu of the %lo part on top
// of the GOT entry; redirect LWGOT's def to a temp first.
464 if (GVal->hasLocalLinkage()) {
465 Register LWGOTDef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
466 LWGOT->getOperand(0).setReg(LWGOTDef);
468 MachineInstr *ADDiu =
469 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
470 .addDef(I.getOperand(0).getReg())
472 .addGlobalAddress(GVal);
473 ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
474 if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
// Non-PIC: classic LUi %hi / ADDiu %lo pair.
478 Register LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
480 MachineInstr *LUi = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::LUi))
482 .addGlobalAddress(GVal);
483 LUi->getOperand(1).setTargetFlags(MipsII::MO_ABS_HI);
484 if (!constrainSelectedInstRegOperands(*LUi, TII, TRI, RBI))
487 MachineInstr *ADDiu =
488 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
489 .addDef(I.getOperand(0).getReg())
491 .addGlobalAddress(GVal);
492 ADDiu->getOperand(2).setTargetFlags(MipsII::MO_ABS_LO);
493 if (!constrainSelectedInstRegOperands(*ADDiu, TII, TRI, RBI))
// (case label elided) presumably G_ICMP: lower each predicate to one or two
// SLT-family instructions described by this small local Instr record.
502 Register Def, LHS, RHS;
503 Instr(unsigned Opcode, Register Def, Register LHS, Register RHS)
504 : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){};
// SLTiu and XORi take an immediate as their second source.
506 bool hasImm() const {
507 if (Opcode == Mips::SLTiu || Opcode == Mips::XORi)
513 SmallVector<struct Instr, 2> Instructions;
514 Register ICMPReg = I.getOperand(0).getReg();
515 Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
516 Register LHS = I.getOperand(2).getReg();
517 Register RHS = I.getOperand(3).getReg();
518 CmpInst::Predicate Cond =
519 static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate());
// Each predicate becomes at most two instructions; comments on each case
// give the algebraic identity being used.
522 case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1
523 Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
524 Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1);
526 case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS)
527 Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS);
528 Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp);
530 case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS
531 Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS);
533 case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS)
534 Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS);
535 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
537 case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS
538 Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS);
540 case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS)
541 Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS);
542 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
544 case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS
545 Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS);
547 case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS)
548 Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS);
549 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
551 case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS
552 Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS);
554 case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS)
555 Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS);
556 Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1);
// Emit and constrain the planned instructions, choosing addImm vs addUse
// based on the opcode's immediate form.
562 MachineIRBuilder B(I);
563 for (const struct Instr &Instruction : Instructions) {
564 MachineInstrBuilder MIB = B.buildInstr(
565 Instruction.Opcode, {Instruction.Def}, {Instruction.LHS});
567 if (Instruction.hasImm())
568 MIB.addImm(Instruction.RHS);
570 MIB.addUse(Instruction.RHS);
572 if (!MIB.constrainAllUses(TII, TRI, RBI))
// (case label elided) presumably G_FCMP: map the IR predicate onto one of
// MIPS's FP condition codes, tracking whether the result must be inverted.
580 unsigned MipsFCMPCondCode;
581 bool isLogicallyNegated;
582 switch (CmpInst::Predicate Cond = static_cast<CmpInst::Predicate>(
583 I.getOperand(1).getPredicate())) {
584 case CmpInst::FCMP_UNO: // Unordered
585 case CmpInst::FCMP_ORD: // Ordered (OR)
586 MipsFCMPCondCode = Mips::FCOND_UN;
587 isLogicallyNegated = Cond != CmpInst::FCMP_UNO;
589 case CmpInst::FCMP_OEQ: // Equal
590 case CmpInst::FCMP_UNE: // Not Equal (NEQ)
591 MipsFCMPCondCode = Mips::FCOND_OEQ;
592 isLogicallyNegated = Cond != CmpInst::FCMP_OEQ;
594 case CmpInst::FCMP_UEQ: // Unordered or Equal
595 case CmpInst::FCMP_ONE: // Ordered or Greater Than or Less Than (OGL)
596 MipsFCMPCondCode = Mips::FCOND_UEQ;
597 isLogicallyNegated = Cond != CmpInst::FCMP_UEQ;
599 case CmpInst::FCMP_OLT: // Ordered or Less Than
600 case CmpInst::FCMP_UGE: // Unordered or Greater Than or Equal (UGE)
601 MipsFCMPCondCode = Mips::FCOND_OLT;
602 isLogicallyNegated = Cond != CmpInst::FCMP_OLT;
604 case CmpInst::FCMP_ULT: // Unordered or Less Than
605 case CmpInst::FCMP_OGE: // Ordered or Greater Than or Equal (OGE)
606 MipsFCMPCondCode = Mips::FCOND_ULT;
607 isLogicallyNegated = Cond != CmpInst::FCMP_ULT;
609 case CmpInst::FCMP_OLE: // Ordered or Less Than or Equal
610 case CmpInst::FCMP_UGT: // Unordered or Greater Than (UGT)
611 MipsFCMPCondCode = Mips::FCOND_OLE;
612 isLogicallyNegated = Cond != CmpInst::FCMP_OLE;
614 case CmpInst::FCMP_ULE: // Unordered or Less Than or Equal
615 case CmpInst::FCMP_OGT: // Ordered or Greater Than (OGT)
616 MipsFCMPCondCode = Mips::FCOND_ULE;
617 isLogicallyNegated = Cond != CmpInst::FCMP_ULE;
623 // Default compare result in gpr register will be `true`.
624 // We will move `false` (MIPS::Zero) to gpr result when fcmp gives false
625 // using MOVF_I. When orignal predicate (Cond) is logically negated
626 // MipsFCMPCondCode, result is inverted i.e. MOVT_I is used.
627 unsigned MoveOpcode = isLogicallyNegated ? Mips::MOVT_I : Mips::MOVF_I;
// Preload "true" (remaining ADDiu operand lines elided).
629 unsigned TrueInReg = MRI.createVirtualRegister(&Mips::GPR32RegClass);
630 BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::ADDiu))
// FCMP variant is chosen by operand size and FPU mode.
635 unsigned Size = MRI.getType(I.getOperand(2).getReg()).getSizeInBits();
636 unsigned FCMPOpcode =
637 Size == 32 ? Mips::FCMP_S32
638 : STI.isFP64bit() ? Mips::FCMP_D64 : Mips::FCMP_D32;
639 MachineInstr *FCMP = BuildMI(MBB, I, I.getDebugLoc(), TII.get(FCMPOpcode))
640 .addUse(I.getOperand(2).getReg())
641 .addUse(I.getOperand(3).getReg())
642 .addImm(MipsFCMPCondCode);
643 if (!constrainSelectedInstRegOperands(*FCMP, TII, TRI, RBI))
// Conditional move reads the FP condition flag (trailing operand lines
// elided).
646 MachineInstr *Move = BuildMI(MBB, I, I.getDebugLoc(), TII.get(MoveOpcode))
647 .addDef(I.getOperand(0).getReg())
651 if (!constrainSelectedInstRegOperands(*Move, TII, TRI, RBI))
// Common exit for the cases that built MI above.
662 return constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
// Factory entry point (in namespace llvm; the opening "namespace llvm {"
// line and the function's closing brace are not visible in this listing).
// Caller owns the returned selector.
666 InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &TM,
667 MipsSubtarget &Subtarget,
668 MipsRegisterBankInfo &RBI) {
669 return new MipsInstructionSelector(TM, Subtarget, RBI);
671 } // end namespace llvm