//===-- MipsFastISel.cpp - Mips FastISel implementation -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \brief This file defines the MIPS-specific support for the FastISel class.
/// Some of the target-specific code is generated by tablegen in the file
/// MipsGenFastISel.inc, which is #included here.
///
//===----------------------------------------------------------------------===//
17 #include "MCTargetDesc/MipsABIInfo.h"
18 #include "MCTargetDesc/MipsBaseInfo.h"
19 #include "MipsCCState.h"
20 #include "MipsInstrInfo.h"
21 #include "MipsISelLowering.h"
22 #include "MipsMachineFunction.h"
23 #include "MipsSubtarget.h"
24 #include "MipsTargetMachine.h"
25 #include "llvm/ADT/APInt.h"
26 #include "llvm/ADT/ArrayRef.h"
27 #include "llvm/ADT/DenseMap.h"
28 #include "llvm/ADT/SmallVector.h"
29 #include "llvm/Analysis/TargetLibraryInfo.h"
30 #include "llvm/CodeGen/CallingConvLower.h"
31 #include "llvm/CodeGen/FastISel.h"
32 #include "llvm/CodeGen/FunctionLoweringInfo.h"
33 #include "llvm/CodeGen/ISDOpcodes.h"
34 #include "llvm/CodeGen/MachineBasicBlock.h"
35 #include "llvm/CodeGen/MachineFrameInfo.h"
36 #include "llvm/CodeGen/MachineInstrBuilder.h"
37 #include "llvm/CodeGen/MachineMemOperand.h"
38 #include "llvm/CodeGen/MachineRegisterInfo.h"
39 #include "llvm/CodeGen/MachineValueType.h"
40 #include "llvm/CodeGen/ValueTypes.h"
41 #include "llvm/IR/Attributes.h"
42 #include "llvm/IR/CallingConv.h"
43 #include "llvm/IR/Constant.h"
44 #include "llvm/IR/Constants.h"
45 #include "llvm/IR/DataLayout.h"
46 #include "llvm/IR/Function.h"
47 #include "llvm/IR/GetElementPtrTypeIterator.h"
48 #include "llvm/IR/GlobalValue.h"
49 #include "llvm/IR/GlobalVariable.h"
50 #include "llvm/IR/InstrTypes.h"
51 #include "llvm/IR/Instruction.h"
52 #include "llvm/IR/Instructions.h"
53 #include "llvm/IR/IntrinsicInst.h"
54 #include "llvm/IR/Operator.h"
55 #include "llvm/IR/Type.h"
56 #include "llvm/IR/User.h"
57 #include "llvm/IR/Value.h"
58 #include "llvm/MC/MCInstrDesc.h"
59 #include "llvm/MC/MCRegisterInfo.h"
60 #include "llvm/MC/MCSymbol.h"
61 #include "llvm/Support/Casting.h"
62 #include "llvm/Support/Compiler.h"
63 #include "llvm/Support/Debug.h"
64 #include "llvm/Support/ErrorHandling.h"
65 #include "llvm/Support/MathExtras.h"
66 #include "llvm/Support/raw_ostream.h"
67 #include "llvm/Target/TargetInstrInfo.h"
68 #include "llvm/Target/TargetLowering.h"
74 #define DEBUG_TYPE "mips-fastisel"
class MipsFastISel final : public FastISel {

  // All possible address modes.
  typedef enum { RegBase, FrameIndexBase } BaseKind;

  BaseKind Kind = RegBase;
  const GlobalValue *GV = nullptr;

  // Innocuous defaults for our address.
  Address() { Base.Reg = 0; }

  void setKind(BaseKind K) { Kind = K; }
  BaseKind getKind() const { return Kind; }
  bool isRegBase() const { return Kind == RegBase; }
  bool isFIBase() const { return Kind == FrameIndexBase; }

  void setReg(unsigned Reg) {
    assert(isRegBase() && "Invalid base register access!");

  unsigned getReg() const {
    assert(isRegBase() && "Invalid base register access!");

  void setFI(unsigned FI) {
    assert(isFIBase() && "Invalid base frame index access!");

  unsigned getFI() const {
    assert(isFIBase() && "Invalid base frame index access!");

  void setOffset(int64_t Offset_) { Offset = Offset_; }
  int64_t getOffset() const { return Offset; }
  void setGlobalValue(const GlobalValue *G) { GV = G; }
  const GlobalValue *getGlobalValue() { return GV; }

  /// Subtarget - Keep a pointer to the MipsSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const TargetMachine &TM;
  const MipsSubtarget *Subtarget;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  MipsFunctionInfo *MFI;

  // Convenience variables to avoid some queries.
  LLVMContext *Context;

  bool fastLowerArguments() override;
  bool fastLowerCall(CallLoweringInfo &CLI) override;
  bool fastLowerIntrinsicCall(const IntrinsicInst *II) override;
  bool UnsupportedFPMode; // To allow fast-isel to proceed and just not handle
                          // floating point but not reject doing fast-isel in
                          // other situations.
  // Selection routines.
  bool selectLogicalOp(const Instruction *I);
  bool selectLoad(const Instruction *I);
  bool selectStore(const Instruction *I);
  bool selectBranch(const Instruction *I);
  bool selectSelect(const Instruction *I);
  bool selectCmp(const Instruction *I);
  bool selectFPExt(const Instruction *I);
  bool selectFPTrunc(const Instruction *I);
  bool selectFPToInt(const Instruction *I, bool IsSigned);
  bool selectRet(const Instruction *I);
  bool selectTrunc(const Instruction *I);
  bool selectIntExt(const Instruction *I);
  bool selectShift(const Instruction *I);
  bool selectDivRem(const Instruction *I, unsigned ISDOpcode);

  // Utility helper routines.
  bool isTypeLegal(Type *Ty, MVT &VT);
  bool isTypeSupported(Type *Ty, MVT &VT);
  bool isLoadTypeLegal(Type *Ty, MVT &VT);
  bool computeAddress(const Value *Obj, Address &Addr);
  bool computeCallAddress(const Value *V, Address &Addr);
  void simplifyAddress(Address &Addr);

  // Emit helper routines.
  bool emitCmp(unsigned DestReg, const CmpInst *CI);
  bool emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                unsigned Alignment = 0);
  bool emitStore(MVT VT, unsigned SrcReg, Address Addr,
                 MachineMemOperand *MMO = nullptr);
  bool emitStore(MVT VT, unsigned SrcReg, Address &Addr,
                 unsigned Alignment = 0);
  unsigned emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
  bool emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg,
  bool emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
  bool emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, unsigned DestReg);
  bool emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
  bool emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
  unsigned getRegEnsuringSimpleIntegerWidening(const Value *, bool IsUnsigned);
  unsigned emitLogicalOp(unsigned ISDOpc, MVT RetVT, const Value *LHS,
  unsigned materializeFP(const ConstantFP *CFP, MVT VT);
  unsigned materializeGV(const GlobalValue *GV, MVT VT);
  unsigned materializeInt(const Constant *C, MVT VT);
  unsigned materialize32BitInt(int64_t Imm, const TargetRegisterClass *RC);
  unsigned materializeExternalCallSym(MCSymbol *Sym);
  MachineInstrBuilder emitInst(unsigned Opc) {
    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc));

  MachineInstrBuilder emitInst(unsigned Opc, unsigned DstReg) {
    return BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc),

  MachineInstrBuilder emitInstStore(unsigned Opc, unsigned SrcReg,
                                    unsigned MemReg, int64_t MemOffset) {
    return emitInst(Opc).addReg(SrcReg).addReg(MemReg).addImm(MemOffset);

  MachineInstrBuilder emitInstLoad(unsigned Opc, unsigned DstReg,
                                   unsigned MemReg, int64_t MemOffset) {
    return emitInst(Opc, DstReg).addReg(MemReg).addImm(MemOffset);

  unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                           const TargetRegisterClass *RC,
                           unsigned Op0, bool Op0IsKill,
                           unsigned Op1, bool Op1IsKill);

  // for some reason, this default is not generated by tablegen
  // so we explicitly generate it here.
  unsigned fastEmitInst_riir(uint64_t inst, const TargetRegisterClass *RC,
                             unsigned Op0, bool Op0IsKill, uint64_t imm1,
                             uint64_t imm2, unsigned Op3, bool Op3IsKill) {

  // Call handling routines.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC) const;
  bool processCallArgs(CallLoweringInfo &CLI, SmallVectorImpl<MVT> &ArgVTs,
  bool finishCall(CallLoweringInfo &CLI, MVT RetVT, unsigned NumBytes);

  const MipsABIInfo &getABI() const {
    return static_cast<const MipsTargetMachine &>(TM).getABI();

  // Backend specific FastISel code.
  explicit MipsFastISel(FunctionLoweringInfo &funcInfo,
                        const TargetLibraryInfo *libInfo)
      : FastISel(funcInfo, libInfo), TM(funcInfo.MF->getTarget()),
        Subtarget(&funcInfo.MF->getSubtarget<MipsSubtarget>()),
        TII(*Subtarget->getInstrInfo()), TLI(*Subtarget->getTargetLowering()) {
    MFI = funcInfo.MF->getInfo<MipsFunctionInfo>();
    Context = &funcInfo.Fn->getContext();
    UnsupportedFPMode = Subtarget->isFP64bit() || Subtarget->useSoftFloat();

  unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
  unsigned fastMaterializeConstant(const Constant *C) override;
  bool fastSelectInstruction(const Instruction *I) override;

#include "MipsGenFastISel.inc"

} // end anonymous namespace
static bool CC_Mips(unsigned ValNo, MVT ValVT, MVT LocVT,
                    CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
                    CCState &State) LLVM_ATTRIBUTE_UNUSED;

static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  llvm_unreachable("should not be called");

static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT, MVT LocVT,
                            CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  llvm_unreachable("should not be called");

#include "MipsGenCallingConv.inc"

CCAssignFn *MipsFastISel::CCAssignFnForCall(CallingConv::ID CC) const {

unsigned MipsFastISel::emitLogicalOp(unsigned ISDOpc, MVT RetVT,
                                     const Value *LHS, const Value *RHS) {
  // Canonicalize immediates to the RHS first.
  if (isa<ConstantInt>(LHS) && !isa<ConstantInt>(RHS))
  llvm_unreachable("unexpected opcode");
  unsigned LHSReg = getRegForValue(LHS);
  if (const auto *C = dyn_cast<ConstantInt>(RHS))
    RHSReg = materializeInt(C, MVT::i32);
    RHSReg = getRegForValue(RHS);
  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
  emitInst(Opc, ResultReg).addReg(LHSReg).addReg(RHSReg);

unsigned MipsFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  assert(TLI.getValueType(DL, AI->getType(), true) == MVT::i32 &&
         "Alloca should always return a pointer.");
  DenseMap<const AllocaInst *, int>::iterator SI =
      FuncInfo.StaticAllocaMap.find(AI);
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::LEA_ADDiu),
        .addFrameIndex(SI->second)

unsigned MipsFastISel::materializeInt(const Constant *C, MVT VT) {
  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  const ConstantInt *CI = cast<ConstantInt>(C);
  return materialize32BitInt(CI->getZExtValue(), RC);

unsigned MipsFastISel::materialize32BitInt(int64_t Imm,
                                           const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);

  if (isInt<16>(Imm)) {
    unsigned Opc = Mips::ADDiu;
    emitInst(Opc, ResultReg).addReg(Mips::ZERO).addImm(Imm);
  } else if (isUInt<16>(Imm)) {
    emitInst(Mips::ORi, ResultReg).addReg(Mips::ZERO).addImm(Imm);
  unsigned Lo = Imm & 0xFFFF;
  unsigned Hi = (Imm >> 16) & 0xFFFF;
  // Both Lo and Hi have nonzero bits.
  unsigned TmpReg = createResultReg(RC);
  emitInst(Mips::LUi, TmpReg).addImm(Hi);
  emitInst(Mips::ORi, ResultReg).addReg(TmpReg).addImm(Lo);
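  // The low half is zero here, so a single LUi materializes the constant.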
  emitInst(Mips::LUi, ResultReg).addImm(Hi);

unsigned MipsFastISel::materializeFP(const ConstantFP *CFP, MVT VT) {
  if (UnsupportedFPMode)
  int64_t Imm = CFP->getValueAPF().bitcastToAPInt().getZExtValue();
  if (VT == MVT::f32) {
    const TargetRegisterClass *RC = &Mips::FGR32RegClass;
    unsigned DestReg = createResultReg(RC);
    unsigned TempReg = materialize32BitInt(Imm, &Mips::GPR32RegClass);
    emitInst(Mips::MTC1, DestReg).addReg(TempReg);
  } else if (VT == MVT::f64) {
    const TargetRegisterClass *RC = &Mips::AFGR64RegClass;
    unsigned DestReg = createResultReg(RC);
    unsigned TempReg1 = materialize32BitInt(Imm >> 32, &Mips::GPR32RegClass);
        materialize32BitInt(Imm & 0xFFFFFFFF, &Mips::GPR32RegClass);
    emitInst(Mips::BuildPairF64, DestReg).addReg(TempReg2).addReg(TempReg1);

unsigned MipsFastISel::materializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  unsigned DestReg = createResultReg(RC);
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  // TLS not supported at this time.
  emitInst(Mips::LW, DestReg)
      .addReg(MFI->getGlobalBaseReg())
      .addGlobalAddress(GV, 0, MipsII::MO_GOT);
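  // For symbols with local/internal linkage the GOT entry only gives the
  // high (page) part of the address, so add in the low 16 bits as well.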
  if ((GV->hasInternalLinkage() ||
       (GV->hasLocalLinkage() && !isa<Function>(GV)))) {
    unsigned TempReg = createResultReg(RC);
    emitInst(Mips::ADDiu, TempReg)
        .addGlobalAddress(GV, 0, MipsII::MO_ABS_LO);

unsigned MipsFastISel::materializeExternalCallSym(MCSymbol *Sym) {
  const TargetRegisterClass *RC = &Mips::GPR32RegClass;
  unsigned DestReg = createResultReg(RC);
  emitInst(Mips::LW, DestReg)
      .addReg(MFI->getGlobalBaseReg())
      .addSym(Sym, MipsII::MO_GOT);

// Materialize a constant into a register, and return the register
// number (or zero if we failed to handle it).
unsigned MipsFastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple())
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return (UnsupportedFPMode) ? 0 : materializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return materializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return materializeInt(C, VT);

bool MipsFastISel::computeAddress(const Value *Obj, Address &Addr) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
  case Instruction::BitCast:
    // Look through bitcasts.
    return computeAddress(U->getOperand(0), Addr);
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    int64_t TmpOffset = Addr.getOffset();
    // Iterate through the GEP folding the constants into offsets where
    gep_type_iterator GTI = gep_type_begin(U);
    for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e;
      const Value *Op = *i;
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
        uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
        if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
          // Constant-offset addressing.
          TmpOffset += CI->getSExtValue() * S;
        if (canFoldAddIntoGEP(U, Op)) {
          // A compatible add with a constant operand. Fold the constant.
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
          TmpOffset += CI->getSExtValue() * S;
          // Iterate on the other operand.
          Op = cast<AddOperator>(Op)->getOperand(0);
        goto unsupported_gep;
    // Try to grab the base operand now.
    Addr.setOffset(TmpOffset);
    if (computeAddress(U->getOperand(0), Addr))
    // We failed, restore everything and try the other options.
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst *, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.setKind(Address::FrameIndexBase);
      Addr.setFI(SI->second);
  Addr.setReg(getRegForValue(Obj));
  return Addr.getReg() != 0;

bool MipsFastISel::computeCallAddress(const Value *V, Address &Addr) {
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;

  if (const auto *I = dyn_cast<Instruction>(V)) {
    // Check if the value is defined in the same basic block. This information
    // is crucial to know whether or not folding an operand is valid.
    if (I->getParent() == FuncInfo.MBB->getBasicBlock()) {
      Opcode = I->getOpcode();
  } else if (const auto *C = dyn_cast<ConstantExpr>(V)) {
    Opcode = C->getOpcode();
  case Instruction::BitCast:
    // Look past bitcasts if its operand is in the same BB.
    return computeCallAddress(U->getOperand(0), Addr);
  case Instruction::IntToPtr:
    // Look past no-op inttoptrs if its operand is in the same BB.
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return computeCallAddress(U->getOperand(0), Addr);
  case Instruction::PtrToInt:
    // Look past no-op ptrtoints if its operand is in the same BB.
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return computeCallAddress(U->getOperand(0), Addr);

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    Addr.setGlobalValue(GV);

  // If all else fails, try to materialize the value in a register.
  if (!Addr.getGlobalValue()) {
    Addr.setReg(getRegForValue(V));
    return Addr.getReg() != 0;

bool MipsFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);
  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple())
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  return TLI.isTypeLegal(VT);

bool MipsFastISel::isTypeSupported(Type *Ty, MVT &VT) {
  if (Ty->isVectorTy())
  if (isTypeLegal(Ty, VT))
  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)

bool MipsFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT))
  // We will extend this in a later patch:
  // If this is a type that can be sign or zero-extended to a basic operation
  // go ahead and accept it now.
  if (VT == MVT::i8 || VT == MVT::i16)

// Because of how EmitCmp is called with fast-isel, you can
// end up with redundant "andi" instructions after the sequences emitted below.
// We should try and solve this issue in the future.
bool MipsFastISel::emitCmp(unsigned ResultReg, const CmpInst *CI) {
  const Value *Left = CI->getOperand(0), *Right = CI->getOperand(1);
  bool IsUnsigned = CI->isUnsigned();
  unsigned LeftReg = getRegEnsuringSimpleIntegerWidening(Left, IsUnsigned);
  unsigned RightReg = getRegEnsuringSimpleIntegerWidening(Right, IsUnsigned);
  CmpInst::Predicate P = CI->getPredicate();
  case CmpInst::ICMP_EQ: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::SLTiu, ResultReg).addReg(TempReg).addImm(1);
  case CmpInst::ICMP_NE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::XOR, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::SLTu, ResultReg).addReg(Mips::ZERO).addReg(TempReg);
  case CmpInst::ICMP_UGT:
    emitInst(Mips::SLTu, ResultReg).addReg(RightReg).addReg(LeftReg);
  case CmpInst::ICMP_ULT:
    emitInst(Mips::SLTu, ResultReg).addReg(LeftReg).addReg(RightReg);
  case CmpInst::ICMP_UGE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLTu, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
  case CmpInst::ICMP_ULE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLTu, TempReg).addReg(RightReg).addReg(LeftReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
  case CmpInst::ICMP_SGT:
    emitInst(Mips::SLT, ResultReg).addReg(RightReg).addReg(LeftReg);
  case CmpInst::ICMP_SLT:
    emitInst(Mips::SLT, ResultReg).addReg(LeftReg).addReg(RightReg);
  case CmpInst::ICMP_SGE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLT, TempReg).addReg(LeftReg).addReg(RightReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
  case CmpInst::ICMP_SLE: {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::SLT, TempReg).addReg(RightReg).addReg(LeftReg);
    emitInst(Mips::XORi, ResultReg).addReg(TempReg).addImm(1);
  case CmpInst::FCMP_OEQ:
  case CmpInst::FCMP_UNE:
  case CmpInst::FCMP_OLT:
  case CmpInst::FCMP_OLE:
  case CmpInst::FCMP_OGT:
  case CmpInst::FCMP_OGE: {
    if (UnsupportedFPMode)
    bool IsFloat = Left->getType()->isFloatTy();
    bool IsDouble = Left->getType()->isDoubleTy();
    if (!IsFloat && !IsDouble)
    unsigned Opc, CondMovOpc;
    case CmpInst::FCMP_OEQ:
      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
      CondMovOpc = Mips::MOVT_I;
    case CmpInst::FCMP_UNE:
      Opc = IsFloat ? Mips::C_EQ_S : Mips::C_EQ_D32;
      CondMovOpc = Mips::MOVF_I;
    case CmpInst::FCMP_OLT:
      Opc = IsFloat ? Mips::C_OLT_S : Mips::C_OLT_D32;
      CondMovOpc = Mips::MOVT_I;
    case CmpInst::FCMP_OLE:
      Opc = IsFloat ? Mips::C_OLE_S : Mips::C_OLE_D32;
      CondMovOpc = Mips::MOVT_I;
    case CmpInst::FCMP_OGT:
      Opc = IsFloat ? Mips::C_ULE_S : Mips::C_ULE_D32;
      CondMovOpc = Mips::MOVF_I;
    case CmpInst::FCMP_OGE:
      Opc = IsFloat ? Mips::C_ULT_S : Mips::C_ULT_D32;
      CondMovOpc = Mips::MOVF_I;
      llvm_unreachable("Only switching of a subset of CCs.");
    unsigned RegWithZero = createResultReg(&Mips::GPR32RegClass);
    unsigned RegWithOne = createResultReg(&Mips::GPR32RegClass);
    emitInst(Mips::ADDiu, RegWithZero).addReg(Mips::ZERO).addImm(0);
    emitInst(Mips::ADDiu, RegWithOne).addReg(Mips::ZERO).addImm(1);
    emitInst(Opc).addReg(Mips::FCC0, RegState::Define).addReg(LeftReg)
    emitInst(CondMovOpc, ResultReg)
        .addReg(RegWithZero);

bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                            unsigned Alignment) {
  // more cases will be handled here in following patches.
  switch (VT.SimpleTy) {
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    if (UnsupportedFPMode)
    ResultReg = createResultReg(&Mips::FGR32RegClass);
    if (UnsupportedFPMode)
    ResultReg = createResultReg(&Mips::AFGR64RegClass);

  if (Addr.isRegBase()) {
    simplifyAddress(Addr);
    emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());

  if (Addr.isFIBase()) {
    unsigned FI = Addr.getFI();
    int64_t Offset = Addr.getOffset();
    MachineFrameInfo &MFI = MF->getFrameInfo();
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
        MFI.getObjectSize(FI), Align);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc), ResultReg)

bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr,
                             unsigned Alignment) {
  // more cases will be handled here in following patches.
  switch (VT.SimpleTy) {
    if (UnsupportedFPMode)
    if (UnsupportedFPMode)

  if (Addr.isRegBase()) {
    simplifyAddress(Addr);
    emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());

  if (Addr.isFIBase()) {
    unsigned FI = Addr.getFI();
    int64_t Offset = Addr.getOffset();
    MachineFrameInfo &MFI = MF->getFrameInfo();
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
        MFI.getObjectSize(FI), Align);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Opc))

bool MipsFastISel::selectLogicalOp(const Instruction *I) {
  if (!isTypeSupported(I->getType(), VT))
  switch (I->getOpcode()) {
    llvm_unreachable("Unexpected instruction.");
  case Instruction::And:
    ResultReg = emitLogicalOp(ISD::AND, VT, I->getOperand(0), I->getOperand(1));
  case Instruction::Or:
    ResultReg = emitLogicalOp(ISD::OR, VT, I->getOperand(0), I->getOperand(1));
  case Instruction::Xor:
    ResultReg = emitLogicalOp(ISD::XOR, VT, I->getOperand(0), I->getOperand(1));
  updateValueMap(I, ResultReg);

bool MipsFastISel::selectLoad(const Instruction *I) {
  // Atomic loads need special handling.
  if (cast<LoadInst>(I)->isAtomic())

  // Verify we have a legal type before going any further.
  if (!isLoadTypeLegal(I->getType(), VT))

  // See if we can handle this address.
  if (!computeAddress(I->getOperand(0), Addr))

  if (!emitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlignment()))
  updateValueMap(I, ResultReg);

bool MipsFastISel::selectStore(const Instruction *I) {
  Value *Op0 = I->getOperand(0);

  // Atomic stores need special handling.
  if (cast<StoreInst>(I)->isAtomic())

  // Verify we have a legal type before going any further.
  if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))

  // Get the value to be stored into a register.
  SrcReg = getRegForValue(Op0);

  // See if we can handle this address.
  if (!computeAddress(I->getOperand(1), Addr))

  if (!emitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlignment()))

// This can cause a redundant sltiu to be generated.
// FIXME: try and eliminate this in a future patch.
bool MipsFastISel::selectBranch(const Instruction *I) {
  const BranchInst *BI = cast<BranchInst>(I);
  MachineBasicBlock *BrBB = FuncInfo.MBB;
  // TBB is the basic block for the case where the comparison is true.
  // FBB is the basic block for the case where the comparison is false.
  // if (cond) goto TBB
  MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
  MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];

  // For now, just try the simplest case where it's fed by a compare.
  if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
    unsigned CondReg = createResultReg(&Mips::GPR32RegClass);
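    // emitCmp leaves 0 or 1 in CondReg, so a single BGTZ on CondReg branches
    // to the true successor.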
    if (!emitCmp(CondReg, CI))
    BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::BGTZ))
    finishCondBranch(BI->getParent(), TBB, FBB);

bool MipsFastISel::selectCmp(const Instruction *I) {
  const CmpInst *CI = cast<CmpInst>(I);
  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);
  if (!emitCmp(ResultReg, CI))
  updateValueMap(I, ResultReg);

// Attempt to fast-select a floating-point extend instruction.
bool MipsFastISel::selectFPExt(const Instruction *I) {
  if (UnsupportedFPMode)
  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::f32 || DestVT != MVT::f64)

      getRegForValue(Src); // this must be a 32bit floating point register class
                           // maybe we should handle this differently

  unsigned DestReg = createResultReg(&Mips::AFGR64RegClass);
  emitInst(Mips::CVT_D32_S, DestReg).addReg(SrcReg);
  updateValueMap(I, DestReg);

bool MipsFastISel::selectSelect(const Instruction *I) {
  assert(isa<SelectInst>(I) && "Expected a select instruction.");

  DEBUG(dbgs() << "selectSelect\n");

  if (!isTypeSupported(I->getType(), VT) || UnsupportedFPMode) {
    DEBUG(dbgs() << ".. .. gave up (!isTypeSupported || UnsupportedFPMode)\n");

  unsigned CondMovOpc;
  const TargetRegisterClass *RC;

  if (VT.isInteger() && !VT.isVector() && VT.getSizeInBits() <= 32) {
    CondMovOpc = Mips::MOVN_I_I;
    RC = &Mips::GPR32RegClass;
  } else if (VT == MVT::f32) {
    CondMovOpc = Mips::MOVN_I_S;
    RC = &Mips::FGR32RegClass;
  } else if (VT == MVT::f64) {
    CondMovOpc = Mips::MOVN_I_D32;
    RC = &Mips::AFGR64RegClass;

  const SelectInst *SI = cast<SelectInst>(I);
  const Value *Cond = SI->getCondition();
  unsigned Src1Reg = getRegForValue(SI->getTrueValue());
  unsigned Src2Reg = getRegForValue(SI->getFalseValue());
  unsigned CondReg = getRegForValue(Cond);

  if (!Src1Reg || !Src2Reg || !CondReg)

  unsigned ZExtCondReg = createResultReg(&Mips::GPR32RegClass);
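  // MOVN tests the whole 32-bit register, so zero-extend the i1 condition
  // before using it as the conditional-move operand.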
  if (!emitIntExt(MVT::i1, CondReg, MVT::i32, ZExtCondReg, true))

  unsigned ResultReg = createResultReg(RC);
  unsigned TempReg = createResultReg(RC);

  if (!ResultReg || !TempReg)

  emitInst(TargetOpcode::COPY, TempReg).addReg(Src2Reg);
  emitInst(CondMovOpc, ResultReg)
      .addReg(Src1Reg).addReg(ZExtCondReg).addReg(TempReg);
  updateValueMap(I, ResultReg);

// Attempt to fast-select a floating-point truncate instruction.
bool MipsFastISel::selectFPTrunc(const Instruction *I) {
  if (UnsupportedFPMode)
  Value *Src = I->getOperand(0);
  EVT SrcVT = TLI.getValueType(DL, Src->getType(), true);
  EVT DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::f64 || DestVT != MVT::f32)

  unsigned SrcReg = getRegForValue(Src);

  unsigned DestReg = createResultReg(&Mips::FGR32RegClass);

  emitInst(Mips::CVT_S_D32, DestReg).addReg(SrcReg);
  updateValueMap(I, DestReg);

// Attempt to fast-select a floating-point-to-integer conversion.
bool MipsFastISel::selectFPToInt(const Instruction *I, bool IsSigned) {
  if (UnsupportedFPMode)
    return false; // We don't handle this case yet. There is no native
                  // instruction for this but it can be synthesized.
  Type *DstTy = I->getType();
  if (!isTypeLegal(DstTy, DstVT))

  if (DstVT != MVT::i32)

  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();
  if (!isTypeLegal(SrcTy, SrcVT))

  if (SrcVT != MVT::f32 && SrcVT != MVT::f64)

  unsigned SrcReg = getRegForValue(Src);

  // Determine the opcode for the conversion, which takes place
  // entirely within FPRs.
  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
  unsigned TempReg = createResultReg(&Mips::FGR32RegClass);
  unsigned Opc = (SrcVT == MVT::f32) ? Mips::TRUNC_W_S : Mips::TRUNC_W_D32;

  // Generate the convert.
  emitInst(Opc, TempReg).addReg(SrcReg);
  emitInst(Mips::MFC1, DestReg).addReg(TempReg);

  updateValueMap(I, DestReg);

bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
                                   SmallVectorImpl<MVT> &OutVTs,
                                   unsigned &NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();
  // This is the minimum argument area used for A0-A3.
  emitInst(Mips::ADJCALLSTACKDOWN).addImm(16);
  // Process the args.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = CLI.OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];
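    // O32 oddity handled below: a leading f32/f64 argument goes in F12/D6 and
    // a second one in F14/D7 (only if the first was floating point); all other
    // small arguments are mapped onto A0-A3 by their assigned stack offset.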
      if (ArgVT == MVT::f32) {
        VA.convertToReg(Mips::F12);
      } else if (ArgVT == MVT::f64) {
        VA.convertToReg(Mips::D6);
    } else if (i == 1) {
      if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
        if (ArgVT == MVT::f32) {
          VA.convertToReg(Mips::F14);
        } else if (ArgVT == MVT::f64) {
          VA.convertToReg(Mips::D7);
    if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32) || (ArgVT == MVT::i16) ||
         (ArgVT == MVT::i8)) &&
      switch (VA.getLocMemOffset()) {
        VA.convertToReg(Mips::A0);
        VA.convertToReg(Mips::A1);
        VA.convertToReg(Mips::A2);
        VA.convertToReg(Mips::A3);
    unsigned ArgReg = getRegForValue(ArgVal);

    // Handle arg promotion: SExt, ZExt, AExt.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
    case CCValAssign::AExt:
    case CCValAssign::SExt: {
      MVT DestVT = VA.getLocVT();
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
    case CCValAssign::ZExt: {
      MVT DestVT = VA.getLocVT();
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
      llvm_unreachable("Unknown arg promotion!");

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      CLI.OutRegs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      llvm_unreachable("Mips does not use custom args.");

      // FIXME: This path will currently return false. It was copied
      // from the AArch64 port and should be essentially fine for Mips too.
      // The work to finish up this path will be done in a follow-on patch.
      assert(VA.isMemLoc() && "Assuming store on stack.");
      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))

      // Need to store on the stack.
      // FIXME: This alignment is incorrect but this path is disabled
      // for now (will return false). We need to determine the right alignment
      // based on the normal alignment for the underlying machine type.
      unsigned ArgSize = alignTo(ArgVT.getSizeInBits(), 4);

      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittle())
        BEAlign = 8 - ArgSize;

      Addr.setKind(Address::RegBase);
      Addr.setReg(Mips::SP);
      Addr.setOffset(VA.getLocMemOffset() + BEAlign);

      unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
      MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
          MachinePointerInfo::getStack(*FuncInfo.MF, Addr.getOffset()),
          MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
      // if (!emitStore(ArgVT, ArgReg, Addr, MMO))
      return false; // can't store on the stack yet.

bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
                              unsigned NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
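  // Pop the 16-byte reserved argument area that was allocated before the call.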
  emitInst(Mips::ADJCALLSTACKUP).addImm(16).addImm(0);
  if (RetVT != MVT::isVoid) {
    SmallVector<CCValAssign, 16> RVLocs;
    CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
    CCInfo.AnalyzeCallResult(RetVT, RetCC_Mips);

    // Only handle a single return value.
    if (RVLocs.size() != 1)

    // Copy all of the result registers out of their specified physreg.
    MVT CopyVT = RVLocs[0].getValVT();
    // Special handling for extended integers.
    if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)

    unsigned ResultReg = createResultReg(TLI.getRegClassFor(CopyVT));

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY),
            ResultReg).addReg(RVLocs[0].getLocReg());
    CLI.InRegs.push_back(RVLocs[0].getLocReg());

    CLI.ResultReg = ResultReg;
    CLI.NumResultRegs = 1;

bool MipsFastISel::fastLowerArguments() {
  DEBUG(dbgs() << "fastLowerArguments\n");

  if (!FuncInfo.CanLowerReturn) {
    DEBUG(dbgs() << ".. gave up (!CanLowerReturn)\n");

  const Function *F = FuncInfo.Fn;
  if (F->isVarArg()) {
    DEBUG(dbgs() << ".. gave up (varargs)\n");

  CallingConv::ID CC = F->getCallingConv();
  if (CC != CallingConv::C) {
    DEBUG(dbgs() << ".. gave up (calling convention is not C)\n");

  const ArrayRef<MCPhysReg> GPR32ArgRegs = {Mips::A0, Mips::A1, Mips::A2,
  const ArrayRef<MCPhysReg> FGR32ArgRegs = {Mips::F12, Mips::F14};
  const ArrayRef<MCPhysReg> AFGR64ArgRegs = {Mips::D6, Mips::D7};
  ArrayRef<MCPhysReg>::iterator NextGPR32 = GPR32ArgRegs.begin();
  ArrayRef<MCPhysReg>::iterator NextFGR32 = FGR32ArgRegs.begin();
  ArrayRef<MCPhysReg>::iterator NextAFGR64 = AFGR64ArgRegs.begin();

  struct AllocatedReg {
    const TargetRegisterClass *RC;
    AllocatedReg(const TargetRegisterClass *RC, unsigned Reg)
        : RC(RC), Reg(Reg) {}

  // Only handle simple cases. i.e. All arguments are directly mapped to
  // registers of the appropriate type.
  SmallVector<AllocatedReg, 4> Allocation;

  for (const auto &FormalArg : F->args()) {
    if (F->getAttributes().hasAttribute(Idx, Attribute::InReg) ||
        F->getAttributes().hasAttribute(Idx, Attribute::StructRet) ||
        F->getAttributes().hasAttribute(Idx, Attribute::ByVal)) {
      DEBUG(dbgs() << ".. gave up (inreg, structret, byval)\n");

    Type *ArgTy = FormalArg.getType();
    if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy()) {
      DEBUG(dbgs() << ".. gave up (struct, array, or vector)\n");

    EVT ArgVT = TLI.getValueType(DL, ArgTy);
    DEBUG(dbgs() << ".. " << (Idx - 1) << ": " << ArgVT.getEVTString() << "\n");
    if (!ArgVT.isSimple()) {
      DEBUG(dbgs() << ".. .. gave up (not a simple type)\n");

    switch (ArgVT.getSimpleVT().SimpleTy) {
      if (!F->getAttributes().hasAttribute(Idx, Attribute::SExt) &&
          !F->getAttributes().hasAttribute(Idx, Attribute::ZExt)) {
        // It must be any extend, this shouldn't happen for clang-generated IR
        // so just fall back on SelectionDAG.
        DEBUG(dbgs() << ".. .. gave up (i8/i16 arg is not extended)\n");

      if (NextGPR32 == GPR32ArgRegs.end()) {
        DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");

      DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
      Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);

      // Allocating any GPR32 prohibits further use of floating point arguments.
      NextFGR32 = FGR32ArgRegs.end();
      NextAFGR64 = AFGR64ArgRegs.end();

      if (F->getAttributes().hasAttribute(Idx, Attribute::ZExt)) {
        // The O32 ABI does not permit a zero-extended i32.
        DEBUG(dbgs() << ".. .. gave up (i32 arg is zero extended)\n");

      if (NextGPR32 == GPR32ArgRegs.end()) {
        DEBUG(dbgs() << ".. .. gave up (ran out of GPR32 arguments)\n");

      DEBUG(dbgs() << ".. .. GPR32(" << *NextGPR32 << ")\n");
      Allocation.emplace_back(&Mips::GPR32RegClass, *NextGPR32++);

      // Allocating any GPR32 prohibits further use of floating point arguments.
      NextFGR32 = FGR32ArgRegs.end();
      NextAFGR64 = AFGR64ArgRegs.end();

      if (UnsupportedFPMode) {
        DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");

      if (NextFGR32 == FGR32ArgRegs.end()) {
        DEBUG(dbgs() << ".. .. gave up (ran out of FGR32 arguments)\n");

      DEBUG(dbgs() << ".. .. FGR32(" << *NextFGR32 << ")\n");
      Allocation.emplace_back(&Mips::FGR32RegClass, *NextFGR32++);
      // Allocating an FGR32 also allocates the super-register AFGR64, and
      // ABI rules require us to skip the corresponding GPR32.
      if (NextGPR32 != GPR32ArgRegs.end())
      if (NextAFGR64 != AFGR64ArgRegs.end())

      if (UnsupportedFPMode) {
        DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode)\n");

      if (NextAFGR64 == AFGR64ArgRegs.end()) {
        DEBUG(dbgs() << ".. .. gave up (ran out of AFGR64 arguments)\n");

      DEBUG(dbgs() << ".. .. AFGR64(" << *NextAFGR64 << ")\n");
      Allocation.emplace_back(&Mips::AFGR64RegClass, *NextAFGR64++);
      // Allocating an FGR32 also allocates the super-register AFGR64, and
      // ABI rules require us to skip the corresponding GPR32 pair.
      if (NextGPR32 != GPR32ArgRegs.end())
      if (NextGPR32 != GPR32ArgRegs.end())
      if (NextFGR32 != FGR32ArgRegs.end())

      DEBUG(dbgs() << ".. .. gave up (unknown type)\n");

  for (const auto &FormalArg : F->args()) {
    unsigned SrcReg = Allocation[Idx].Reg;
    unsigned DstReg = FuncInfo.MF->addLiveIn(SrcReg, Allocation[Idx].RC);
    // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
    // Without this, EmitLiveInCopies may eliminate the livein if its only
    // use is a bitcast (which isn't turned into an instruction).
    unsigned ResultReg = createResultReg(Allocation[Idx].RC);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg)
        .addReg(DstReg, getKillRegState(true));
    updateValueMap(&FormalArg, ResultReg);

  // Calculate the size of the incoming arguments area.
  // We currently reject all the cases where this would be non-zero.
  unsigned IncomingArgSizeInBytes = 0;

  // Account for the reserved argument area on ABI's that have one (O32).
  // It seems strange to do this on the caller side but it's necessary in
  // SelectionDAG's implementation.
  IncomingArgSizeInBytes = std::min(getABI().GetCalleeAllocdArgSizeInBytes(CC),
                                    IncomingArgSizeInBytes);

  MF->getInfo<MipsFunctionInfo>()->setFormalArgInfo(IncomingArgSizeInBytes,

bool MipsFastISel::fastLowerCall(CallLoweringInfo &CLI) {
  CallingConv::ID CC = CLI.CallConv;
  bool IsTailCall = CLI.IsTailCall;
  bool IsVarArg = CLI.IsVarArg;
  const Value *Callee = CLI.Callee;
  MCSymbol *Symbol = CLI.Symbol;

  // Do not handle FastCC.
  if (CC == CallingConv::Fast)

  // Allow SelectionDAG isel to handle tail calls.

  // Let SDISel handle vararg functions.

  // FIXME: Only handle *simple* calls for now.
  if (CLI.RetTy->isVoidTy())
    RetVT = MVT::isVoid;
  else if (!isTypeSupported(CLI.RetTy, RetVT))

  for (auto Flag : CLI.OutFlags)
    if (Flag.isInReg() || Flag.isSRet() || Flag.isNest() || Flag.isByVal())

  // Set up the argument vectors.
  SmallVector<MVT, 16> OutVTs;
  OutVTs.reserve(CLI.OutVals.size());

  for (auto *Val : CLI.OutVals) {
    if (!isTypeLegal(Val->getType(), VT) &&
        !(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16))

    // We don't handle vector parameters yet.
    if (VT.isVector() || VT.getSizeInBits() > 64)

    OutVTs.push_back(VT);

  if (!computeCallAddress(Callee, Addr))

  // Handle the arguments now that we've gotten them.
  if (!processCallArgs(CLI, OutVTs, NumBytes))

  if (!Addr.getGlobalValue())

  unsigned DestAddress;
    DestAddress = materializeExternalCallSym(Symbol);
    DestAddress = materializeGV(Addr.getGlobalValue(), MVT::i32);
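  // The O32 PIC convention expects the callee's address in $t9, so copy the
  // resolved address there and call through JALR.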
  emitInst(TargetOpcode::COPY, Mips::T9).addReg(DestAddress);
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::JALR),
              Mips::RA).addReg(Mips::T9);

  // Add implicit physical register uses to the call.
  for (auto Reg : CLI.OutRegs)
    MIB.addReg(Reg, RegState::Implicit);

  // Add a register mask with the call-preserved registers.
  // Proper defs for return values will be added by setPhysRegsDeadExcept().
  MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));

  // Finish off the call including any return values.
  return finishCall(CLI, RetVT, NumBytes);

bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  case Intrinsic::bswap: {
    Type *RetTy = II->getCalledFunction()->getReturnType();

    if (!isTypeSupported(RetTy, VT))

    unsigned SrcReg = getRegForValue(II->getOperand(0));
    unsigned DestReg = createResultReg(&Mips::GPR32RegClass);

    if (VT == MVT::i16) {
      if (Subtarget->hasMips32r2()) {
        emitInst(Mips::WSBH, DestReg).addReg(SrcReg);
        updateValueMap(II, DestReg);
      unsigned TempReg[3];
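      // No WSBH available: swap the two bytes with (x << 8) | (x >> 8) and mask
      // the result back down to 16 bits.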
1581 for (int i = 0; i < 3; i++) {
1582 TempReg[i] = createResultReg(&Mips::GPR32RegClass);
1583 if (TempReg[i] == 0)
1586 emitInst(Mips::SLL, TempReg[0]).addReg(SrcReg).addImm(8);
1587 emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(8);
1588 emitInst(Mips::OR, TempReg[2]).addReg(TempReg[0]).addReg(TempReg[1]);
1589 emitInst(Mips::ANDi, DestReg).addReg(TempReg[2]).addImm(0xFFFF);
1590 updateValueMap(II, DestReg);
1593 } else if (VT == MVT::i32) {
1594 if (Subtarget->hasMips32r2()) {
1595 unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
1596 emitInst(Mips::WSBH, TempReg).addReg(SrcReg);
1597 emitInst(Mips::ROTR, DestReg).addReg(TempReg).addImm(16);
1598 updateValueMap(II, DestReg);
1601 unsigned TempReg[8];
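      // No WSBH/ROTR available: assemble the byte-swapped value from shifts,
      // masks, and ORs of the individual bytes.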
1602 for (int i = 0; i < 8; i++) {
1603 TempReg[i] = createResultReg(&Mips::GPR32RegClass);
1604 if (TempReg[i] == 0)
1608 emitInst(Mips::SRL, TempReg[0]).addReg(SrcReg).addImm(8);
1609 emitInst(Mips::SRL, TempReg[1]).addReg(SrcReg).addImm(24);
1610 emitInst(Mips::ANDi, TempReg[2]).addReg(TempReg[0]).addImm(0xFF00);
1611 emitInst(Mips::OR, TempReg[3]).addReg(TempReg[1]).addReg(TempReg[2]);
1613 emitInst(Mips::ANDi, TempReg[4]).addReg(SrcReg).addImm(0xFF00);
1614 emitInst(Mips::SLL, TempReg[5]).addReg(TempReg[4]).addImm(8);
1616 emitInst(Mips::SLL, TempReg[6]).addReg(SrcReg).addImm(24);
1617 emitInst(Mips::OR, TempReg[7]).addReg(TempReg[3]).addReg(TempReg[5]);
1618 emitInst(Mips::OR, DestReg).addReg(TempReg[6]).addReg(TempReg[7]);
1619 updateValueMap(II, DestReg);
1625 case Intrinsic::memcpy:
1626 case Intrinsic::memmove: {
1627 const auto *MTI = cast<MemTransferInst>(II);
1628 // Don't handle volatile.
1629 if (MTI->isVolatile())
1631 if (!MTI->getLength()->getType()->isIntegerTy(32))
1633 const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
1634 return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
1636 case Intrinsic::memset: {
1637 const MemSetInst *MSI = cast<MemSetInst>(II);
1638 // Don't handle volatile.
1639 if (MSI->isVolatile())
1641 if (!MSI->getLength()->getType()->isIntegerTy(32))
1643 return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
1649 bool MipsFastISel::selectRet(const Instruction *I) {
1650 const Function &F = *I->getParent()->getParent();
1651 const ReturnInst *Ret = cast<ReturnInst>(I);
1653 DEBUG(dbgs() << "selectRet\n");
1655 if (!FuncInfo.CanLowerReturn)
1658 // Build a list of return value registers.
1659 SmallVector<unsigned, 4> RetRegs;
1661 if (Ret->getNumOperands() > 0) {
1662 CallingConv::ID CC = F.getCallingConv();
1664 // Do not handle FastCC.
1665 if (CC == CallingConv::Fast)
1668 SmallVector<ISD::OutputArg, 4> Outs;
1669 GetReturnInfo(F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
1671 // Analyze operands of the call, assigning locations to each operand.
1672 SmallVector<CCValAssign, 16> ValLocs;
1673 MipsCCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs,
1675 CCAssignFn *RetCC = RetCC_Mips;
1676 CCInfo.AnalyzeReturn(Outs, RetCC);
1678 // Only handle a single return value for now.
1679 if (ValLocs.size() != 1)
1682 CCValAssign &VA = ValLocs[0];
1683 const Value *RV = Ret->getOperand(0);
1685 // Don't bother handling odd stuff for now.
1686 if ((VA.getLocInfo() != CCValAssign::Full) &&
1687 (VA.getLocInfo() != CCValAssign::BCvt))
1690 // Only handle register returns for now.
1694 unsigned Reg = getRegForValue(RV);
1698 unsigned SrcReg = Reg + VA.getValNo();
1699 unsigned DestReg = VA.getLocReg();
1700 // Avoid a cross-class copy. This is very unlikely.
1701 if (!MRI.getRegClass(SrcReg)->contains(DestReg))
1704 EVT RVEVT = TLI.getValueType(DL, RV->getType());
1705 if (!RVEVT.isSimple())
1708 if (RVEVT.isVector())
1711 MVT RVVT = RVEVT.getSimpleVT();
1712 if (RVVT == MVT::f128)
1715 // Do not handle FGR64 returns for now.
1716 if (RVVT == MVT::f64 && UnsupportedFPMode) {
1717 DEBUG(dbgs() << ".. .. gave up (UnsupportedFPMode\n");
    MVT DestVT = VA.getValVT();
    // Special handling for extended integers.
    if (RVVT != DestVT) {
      if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)

      if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
        bool IsZExt = Outs[0].Flags.isZExt();
        SrcReg = emitIntExt(RVVT, SrcReg, DestVT, IsZExt);

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), DestReg).addReg(SrcReg);

    // Add register to return instruction.
    RetRegs.push_back(VA.getLocReg());

  MachineInstrBuilder MIB = emitInst(Mips::RetRA);
  for (unsigned i = 0, e = RetRegs.size(); i != e; ++i)
    MIB.addReg(RetRegs[i], RegState::Implicit);

bool MipsFastISel::selectTrunc(const Instruction *I) {
  // The high bits for a type smaller than the register size are assumed to be
  Value *Op = I->getOperand(0);

  SrcVT = TLI.getValueType(DL, Op->getType(), true);
  DestVT = TLI.getValueType(DL, I->getType(), true);

  if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
  if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)

  unsigned SrcReg = getRegForValue(Op);

  // Because the high bits are undefined, a truncate doesn't generate
  updateValueMap(I, SrcReg);

bool MipsFastISel::selectIntExt(const Instruction *I) {
  Type *DestTy = I->getType();
  Value *Src = I->getOperand(0);
  Type *SrcTy = Src->getType();

  bool isZExt = isa<ZExtInst>(I);
  unsigned SrcReg = getRegForValue(Src);

  EVT SrcEVT, DestEVT;
  SrcEVT = TLI.getValueType(DL, SrcTy, true);
  DestEVT = TLI.getValueType(DL, DestTy, true);
  if (!SrcEVT.isSimple())
  if (!DestEVT.isSimple())

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DestVT = DestEVT.getSimpleVT();
  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);

  if (!emitIntExt(SrcVT, SrcReg, DestVT, ResultReg, isZExt))
  updateValueMap(I, ResultReg);

bool MipsFastISel::emitIntSExt32r1(MVT SrcVT, unsigned SrcReg, MVT DestVT,
  switch (SrcVT.SimpleTy) {
  unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
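  // Without seb/seh, sign-extend by shifting the value up to bit 31 and then
  // arithmetic-shifting it back down.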
  emitInst(Mips::SLL, TempReg).addReg(SrcReg).addImm(ShiftAmt);
  emitInst(Mips::SRA, DestReg).addReg(TempReg).addImm(ShiftAmt);

bool MipsFastISel::emitIntSExt32r2(MVT SrcVT, unsigned SrcReg, MVT DestVT,
  switch (SrcVT.SimpleTy) {
    emitInst(Mips::SEB, DestReg).addReg(SrcReg);
    emitInst(Mips::SEH, DestReg).addReg(SrcReg);

bool MipsFastISel::emitIntSExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
  if ((DestVT != MVT::i32) && (DestVT != MVT::i16))
  if (Subtarget->hasMips32r2())
    return emitIntSExt32r2(SrcVT, SrcReg, DestVT, DestReg);
  return emitIntSExt32r1(SrcVT, SrcReg, DestVT, DestReg);

bool MipsFastISel::emitIntZExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
  switch (SrcVT.SimpleTy) {
  emitInst(Mips::ANDi, DestReg).addReg(SrcReg).addImm(Imm);

bool MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
                              unsigned DestReg, bool IsZExt) {
  // FastISel does not have plumbing to deal with extensions where the SrcVT or
  // DestVT are odd things, so test to make sure that they are both types we can
  // handle (i1/i8/i16/i32 for SrcVT and i8/i16/i32/i64 for DestVT), otherwise
  // bail out to SelectionDAG.
  if (((DestVT != MVT::i8) && (DestVT != MVT::i16) && (DestVT != MVT::i32)) ||
      ((SrcVT != MVT::i1) && (SrcVT != MVT::i8) && (SrcVT != MVT::i16)))
    return emitIntZExt(SrcVT, SrcReg, DestVT, DestReg);
  return emitIntSExt(SrcVT, SrcReg, DestVT, DestReg);

unsigned MipsFastISel::emitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
  unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
  bool Success = emitIntExt(SrcVT, SrcReg, DestVT, DestReg, isZExt);
  return Success ? DestReg : 0;

bool MipsFastISel::selectDivRem(const Instruction *I, unsigned ISDOpcode) {
  EVT DestEVT = TLI.getValueType(DL, I->getType(), true);
  if (!DestEVT.isSimple())

  MVT DestVT = DestEVT.getSimpleVT();
  if (DestVT != MVT::i32)

  switch (ISDOpcode) {
    DivOpc = Mips::SDIV;
    DivOpc = Mips::UDIV;

  unsigned Src0Reg = getRegForValue(I->getOperand(0));
  unsigned Src1Reg = getRegForValue(I->getOperand(1));
  if (!Src0Reg || !Src1Reg)

  emitInst(DivOpc).addReg(Src0Reg).addReg(Src1Reg);
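  // Emit a trap if the divisor is zero (TEQ with trap code 7, the standard
  // divide-by-zero break code).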
  emitInst(Mips::TEQ).addReg(Src1Reg).addReg(Mips::ZERO).addImm(7);

  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);

  unsigned MFOpc = (ISDOpcode == ISD::SREM || ISDOpcode == ISD::UREM)
  emitInst(MFOpc, ResultReg);

  updateValueMap(I, ResultReg);

bool MipsFastISel::selectShift(const Instruction *I) {
  if (!isTypeSupported(I->getType(), RetVT))

  unsigned ResultReg = createResultReg(&Mips::GPR32RegClass);

  unsigned Opcode = I->getOpcode();
  const Value *Op0 = I->getOperand(0);
  unsigned Op0Reg = getRegForValue(Op0);

  // If AShr or LShr, then we need to make sure the operand0 is sign extended.
  if (Opcode == Instruction::AShr || Opcode == Instruction::LShr) {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);

    MVT Op0MVT = TLI.getValueType(DL, Op0->getType(), true).getSimpleVT();
    bool IsZExt = Opcode == Instruction::LShr;
    if (!emitIntExt(Op0MVT, Op0Reg, MVT::i32, TempReg, IsZExt))

  if (const auto *C = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t ShiftVal = C->getZExtValue();
      llvm_unreachable("Unexpected instruction.");
    case Instruction::Shl:
    case Instruction::AShr:
    case Instruction::LShr:

    emitInst(Opcode, ResultReg).addReg(Op0Reg).addImm(ShiftVal);
    updateValueMap(I, ResultReg);

  unsigned Op1Reg = getRegForValue(I->getOperand(1));
    llvm_unreachable("Unexpected instruction.");
  case Instruction::Shl:
    Opcode = Mips::SLLV;
  case Instruction::AShr:
    Opcode = Mips::SRAV;
  case Instruction::LShr:
    Opcode = Mips::SRLV;

  emitInst(Opcode, ResultReg).addReg(Op0Reg).addReg(Op1Reg);
  updateValueMap(I, ResultReg);

bool MipsFastISel::fastSelectInstruction(const Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::Load:
    return selectLoad(I);
  case Instruction::Store:
    return selectStore(I);
  case Instruction::SDiv:
    if (!selectBinaryOp(I, ISD::SDIV))
      return selectDivRem(I, ISD::SDIV);
  case Instruction::UDiv:
    if (!selectBinaryOp(I, ISD::UDIV))
      return selectDivRem(I, ISD::UDIV);
  case Instruction::SRem:
    if (!selectBinaryOp(I, ISD::SREM))
      return selectDivRem(I, ISD::SREM);
  case Instruction::URem:
    if (!selectBinaryOp(I, ISD::UREM))
      return selectDivRem(I, ISD::UREM);
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    return selectShift(I);
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    return selectLogicalOp(I);
  case Instruction::Br:
    return selectBranch(I);
  case Instruction::Ret:
    return selectRet(I);
  case Instruction::Trunc:
    return selectTrunc(I);
  case Instruction::ZExt:
  case Instruction::SExt:
    return selectIntExt(I);
  case Instruction::FPTrunc:
    return selectFPTrunc(I);
  case Instruction::FPExt:
    return selectFPExt(I);
  case Instruction::FPToSI:
    return selectFPToInt(I, /*isSigned*/ true);
  case Instruction::FPToUI:
    return selectFPToInt(I, /*isSigned*/ false);
  case Instruction::ICmp:
  case Instruction::FCmp:
    return selectCmp(I);
  case Instruction::Select:
    return selectSelect(I);

unsigned MipsFastISel::getRegEnsuringSimpleIntegerWidening(const Value *V,
  unsigned VReg = getRegForValue(V);
  MVT VMVT = TLI.getValueType(DL, V->getType(), true).getSimpleVT();
  if ((VMVT == MVT::i8) || (VMVT == MVT::i16)) {
    unsigned TempReg = createResultReg(&Mips::GPR32RegClass);
    if (!emitIntExt(VMVT, VReg, MVT::i32, TempReg, IsUnsigned))

void MipsFastISel::simplifyAddress(Address &Addr) {
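  // Loads and stores encode only a signed 16-bit offset; fold anything larger
  // into the base register.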
2079 if (!isInt<16>(Addr.getOffset())) {
2081 materialize32BitInt(Addr.getOffset(), &Mips::GPR32RegClass);
2082 unsigned DestReg = createResultReg(&Mips::GPR32RegClass);
2083 emitInst(Mips::ADDu, DestReg).addReg(TempReg).addReg(Addr.getReg());
2084 Addr.setReg(DestReg);
2089 unsigned MipsFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2090 const TargetRegisterClass *RC,
2091 unsigned Op0, bool Op0IsKill,
2092 unsigned Op1, bool Op1IsKill) {
2093 // We treat the MUL instruction in a special way because it clobbers
2094 // the HI0 & LO0 registers. The TableGen definition of this instruction can
2095 // mark these registers only as implicitly defined. As a result, the
2096 // register allocator runs out of registers when this instruction is
2097 // followed by another instruction that defines the same registers too.
2098 // We can fix this by explicitly marking those registers as dead.
2099 if (MachineInstOpcode == Mips::MUL) {
2100 unsigned ResultReg = createResultReg(RC);
2101 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2102 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2103 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2104 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2105 .addReg(Op0, getKillRegState(Op0IsKill))
2106 .addReg(Op1, getKillRegState(Op1IsKill))
2107 .addReg(Mips::HI0, RegState::ImplicitDefine | RegState::Dead)
2108 .addReg(Mips::LO0, RegState::ImplicitDefine | RegState::Dead);
2112 return FastISel::fastEmitInst_rr(MachineInstOpcode, RC, Op0, Op0IsKill, Op1,
2118 FastISel *Mips::createFastISel(FunctionLoweringInfo &funcInfo,
2119 const TargetLibraryInfo *libInfo) {
2120 return new MipsFastISel(funcInfo, libInfo);
2123 } // end namespace llvm