1 //===-- Mips16ISelLowering.h - Mips16 DAG Lowering Interface ----*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // Subclass of MipsTargetLowering specialized for mips16.
12 //===----------------------------------------------------------------------===//
13 #include "Mips16ISelLowering.h"
14 #include "MCTargetDesc/MipsBaseInfo.h"
15 #include "Mips16HardFloatInfo.h"
16 #include "MipsMachineFunction.h"
17 #include "MipsRegisterInfo.h"
18 #include "MipsTargetMachine.h"
19 #include "llvm/CodeGen/MachineInstrBuilder.h"
20 #include "llvm/CodeGen/TargetInstrInfo.h"
21 #include "llvm/Support/CommandLine.h"
25 #define DEBUG_TYPE "mips-lower"
27 static cl::opt<bool> DontExpandCondPseudos16(
28 "mips16-dont-expand-cond-pseudo",
30 cl::desc("Don't expand conditional move related "
31 "pseudos for Mips 16"),
35 struct Mips16Libcall {
36 RTLIB::Libcall Libcall;
39 bool operator<(const Mips16Libcall &RHS) const {
40 return std::strcmp(Name, RHS.Name) < 0;
// Maps a well-known function name (libm intrinsics and friends) to the
// mips16 call-stub helper used to call it. Ordering and equality are by
// Name (strcmp) so the table below can be searched with std::lower_bound.
struct Mips16IntrinsicHelperType {
  const char *Name;
  const char *Helper;

  bool operator<(const Mips16IntrinsicHelperType &RHS) const {
    return std::strcmp(Name, RHS.Name) < 0;
  }
  bool operator==(const Mips16IntrinsicHelperType &RHS) const {
    return std::strcmp(Name, RHS.Name) == 0;
  }
};
57 // Libcalls for which no helper is generated. Sorted by name for binary search.
58 static const Mips16Libcall HardFloatLibCalls[] = {
59 { RTLIB::ADD_F64, "__mips16_adddf3" },
60 { RTLIB::ADD_F32, "__mips16_addsf3" },
61 { RTLIB::DIV_F64, "__mips16_divdf3" },
62 { RTLIB::DIV_F32, "__mips16_divsf3" },
63 { RTLIB::OEQ_F64, "__mips16_eqdf2" },
64 { RTLIB::OEQ_F32, "__mips16_eqsf2" },
65 { RTLIB::FPEXT_F32_F64, "__mips16_extendsfdf2" },
66 { RTLIB::FPTOSINT_F64_I32, "__mips16_fix_truncdfsi" },
67 { RTLIB::FPTOSINT_F32_I32, "__mips16_fix_truncsfsi" },
68 { RTLIB::SINTTOFP_I32_F64, "__mips16_floatsidf" },
69 { RTLIB::SINTTOFP_I32_F32, "__mips16_floatsisf" },
70 { RTLIB::UINTTOFP_I32_F64, "__mips16_floatunsidf" },
71 { RTLIB::UINTTOFP_I32_F32, "__mips16_floatunsisf" },
72 { RTLIB::OGE_F64, "__mips16_gedf2" },
73 { RTLIB::OGE_F32, "__mips16_gesf2" },
74 { RTLIB::OGT_F64, "__mips16_gtdf2" },
75 { RTLIB::OGT_F32, "__mips16_gtsf2" },
76 { RTLIB::OLE_F64, "__mips16_ledf2" },
77 { RTLIB::OLE_F32, "__mips16_lesf2" },
78 { RTLIB::OLT_F64, "__mips16_ltdf2" },
79 { RTLIB::OLT_F32, "__mips16_ltsf2" },
80 { RTLIB::MUL_F64, "__mips16_muldf3" },
81 { RTLIB::MUL_F32, "__mips16_mulsf3" },
82 { RTLIB::UNE_F64, "__mips16_nedf2" },
83 { RTLIB::UNE_F32, "__mips16_nesf2" },
84 { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_dc" }, // No associated libcall.
85 { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_df" }, // No associated libcall.
86 { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_sc" }, // No associated libcall.
87 { RTLIB::UNKNOWN_LIBCALL, "__mips16_ret_sf" }, // No associated libcall.
88 { RTLIB::SUB_F64, "__mips16_subdf3" },
89 { RTLIB::SUB_F32, "__mips16_subsf3" },
90 { RTLIB::FPROUND_F64_F32, "__mips16_truncdfsf2" },
91 { RTLIB::UO_F64, "__mips16_unorddf2" },
92 { RTLIB::UO_F32, "__mips16_unordsf2" }
95 static const Mips16IntrinsicHelperType Mips16IntrinsicHelper[] = {
96 {"__fixunsdfsi", "__mips16_call_stub_2" },
97 {"ceil", "__mips16_call_stub_df_2"},
98 {"ceilf", "__mips16_call_stub_sf_1"},
99 {"copysign", "__mips16_call_stub_df_10"},
100 {"copysignf", "__mips16_call_stub_sf_5"},
101 {"cos", "__mips16_call_stub_df_2"},
102 {"cosf", "__mips16_call_stub_sf_1"},
103 {"exp2", "__mips16_call_stub_df_2"},
104 {"exp2f", "__mips16_call_stub_sf_1"},
105 {"floor", "__mips16_call_stub_df_2"},
106 {"floorf", "__mips16_call_stub_sf_1"},
107 {"log2", "__mips16_call_stub_df_2"},
108 {"log2f", "__mips16_call_stub_sf_1"},
109 {"nearbyint", "__mips16_call_stub_df_2"},
110 {"nearbyintf", "__mips16_call_stub_sf_1"},
111 {"rint", "__mips16_call_stub_df_2"},
112 {"rintf", "__mips16_call_stub_sf_1"},
113 {"sin", "__mips16_call_stub_df_2"},
114 {"sinf", "__mips16_call_stub_sf_1"},
115 {"sqrt", "__mips16_call_stub_df_2"},
116 {"sqrtf", "__mips16_call_stub_sf_1"},
117 {"trunc", "__mips16_call_stub_df_2"},
118 {"truncf", "__mips16_call_stub_sf_1"},
121 Mips16TargetLowering::Mips16TargetLowering(const MipsTargetMachine &TM,
122 const MipsSubtarget &STI)
123 : MipsTargetLowering(TM, STI) {
125 // Set up the register classes
126 addRegisterClass(MVT::i32, &Mips::CPU16RegsRegClass);
128 if (!Subtarget.useSoftFloat())
129 setMips16HardFloatLibCalls();
131 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Expand);
132 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
133 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
134 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
135 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
136 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
137 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
138 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
139 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
140 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
141 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
142 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
143 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
145 setOperationAction(ISD::ROTR, MVT::i32, Expand);
146 setOperationAction(ISD::ROTR, MVT::i64, Expand);
147 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
148 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
150 computeRegisterProperties(STI.getRegisterInfo());
153 const MipsTargetLowering *
154 llvm::createMips16TargetLowering(const MipsTargetMachine &TM,
155 const MipsSubtarget &STI) {
156 return new Mips16TargetLowering(TM, STI);
// Mips16 override of the misaligned-memory-access target hook.
// NOTE(review): this excerpt is truncated -- the return-type line, the
// remaining parameters, and the body of this override are not visible
// here; restore them from the full file before building.
Mips16TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
168 Mips16TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
169 MachineBasicBlock *BB) const {
170 switch (MI.getOpcode()) {
172 return MipsTargetLowering::EmitInstrWithCustomInserter(MI, BB);
174 return emitSel16(Mips::BeqzRxImm16, MI, BB);
176 return emitSel16(Mips::BnezRxImm16, MI, BB);
177 case Mips::SelTBteqZCmpi:
178 return emitSeliT16(Mips::Bteqz16, Mips::CmpiRxImmX16, MI, BB);
179 case Mips::SelTBteqZSlti:
180 return emitSeliT16(Mips::Bteqz16, Mips::SltiRxImmX16, MI, BB);
181 case Mips::SelTBteqZSltiu:
182 return emitSeliT16(Mips::Bteqz16, Mips::SltiuRxImmX16, MI, BB);
183 case Mips::SelTBtneZCmpi:
184 return emitSeliT16(Mips::Btnez16, Mips::CmpiRxImmX16, MI, BB);
185 case Mips::SelTBtneZSlti:
186 return emitSeliT16(Mips::Btnez16, Mips::SltiRxImmX16, MI, BB);
187 case Mips::SelTBtneZSltiu:
188 return emitSeliT16(Mips::Btnez16, Mips::SltiuRxImmX16, MI, BB);
189 case Mips::SelTBteqZCmp:
190 return emitSelT16(Mips::Bteqz16, Mips::CmpRxRy16, MI, BB);
191 case Mips::SelTBteqZSlt:
192 return emitSelT16(Mips::Bteqz16, Mips::SltRxRy16, MI, BB);
193 case Mips::SelTBteqZSltu:
194 return emitSelT16(Mips::Bteqz16, Mips::SltuRxRy16, MI, BB);
195 case Mips::SelTBtneZCmp:
196 return emitSelT16(Mips::Btnez16, Mips::CmpRxRy16, MI, BB);
197 case Mips::SelTBtneZSlt:
198 return emitSelT16(Mips::Btnez16, Mips::SltRxRy16, MI, BB);
199 case Mips::SelTBtneZSltu:
200 return emitSelT16(Mips::Btnez16, Mips::SltuRxRy16, MI, BB);
201 case Mips::BteqzT8CmpX16:
202 return emitFEXT_T8I816_ins(Mips::Bteqz16, Mips::CmpRxRy16, MI, BB);
203 case Mips::BteqzT8SltX16:
204 return emitFEXT_T8I816_ins(Mips::Bteqz16, Mips::SltRxRy16, MI, BB);
205 case Mips::BteqzT8SltuX16:
206 // TBD: figure out a way to get this or remove the instruction
208 return emitFEXT_T8I816_ins(Mips::Bteqz16, Mips::SltuRxRy16, MI, BB);
209 case Mips::BtnezT8CmpX16:
210 return emitFEXT_T8I816_ins(Mips::Btnez16, Mips::CmpRxRy16, MI, BB);
211 case Mips::BtnezT8SltX16:
212 return emitFEXT_T8I816_ins(Mips::Btnez16, Mips::SltRxRy16, MI, BB);
213 case Mips::BtnezT8SltuX16:
214 // TBD: figure out a way to get this or remove the instruction
216 return emitFEXT_T8I816_ins(Mips::Btnez16, Mips::SltuRxRy16, MI, BB);
217 case Mips::BteqzT8CmpiX16: return emitFEXT_T8I8I16_ins(
218 Mips::Bteqz16, Mips::CmpiRxImm16, Mips::CmpiRxImmX16, false, MI, BB);
219 case Mips::BteqzT8SltiX16: return emitFEXT_T8I8I16_ins(
220 Mips::Bteqz16, Mips::SltiRxImm16, Mips::SltiRxImmX16, true, MI, BB);
221 case Mips::BteqzT8SltiuX16: return emitFEXT_T8I8I16_ins(
222 Mips::Bteqz16, Mips::SltiuRxImm16, Mips::SltiuRxImmX16, false, MI, BB);
223 case Mips::BtnezT8CmpiX16: return emitFEXT_T8I8I16_ins(
224 Mips::Btnez16, Mips::CmpiRxImm16, Mips::CmpiRxImmX16, false, MI, BB);
225 case Mips::BtnezT8SltiX16: return emitFEXT_T8I8I16_ins(
226 Mips::Btnez16, Mips::SltiRxImm16, Mips::SltiRxImmX16, true, MI, BB);
227 case Mips::BtnezT8SltiuX16: return emitFEXT_T8I8I16_ins(
228 Mips::Btnez16, Mips::SltiuRxImm16, Mips::SltiuRxImmX16, false, MI, BB);
230 case Mips::SltCCRxRy16:
231 return emitFEXT_CCRX16_ins(Mips::SltRxRy16, MI, BB);
233 case Mips::SltiCCRxImmX16:
234 return emitFEXT_CCRXI16_ins
235 (Mips::SltiRxImm16, Mips::SltiRxImmX16, MI, BB);
236 case Mips::SltiuCCRxImmX16:
237 return emitFEXT_CCRXI16_ins
238 (Mips::SltiuRxImm16, Mips::SltiuRxImmX16, MI, BB);
239 case Mips::SltuCCRxRy16:
240 return emitFEXT_CCRX16_ins
241 (Mips::SltuRxRy16, MI, BB);
245 bool Mips16TargetLowering::isEligibleForTailCallOptimization(
246 const CCState &CCInfo, unsigned NextStackOffset,
247 const MipsFunctionInfo &FI) const {
248 // No tail call optimization for mips16.
252 void Mips16TargetLowering::setMips16HardFloatLibCalls() {
253 for (unsigned I = 0; I != array_lengthof(HardFloatLibCalls); ++I) {
254 assert((I == 0 || HardFloatLibCalls[I - 1] < HardFloatLibCalls[I]) &&
255 "Array not sorted!");
256 if (HardFloatLibCalls[I].Libcall != RTLIB::UNKNOWN_LIBCALL)
257 setLibcallName(HardFloatLibCalls[I].Libcall, HardFloatLibCalls[I].Name);
260 setLibcallName(RTLIB::O_F64, "__mips16_unorddf2");
261 setLibcallName(RTLIB::O_F32, "__mips16_unordsf2");
265 // The Mips16 hard float is a crazy quilt inherited from gcc. I have a much
266 // cleaner way to do all of this but it will have to wait until the traditional
267 // gcc mechanism is completed.
269 // For Pic, in order for Mips16 code to call Mips32 code which according the abi
270 // have either arguments or returned values placed in floating point registers,
271 // we use a set of helper functions. (This includes functions which return type
272 // complex which on Mips are returned in a pair of floating point registers).
274 // This is an encoding that we inherited from gcc.
275 // In Mips traditional O32, N32 ABI, floating point numbers are passed in
276 // floating point argument registers 1,2 only when the first and optionally
277 // the second arguments are float (sf) or double (df).
278 // For Mips16 we are only concerned with the situations where floating point
279 // arguments are being passed in floating point registers by the ABI, because
280 // Mips16 mode code cannot execute floating point instructions to load those
281 // values and hence helper functions are needed.
282 // The possibilities are (), (sf), (sf, sf), (sf, df), (df), (df, sf), (df, df)
// the helper function suffixes for these are:
284 // 0, 1, 5, 9, 2, 6, 10
285 // this suffix can then be calculated as follows:
286 // for a given argument Arg:
//   Arg1x, Arg2x = 1 : Arg is sf
//                  2 : Arg is df
//                  0 : Arg is neither sf nor df
290 // So this stub is the string for number Arg1x + Arg2x*4.
291 // However not all numbers between 0 and 10 are possible, we check anyway and
292 // assert if the impossible exists.
295 unsigned int Mips16TargetLowering::getMips16HelperFunctionStubNumber
296 (ArgListTy &Args) const {
297 unsigned int resultNum = 0;
298 if (Args.size() >= 1) {
299 Type *t = Args[0].Ty;
300 if (t->isFloatTy()) {
303 else if (t->isDoubleTy()) {
308 if (Args.size() >=2) {
309 Type *t = Args[1].Ty;
310 if (t->isFloatTy()) {
313 else if (t->isDoubleTy()) {
322 // Prefixes are attached to stub numbers depending on the return type.
323 // return type: float sf_
325 // single complex sc_
// double complex  dc_
// The full name of a helper function is __mips16_call_stub_ +
331 // return type dependent prefix + stub number
333 // FIXME: This is something that probably should be in a different source file
334 // and perhaps done differently but my main purpose is to not waste runtime
335 // on something that we can enumerate in the source. Another possibility is
336 // to have a python script to generate these mapping tables. This will do
337 // for now. There are a whole series of helper function mapping arrays, one
// for each return type class as outlined above. There are 11 possible
339 // entries. Ones with 0 are ones which should never be selected.
341 // All the arrays are similar except for ones which return neither
342 // sf, df, sc, dc, in which we only care about ones which have sf or df as a
// Build the per-return-type stub-name tables. Index == stub number;
// indices 3, 4, 7 and 8 are impossible (see validStubNum below) and hold
// null. vMips16Helper has no entry 0 because stub 0 with a non-FP return
// means no helper is needed at all.
#define P_ "__mips16_call_stub_"
#define MAX_STUB_NUMBER 10
#define T1 P "1", P "2", 0, 0, P "5", P "6", 0, 0, P "9", P "10"
#define T P "0" , T1
#define P P_
static char const * vMips16Helper[MAX_STUB_NUMBER+1] =
  {nullptr, T1 };
#undef P
#define P P_ "sf_"
static char const * sfMips16Helper[MAX_STUB_NUMBER+1] =
  { T };
#undef P
#define P P_ "df_"
static char const * dfMips16Helper[MAX_STUB_NUMBER+1] =
  { T };
#undef P
#define P P_ "sc_"
static char const * scMips16Helper[MAX_STUB_NUMBER+1] =
  { T };
#undef P
#define P P_ "dc_"
static char const * dcMips16Helper[MAX_STUB_NUMBER+1] =
  { T };
#undef P
#undef P_
372 const char* Mips16TargetLowering::
373 getMips16HelperFunction
374 (Type* RetTy, ArgListTy &Args, bool &needHelper) const {
375 const unsigned int stubNum = getMips16HelperFunctionStubNumber(Args);
377 const unsigned int maxStubNum = 10;
378 assert(stubNum <= maxStubNum);
379 const bool validStubNum[maxStubNum+1] =
380 {true, true, true, false, false, true, true, false, false, true, true};
381 assert(validStubNum[stubNum]);
384 if (RetTy->isFloatTy()) {
385 result = sfMips16Helper[stubNum];
387 else if (RetTy ->isDoubleTy()) {
388 result = dfMips16Helper[stubNum];
389 } else if (StructType *SRetTy = dyn_cast<StructType>(RetTy)) {
390 // check if it's complex
391 if (SRetTy->getNumElements() == 2) {
392 if ((SRetTy->getElementType(0)->isFloatTy()) &&
393 (SRetTy->getElementType(1)->isFloatTy())) {
394 result = scMips16Helper[stubNum];
395 } else if ((SRetTy->getElementType(0)->isDoubleTy()) &&
396 (SRetTy->getElementType(1)->isDoubleTy())) {
397 result = dcMips16Helper[stubNum];
399 llvm_unreachable("Uncovered condition");
402 llvm_unreachable("Uncovered condition");
409 result = vMips16Helper[stubNum];
415 void Mips16TargetLowering::
416 getOpndList(SmallVectorImpl<SDValue> &Ops,
417 std::deque< std::pair<unsigned, SDValue> > &RegsToPass,
418 bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
419 bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
420 SDValue Chain) const {
421 SelectionDAG &DAG = CLI.DAG;
422 MachineFunction &MF = DAG.getMachineFunction();
423 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
424 const char* Mips16HelperFunction = nullptr;
425 bool NeedMips16Helper = false;
427 if (Subtarget.inMips16HardFloat()) {
429 // currently we don't have symbols tagged with the mips16 or mips32
430 // qualifier so we will assume that we don't know what kind it is.
431 // and generate the helper
433 bool LookupHelper = true;
434 if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(CLI.Callee)) {
435 Mips16Libcall Find = { RTLIB::UNKNOWN_LIBCALL, S->getSymbol() };
437 if (std::binary_search(std::begin(HardFloatLibCalls),
438 std::end(HardFloatLibCalls), Find))
439 LookupHelper = false;
441 const char *Symbol = S->getSymbol();
442 Mips16IntrinsicHelperType IntrinsicFind = { Symbol, "" };
443 const Mips16HardFloatInfo::FuncSignature *Signature =
444 Mips16HardFloatInfo::findFuncSignature(Symbol);
445 if (!IsPICCall && (Signature && (FuncInfo->StubsNeeded.find(Symbol) ==
446 FuncInfo->StubsNeeded.end()))) {
447 FuncInfo->StubsNeeded[Symbol] = Signature;
449 // S2 is normally saved if the stub is for a function which
450 // returns a float or double value and is not otherwise. This is
451 // because more work is required after the function the stub
452 // is calling completes, and so the stub cannot directly return
453 // and the stub has no stack space to store the return address so
454 // S2 is used for that purpose.
455 // In order to take advantage of not saving S2, we need to also
456 // optimize the call in the stub and this requires some further
457 // functionality in MipsAsmPrinter which we don't have yet.
458 // So for now we always save S2. The optimization will be done
459 // in a follow-on patch.
461 if (1 || (Signature->RetSig != Mips16HardFloatInfo::NoFPRet))
462 FuncInfo->setSaveS2();
464 // one more look at list of intrinsics
465 const Mips16IntrinsicHelperType *Helper =
466 std::lower_bound(std::begin(Mips16IntrinsicHelper),
467 std::end(Mips16IntrinsicHelper), IntrinsicFind);
468 if (Helper != std::end(Mips16IntrinsicHelper) &&
469 *Helper == IntrinsicFind) {
470 Mips16HelperFunction = Helper->Helper;
471 NeedMips16Helper = true;
472 LookupHelper = false;
476 } else if (GlobalAddressSDNode *G =
477 dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
478 Mips16Libcall Find = { RTLIB::UNKNOWN_LIBCALL,
479 G->getGlobal()->getName().data() };
481 if (std::binary_search(std::begin(HardFloatLibCalls),
482 std::end(HardFloatLibCalls), Find))
483 LookupHelper = false;
486 Mips16HelperFunction =
487 getMips16HelperFunction(CLI.RetTy, CLI.getArgs(), NeedMips16Helper);
490 SDValue JumpTarget = Callee;
492 // T9 should contain the address of the callee function if
493 // -relocation-model=pic or it is an indirect call.
494 if (IsPICCall || !GlobalOrExternal) {
495 unsigned V0Reg = Mips::V0;
496 if (NeedMips16Helper) {
497 RegsToPass.push_front(std::make_pair(V0Reg, Callee));
498 JumpTarget = DAG.getExternalSymbol(Mips16HelperFunction,
499 getPointerTy(DAG.getDataLayout()));
500 ExternalSymbolSDNode *S = cast<ExternalSymbolSDNode>(JumpTarget);
501 JumpTarget = getAddrGlobal(S, CLI.DL, JumpTarget.getValueType(), DAG,
502 MipsII::MO_GOT, Chain,
503 FuncInfo->callPtrInfo(S->getSymbol()));
505 RegsToPass.push_front(std::make_pair((unsigned)Mips::T9, Callee));
508 Ops.push_back(JumpTarget);
510 MipsTargetLowering::getOpndList(Ops, RegsToPass, IsPICCall, GlobalOrExternal,
511 InternalLinkage, IsCallReloc, CLI, Callee,
516 Mips16TargetLowering::emitSel16(unsigned Opc, MachineInstr &MI,
517 MachineBasicBlock *BB) const {
518 if (DontExpandCondPseudos16)
520 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
521 DebugLoc DL = MI.getDebugLoc();
522 // To "insert" a SELECT_CC instruction, we actually have to insert the
523 // diamond control-flow pattern. The incoming instruction knows the
524 // destination vreg to set, the condition code register to branch on, the
525 // true/false values to select between, and a branch opcode to use.
526 const BasicBlock *LLVM_BB = BB->getBasicBlock();
527 MachineFunction::iterator It = ++BB->getIterator();
533 // bNE r1, r0, copy1MBB
534 // fallthrough --> copy0MBB
535 MachineBasicBlock *thisMBB = BB;
536 MachineFunction *F = BB->getParent();
537 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
538 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
539 F->insert(It, copy0MBB);
540 F->insert(It, sinkMBB);
542 // Transfer the remainder of BB and its successor edges to sinkMBB.
543 sinkMBB->splice(sinkMBB->begin(), BB,
544 std::next(MachineBasicBlock::iterator(MI)), BB->end());
545 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
547 // Next, add the true and fallthrough blocks as its successors.
548 BB->addSuccessor(copy0MBB);
549 BB->addSuccessor(sinkMBB);
551 BuildMI(BB, DL, TII->get(Opc))
552 .addReg(MI.getOperand(3).getReg())
557 // # fallthrough to sinkMBB
560 // Update machine-CFG edges
561 BB->addSuccessor(sinkMBB);
564 // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
568 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
569 .addReg(MI.getOperand(1).getReg())
571 .addReg(MI.getOperand(2).getReg())
574 MI.eraseFromParent(); // The pseudo instruction is gone now.
579 Mips16TargetLowering::emitSelT16(unsigned Opc1, unsigned Opc2, MachineInstr &MI,
580 MachineBasicBlock *BB) const {
581 if (DontExpandCondPseudos16)
583 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
584 DebugLoc DL = MI.getDebugLoc();
585 // To "insert" a SELECT_CC instruction, we actually have to insert the
586 // diamond control-flow pattern. The incoming instruction knows the
587 // destination vreg to set, the condition code register to branch on, the
588 // true/false values to select between, and a branch opcode to use.
589 const BasicBlock *LLVM_BB = BB->getBasicBlock();
590 MachineFunction::iterator It = ++BB->getIterator();
596 // bNE r1, r0, copy1MBB
597 // fallthrough --> copy0MBB
598 MachineBasicBlock *thisMBB = BB;
599 MachineFunction *F = BB->getParent();
600 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
601 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
602 F->insert(It, copy0MBB);
603 F->insert(It, sinkMBB);
605 // Transfer the remainder of BB and its successor edges to sinkMBB.
606 sinkMBB->splice(sinkMBB->begin(), BB,
607 std::next(MachineBasicBlock::iterator(MI)), BB->end());
608 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
610 // Next, add the true and fallthrough blocks as its successors.
611 BB->addSuccessor(copy0MBB);
612 BB->addSuccessor(sinkMBB);
614 BuildMI(BB, DL, TII->get(Opc2))
615 .addReg(MI.getOperand(3).getReg())
616 .addReg(MI.getOperand(4).getReg());
617 BuildMI(BB, DL, TII->get(Opc1)).addMBB(sinkMBB);
621 // # fallthrough to sinkMBB
624 // Update machine-CFG edges
625 BB->addSuccessor(sinkMBB);
628 // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
632 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
633 .addReg(MI.getOperand(1).getReg())
635 .addReg(MI.getOperand(2).getReg())
638 MI.eraseFromParent(); // The pseudo instruction is gone now.
644 Mips16TargetLowering::emitSeliT16(unsigned Opc1, unsigned Opc2,
646 MachineBasicBlock *BB) const {
647 if (DontExpandCondPseudos16)
649 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
650 DebugLoc DL = MI.getDebugLoc();
651 // To "insert" a SELECT_CC instruction, we actually have to insert the
652 // diamond control-flow pattern. The incoming instruction knows the
653 // destination vreg to set, the condition code register to branch on, the
654 // true/false values to select between, and a branch opcode to use.
655 const BasicBlock *LLVM_BB = BB->getBasicBlock();
656 MachineFunction::iterator It = ++BB->getIterator();
662 // bNE r1, r0, copy1MBB
663 // fallthrough --> copy0MBB
664 MachineBasicBlock *thisMBB = BB;
665 MachineFunction *F = BB->getParent();
666 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
667 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
668 F->insert(It, copy0MBB);
669 F->insert(It, sinkMBB);
671 // Transfer the remainder of BB and its successor edges to sinkMBB.
672 sinkMBB->splice(sinkMBB->begin(), BB,
673 std::next(MachineBasicBlock::iterator(MI)), BB->end());
674 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
676 // Next, add the true and fallthrough blocks as its successors.
677 BB->addSuccessor(copy0MBB);
678 BB->addSuccessor(sinkMBB);
680 BuildMI(BB, DL, TII->get(Opc2))
681 .addReg(MI.getOperand(3).getReg())
682 .addImm(MI.getOperand(4).getImm());
683 BuildMI(BB, DL, TII->get(Opc1)).addMBB(sinkMBB);
687 // # fallthrough to sinkMBB
690 // Update machine-CFG edges
691 BB->addSuccessor(sinkMBB);
694 // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
698 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
699 .addReg(MI.getOperand(1).getReg())
701 .addReg(MI.getOperand(2).getReg())
704 MI.eraseFromParent(); // The pseudo instruction is gone now.
710 Mips16TargetLowering::emitFEXT_T8I816_ins(unsigned BtOpc, unsigned CmpOpc,
712 MachineBasicBlock *BB) const {
713 if (DontExpandCondPseudos16)
715 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
716 unsigned regX = MI.getOperand(0).getReg();
717 unsigned regY = MI.getOperand(1).getReg();
718 MachineBasicBlock *target = MI.getOperand(2).getMBB();
719 BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(CmpOpc))
722 BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(BtOpc)).addMBB(target);
723 MI.eraseFromParent(); // The pseudo instruction is gone now.
727 MachineBasicBlock *Mips16TargetLowering::emitFEXT_T8I8I16_ins(
728 unsigned BtOpc, unsigned CmpiOpc, unsigned CmpiXOpc, bool ImmSigned,
729 MachineInstr &MI, MachineBasicBlock *BB) const {
730 if (DontExpandCondPseudos16)
732 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
733 unsigned regX = MI.getOperand(0).getReg();
734 int64_t imm = MI.getOperand(1).getImm();
735 MachineBasicBlock *target = MI.getOperand(2).getMBB();
739 else if ((!ImmSigned && isUInt<16>(imm)) ||
740 (ImmSigned && isInt<16>(imm)))
743 llvm_unreachable("immediate field not usable");
744 BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(CmpOpc)).addReg(regX).addImm(imm);
745 BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(BtOpc)).addMBB(target);
746 MI.eraseFromParent(); // The pseudo instruction is gone now.
750 static unsigned Mips16WhichOp8uOr16simm
751 (unsigned shortOp, unsigned longOp, int64_t Imm) {
754 else if (isInt<16>(Imm))
757 llvm_unreachable("immediate field not usable");
761 Mips16TargetLowering::emitFEXT_CCRX16_ins(unsigned SltOpc, MachineInstr &MI,
762 MachineBasicBlock *BB) const {
763 if (DontExpandCondPseudos16)
765 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
766 unsigned CC = MI.getOperand(0).getReg();
767 unsigned regX = MI.getOperand(1).getReg();
768 unsigned regY = MI.getOperand(2).getReg();
769 BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SltOpc))
772 BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Mips::MoveR3216), CC)
774 MI.eraseFromParent(); // The pseudo instruction is gone now.
// Expand an Slti/Sltiu condition-code pseudo: pick the compact or
// extended slt-immediate opcode via Mips16WhichOp8uOr16simm (based on the
// immediate's range), emit the compare, then copy the condition into the
// CC register (operand 0) with MoveR3216.
// NOTE(review): this excerpt is truncated mid-definition -- the
// `MachineInstr &MI` signature line, the `return BB;` under the
// DontExpandCondPseudos16 check, the MoveR3216 source operand, and the
// function's trailing return/closing brace are not visible here; confirm
// against the full file.
Mips16TargetLowering::emitFEXT_CCRXI16_ins(unsigned SltiOpc, unsigned SltiXOpc,
                                           MachineBasicBlock *BB) const {
  if (DontExpandCondPseudos16)
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  // Operands: 0 = CC destination reg, 1 = compared reg, 2 = immediate.
  unsigned CC = MI.getOperand(0).getReg();
  unsigned regX = MI.getOperand(1).getReg();
  int64_t Imm = MI.getOperand(2).getImm();
  // Choose the 8-bit-unsigned or 16-bit-signed encoding for the compare.
  unsigned SltOpc = Mips16WhichOp8uOr16simm(SltiOpc, SltiXOpc, Imm);
  BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(SltOpc)).addReg(regX).addImm(Imm);
  BuildMI(*BB, MI, MI.getDebugLoc(), TII->get(Mips::MoveR3216), CC)
  MI.eraseFromParent(); // The pseudo instruction is gone now.