1 //===- MipsISelLowering.cpp - Mips DAG Lowering Implementation ------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file defines the interfaces that Mips uses to lower LLVM code into a
12 //===----------------------------------------------------------------------===//
14 #include "MipsISelLowering.h"
15 #include "MCTargetDesc/MipsBaseInfo.h"
16 #include "MCTargetDesc/MipsInstPrinter.h"
17 #include "MCTargetDesc/MipsMCTargetDesc.h"
18 #include "MipsCCState.h"
19 #include "MipsInstrInfo.h"
20 #include "MipsMachineFunction.h"
21 #include "MipsRegisterInfo.h"
22 #include "MipsSubtarget.h"
23 #include "MipsTargetMachine.h"
24 #include "MipsTargetObjectFile.h"
25 #include "llvm/ADT/APFloat.h"
26 #include "llvm/ADT/ArrayRef.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/Statistic.h"
29 #include "llvm/ADT/StringRef.h"
30 #include "llvm/ADT/StringSwitch.h"
31 #include "llvm/CodeGen/CallingConvLower.h"
32 #include "llvm/CodeGen/FunctionLoweringInfo.h"
33 #include "llvm/CodeGen/ISDOpcodes.h"
34 #include "llvm/CodeGen/MachineBasicBlock.h"
35 #include "llvm/CodeGen/MachineFrameInfo.h"
36 #include "llvm/CodeGen/MachineFunction.h"
37 #include "llvm/CodeGen/MachineInstr.h"
38 #include "llvm/CodeGen/MachineInstrBuilder.h"
39 #include "llvm/CodeGen/MachineJumpTableInfo.h"
40 #include "llvm/CodeGen/MachineMemOperand.h"
41 #include "llvm/CodeGen/MachineOperand.h"
42 #include "llvm/CodeGen/MachineRegisterInfo.h"
43 #include "llvm/CodeGen/RuntimeLibcalls.h"
44 #include "llvm/CodeGen/SelectionDAG.h"
45 #include "llvm/CodeGen/SelectionDAGNodes.h"
46 #include "llvm/CodeGen/TargetFrameLowering.h"
47 #include "llvm/CodeGen/TargetInstrInfo.h"
48 #include "llvm/CodeGen/TargetRegisterInfo.h"
49 #include "llvm/CodeGen/ValueTypes.h"
50 #include "llvm/IR/CallingConv.h"
51 #include "llvm/IR/Constants.h"
52 #include "llvm/IR/DataLayout.h"
53 #include "llvm/IR/DebugLoc.h"
54 #include "llvm/IR/DerivedTypes.h"
55 #include "llvm/IR/Function.h"
56 #include "llvm/IR/GlobalValue.h"
57 #include "llvm/IR/Type.h"
58 #include "llvm/IR/Value.h"
59 #include "llvm/MC/MCContext.h"
60 #include "llvm/MC/MCRegisterInfo.h"
61 #include "llvm/Support/Casting.h"
62 #include "llvm/Support/CodeGen.h"
63 #include "llvm/Support/CommandLine.h"
64 #include "llvm/Support/Compiler.h"
65 #include "llvm/Support/ErrorHandling.h"
66 #include "llvm/Support/MachineValueType.h"
67 #include "llvm/Support/MathExtras.h"
68 #include "llvm/Target/TargetMachine.h"
69 #include "llvm/Target/TargetOptions.h"
81 #define DEBUG_TYPE "mips-lower"
83 STATISTIC(NumTailCalls, "Number of tail calls");
86 NoZeroDivCheck("mno-check-zero-division", cl::Hidden,
87 cl::desc("MIPS: Don't trap on integer division by zero."),
90 extern cl::opt<bool> EmitJalrReloc;
92 static const MCPhysReg Mips64DPRegs[8] = {
93 Mips::D12_64, Mips::D13_64, Mips::D14_64, Mips::D15_64,
94 Mips::D16_64, Mips::D17_64, Mips::D18_64, Mips::D19_64
97 // If I is a shifted mask, set the size (Size) and the first bit of the
98 // mask (Pos), and return true.
99 // For example, if I is 0x003ff800, (Pos, Size) = (11, 11).
100 static bool isShiftedMask(uint64_t I, uint64_t &Pos, uint64_t &Size) {
101 if (!isShiftedMask_64(I))
104 Size = countPopulation(I);
105 Pos = countTrailingZeros(I);
109 // The MIPS MSA ABI passes vector arguments in the integer register set.
110 // The number of integer registers used is dependant on the ABI used.
111 MVT MipsTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
115 return getRegisterType(Context, VT);
117 return Subtarget.isABI_O32() || VT.getSizeInBits() == 32 ? MVT::i32
121 unsigned MipsTargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
125 return std::max(((unsigned)VT.getSizeInBits() /
126 (Subtarget.isABI_O32() ? 32 : 64)),
128 return MipsTargetLowering::getNumRegisters(Context, VT);
131 unsigned MipsTargetLowering::getVectorTypeBreakdownForCallingConv(
132 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
133 unsigned &NumIntermediates, MVT &RegisterVT) const {
134 // Break down vector types to either 2 i64s or 4 i32s.
135 RegisterVT = getRegisterTypeForCallingConv(Context, CC, VT);
136 IntermediateVT = RegisterVT;
137 NumIntermediates = VT.getSizeInBits() < RegisterVT.getSizeInBits()
138 ? VT.getVectorNumElements()
139 : VT.getSizeInBits() / RegisterVT.getSizeInBits();
141 return NumIntermediates;
144 SDValue MipsTargetLowering::getGlobalReg(SelectionDAG &DAG, EVT Ty) const {
145 MachineFunction &MF = DAG.getMachineFunction();
146 MipsFunctionInfo *FI = MF.getInfo<MipsFunctionInfo>();
147 return DAG.getRegister(FI->getGlobalBaseReg(MF), Ty);
150 SDValue MipsTargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
152 unsigned Flag) const {
153 return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty, 0, Flag);
156 SDValue MipsTargetLowering::getTargetNode(ExternalSymbolSDNode *N, EVT Ty,
158 unsigned Flag) const {
159 return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flag);
162 SDValue MipsTargetLowering::getTargetNode(BlockAddressSDNode *N, EVT Ty,
164 unsigned Flag) const {
165 return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
168 SDValue MipsTargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
170 unsigned Flag) const {
171 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
174 SDValue MipsTargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
176 unsigned Flag) const {
177 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
178 N->getOffset(), Flag);
181 const char *MipsTargetLowering::getTargetNodeName(unsigned Opcode) const {
182 switch ((MipsISD::NodeType)Opcode) {
183 case MipsISD::FIRST_NUMBER: break;
184 case MipsISD::JmpLink: return "MipsISD::JmpLink";
185 case MipsISD::TailCall: return "MipsISD::TailCall";
186 case MipsISD::Highest: return "MipsISD::Highest";
187 case MipsISD::Higher: return "MipsISD::Higher";
188 case MipsISD::Hi: return "MipsISD::Hi";
189 case MipsISD::Lo: return "MipsISD::Lo";
190 case MipsISD::GotHi: return "MipsISD::GotHi";
191 case MipsISD::TlsHi: return "MipsISD::TlsHi";
192 case MipsISD::GPRel: return "MipsISD::GPRel";
193 case MipsISD::ThreadPointer: return "MipsISD::ThreadPointer";
194 case MipsISD::Ret: return "MipsISD::Ret";
195 case MipsISD::ERet: return "MipsISD::ERet";
196 case MipsISD::EH_RETURN: return "MipsISD::EH_RETURN";
197 case MipsISD::FMS: return "MipsISD::FMS";
198 case MipsISD::FPBrcond: return "MipsISD::FPBrcond";
199 case MipsISD::FPCmp: return "MipsISD::FPCmp";
200 case MipsISD::FSELECT: return "MipsISD::FSELECT";
201 case MipsISD::MTC1_D64: return "MipsISD::MTC1_D64";
202 case MipsISD::CMovFP_T: return "MipsISD::CMovFP_T";
203 case MipsISD::CMovFP_F: return "MipsISD::CMovFP_F";
204 case MipsISD::TruncIntFP: return "MipsISD::TruncIntFP";
205 case MipsISD::MFHI: return "MipsISD::MFHI";
206 case MipsISD::MFLO: return "MipsISD::MFLO";
207 case MipsISD::MTLOHI: return "MipsISD::MTLOHI";
208 case MipsISD::Mult: return "MipsISD::Mult";
209 case MipsISD::Multu: return "MipsISD::Multu";
210 case MipsISD::MAdd: return "MipsISD::MAdd";
211 case MipsISD::MAddu: return "MipsISD::MAddu";
212 case MipsISD::MSub: return "MipsISD::MSub";
213 case MipsISD::MSubu: return "MipsISD::MSubu";
214 case MipsISD::DivRem: return "MipsISD::DivRem";
215 case MipsISD::DivRemU: return "MipsISD::DivRemU";
216 case MipsISD::DivRem16: return "MipsISD::DivRem16";
217 case MipsISD::DivRemU16: return "MipsISD::DivRemU16";
218 case MipsISD::BuildPairF64: return "MipsISD::BuildPairF64";
219 case MipsISD::ExtractElementF64: return "MipsISD::ExtractElementF64";
220 case MipsISD::Wrapper: return "MipsISD::Wrapper";
221 case MipsISD::DynAlloc: return "MipsISD::DynAlloc";
222 case MipsISD::Sync: return "MipsISD::Sync";
223 case MipsISD::Ext: return "MipsISD::Ext";
224 case MipsISD::Ins: return "MipsISD::Ins";
225 case MipsISD::CIns: return "MipsISD::CIns";
226 case MipsISD::LWL: return "MipsISD::LWL";
227 case MipsISD::LWR: return "MipsISD::LWR";
228 case MipsISD::SWL: return "MipsISD::SWL";
229 case MipsISD::SWR: return "MipsISD::SWR";
230 case MipsISD::LDL: return "MipsISD::LDL";
231 case MipsISD::LDR: return "MipsISD::LDR";
232 case MipsISD::SDL: return "MipsISD::SDL";
233 case MipsISD::SDR: return "MipsISD::SDR";
234 case MipsISD::EXTP: return "MipsISD::EXTP";
235 case MipsISD::EXTPDP: return "MipsISD::EXTPDP";
236 case MipsISD::EXTR_S_H: return "MipsISD::EXTR_S_H";
237 case MipsISD::EXTR_W: return "MipsISD::EXTR_W";
238 case MipsISD::EXTR_R_W: return "MipsISD::EXTR_R_W";
239 case MipsISD::EXTR_RS_W: return "MipsISD::EXTR_RS_W";
240 case MipsISD::SHILO: return "MipsISD::SHILO";
241 case MipsISD::MTHLIP: return "MipsISD::MTHLIP";
242 case MipsISD::MULSAQ_S_W_PH: return "MipsISD::MULSAQ_S_W_PH";
243 case MipsISD::MAQ_S_W_PHL: return "MipsISD::MAQ_S_W_PHL";
244 case MipsISD::MAQ_S_W_PHR: return "MipsISD::MAQ_S_W_PHR";
245 case MipsISD::MAQ_SA_W_PHL: return "MipsISD::MAQ_SA_W_PHL";
246 case MipsISD::MAQ_SA_W_PHR: return "MipsISD::MAQ_SA_W_PHR";
247 case MipsISD::DPAU_H_QBL: return "MipsISD::DPAU_H_QBL";
248 case MipsISD::DPAU_H_QBR: return "MipsISD::DPAU_H_QBR";
249 case MipsISD::DPSU_H_QBL: return "MipsISD::DPSU_H_QBL";
250 case MipsISD::DPSU_H_QBR: return "MipsISD::DPSU_H_QBR";
251 case MipsISD::DPAQ_S_W_PH: return "MipsISD::DPAQ_S_W_PH";
252 case MipsISD::DPSQ_S_W_PH: return "MipsISD::DPSQ_S_W_PH";
253 case MipsISD::DPAQ_SA_L_W: return "MipsISD::DPAQ_SA_L_W";
254 case MipsISD::DPSQ_SA_L_W: return "MipsISD::DPSQ_SA_L_W";
255 case MipsISD::DPA_W_PH: return "MipsISD::DPA_W_PH";
256 case MipsISD::DPS_W_PH: return "MipsISD::DPS_W_PH";
257 case MipsISD::DPAQX_S_W_PH: return "MipsISD::DPAQX_S_W_PH";
258 case MipsISD::DPAQX_SA_W_PH: return "MipsISD::DPAQX_SA_W_PH";
259 case MipsISD::DPAX_W_PH: return "MipsISD::DPAX_W_PH";
260 case MipsISD::DPSX_W_PH: return "MipsISD::DPSX_W_PH";
261 case MipsISD::DPSQX_S_W_PH: return "MipsISD::DPSQX_S_W_PH";
262 case MipsISD::DPSQX_SA_W_PH: return "MipsISD::DPSQX_SA_W_PH";
263 case MipsISD::MULSA_W_PH: return "MipsISD::MULSA_W_PH";
264 case MipsISD::MULT: return "MipsISD::MULT";
265 case MipsISD::MULTU: return "MipsISD::MULTU";
266 case MipsISD::MADD_DSP: return "MipsISD::MADD_DSP";
267 case MipsISD::MADDU_DSP: return "MipsISD::MADDU_DSP";
268 case MipsISD::MSUB_DSP: return "MipsISD::MSUB_DSP";
269 case MipsISD::MSUBU_DSP: return "MipsISD::MSUBU_DSP";
270 case MipsISD::SHLL_DSP: return "MipsISD::SHLL_DSP";
271 case MipsISD::SHRA_DSP: return "MipsISD::SHRA_DSP";
272 case MipsISD::SHRL_DSP: return "MipsISD::SHRL_DSP";
273 case MipsISD::SETCC_DSP: return "MipsISD::SETCC_DSP";
274 case MipsISD::SELECT_CC_DSP: return "MipsISD::SELECT_CC_DSP";
275 case MipsISD::VALL_ZERO: return "MipsISD::VALL_ZERO";
276 case MipsISD::VANY_ZERO: return "MipsISD::VANY_ZERO";
277 case MipsISD::VALL_NONZERO: return "MipsISD::VALL_NONZERO";
278 case MipsISD::VANY_NONZERO: return "MipsISD::VANY_NONZERO";
279 case MipsISD::VCEQ: return "MipsISD::VCEQ";
280 case MipsISD::VCLE_S: return "MipsISD::VCLE_S";
281 case MipsISD::VCLE_U: return "MipsISD::VCLE_U";
282 case MipsISD::VCLT_S: return "MipsISD::VCLT_S";
283 case MipsISD::VCLT_U: return "MipsISD::VCLT_U";
284 case MipsISD::VEXTRACT_SEXT_ELT: return "MipsISD::VEXTRACT_SEXT_ELT";
285 case MipsISD::VEXTRACT_ZEXT_ELT: return "MipsISD::VEXTRACT_ZEXT_ELT";
286 case MipsISD::VNOR: return "MipsISD::VNOR";
287 case MipsISD::VSHF: return "MipsISD::VSHF";
288 case MipsISD::SHF: return "MipsISD::SHF";
289 case MipsISD::ILVEV: return "MipsISD::ILVEV";
290 case MipsISD::ILVOD: return "MipsISD::ILVOD";
291 case MipsISD::ILVL: return "MipsISD::ILVL";
292 case MipsISD::ILVR: return "MipsISD::ILVR";
293 case MipsISD::PCKEV: return "MipsISD::PCKEV";
294 case MipsISD::PCKOD: return "MipsISD::PCKOD";
295 case MipsISD::INSVE: return "MipsISD::INSVE";
300 MipsTargetLowering::MipsTargetLowering(const MipsTargetMachine &TM,
301 const MipsSubtarget &STI)
302 : TargetLowering(TM), Subtarget(STI), ABI(TM.getABI()) {
303 // Mips does not have i1 type, so use i32 for
304 // setcc operations results (slt, sgt, ...).
305 setBooleanContents(ZeroOrOneBooleanContent);
306 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
307 // The cmp.cond.fmt instruction in MIPS32r6/MIPS64r6 uses 0 and -1 like MSA
308 // does. Integer booleans still use 0 and 1.
309 if (Subtarget.hasMips32r6())
310 setBooleanContents(ZeroOrOneBooleanContent,
311 ZeroOrNegativeOneBooleanContent);
313 // Load extented operations for i1 types must be promoted
314 for (MVT VT : MVT::integer_valuetypes()) {
315 setLoadExtAction(ISD::EXTLOAD, VT, MVT::i1, Promote);
316 setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::i1, Promote);
317 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
320 // MIPS doesn't have extending float->double load/store. Set LoadExtAction
322 for (MVT VT : MVT::fp_valuetypes()) {
323 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
324 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
327 // Set LoadExtAction for f16 vectors to Expand
328 for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
329 MVT F16VT = MVT::getVectorVT(MVT::f16, VT.getVectorNumElements());
331 setLoadExtAction(ISD::EXTLOAD, VT, F16VT, Expand);
334 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
335 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
337 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
339 // Used by legalize types to correctly generate the setcc result.
340 // Without this, every float setcc comes with a AND/OR with the result,
341 // we don't want this, since the fpcmp result goes to a flag register,
342 // which is used implicitly by brcond and select operations.
343 AddPromotedToType(ISD::SETCC, MVT::i1, MVT::i32);
345 // Mips Custom Operations
346 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
347 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
348 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
349 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
350 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
351 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
352 setOperationAction(ISD::SELECT, MVT::f32, Custom);
353 setOperationAction(ISD::SELECT, MVT::f64, Custom);
354 setOperationAction(ISD::SELECT, MVT::i32, Custom);
355 setOperationAction(ISD::SETCC, MVT::f32, Custom);
356 setOperationAction(ISD::SETCC, MVT::f64, Custom);
357 setOperationAction(ISD::BRCOND, MVT::Other, Custom);
358 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
359 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
360 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
362 if (!(TM.Options.NoNaNsFPMath || Subtarget.inAbs2008Mode())) {
363 setOperationAction(ISD::FABS, MVT::f32, Custom);
364 setOperationAction(ISD::FABS, MVT::f64, Custom);
367 if (Subtarget.isGP64bit()) {
368 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
369 setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
370 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
371 setOperationAction(ISD::JumpTable, MVT::i64, Custom);
372 setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
373 setOperationAction(ISD::SELECT, MVT::i64, Custom);
374 setOperationAction(ISD::LOAD, MVT::i64, Custom);
375 setOperationAction(ISD::STORE, MVT::i64, Custom);
376 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
377 setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
378 setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
379 setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
382 if (!Subtarget.isGP64bit()) {
383 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
384 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
385 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
388 setOperationAction(ISD::EH_DWARF_CFA, MVT::i32, Custom);
389 if (Subtarget.isGP64bit())
390 setOperationAction(ISD::EH_DWARF_CFA, MVT::i64, Custom);
392 setOperationAction(ISD::SDIV, MVT::i32, Expand);
393 setOperationAction(ISD::SREM, MVT::i32, Expand);
394 setOperationAction(ISD::UDIV, MVT::i32, Expand);
395 setOperationAction(ISD::UREM, MVT::i32, Expand);
396 setOperationAction(ISD::SDIV, MVT::i64, Expand);
397 setOperationAction(ISD::SREM, MVT::i64, Expand);
398 setOperationAction(ISD::UDIV, MVT::i64, Expand);
399 setOperationAction(ISD::UREM, MVT::i64, Expand);
401 // Operations not directly supported by Mips.
402 setOperationAction(ISD::BR_CC, MVT::f32, Expand);
403 setOperationAction(ISD::BR_CC, MVT::f64, Expand);
404 setOperationAction(ISD::BR_CC, MVT::i32, Expand);
405 setOperationAction(ISD::BR_CC, MVT::i64, Expand);
406 setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
407 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
408 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
409 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
410 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
411 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Expand);
412 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
413 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
414 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
415 if (Subtarget.hasCnMips()) {
416 setOperationAction(ISD::CTPOP, MVT::i32, Legal);
417 setOperationAction(ISD::CTPOP, MVT::i64, Legal);
419 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
420 setOperationAction(ISD::CTPOP, MVT::i64, Expand);
422 setOperationAction(ISD::CTTZ, MVT::i32, Expand);
423 setOperationAction(ISD::CTTZ, MVT::i64, Expand);
424 setOperationAction(ISD::ROTL, MVT::i32, Expand);
425 setOperationAction(ISD::ROTL, MVT::i64, Expand);
426 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
427 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
429 if (!Subtarget.hasMips32r2())
430 setOperationAction(ISD::ROTR, MVT::i32, Expand);
432 if (!Subtarget.hasMips64r2())
433 setOperationAction(ISD::ROTR, MVT::i64, Expand);
435 setOperationAction(ISD::FSIN, MVT::f32, Expand);
436 setOperationAction(ISD::FSIN, MVT::f64, Expand);
437 setOperationAction(ISD::FCOS, MVT::f32, Expand);
438 setOperationAction(ISD::FCOS, MVT::f64, Expand);
439 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
440 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
441 setOperationAction(ISD::FPOW, MVT::f32, Expand);
442 setOperationAction(ISD::FPOW, MVT::f64, Expand);
443 setOperationAction(ISD::FLOG, MVT::f32, Expand);
444 setOperationAction(ISD::FLOG2, MVT::f32, Expand);
445 setOperationAction(ISD::FLOG10, MVT::f32, Expand);
446 setOperationAction(ISD::FEXP, MVT::f32, Expand);
447 setOperationAction(ISD::FMA, MVT::f32, Expand);
448 setOperationAction(ISD::FMA, MVT::f64, Expand);
449 setOperationAction(ISD::FREM, MVT::f32, Expand);
450 setOperationAction(ISD::FREM, MVT::f64, Expand);
452 // Lower f16 conversion operations into library calls
453 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
454 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
455 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
456 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
458 setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
460 setOperationAction(ISD::VASTART, MVT::Other, Custom);
461 setOperationAction(ISD::VAARG, MVT::Other, Custom);
462 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
463 setOperationAction(ISD::VAEND, MVT::Other, Expand);
465 // Use the default for now
466 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
467 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
469 if (!Subtarget.isGP64bit()) {
470 setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Expand);
471 setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Expand);
474 if (!Subtarget.hasMips32r2()) {
475 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
476 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
479 // MIPS16 lacks MIPS32's clz and clo instructions.
480 if (!Subtarget.hasMips32() || Subtarget.inMips16Mode())
481 setOperationAction(ISD::CTLZ, MVT::i32, Expand);
482 if (!Subtarget.hasMips64())
483 setOperationAction(ISD::CTLZ, MVT::i64, Expand);
485 if (!Subtarget.hasMips32r2())
486 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
487 if (!Subtarget.hasMips64r2())
488 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
490 if (Subtarget.isGP64bit()) {
491 setLoadExtAction(ISD::SEXTLOAD, MVT::i64, MVT::i32, Custom);
492 setLoadExtAction(ISD::ZEXTLOAD, MVT::i64, MVT::i32, Custom);
493 setLoadExtAction(ISD::EXTLOAD, MVT::i64, MVT::i32, Custom);
494 setTruncStoreAction(MVT::i64, MVT::i32, Custom);
497 setOperationAction(ISD::TRAP, MVT::Other, Legal);
499 setTargetDAGCombine(ISD::SDIVREM);
500 setTargetDAGCombine(ISD::UDIVREM);
501 setTargetDAGCombine(ISD::SELECT);
502 setTargetDAGCombine(ISD::AND);
503 setTargetDAGCombine(ISD::OR);
504 setTargetDAGCombine(ISD::ADD);
505 setTargetDAGCombine(ISD::SUB);
506 setTargetDAGCombine(ISD::AssertZext);
507 setTargetDAGCombine(ISD::SHL);
510 // These libcalls are not available in 32-bit.
511 setLibcallName(RTLIB::SHL_I128, nullptr);
512 setLibcallName(RTLIB::SRL_I128, nullptr);
513 setLibcallName(RTLIB::SRA_I128, nullptr);
516 setMinFunctionAlignment(Subtarget.isGP64bit() ? Align(8) : Align(4));
518 // The arguments on the stack are defined in terms of 4-byte slots on O32
519 // and 8-byte slots on N32/N64.
520 setMinStackArgumentAlignment((ABI.IsN32() || ABI.IsN64()) ? Align(8)
523 setStackPointerRegisterToSaveRestore(ABI.IsN64() ? Mips::SP_64 : Mips::SP);
525 MaxStoresPerMemcpy = 16;
527 isMicroMips = Subtarget.inMicroMipsMode();
530 const MipsTargetLowering *
531 MipsTargetLowering::create(const MipsTargetMachine &TM,
532 const MipsSubtarget &STI) {
533 if (STI.inMips16Mode())
534 return createMips16TargetLowering(TM, STI);
536 return createMipsSETargetLowering(TM, STI);
539 // Create a fast isel object.
541 MipsTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
542 const TargetLibraryInfo *libInfo) const {
543 const MipsTargetMachine &TM =
544 static_cast<const MipsTargetMachine &>(funcInfo.MF->getTarget());
546 // We support only the standard encoding [MIPS32,MIPS32R5] ISAs.
547 bool UseFastISel = TM.Options.EnableFastISel && Subtarget.hasMips32() &&
548 !Subtarget.hasMips32r6() && !Subtarget.inMips16Mode() &&
549 !Subtarget.inMicroMipsMode();
551 // Disable if either of the following is true:
552 // We do not generate PIC, the ABI is not O32, XGOT is being used.
553 if (!TM.isPositionIndependent() || !TM.getABI().IsO32() ||
557 return UseFastISel ? Mips::createFastISel(funcInfo, libInfo) : nullptr;
560 EVT MipsTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
564 return VT.changeVectorElementTypeToInteger();
567 static SDValue performDivRemCombine(SDNode *N, SelectionDAG &DAG,
568 TargetLowering::DAGCombinerInfo &DCI,
569 const MipsSubtarget &Subtarget) {
570 if (DCI.isBeforeLegalizeOps())
573 EVT Ty = N->getValueType(0);
574 unsigned LO = (Ty == MVT::i32) ? Mips::LO0 : Mips::LO0_64;
575 unsigned HI = (Ty == MVT::i32) ? Mips::HI0 : Mips::HI0_64;
576 unsigned Opc = N->getOpcode() == ISD::SDIVREM ? MipsISD::DivRem16 :
580 SDValue DivRem = DAG.getNode(Opc, DL, MVT::Glue,
581 N->getOperand(0), N->getOperand(1));
582 SDValue InChain = DAG.getEntryNode();
583 SDValue InGlue = DivRem;
586 if (N->hasAnyUseOfValue(0)) {
587 SDValue CopyFromLo = DAG.getCopyFromReg(InChain, DL, LO, Ty,
589 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), CopyFromLo);
590 InChain = CopyFromLo.getValue(1);
591 InGlue = CopyFromLo.getValue(2);
595 if (N->hasAnyUseOfValue(1)) {
596 SDValue CopyFromHi = DAG.getCopyFromReg(InChain, DL,
598 DAG.ReplaceAllUsesOfValueWith(SDValue(N, 1), CopyFromHi);
604 static Mips::CondCode condCodeToFCC(ISD::CondCode CC) {
606 default: llvm_unreachable("Unknown fp condition code!");
608 case ISD::SETOEQ: return Mips::FCOND_OEQ;
609 case ISD::SETUNE: return Mips::FCOND_UNE;
611 case ISD::SETOLT: return Mips::FCOND_OLT;
613 case ISD::SETOGT: return Mips::FCOND_OGT;
615 case ISD::SETOLE: return Mips::FCOND_OLE;
617 case ISD::SETOGE: return Mips::FCOND_OGE;
618 case ISD::SETULT: return Mips::FCOND_ULT;
619 case ISD::SETULE: return Mips::FCOND_ULE;
620 case ISD::SETUGT: return Mips::FCOND_UGT;
621 case ISD::SETUGE: return Mips::FCOND_UGE;
622 case ISD::SETUO: return Mips::FCOND_UN;
623 case ISD::SETO: return Mips::FCOND_OR;
625 case ISD::SETONE: return Mips::FCOND_ONE;
626 case ISD::SETUEQ: return Mips::FCOND_UEQ;
630 /// This function returns true if the floating point conditional branches and
631 /// conditional moves which use condition code CC should be inverted.
632 static bool invertFPCondCodeUser(Mips::CondCode CC) {
633 if (CC >= Mips::FCOND_F && CC <= Mips::FCOND_NGT)
636 assert((CC >= Mips::FCOND_T && CC <= Mips::FCOND_GT) &&
637 "Illegal Condition Code");
642 // Creates and returns an FPCmp node from a setcc node.
643 // Returns Op if setcc is not a floating point comparison.
644 static SDValue createFPCmp(SelectionDAG &DAG, const SDValue &Op) {
645 // must be a SETCC node
646 if (Op.getOpcode() != ISD::SETCC)
649 SDValue LHS = Op.getOperand(0);
651 if (!LHS.getValueType().isFloatingPoint())
654 SDValue RHS = Op.getOperand(1);
657 // Assume the 3rd operand is a CondCodeSDNode. Add code to check the type of
658 // node if necessary.
659 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
661 return DAG.getNode(MipsISD::FPCmp, DL, MVT::Glue, LHS, RHS,
662 DAG.getConstant(condCodeToFCC(CC), DL, MVT::i32));
665 // Creates and returns a CMovFPT/F node.
666 static SDValue createCMovFP(SelectionDAG &DAG, SDValue Cond, SDValue True,
667 SDValue False, const SDLoc &DL) {
668 ConstantSDNode *CC = cast<ConstantSDNode>(Cond.getOperand(2));
669 bool invert = invertFPCondCodeUser((Mips::CondCode)CC->getSExtValue());
670 SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
672 return DAG.getNode((invert ? MipsISD::CMovFP_F : MipsISD::CMovFP_T), DL,
673 True.getValueType(), True, FCC0, False, Cond);
676 static SDValue performSELECTCombine(SDNode *N, SelectionDAG &DAG,
677 TargetLowering::DAGCombinerInfo &DCI,
678 const MipsSubtarget &Subtarget) {
679 if (DCI.isBeforeLegalizeOps())
682 SDValue SetCC = N->getOperand(0);
684 if ((SetCC.getOpcode() != ISD::SETCC) ||
685 !SetCC.getOperand(0).getValueType().isInteger())
688 SDValue False = N->getOperand(2);
689 EVT FalseTy = False.getValueType();
691 if (!FalseTy.isInteger())
694 ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(False);
696 // If the RHS (False) is 0, we swap the order of the operands
697 // of ISD::SELECT (obviously also inverting the condition) so that we can
698 // take advantage of conditional moves using the $0 register.
700 // return (a != 0) ? x : 0;
708 if (!FalseC->getZExtValue()) {
709 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
710 SDValue True = N->getOperand(1);
712 SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
714 ISD::getSetCCInverse(CC, SetCC.getValueType()));
716 return DAG.getNode(ISD::SELECT, DL, FalseTy, SetCC, False, True);
719 // If both operands are integer constants there's a possibility that we
720 // can do some interesting optimizations.
721 SDValue True = N->getOperand(1);
722 ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(True);
724 if (!TrueC || !True.getValueType().isInteger())
727 // We'll also ignore MVT::i64 operands as this optimizations proves
728 // to be ineffective because of the required sign extensions as the result
729 // of a SETCC operator is always MVT::i32 for non-vector types.
730 if (True.getValueType() == MVT::i64)
733 int64_t Diff = TrueC->getSExtValue() - FalseC->getSExtValue();
735 // 1) (a < x) ? y : y-1
737 // addiu $reg2, $reg1, y-1
739 return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, False);
741 // 2) (a < x) ? y-1 : y
743 // xor $reg1, $reg1, 1
744 // addiu $reg2, $reg1, y-1
746 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC.getOperand(2))->get();
747 SetCC = DAG.getSetCC(DL, SetCC.getValueType(), SetCC.getOperand(0),
749 ISD::getSetCCInverse(CC, SetCC.getValueType()));
750 return DAG.getNode(ISD::ADD, DL, SetCC.getValueType(), SetCC, True);
753 // Could not optimize.
757 static SDValue performCMovFPCombine(SDNode *N, SelectionDAG &DAG,
758 TargetLowering::DAGCombinerInfo &DCI,
759 const MipsSubtarget &Subtarget) {
760 if (DCI.isBeforeLegalizeOps())
763 SDValue ValueIfTrue = N->getOperand(0), ValueIfFalse = N->getOperand(2);
765 ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(ValueIfFalse);
766 if (!FalseC || FalseC->getZExtValue())
769 // Since RHS (False) is 0, we swap the order of the True/False operands
770 // (obviously also inverting the condition) so that we can
771 // take advantage of conditional moves using the $0 register.
773 // return (a != 0) ? x : 0;
776 unsigned Opc = (N->getOpcode() == MipsISD::CMovFP_T) ? MipsISD::CMovFP_F :
779 SDValue FCC = N->getOperand(1), Glue = N->getOperand(3);
780 return DAG.getNode(Opc, SDLoc(N), ValueIfFalse.getValueType(),
781 ValueIfFalse, FCC, ValueIfTrue, Glue);
784 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG,
785 TargetLowering::DAGCombinerInfo &DCI,
786 const MipsSubtarget &Subtarget) {
787 if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
790 SDValue FirstOperand = N->getOperand(0);
791 unsigned FirstOperandOpc = FirstOperand.getOpcode();
792 SDValue Mask = N->getOperand(1);
793 EVT ValTy = N->getValueType(0);
796 uint64_t Pos = 0, SMPos, SMSize;
801 // Op's second operand must be a shifted mask.
802 if (!(CN = dyn_cast<ConstantSDNode>(Mask)) ||
803 !isShiftedMask(CN->getZExtValue(), SMPos, SMSize))
806 if (FirstOperandOpc == ISD::SRA || FirstOperandOpc == ISD::SRL) {
807 // Pattern match EXT.
808 // $dst = and ((sra or srl) $src , pos), (2**size - 1)
809 // => ext $dst, $src, pos, size
811 // The second operand of the shift must be an immediate.
812 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
815 Pos = CN->getZExtValue();
817 // Return if the shifted mask does not start at bit 0 or the sum of its size
818 // and Pos exceeds the word's size.
819 if (SMPos != 0 || Pos + SMSize > ValTy.getSizeInBits())
823 NewOperand = FirstOperand.getOperand(0);
824 } else if (FirstOperandOpc == ISD::SHL && Subtarget.hasCnMips()) {
825 // Pattern match CINS.
826 // $dst = and (shl $src , pos), mask
827 // => cins $dst, $src, pos, size
828 // mask is a shifted mask with consecutive 1's, pos = shift amount,
829 // size = population count.
831 // The second operand of the shift must be an immediate.
832 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))))
835 Pos = CN->getZExtValue();
837 if (SMPos != Pos || Pos >= ValTy.getSizeInBits() || SMSize >= 32 ||
838 Pos + SMSize > ValTy.getSizeInBits())
841 NewOperand = FirstOperand.getOperand(0);
842 // SMSize is 'location' (position) in this case, not size.
846 // Pattern match EXT.
847 // $dst = and $src, (2**size - 1) , if size > 16
848 // => ext $dst, $src, pos, size , pos = 0
850 // If the mask is <= 0xffff, andi can be used instead.
851 if (CN->getZExtValue() <= 0xffff)
854 // Return if the mask doesn't start at position 0.
859 NewOperand = FirstOperand;
861 return DAG.getNode(Opc, DL, ValTy, NewOperand,
862 DAG.getConstant(Pos, DL, MVT::i32),
863 DAG.getConstant(SMSize, DL, MVT::i32));
866 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
867 TargetLowering::DAGCombinerInfo &DCI,
868 const MipsSubtarget &Subtarget) {
869 // Pattern match INS.
870 // $dst = or (and $src1 , mask0), (and (shl $src, pos), mask1),
871 // where mask1 = (2**size - 1) << pos, mask0 = ~mask1
872 // => ins $dst, $src, size, pos, $src1
873 if (DCI.isBeforeLegalizeOps() || !Subtarget.hasExtractInsert())
876 SDValue And0 = N->getOperand(0), And1 = N->getOperand(1);
877 uint64_t SMPos0, SMSize0, SMPos1, SMSize1;
878 ConstantSDNode *CN, *CN1;
880 // See if Op's first operand matches (and $src1 , mask0).
881 if (And0.getOpcode() != ISD::AND)
884 if (!(CN = dyn_cast<ConstantSDNode>(And0.getOperand(1))) ||
885 !isShiftedMask(~CN->getSExtValue(), SMPos0, SMSize0))
888 // See if Op's second operand matches (and (shl $src, pos), mask1).
889 if (And1.getOpcode() == ISD::AND &&
890 And1.getOperand(0).getOpcode() == ISD::SHL) {
892 if (!(CN = dyn_cast<ConstantSDNode>(And1.getOperand(1))) ||
893 !isShiftedMask(CN->getZExtValue(), SMPos1, SMSize1))
896 // The shift masks must have the same position and size.
897 if (SMPos0 != SMPos1 || SMSize0 != SMSize1)
900 SDValue Shl = And1.getOperand(0);
902 if (!(CN = dyn_cast<ConstantSDNode>(Shl.getOperand(1))))
905 unsigned Shamt = CN->getZExtValue();
907 // Return if the shift amount and the first bit position of mask are not the
909 EVT ValTy = N->getValueType(0);
910 if ((Shamt != SMPos0) || (SMPos0 + SMSize0 > ValTy.getSizeInBits()))
914 return DAG.getNode(MipsISD::Ins, DL, ValTy, Shl.getOperand(0),
915 DAG.getConstant(SMPos0, DL, MVT::i32),
916 DAG.getConstant(SMSize0, DL, MVT::i32),
919 // Pattern match DINS.
920 // $dst = or (and $src, mask0), mask1
921 // where mask0 = ((1 << SMSize0) -1) << SMPos0
922 // => dins $dst, $src, pos, size
923 if (~CN->getSExtValue() == ((((int64_t)1 << SMSize0) - 1) << SMPos0) &&
924 ((SMSize0 + SMPos0 <= 64 && Subtarget.hasMips64r2()) ||
925 (SMSize0 + SMPos0 <= 32))) {
926 // Check if AND instruction has constant as argument
927 bool isConstCase = And1.getOpcode() != ISD::AND;
928 if (And1.getOpcode() == ISD::AND) {
929 if (!(CN1 = dyn_cast<ConstantSDNode>(And1->getOperand(1))))
932 if (!(CN1 = dyn_cast<ConstantSDNode>(N->getOperand(1))))
935 // Don't generate INS if constant OR operand doesn't fit into bits
936 // cleared by constant AND operand.
937 if (CN->getSExtValue() & CN1->getSExtValue())
941 EVT ValTy = N->getOperand(0)->getValueType(0);
945 Const1 = DAG.getConstant(SMPos0, DL, MVT::i32);
946 SrlX = DAG.getNode(ISD::SRL, DL, And1->getValueType(0), And1, Const1);
949 MipsISD::Ins, DL, N->getValueType(0),
951 ? DAG.getConstant(CN1->getSExtValue() >> SMPos0, DL, ValTy)
953 DAG.getConstant(SMPos0, DL, MVT::i32),
954 DAG.getConstant(ValTy.getSizeInBits() / 8 < 8 ? SMSize0 & 31
957 And0->getOperand(0));
964 static SDValue performMADD_MSUBCombine(SDNode *ROOTNode, SelectionDAG &CurDAG,
965 const MipsSubtarget &Subtarget) {
966 // ROOTNode must have a multiplication as an operand for the match to be
968 if (ROOTNode->getOperand(0).getOpcode() != ISD::MUL &&
969 ROOTNode->getOperand(1).getOpcode() != ISD::MUL)
972 // We don't handle vector types here.
973 if (ROOTNode->getValueType(0).isVector())
976 // For MIPS64, madd / msub instructions are inefficent to use with 64 bit
978 // (add (mul a b) c) =>
979 // let res = (madd (mthi (drotr c 32))x(mtlo c) a b) in
980 // MIPS64: (or (dsll (mfhi res) 32) (dsrl (dsll (mflo res) 32) 32)
982 // MIPS64R2: (dins (mflo res) (mfhi res) 32 32)
984 // The overhead of setting up the Hi/Lo registers and reassembling the
985 // result makes this a dubious optimzation for MIPS64. The core of the
986 // problem is that Hi/Lo contain the upper and lower 32 bits of the
987 // operand and result.
989 // It requires a chain of 4 add/mul for MIPS64R2 to get better code
990 // density than doing it naively, 5 for MIPS64. Additionally, using
991 // madd/msub on MIPS64 requires the operands actually be 32 bit sign
992 // extended operands, not true 64 bit values.
994 // FIXME: For the moment, disable this completely for MIPS64.
995 if (Subtarget.hasMips64())
998 SDValue Mult = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
999 ? ROOTNode->getOperand(0)
1000 : ROOTNode->getOperand(1);
1002 SDValue AddOperand = ROOTNode->getOperand(0).getOpcode() == ISD::MUL
1003 ? ROOTNode->getOperand(1)
1004 : ROOTNode->getOperand(0);
1006 // Transform this to a MADD only if the user of this node is the add.
1007 // If there are other users of the mul, this function returns here.
1008 if (!Mult.hasOneUse())
1011 // maddu and madd are unusual instructions in that on MIPS64 bits 63..31
1012 // must be in canonical form, i.e. sign extended. For MIPS32, the operands
1013 // of the multiply must have 32 or more sign bits, otherwise we cannot
1014 // perform this optimization. We have to check this here as we're performing
1015 // this optimization pre-legalization.
1016 SDValue MultLHS = Mult->getOperand(0);
1017 SDValue MultRHS = Mult->getOperand(1);
1019 bool IsSigned = MultLHS->getOpcode() == ISD::SIGN_EXTEND &&
1020 MultRHS->getOpcode() == ISD::SIGN_EXTEND;
1021 bool IsUnsigned = MultLHS->getOpcode() == ISD::ZERO_EXTEND &&
1022 MultRHS->getOpcode() == ISD::ZERO_EXTEND;
1024 if (!IsSigned && !IsUnsigned)
1027 // Initialize accumulator.
1031 BottomHalf = CurDAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, AddOperand,
1032 CurDAG.getIntPtrConstant(0, DL));
1034 TopHalf = CurDAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, AddOperand,
1035 CurDAG.getIntPtrConstant(1, DL));
1036 SDValue ACCIn = CurDAG.getNode(MipsISD::MTLOHI, DL, MVT::Untyped,
1040 // Create MipsMAdd(u) / MipsMSub(u) node.
1041 bool IsAdd = ROOTNode->getOpcode() == ISD::ADD;
1042 unsigned Opcode = IsAdd ? (IsUnsigned ? MipsISD::MAddu : MipsISD::MAdd)
1043 : (IsUnsigned ? MipsISD::MSubu : MipsISD::MSub);
1044 SDValue MAddOps[3] = {
1045 CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(0)),
1046 CurDAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Mult->getOperand(1)), ACCIn};
1047 EVT VTs[2] = {MVT::i32, MVT::i32};
1048 SDValue MAdd = CurDAG.getNode(Opcode, DL, VTs, MAddOps);
1050 SDValue ResLo = CurDAG.getNode(MipsISD::MFLO, DL, MVT::i32, MAdd);
1051 SDValue ResHi = CurDAG.getNode(MipsISD::MFHI, DL, MVT::i32, MAdd);
1053 CurDAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, ResLo, ResHi);
1057 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG,
1058 TargetLowering::DAGCombinerInfo &DCI,
1059 const MipsSubtarget &Subtarget) {
1060 // (sub v0 (mul v1, v2)) => (msub v1, v2, v0)
1061 if (DCI.isBeforeLegalizeOps()) {
1062 if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
1063 !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
1064 return performMADD_MSUBCombine(N, DAG, Subtarget);
1072 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
1073 TargetLowering::DAGCombinerInfo &DCI,
1074 const MipsSubtarget &Subtarget) {
1075 // (add v0 (mul v1, v2)) => (madd v1, v2, v0)
1076 if (DCI.isBeforeLegalizeOps()) {
1077 if (Subtarget.hasMips32() && !Subtarget.hasMips32r6() &&
1078 !Subtarget.inMips16Mode() && N->getValueType(0) == MVT::i64)
1079 return performMADD_MSUBCombine(N, DAG, Subtarget);
1084 // (add v0, (add v1, abs_lo(tjt))) => (add (add v0, v1), abs_lo(tjt))
1085 SDValue Add = N->getOperand(1);
1087 if (Add.getOpcode() != ISD::ADD)
1090 SDValue Lo = Add.getOperand(1);
1092 if ((Lo.getOpcode() != MipsISD::Lo) ||
1093 (Lo.getOperand(0).getOpcode() != ISD::TargetJumpTable))
1096 EVT ValTy = N->getValueType(0);
1099 SDValue Add1 = DAG.getNode(ISD::ADD, DL, ValTy, N->getOperand(0),
1101 return DAG.getNode(ISD::ADD, DL, ValTy, Add1, Lo);
1104 static SDValue performSHLCombine(SDNode *N, SelectionDAG &DAG,
1105 TargetLowering::DAGCombinerInfo &DCI,
1106 const MipsSubtarget &Subtarget) {
1107 // Pattern match CINS.
1108 // $dst = shl (and $src , imm), pos
1109 // => cins $dst, $src, pos, size
1111 if (DCI.isBeforeLegalizeOps() || !Subtarget.hasCnMips())
1114 SDValue FirstOperand = N->getOperand(0);
1115 unsigned FirstOperandOpc = FirstOperand.getOpcode();
1116 SDValue SecondOperand = N->getOperand(1);
1117 EVT ValTy = N->getValueType(0);
1120 uint64_t Pos = 0, SMPos, SMSize;
1124 // The second operand of the shift must be an immediate.
1125 if (!(CN = dyn_cast<ConstantSDNode>(SecondOperand)))
1128 Pos = CN->getZExtValue();
1130 if (Pos >= ValTy.getSizeInBits())
1133 if (FirstOperandOpc != ISD::AND)
1136 // AND's second operand must be a shifted mask.
1137 if (!(CN = dyn_cast<ConstantSDNode>(FirstOperand.getOperand(1))) ||
1138 !isShiftedMask(CN->getZExtValue(), SMPos, SMSize))
1141 // Return if the shifted mask does not start at bit 0 or the sum of its size
1142 // and Pos exceeds the word's size.
1143 if (SMPos != 0 || SMSize > 32 || Pos + SMSize > ValTy.getSizeInBits())
1146 NewOperand = FirstOperand.getOperand(0);
1147 // SMSize is 'location' (position) in this case, not size.
1150 return DAG.getNode(MipsISD::CIns, DL, ValTy, NewOperand,
1151 DAG.getConstant(Pos, DL, MVT::i32),
1152 DAG.getConstant(SMSize, DL, MVT::i32));
1155 SDValue MipsTargetLowering::PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI)
1157 SelectionDAG &DAG = DCI.DAG;
1158 unsigned Opc = N->getOpcode();
1164 return performDivRemCombine(N, DAG, DCI, Subtarget);
1166 return performSELECTCombine(N, DAG, DCI, Subtarget);
1167 case MipsISD::CMovFP_F:
1168 case MipsISD::CMovFP_T:
1169 return performCMovFPCombine(N, DAG, DCI, Subtarget);
1171 return performANDCombine(N, DAG, DCI, Subtarget);
1173 return performORCombine(N, DAG, DCI, Subtarget);
1175 return performADDCombine(N, DAG, DCI, Subtarget);
1177 return performSHLCombine(N, DAG, DCI, Subtarget);
1179 return performSUBCombine(N, DAG, DCI, Subtarget);
1185 bool MipsTargetLowering::isCheapToSpeculateCttz() const {
1186 return Subtarget.hasMips32();
1189 bool MipsTargetLowering::isCheapToSpeculateCtlz() const {
1190 return Subtarget.hasMips32();
1193 bool MipsTargetLowering::shouldFoldConstantShiftPairToMask(
1194 const SDNode *N, CombineLevel Level) const {
1195 if (N->getOperand(0).getValueType().isVector())
1201 MipsTargetLowering::LowerOperationWrapper(SDNode *N,
1202 SmallVectorImpl<SDValue> &Results,
1203 SelectionDAG &DAG) const {
1204 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
1207 for (unsigned I = 0, E = Res->getNumValues(); I != E; ++I)
1208 Results.push_back(Res.getValue(I));
1212 MipsTargetLowering::ReplaceNodeResults(SDNode *N,
1213 SmallVectorImpl<SDValue> &Results,
1214 SelectionDAG &DAG) const {
1215 return LowerOperationWrapper(N, Results, DAG);
1218 SDValue MipsTargetLowering::
1219 LowerOperation(SDValue Op, SelectionDAG &DAG) const
1221 switch (Op.getOpcode())
1223 case ISD::BRCOND: return lowerBRCOND(Op, DAG);
1224 case ISD::ConstantPool: return lowerConstantPool(Op, DAG);
1225 case ISD::GlobalAddress: return lowerGlobalAddress(Op, DAG);
1226 case ISD::BlockAddress: return lowerBlockAddress(Op, DAG);
1227 case ISD::GlobalTLSAddress: return lowerGlobalTLSAddress(Op, DAG);
1228 case ISD::JumpTable: return lowerJumpTable(Op, DAG);
1229 case ISD::SELECT: return lowerSELECT(Op, DAG);
1230 case ISD::SETCC: return lowerSETCC(Op, DAG);
1231 case ISD::VASTART: return lowerVASTART(Op, DAG);
1232 case ISD::VAARG: return lowerVAARG(Op, DAG);
1233 case ISD::FCOPYSIGN: return lowerFCOPYSIGN(Op, DAG);
1234 case ISD::FABS: return lowerFABS(Op, DAG);
1235 case ISD::FRAMEADDR: return lowerFRAMEADDR(Op, DAG);
1236 case ISD::RETURNADDR: return lowerRETURNADDR(Op, DAG);
1237 case ISD::EH_RETURN: return lowerEH_RETURN(Op, DAG);
1238 case ISD::ATOMIC_FENCE: return lowerATOMIC_FENCE(Op, DAG);
1239 case ISD::SHL_PARTS: return lowerShiftLeftParts(Op, DAG);
1240 case ISD::SRA_PARTS: return lowerShiftRightParts(Op, DAG, true);
1241 case ISD::SRL_PARTS: return lowerShiftRightParts(Op, DAG, false);
1242 case ISD::LOAD: return lowerLOAD(Op, DAG);
1243 case ISD::STORE: return lowerSTORE(Op, DAG);
1244 case ISD::EH_DWARF_CFA: return lowerEH_DWARF_CFA(Op, DAG);
1245 case ISD::FP_TO_SINT: return lowerFP_TO_SINT(Op, DAG);
1250 //===----------------------------------------------------------------------===//
1251 // Lower helper functions
1252 //===----------------------------------------------------------------------===//
1254 // addLiveIn - This helper function adds the specified physical register to the
1255 // MachineFunction as a live in value. It also creates a corresponding
1256 // virtual register for it.
1258 addLiveIn(MachineFunction &MF, unsigned PReg, const TargetRegisterClass *RC)
1260 Register VReg = MF.getRegInfo().createVirtualRegister(RC);
1261 MF.getRegInfo().addLiveIn(PReg, VReg);
1265 static MachineBasicBlock *insertDivByZeroTrap(MachineInstr &MI,
1266 MachineBasicBlock &MBB,
1267 const TargetInstrInfo &TII,
1268 bool Is64Bit, bool IsMicroMips) {
1272 // Insert instruction "teq $divisor_reg, $zero, 7".
1273 MachineBasicBlock::iterator I(MI);
1274 MachineInstrBuilder MIB;
1275 MachineOperand &Divisor = MI.getOperand(2);
1276 MIB = BuildMI(MBB, std::next(I), MI.getDebugLoc(),
1277 TII.get(IsMicroMips ? Mips::TEQ_MM : Mips::TEQ))
1278 .addReg(Divisor.getReg(), getKillRegState(Divisor.isKill()))
1282 // Use the 32-bit sub-register if this is a 64-bit division.
1284 MIB->getOperand(0).setSubReg(Mips::sub_32);
1286 // Clear Divisor's kill flag.
1287 Divisor.setIsKill(false);
1289 // We would normally delete the original instruction here but in this case
1290 // we only needed to inject an additional instruction rather than replace it.
1296 MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1297 MachineBasicBlock *BB) const {
1298 switch (MI.getOpcode()) {
1300 llvm_unreachable("Unexpected instr type to insert");
1301 case Mips::ATOMIC_LOAD_ADD_I8:
1302 return emitAtomicBinaryPartword(MI, BB, 1);
1303 case Mips::ATOMIC_LOAD_ADD_I16:
1304 return emitAtomicBinaryPartword(MI, BB, 2);
1305 case Mips::ATOMIC_LOAD_ADD_I32:
1306 return emitAtomicBinary(MI, BB);
1307 case Mips::ATOMIC_LOAD_ADD_I64:
1308 return emitAtomicBinary(MI, BB);
1310 case Mips::ATOMIC_LOAD_AND_I8:
1311 return emitAtomicBinaryPartword(MI, BB, 1);
1312 case Mips::ATOMIC_LOAD_AND_I16:
1313 return emitAtomicBinaryPartword(MI, BB, 2);
1314 case Mips::ATOMIC_LOAD_AND_I32:
1315 return emitAtomicBinary(MI, BB);
1316 case Mips::ATOMIC_LOAD_AND_I64:
1317 return emitAtomicBinary(MI, BB);
1319 case Mips::ATOMIC_LOAD_OR_I8:
1320 return emitAtomicBinaryPartword(MI, BB, 1);
1321 case Mips::ATOMIC_LOAD_OR_I16:
1322 return emitAtomicBinaryPartword(MI, BB, 2);
1323 case Mips::ATOMIC_LOAD_OR_I32:
1324 return emitAtomicBinary(MI, BB);
1325 case Mips::ATOMIC_LOAD_OR_I64:
1326 return emitAtomicBinary(MI, BB);
1328 case Mips::ATOMIC_LOAD_XOR_I8:
1329 return emitAtomicBinaryPartword(MI, BB, 1);
1330 case Mips::ATOMIC_LOAD_XOR_I16:
1331 return emitAtomicBinaryPartword(MI, BB, 2);
1332 case Mips::ATOMIC_LOAD_XOR_I32:
1333 return emitAtomicBinary(MI, BB);
1334 case Mips::ATOMIC_LOAD_XOR_I64:
1335 return emitAtomicBinary(MI, BB);
1337 case Mips::ATOMIC_LOAD_NAND_I8:
1338 return emitAtomicBinaryPartword(MI, BB, 1);
1339 case Mips::ATOMIC_LOAD_NAND_I16:
1340 return emitAtomicBinaryPartword(MI, BB, 2);
1341 case Mips::ATOMIC_LOAD_NAND_I32:
1342 return emitAtomicBinary(MI, BB);
1343 case Mips::ATOMIC_LOAD_NAND_I64:
1344 return emitAtomicBinary(MI, BB);
1346 case Mips::ATOMIC_LOAD_SUB_I8:
1347 return emitAtomicBinaryPartword(MI, BB, 1);
1348 case Mips::ATOMIC_LOAD_SUB_I16:
1349 return emitAtomicBinaryPartword(MI, BB, 2);
1350 case Mips::ATOMIC_LOAD_SUB_I32:
1351 return emitAtomicBinary(MI, BB);
1352 case Mips::ATOMIC_LOAD_SUB_I64:
1353 return emitAtomicBinary(MI, BB);
1355 case Mips::ATOMIC_SWAP_I8:
1356 return emitAtomicBinaryPartword(MI, BB, 1);
1357 case Mips::ATOMIC_SWAP_I16:
1358 return emitAtomicBinaryPartword(MI, BB, 2);
1359 case Mips::ATOMIC_SWAP_I32:
1360 return emitAtomicBinary(MI, BB);
1361 case Mips::ATOMIC_SWAP_I64:
1362 return emitAtomicBinary(MI, BB);
1364 case Mips::ATOMIC_CMP_SWAP_I8:
1365 return emitAtomicCmpSwapPartword(MI, BB, 1);
1366 case Mips::ATOMIC_CMP_SWAP_I16:
1367 return emitAtomicCmpSwapPartword(MI, BB, 2);
1368 case Mips::ATOMIC_CMP_SWAP_I32:
1369 return emitAtomicCmpSwap(MI, BB);
1370 case Mips::ATOMIC_CMP_SWAP_I64:
1371 return emitAtomicCmpSwap(MI, BB);
1373 case Mips::ATOMIC_LOAD_MIN_I8:
1374 return emitAtomicBinaryPartword(MI, BB, 1);
1375 case Mips::ATOMIC_LOAD_MIN_I16:
1376 return emitAtomicBinaryPartword(MI, BB, 2);
1377 case Mips::ATOMIC_LOAD_MIN_I32:
1378 return emitAtomicBinary(MI, BB);
1379 case Mips::ATOMIC_LOAD_MIN_I64:
1380 return emitAtomicBinary(MI, BB);
1382 case Mips::ATOMIC_LOAD_MAX_I8:
1383 return emitAtomicBinaryPartword(MI, BB, 1);
1384 case Mips::ATOMIC_LOAD_MAX_I16:
1385 return emitAtomicBinaryPartword(MI, BB, 2);
1386 case Mips::ATOMIC_LOAD_MAX_I32:
1387 return emitAtomicBinary(MI, BB);
1388 case Mips::ATOMIC_LOAD_MAX_I64:
1389 return emitAtomicBinary(MI, BB);
1391 case Mips::ATOMIC_LOAD_UMIN_I8:
1392 return emitAtomicBinaryPartword(MI, BB, 1);
1393 case Mips::ATOMIC_LOAD_UMIN_I16:
1394 return emitAtomicBinaryPartword(MI, BB, 2);
1395 case Mips::ATOMIC_LOAD_UMIN_I32:
1396 return emitAtomicBinary(MI, BB);
1397 case Mips::ATOMIC_LOAD_UMIN_I64:
1398 return emitAtomicBinary(MI, BB);
1400 case Mips::ATOMIC_LOAD_UMAX_I8:
1401 return emitAtomicBinaryPartword(MI, BB, 1);
1402 case Mips::ATOMIC_LOAD_UMAX_I16:
1403 return emitAtomicBinaryPartword(MI, BB, 2);
1404 case Mips::ATOMIC_LOAD_UMAX_I32:
1405 return emitAtomicBinary(MI, BB);
1406 case Mips::ATOMIC_LOAD_UMAX_I64:
1407 return emitAtomicBinary(MI, BB);
1409 case Mips::PseudoSDIV:
1410 case Mips::PseudoUDIV:
1415 return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false,
1417 case Mips::SDIV_MM_Pseudo:
1418 case Mips::UDIV_MM_Pseudo:
1421 case Mips::DIV_MMR6:
1422 case Mips::DIVU_MMR6:
1423 case Mips::MOD_MMR6:
1424 case Mips::MODU_MMR6:
1425 return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), false, true);
1426 case Mips::PseudoDSDIV:
1427 case Mips::PseudoDUDIV:
1432 return insertDivByZeroTrap(MI, *BB, *Subtarget.getInstrInfo(), true, false);
1434 case Mips::PseudoSELECT_I:
1435 case Mips::PseudoSELECT_I64:
1436 case Mips::PseudoSELECT_S:
1437 case Mips::PseudoSELECT_D32:
1438 case Mips::PseudoSELECT_D64:
1439 return emitPseudoSELECT(MI, BB, false, Mips::BNE);
1440 case Mips::PseudoSELECTFP_F_I:
1441 case Mips::PseudoSELECTFP_F_I64:
1442 case Mips::PseudoSELECTFP_F_S:
1443 case Mips::PseudoSELECTFP_F_D32:
1444 case Mips::PseudoSELECTFP_F_D64:
1445 return emitPseudoSELECT(MI, BB, true, Mips::BC1F);
1446 case Mips::PseudoSELECTFP_T_I:
1447 case Mips::PseudoSELECTFP_T_I64:
1448 case Mips::PseudoSELECTFP_T_S:
1449 case Mips::PseudoSELECTFP_T_D32:
1450 case Mips::PseudoSELECTFP_T_D64:
1451 return emitPseudoSELECT(MI, BB, true, Mips::BC1T);
1452 case Mips::PseudoD_SELECT_I:
1453 case Mips::PseudoD_SELECT_I64:
1454 return emitPseudoD_SELECT(MI, BB);
1456 return emitLDR_W(MI, BB);
1458 return emitLDR_D(MI, BB);
1460 return emitSTR_W(MI, BB);
1462 return emitSTR_D(MI, BB);
1466 // This function also handles Mips::ATOMIC_SWAP_I32 (when BinOpcode == 0), and
1467 // Mips::ATOMIC_LOAD_NAND_I32 (when Nand == true)
1469 MipsTargetLowering::emitAtomicBinary(MachineInstr &MI,
1470 MachineBasicBlock *BB) const {
1472 MachineFunction *MF = BB->getParent();
1473 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1474 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1475 DebugLoc DL = MI.getDebugLoc();
1478 bool NeedsAdditionalReg = false;
1479 switch (MI.getOpcode()) {
1480 case Mips::ATOMIC_LOAD_ADD_I32:
1481 AtomicOp = Mips::ATOMIC_LOAD_ADD_I32_POSTRA;
1483 case Mips::ATOMIC_LOAD_SUB_I32:
1484 AtomicOp = Mips::ATOMIC_LOAD_SUB_I32_POSTRA;
1486 case Mips::ATOMIC_LOAD_AND_I32:
1487 AtomicOp = Mips::ATOMIC_LOAD_AND_I32_POSTRA;
1489 case Mips::ATOMIC_LOAD_OR_I32:
1490 AtomicOp = Mips::ATOMIC_LOAD_OR_I32_POSTRA;
1492 case Mips::ATOMIC_LOAD_XOR_I32:
1493 AtomicOp = Mips::ATOMIC_LOAD_XOR_I32_POSTRA;
1495 case Mips::ATOMIC_LOAD_NAND_I32:
1496 AtomicOp = Mips::ATOMIC_LOAD_NAND_I32_POSTRA;
1498 case Mips::ATOMIC_SWAP_I32:
1499 AtomicOp = Mips::ATOMIC_SWAP_I32_POSTRA;
1501 case Mips::ATOMIC_LOAD_ADD_I64:
1502 AtomicOp = Mips::ATOMIC_LOAD_ADD_I64_POSTRA;
1504 case Mips::ATOMIC_LOAD_SUB_I64:
1505 AtomicOp = Mips::ATOMIC_LOAD_SUB_I64_POSTRA;
1507 case Mips::ATOMIC_LOAD_AND_I64:
1508 AtomicOp = Mips::ATOMIC_LOAD_AND_I64_POSTRA;
1510 case Mips::ATOMIC_LOAD_OR_I64:
1511 AtomicOp = Mips::ATOMIC_LOAD_OR_I64_POSTRA;
1513 case Mips::ATOMIC_LOAD_XOR_I64:
1514 AtomicOp = Mips::ATOMIC_LOAD_XOR_I64_POSTRA;
1516 case Mips::ATOMIC_LOAD_NAND_I64:
1517 AtomicOp = Mips::ATOMIC_LOAD_NAND_I64_POSTRA;
1519 case Mips::ATOMIC_SWAP_I64:
1520 AtomicOp = Mips::ATOMIC_SWAP_I64_POSTRA;
1522 case Mips::ATOMIC_LOAD_MIN_I32:
1523 AtomicOp = Mips::ATOMIC_LOAD_MIN_I32_POSTRA;
1524 NeedsAdditionalReg = true;
1526 case Mips::ATOMIC_LOAD_MAX_I32:
1527 AtomicOp = Mips::ATOMIC_LOAD_MAX_I32_POSTRA;
1528 NeedsAdditionalReg = true;
1530 case Mips::ATOMIC_LOAD_UMIN_I32:
1531 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I32_POSTRA;
1532 NeedsAdditionalReg = true;
1534 case Mips::ATOMIC_LOAD_UMAX_I32:
1535 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I32_POSTRA;
1536 NeedsAdditionalReg = true;
1538 case Mips::ATOMIC_LOAD_MIN_I64:
1539 AtomicOp = Mips::ATOMIC_LOAD_MIN_I64_POSTRA;
1540 NeedsAdditionalReg = true;
1542 case Mips::ATOMIC_LOAD_MAX_I64:
1543 AtomicOp = Mips::ATOMIC_LOAD_MAX_I64_POSTRA;
1544 NeedsAdditionalReg = true;
1546 case Mips::ATOMIC_LOAD_UMIN_I64:
1547 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I64_POSTRA;
1548 NeedsAdditionalReg = true;
1550 case Mips::ATOMIC_LOAD_UMAX_I64:
1551 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I64_POSTRA;
1552 NeedsAdditionalReg = true;
1555 llvm_unreachable("Unknown pseudo atomic for replacement!");
1558 Register OldVal = MI.getOperand(0).getReg();
1559 Register Ptr = MI.getOperand(1).getReg();
1560 Register Incr = MI.getOperand(2).getReg();
1561 Register Scratch = RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
1563 MachineBasicBlock::iterator II(MI);
1565 // The scratch registers here with the EarlyClobber | Define | Implicit
1566 // flags is used to persuade the register allocator and the machine
1567 // verifier to accept the usage of this register. This has to be a real
1568 // register which has an UNDEF value but is dead after the instruction which
1569 // is unique among the registers chosen for the instruction.
1571 // The EarlyClobber flag has the semantic properties that the operand it is
1572 // attached to is clobbered before the rest of the inputs are read. Hence it
1573 // must be unique among the operands to the instruction.
1574 // The Define flag is needed to coerce the machine verifier that an Undef
1575 // value isn't a problem.
1576 // The Dead flag is needed as the value in scratch isn't used by any other
1577 // instruction. Kill isn't used as Dead is more precise.
1578 // The implicit flag is here due to the interaction between the other flags
1579 // and the machine verifier.
1581 // For correctness purpose, a new pseudo is introduced here. We need this
1582 // new pseudo, so that FastRegisterAllocator does not see an ll/sc sequence
1583 // that is spread over >1 basic blocks. A register allocator which
1584 // introduces (or any codegen infact) a store, can violate the expectations
1587 // An atomic read-modify-write sequence starts with a linked load
1588 // instruction and ends with a store conditional instruction. The atomic
1589 // read-modify-write sequence fails if any of the following conditions
1590 // occur between the execution of ll and sc:
1591 // * A coherent store is completed by another process or coherent I/O
1592 // module into the block of synchronizable physical memory containing
1593 // the word. The size and alignment of the block is
1594 // implementation-dependent.
1595 // * A coherent store is executed between an LL and SC sequence on the
1596 // same processor to the block of synchornizable physical memory
1597 // containing the word.
1600 Register PtrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Ptr));
1601 Register IncrCopy = RegInfo.createVirtualRegister(RegInfo.getRegClass(Incr));
1603 BuildMI(*BB, II, DL, TII->get(Mips::COPY), IncrCopy).addReg(Incr);
1604 BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
1606 MachineInstrBuilder MIB =
1607 BuildMI(*BB, II, DL, TII->get(AtomicOp))
1608 .addReg(OldVal, RegState::Define | RegState::EarlyClobber)
1611 .addReg(Scratch, RegState::Define | RegState::EarlyClobber |
1612 RegState::Implicit | RegState::Dead);
1613 if (NeedsAdditionalReg) {
1615 RegInfo.createVirtualRegister(RegInfo.getRegClass(OldVal));
1616 MIB.addReg(Scratch2, RegState::Define | RegState::EarlyClobber |
1617 RegState::Implicit | RegState::Dead);
1620 MI.eraseFromParent();
1625 MachineBasicBlock *MipsTargetLowering::emitSignExtendToI32InReg(
1626 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size, unsigned DstReg,
1627 unsigned SrcReg) const {
1628 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1629 const DebugLoc &DL = MI.getDebugLoc();
1631 if (Subtarget.hasMips32r2() && Size == 1) {
1632 BuildMI(BB, DL, TII->get(Mips::SEB), DstReg).addReg(SrcReg);
1636 if (Subtarget.hasMips32r2() && Size == 2) {
1637 BuildMI(BB, DL, TII->get(Mips::SEH), DstReg).addReg(SrcReg);
1641 MachineFunction *MF = BB->getParent();
1642 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1643 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1644 Register ScrReg = RegInfo.createVirtualRegister(RC);
1647 int64_t ShiftImm = 32 - (Size * 8);
1649 BuildMI(BB, DL, TII->get(Mips::SLL), ScrReg).addReg(SrcReg).addImm(ShiftImm);
1650 BuildMI(BB, DL, TII->get(Mips::SRA), DstReg).addReg(ScrReg).addImm(ShiftImm);
1655 MachineBasicBlock *MipsTargetLowering::emitAtomicBinaryPartword(
1656 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
1657 assert((Size == 1 || Size == 2) &&
1658 "Unsupported size for EmitAtomicBinaryPartial.");
1660 MachineFunction *MF = BB->getParent();
1661 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1662 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1663 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1664 const TargetRegisterClass *RCp =
1665 getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
1666 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1667 DebugLoc DL = MI.getDebugLoc();
1669 Register Dest = MI.getOperand(0).getReg();
1670 Register Ptr = MI.getOperand(1).getReg();
1671 Register Incr = MI.getOperand(2).getReg();
1673 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
1674 Register ShiftAmt = RegInfo.createVirtualRegister(RC);
1675 Register Mask = RegInfo.createVirtualRegister(RC);
1676 Register Mask2 = RegInfo.createVirtualRegister(RC);
1677 Register Incr2 = RegInfo.createVirtualRegister(RC);
1678 Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
1679 Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
1680 Register MaskUpper = RegInfo.createVirtualRegister(RC);
1681 Register Scratch = RegInfo.createVirtualRegister(RC);
1682 Register Scratch2 = RegInfo.createVirtualRegister(RC);
1683 Register Scratch3 = RegInfo.createVirtualRegister(RC);
1685 unsigned AtomicOp = 0;
1686 bool NeedsAdditionalReg = false;
1687 switch (MI.getOpcode()) {
1688 case Mips::ATOMIC_LOAD_NAND_I8:
1689 AtomicOp = Mips::ATOMIC_LOAD_NAND_I8_POSTRA;
1691 case Mips::ATOMIC_LOAD_NAND_I16:
1692 AtomicOp = Mips::ATOMIC_LOAD_NAND_I16_POSTRA;
1694 case Mips::ATOMIC_SWAP_I8:
1695 AtomicOp = Mips::ATOMIC_SWAP_I8_POSTRA;
1697 case Mips::ATOMIC_SWAP_I16:
1698 AtomicOp = Mips::ATOMIC_SWAP_I16_POSTRA;
1700 case Mips::ATOMIC_LOAD_ADD_I8:
1701 AtomicOp = Mips::ATOMIC_LOAD_ADD_I8_POSTRA;
1703 case Mips::ATOMIC_LOAD_ADD_I16:
1704 AtomicOp = Mips::ATOMIC_LOAD_ADD_I16_POSTRA;
1706 case Mips::ATOMIC_LOAD_SUB_I8:
1707 AtomicOp = Mips::ATOMIC_LOAD_SUB_I8_POSTRA;
1709 case Mips::ATOMIC_LOAD_SUB_I16:
1710 AtomicOp = Mips::ATOMIC_LOAD_SUB_I16_POSTRA;
1712 case Mips::ATOMIC_LOAD_AND_I8:
1713 AtomicOp = Mips::ATOMIC_LOAD_AND_I8_POSTRA;
1715 case Mips::ATOMIC_LOAD_AND_I16:
1716 AtomicOp = Mips::ATOMIC_LOAD_AND_I16_POSTRA;
1718 case Mips::ATOMIC_LOAD_OR_I8:
1719 AtomicOp = Mips::ATOMIC_LOAD_OR_I8_POSTRA;
1721 case Mips::ATOMIC_LOAD_OR_I16:
1722 AtomicOp = Mips::ATOMIC_LOAD_OR_I16_POSTRA;
1724 case Mips::ATOMIC_LOAD_XOR_I8:
1725 AtomicOp = Mips::ATOMIC_LOAD_XOR_I8_POSTRA;
1727 case Mips::ATOMIC_LOAD_XOR_I16:
1728 AtomicOp = Mips::ATOMIC_LOAD_XOR_I16_POSTRA;
1730 case Mips::ATOMIC_LOAD_MIN_I8:
1731 AtomicOp = Mips::ATOMIC_LOAD_MIN_I8_POSTRA;
1732 NeedsAdditionalReg = true;
1734 case Mips::ATOMIC_LOAD_MIN_I16:
1735 AtomicOp = Mips::ATOMIC_LOAD_MIN_I16_POSTRA;
1736 NeedsAdditionalReg = true;
1738 case Mips::ATOMIC_LOAD_MAX_I8:
1739 AtomicOp = Mips::ATOMIC_LOAD_MAX_I8_POSTRA;
1740 NeedsAdditionalReg = true;
1742 case Mips::ATOMIC_LOAD_MAX_I16:
1743 AtomicOp = Mips::ATOMIC_LOAD_MAX_I16_POSTRA;
1744 NeedsAdditionalReg = true;
1746 case Mips::ATOMIC_LOAD_UMIN_I8:
1747 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I8_POSTRA;
1748 NeedsAdditionalReg = true;
1750 case Mips::ATOMIC_LOAD_UMIN_I16:
1751 AtomicOp = Mips::ATOMIC_LOAD_UMIN_I16_POSTRA;
1752 NeedsAdditionalReg = true;
1754 case Mips::ATOMIC_LOAD_UMAX_I8:
1755 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I8_POSTRA;
1756 NeedsAdditionalReg = true;
1758 case Mips::ATOMIC_LOAD_UMAX_I16:
1759 AtomicOp = Mips::ATOMIC_LOAD_UMAX_I16_POSTRA;
1760 NeedsAdditionalReg = true;
1763 llvm_unreachable("Unknown subword atomic pseudo for expansion!");
1766 // insert new blocks after the current block
1767 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1768 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1769 MachineFunction::iterator It = ++BB->getIterator();
1770 MF->insert(It, exitMBB);
1772 // Transfer the remainder of BB and its successor edges to exitMBB.
1773 exitMBB->splice(exitMBB->begin(), BB,
1774 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1775 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
1777 BB->addSuccessor(exitMBB, BranchProbability::getOne());
1780 // addiu masklsb2,$0,-4 # 0xfffffffc
1781 // and alignedaddr,ptr,masklsb2
1782 // andi ptrlsb2,ptr,3
1783 // sll shiftamt,ptrlsb2,3
1784 // ori maskupper,$0,255 # 0xff
1785 // sll mask,maskupper,shiftamt
1786 // nor mask2,$0,mask
1787 // sll incr2,incr,shiftamt
1789 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1790 BuildMI(BB, DL, TII->get(ABI.GetPtrAddiuOp()), MaskLSB2)
1791 .addReg(ABI.GetNullPtr()).addImm(-4);
1792 BuildMI(BB, DL, TII->get(ABI.GetPtrAndOp()), AlignedAddr)
1793 .addReg(Ptr).addReg(MaskLSB2);
1794 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
1795 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
1796 if (Subtarget.isLittle()) {
1797 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
1799 Register Off = RegInfo.createVirtualRegister(RC);
1800 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
1801 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
1802 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
1804 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
1805 .addReg(Mips::ZERO).addImm(MaskImm);
1806 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
1807 .addReg(MaskUpper).addReg(ShiftAmt);
1808 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
1809 BuildMI(BB, DL, TII->get(Mips::SLLV), Incr2).addReg(Incr).addReg(ShiftAmt);
1812 // The purposes of the flags on the scratch registers is explained in
1813 // emitAtomicBinary. In summary, we need a scratch register which is going to
1814 // be undef, that is unique among registers chosen for the instruction.
1816 MachineInstrBuilder MIB =
1817 BuildMI(BB, DL, TII->get(AtomicOp))
1818 .addReg(Dest, RegState::Define | RegState::EarlyClobber)
1819 .addReg(AlignedAddr)
1824 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
1825 RegState::Dead | RegState::Implicit)
1826 .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
1827 RegState::Dead | RegState::Implicit)
1828 .addReg(Scratch3, RegState::EarlyClobber | RegState::Define |
1829 RegState::Dead | RegState::Implicit);
1830 if (NeedsAdditionalReg) {
1831 Register Scratch4 = RegInfo.createVirtualRegister(RC);
1832 MIB.addReg(Scratch4, RegState::EarlyClobber | RegState::Define |
1833 RegState::Dead | RegState::Implicit);
1836 MI.eraseFromParent(); // The instruction is gone now.
1841 // Lower atomic compare and swap to a pseudo instruction, taking care to
1842 // define a scratch register for the pseudo instruction's expansion. The
1843 // instruction is expanded after the register allocator as to prevent
1844 // the insertion of stores between the linked load and the store conditional.
1847 MipsTargetLowering::emitAtomicCmpSwap(MachineInstr &MI,
1848 MachineBasicBlock *BB) const {
1850 assert((MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ||
1851 MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I64) &&
1852 "Unsupported atomic pseudo for EmitAtomicCmpSwap.");
1854 const unsigned Size = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32 ? 4 : 8;
1856 MachineFunction *MF = BB->getParent();
1857 MachineRegisterInfo &MRI = MF->getRegInfo();
1858 const TargetRegisterClass *RC = getRegClassFor(MVT::getIntegerVT(Size * 8));
1859 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1860 DebugLoc DL = MI.getDebugLoc();
1862 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I32
1863 ? Mips::ATOMIC_CMP_SWAP_I32_POSTRA
1864 : Mips::ATOMIC_CMP_SWAP_I64_POSTRA;
1865 Register Dest = MI.getOperand(0).getReg();
1866 Register Ptr = MI.getOperand(1).getReg();
1867 Register OldVal = MI.getOperand(2).getReg();
1868 Register NewVal = MI.getOperand(3).getReg();
1870 Register Scratch = MRI.createVirtualRegister(RC);
1871 MachineBasicBlock::iterator II(MI);
1873 // We need to create copies of the various registers and kill them at the
1874 // atomic pseudo. If the copies are not made, when the atomic is expanded
1875 // after fast register allocation, the spills will end up outside of the
1876 // blocks that their values are defined in, causing livein errors.
1878 Register PtrCopy = MRI.createVirtualRegister(MRI.getRegClass(Ptr));
1879 Register OldValCopy = MRI.createVirtualRegister(MRI.getRegClass(OldVal));
1880 Register NewValCopy = MRI.createVirtualRegister(MRI.getRegClass(NewVal));
1882 BuildMI(*BB, II, DL, TII->get(Mips::COPY), PtrCopy).addReg(Ptr);
1883 BuildMI(*BB, II, DL, TII->get(Mips::COPY), OldValCopy).addReg(OldVal);
1884 BuildMI(*BB, II, DL, TII->get(Mips::COPY), NewValCopy).addReg(NewVal);
1886 // The purposes of the flags on the scratch registers is explained in
1887 // emitAtomicBinary. In summary, we need a scratch register which is going to
1888 // be undef, that is unique among registers chosen for the instruction.
1890 BuildMI(*BB, II, DL, TII->get(AtomicOp))
1891 .addReg(Dest, RegState::Define | RegState::EarlyClobber)
1892 .addReg(PtrCopy, RegState::Kill)
1893 .addReg(OldValCopy, RegState::Kill)
1894 .addReg(NewValCopy, RegState::Kill)
1895 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
1896 RegState::Dead | RegState::Implicit);
1898 MI.eraseFromParent(); // The instruction is gone now.
1903 MachineBasicBlock *MipsTargetLowering::emitAtomicCmpSwapPartword(
1904 MachineInstr &MI, MachineBasicBlock *BB, unsigned Size) const {
1905 assert((Size == 1 || Size == 2) &&
1906 "Unsupported size for EmitAtomicCmpSwapPartial.");
1908 MachineFunction *MF = BB->getParent();
1909 MachineRegisterInfo &RegInfo = MF->getRegInfo();
1910 const TargetRegisterClass *RC = getRegClassFor(MVT::i32);
1911 const bool ArePtrs64bit = ABI.ArePtrs64bit();
1912 const TargetRegisterClass *RCp =
1913 getRegClassFor(ArePtrs64bit ? MVT::i64 : MVT::i32);
1914 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1915 DebugLoc DL = MI.getDebugLoc();
1917 Register Dest = MI.getOperand(0).getReg();
1918 Register Ptr = MI.getOperand(1).getReg();
1919 Register CmpVal = MI.getOperand(2).getReg();
1920 Register NewVal = MI.getOperand(3).getReg();
1922 Register AlignedAddr = RegInfo.createVirtualRegister(RCp);
1923 Register ShiftAmt = RegInfo.createVirtualRegister(RC);
1924 Register Mask = RegInfo.createVirtualRegister(RC);
1925 Register Mask2 = RegInfo.createVirtualRegister(RC);
1926 Register ShiftedCmpVal = RegInfo.createVirtualRegister(RC);
1927 Register ShiftedNewVal = RegInfo.createVirtualRegister(RC);
1928 Register MaskLSB2 = RegInfo.createVirtualRegister(RCp);
1929 Register PtrLSB2 = RegInfo.createVirtualRegister(RC);
1930 Register MaskUpper = RegInfo.createVirtualRegister(RC);
1931 Register MaskedCmpVal = RegInfo.createVirtualRegister(RC);
1932 Register MaskedNewVal = RegInfo.createVirtualRegister(RC);
1933 unsigned AtomicOp = MI.getOpcode() == Mips::ATOMIC_CMP_SWAP_I8
1934 ? Mips::ATOMIC_CMP_SWAP_I8_POSTRA
1935 : Mips::ATOMIC_CMP_SWAP_I16_POSTRA;
1937 // The scratch registers here with the EarlyClobber | Define | Dead | Implicit
1938 // flags are used to coerce the register allocator and the machine verifier to
1939 // accept the usage of these registers.
1940 // The EarlyClobber flag has the semantic properties that the operand it is
1941 // attached to is clobbered before the rest of the inputs are read. Hence it
1942 // must be unique among the operands to the instruction.
1943 // The Define flag is needed to coerce the machine verifier that an Undef
1944 // value isn't a problem.
1945 // The Dead flag is needed as the value in scratch isn't used by any other
1946 // instruction. Kill isn't used as Dead is more precise.
1947 Register Scratch = RegInfo.createVirtualRegister(RC);
1948 Register Scratch2 = RegInfo.createVirtualRegister(RC);
1950 // insert new blocks after the current block
1951 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1952 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
1953 MachineFunction::iterator It = ++BB->getIterator();
1954 MF->insert(It, exitMBB);
1956 // Transfer the remainder of BB and its successor edges to exitMBB.
1957 exitMBB->splice(exitMBB->begin(), BB,
1958 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1959 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
1961 BB->addSuccessor(exitMBB, BranchProbability::getOne());
1964 // addiu masklsb2,$0,-4 # 0xfffffffc
1965 // and alignedaddr,ptr,masklsb2
1966 // andi ptrlsb2,ptr,3
1967 // xori ptrlsb2,ptrlsb2,3 # Only for BE
1968 // sll shiftamt,ptrlsb2,3
1969 // ori maskupper,$0,255 # 0xff
1970 // sll mask,maskupper,shiftamt
1971 // nor mask2,$0,mask
1972 // andi maskedcmpval,cmpval,255
1973 // sll shiftedcmpval,maskedcmpval,shiftamt
1974 // andi maskednewval,newval,255
1975 // sll shiftednewval,maskednewval,shiftamt
1976 int64_t MaskImm = (Size == 1) ? 255 : 65535;
1977 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::DADDiu : Mips::ADDiu), MaskLSB2)
1978 .addReg(ABI.GetNullPtr()).addImm(-4);
1979 BuildMI(BB, DL, TII->get(ArePtrs64bit ? Mips::AND64 : Mips::AND), AlignedAddr)
1980 .addReg(Ptr).addReg(MaskLSB2);
1981 BuildMI(BB, DL, TII->get(Mips::ANDi), PtrLSB2)
1982 .addReg(Ptr, 0, ArePtrs64bit ? Mips::sub_32 : 0).addImm(3);
1983 if (Subtarget.isLittle()) {
1984 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(PtrLSB2).addImm(3);
1986 Register Off = RegInfo.createVirtualRegister(RC);
1987 BuildMI(BB, DL, TII->get(Mips::XORi), Off)
1988 .addReg(PtrLSB2).addImm((Size == 1) ? 3 : 2);
1989 BuildMI(BB, DL, TII->get(Mips::SLL), ShiftAmt).addReg(Off).addImm(3);
1991 BuildMI(BB, DL, TII->get(Mips::ORi), MaskUpper)
1992 .addReg(Mips::ZERO).addImm(MaskImm);
1993 BuildMI(BB, DL, TII->get(Mips::SLLV), Mask)
1994 .addReg(MaskUpper).addReg(ShiftAmt);
1995 BuildMI(BB, DL, TII->get(Mips::NOR), Mask2).addReg(Mips::ZERO).addReg(Mask);
1996 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedCmpVal)
1997 .addReg(CmpVal).addImm(MaskImm);
1998 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedCmpVal)
1999 .addReg(MaskedCmpVal).addReg(ShiftAmt);
2000 BuildMI(BB, DL, TII->get(Mips::ANDi), MaskedNewVal)
2001 .addReg(NewVal).addImm(MaskImm);
2002 BuildMI(BB, DL, TII->get(Mips::SLLV), ShiftedNewVal)
2003 .addReg(MaskedNewVal).addReg(ShiftAmt);
2005 // The purposes of the flags on the scratch registers are explained in
2006 // emitAtomicBinary. In summary, we need a scratch register which is going to
2007 // be undef, that is unique among the register chosen for the instruction.
2009 BuildMI(BB, DL, TII->get(AtomicOp))
2010 .addReg(Dest, RegState::Define | RegState::EarlyClobber)
2011 .addReg(AlignedAddr)
2013 .addReg(ShiftedCmpVal)
2015 .addReg(ShiftedNewVal)
2017 .addReg(Scratch, RegState::EarlyClobber | RegState::Define |
2018 RegState::Dead | RegState::Implicit)
2019 .addReg(Scratch2, RegState::EarlyClobber | RegState::Define |
2020 RegState::Dead | RegState::Implicit);
2022 MI.eraseFromParent(); // The instruction is gone now.
2027 SDValue MipsTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
2028 // The first operand is the chain, the second is the condition, the third is
2029 // the block to branch to if the condition is true.
2030 SDValue Chain = Op.getOperand(0);
2031 SDValue Dest = Op.getOperand(2);
2034 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2035 SDValue CondRes = createFPCmp(DAG, Op.getOperand(1));
2037 // Return if flag is not set by a floating point comparison.
2038 if (CondRes.getOpcode() != MipsISD::FPCmp)
2041 SDValue CCNode = CondRes.getOperand(2);
2043 (Mips::CondCode)cast<ConstantSDNode>(CCNode)->getZExtValue();
2044 unsigned Opc = invertFPCondCodeUser(CC) ? Mips::BRANCH_F : Mips::BRANCH_T;
2045 SDValue BrCode = DAG.getConstant(Opc, DL, MVT::i32);
2046 SDValue FCC0 = DAG.getRegister(Mips::FCC0, MVT::i32);
2047 return DAG.getNode(MipsISD::FPBrcond, DL, Op.getValueType(), Chain, BrCode,
2048 FCC0, Dest, CondRes);
2051 SDValue MipsTargetLowering::
2052 lowerSELECT(SDValue Op, SelectionDAG &DAG) const
2054 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2055 SDValue Cond = createFPCmp(DAG, Op.getOperand(0));
2057 // Return if flag is not set by a floating point comparison.
2058 if (Cond.getOpcode() != MipsISD::FPCmp)
2061 return createCMovFP(DAG, Cond, Op.getOperand(1), Op.getOperand(2),
2065 SDValue MipsTargetLowering::lowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2066 assert(!Subtarget.hasMips32r6() && !Subtarget.hasMips64r6());
2067 SDValue Cond = createFPCmp(DAG, Op);
2069 assert(Cond.getOpcode() == MipsISD::FPCmp &&
2070 "Floating point operand expected.");
2073 SDValue True = DAG.getConstant(1, DL, MVT::i32);
2074 SDValue False = DAG.getConstant(0, DL, MVT::i32);
2076 return createCMovFP(DAG, Cond, True, False, DL);
2079 SDValue MipsTargetLowering::lowerGlobalAddress(SDValue Op,
2080 SelectionDAG &DAG) const {
2081 EVT Ty = Op.getValueType();
2082 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
2083 const GlobalValue *GV = N->getGlobal();
2085 if (!isPositionIndependent()) {
2086 const MipsTargetObjectFile *TLOF =
2087 static_cast<const MipsTargetObjectFile *>(
2088 getTargetMachine().getObjFileLowering());
2089 const GlobalObject *GO = GV->getBaseObject();
2090 if (GO && TLOF->IsGlobalInSmallSection(GO, getTargetMachine()))
2091 // %gp_rel relocation
2092 return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());
2094 // %hi/%lo relocation
2095 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2096 // %highest/%higher/%hi/%lo relocation
2097 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2100 // Every other architecture would use shouldAssumeDSOLocal in here, but
2102 // * In PIC code mips requires got loads even for local statics!
2103 // * To save on got entries, for local statics the got entry contains the
2104 // page and an additional add instruction takes care of the low bits.
2105 // * It is legal to access a hidden symbol with a non hidden undefined,
2106 // so one cannot guarantee that all access to a hidden symbol will know
2108 // * Mips linkers don't support creating a page and a full got entry for
2110 // * Given all that, we have to use a full got entry for hidden symbols :-(
2111 if (GV->hasLocalLinkage())
2112 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2114 if (Subtarget.useXGOT())
2115 return getAddrGlobalLargeGOT(
2116 N, SDLoc(N), Ty, DAG, MipsII::MO_GOT_HI16, MipsII::MO_GOT_LO16,
2118 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2120 return getAddrGlobal(
2121 N, SDLoc(N), Ty, DAG,
2122 (ABI.IsN32() || ABI.IsN64()) ? MipsII::MO_GOT_DISP : MipsII::MO_GOT,
2123 DAG.getEntryNode(), MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2126 SDValue MipsTargetLowering::lowerBlockAddress(SDValue Op,
2127 SelectionDAG &DAG) const {
2128 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
2129 EVT Ty = Op.getValueType();
2131 if (!isPositionIndependent())
2132 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2133 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2135 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2138 SDValue MipsTargetLowering::
2139 lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
2141 // If the relocation model is PIC, use the General Dynamic TLS Model or
2142 // Local Dynamic TLS model, otherwise use the Initial Exec or
2143 // Local Exec TLS Model.
2145 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2146 if (DAG.getTarget().useEmulatedTLS())
2147 return LowerToTLSEmulatedModel(GA, DAG);
2150 const GlobalValue *GV = GA->getGlobal();
2151 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2153 TLSModel::Model model = getTargetMachine().getTLSModel(GV);
2155 if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
2156 // General Dynamic and Local Dynamic TLS Model.
2157 unsigned Flag = (model == TLSModel::LocalDynamic) ? MipsII::MO_TLSLDM
2160 SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, Flag);
2161 SDValue Argument = DAG.getNode(MipsISD::Wrapper, DL, PtrVT,
2162 getGlobalReg(DAG, PtrVT), TGA);
2163 unsigned PtrSize = PtrVT.getSizeInBits();
2164 IntegerType *PtrTy = Type::getIntNTy(*DAG.getContext(), PtrSize);
2166 SDValue TlsGetAddr = DAG.getExternalSymbol("__tls_get_addr", PtrVT);
2170 Entry.Node = Argument;
2172 Args.push_back(Entry);
2174 TargetLowering::CallLoweringInfo CLI(DAG);
2176 .setChain(DAG.getEntryNode())
2177 .setLibCallee(CallingConv::C, PtrTy, TlsGetAddr, std::move(Args));
2178 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2180 SDValue Ret = CallResult.first;
2182 if (model != TLSModel::LocalDynamic)
2185 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2186 MipsII::MO_DTPREL_HI);
2187 SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
2188 SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2189 MipsII::MO_DTPREL_LO);
2190 SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
2191 SDValue Add = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Ret);
2192 return DAG.getNode(ISD::ADD, DL, PtrVT, Add, Lo);
2196 if (model == TLSModel::InitialExec) {
2197 // Initial Exec TLS Model
2198 SDValue TGA = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2199 MipsII::MO_GOTTPREL);
2200 TGA = DAG.getNode(MipsISD::Wrapper, DL, PtrVT, getGlobalReg(DAG, PtrVT),
2203 DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), TGA, MachinePointerInfo());
2205 // Local Exec TLS Model
2206 assert(model == TLSModel::LocalExec);
2207 SDValue TGAHi = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2208 MipsII::MO_TPREL_HI);
2209 SDValue TGALo = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2210 MipsII::MO_TPREL_LO);
2211 SDValue Hi = DAG.getNode(MipsISD::TlsHi, DL, PtrVT, TGAHi);
2212 SDValue Lo = DAG.getNode(MipsISD::Lo, DL, PtrVT, TGALo);
2213 Offset = DAG.getNode(ISD::ADD, DL, PtrVT, Hi, Lo);
2216 SDValue ThreadPointer = DAG.getNode(MipsISD::ThreadPointer, DL, PtrVT);
2217 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadPointer, Offset);
2220 SDValue MipsTargetLowering::
2221 lowerJumpTable(SDValue Op, SelectionDAG &DAG) const
2223 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
2224 EVT Ty = Op.getValueType();
2226 if (!isPositionIndependent())
2227 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2228 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2230 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2233 SDValue MipsTargetLowering::
2234 lowerConstantPool(SDValue Op, SelectionDAG &DAG) const
2236 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
2237 EVT Ty = Op.getValueType();
2239 if (!isPositionIndependent()) {
2240 const MipsTargetObjectFile *TLOF =
2241 static_cast<const MipsTargetObjectFile *>(
2242 getTargetMachine().getObjFileLowering());
2244 if (TLOF->IsConstantInSmallSection(DAG.getDataLayout(), N->getConstVal(),
2245 getTargetMachine()))
2246 // %gp_rel relocation
2247 return getAddrGPRel(N, SDLoc(N), Ty, DAG, ABI.IsN64());
2249 return Subtarget.hasSym32() ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
2250 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
2253 return getAddrLocal(N, SDLoc(N), Ty, DAG, ABI.IsN32() || ABI.IsN64());
2256 SDValue MipsTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2257 MachineFunction &MF = DAG.getMachineFunction();
2258 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
2261 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
2262 getPointerTy(MF.getDataLayout()));
2264 // vastart just stores the address of the VarArgsFrameIndex slot into the
2265 // memory location argument.
2266 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2267 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
2268 MachinePointerInfo(SV));
2271 SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
2272 SDNode *Node = Op.getNode();
2273 EVT VT = Node->getValueType(0);
2274 SDValue Chain = Node->getOperand(0);
2275 SDValue VAListPtr = Node->getOperand(1);
2277 llvm::MaybeAlign(Node->getConstantOperandVal(3)).valueOrOne();
2278 const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
2280 unsigned ArgSlotSizeInBytes = (ABI.IsN32() || ABI.IsN64()) ? 8 : 4;
2282 SDValue VAListLoad = DAG.getLoad(getPointerTy(DAG.getDataLayout()), DL, Chain,
2283 VAListPtr, MachinePointerInfo(SV));
2284 SDValue VAList = VAListLoad;
2286 // Re-align the pointer if necessary.
2287 // It should only ever be necessary for 64-bit types on O32 since the minimum
2288 // argument alignment is the same as the maximum type alignment for N32/N64.
2290 // FIXME: We currently align too often. The code generator doesn't notice
2291 // when the pointer is still aligned from the last va_arg (or pair of
2292 // va_args for the i64 on O32 case).
2293 if (Align > getMinStackArgumentAlignment()) {
2294 VAList = DAG.getNode(
2295 ISD::ADD, DL, VAList.getValueType(), VAList,
2296 DAG.getConstant(Align.value() - 1, DL, VAList.getValueType()));
2298 VAList = DAG.getNode(
2299 ISD::AND, DL, VAList.getValueType(), VAList,
2300 DAG.getConstant(-(int64_t)Align.value(), DL, VAList.getValueType()));
2303 // Increment the pointer, VAList, to the next vaarg.
2304 auto &TD = DAG.getDataLayout();
2305 unsigned ArgSizeInBytes =
2306 TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
2308 DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
2309 DAG.getConstant(alignTo(ArgSizeInBytes, ArgSlotSizeInBytes),
2310 DL, VAList.getValueType()));
2311 // Store the incremented VAList to the legalized pointer
2312 Chain = DAG.getStore(VAListLoad.getValue(1), DL, Tmp3, VAListPtr,
2313 MachinePointerInfo(SV));
2315 // In big-endian mode we must adjust the pointer when the load size is smaller
2316 // than the argument slot size. We must also reduce the known alignment to
2317 // match. For example in the N64 ABI, we must add 4 bytes to the offset to get
2318 // the correct half of the slot, and reduce the alignment from 8 (slot
2319 // alignment) down to 4 (type alignment).
2320 if (!Subtarget.isLittle() && ArgSizeInBytes < ArgSlotSizeInBytes) {
2321 unsigned Adjustment = ArgSlotSizeInBytes - ArgSizeInBytes;
2322 VAList = DAG.getNode(ISD::ADD, DL, VAListPtr.getValueType(), VAList,
2323 DAG.getIntPtrConstant(Adjustment, DL));
2325 // Load the actual argument out of the pointer VAList
2326 return DAG.getLoad(VT, DL, Chain, VAList, MachinePointerInfo());
2329 static SDValue lowerFCOPYSIGN32(SDValue Op, SelectionDAG &DAG,
2330 bool HasExtractInsert) {
2331 EVT TyX = Op.getOperand(0).getValueType();
2332 EVT TyY = Op.getOperand(1).getValueType();
2334 SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
2335 SDValue Const31 = DAG.getConstant(31, DL, MVT::i32);
2338 // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
2340 SDValue X = (TyX == MVT::f32) ?
2341 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0)) :
2342 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
2344 SDValue Y = (TyY == MVT::f32) ?
2345 DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(1)) :
2346 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(1),
2349 if (HasExtractInsert) {
2350 // ext E, Y, 31, 1 ; extract bit31 of Y
2351 // ins X, E, 31, 1 ; insert extracted bit at bit31 of X
2352 SDValue E = DAG.getNode(MipsISD::Ext, DL, MVT::i32, Y, Const31, Const1);
2353 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32, E, Const31, Const1, X);
2356 // srl SrlX, SllX, 1
2358 // sll SllY, SrlX, 31
2359 // or Or, SrlX, SllY
2360 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
2361 SDValue SrlX = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
2362 SDValue SrlY = DAG.getNode(ISD::SRL, DL, MVT::i32, Y, Const31);
2363 SDValue SllY = DAG.getNode(ISD::SHL, DL, MVT::i32, SrlY, Const31);
2364 Res = DAG.getNode(ISD::OR, DL, MVT::i32, SrlX, SllY);
2367 if (TyX == MVT::f32)
2368 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Res);
2370 SDValue LowX = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2372 DAG.getConstant(0, DL, MVT::i32));
2373 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
2376 static SDValue lowerFCOPYSIGN64(SDValue Op, SelectionDAG &DAG,
2377 bool HasExtractInsert) {
2378 unsigned WidthX = Op.getOperand(0).getValueSizeInBits();
2379 unsigned WidthY = Op.getOperand(1).getValueSizeInBits();
2380 EVT TyX = MVT::getIntegerVT(WidthX), TyY = MVT::getIntegerVT(WidthY);
2382 SDValue Const1 = DAG.getConstant(1, DL, MVT::i32);
2384 // Bitcast to integer nodes.
2385 SDValue X = DAG.getNode(ISD::BITCAST, DL, TyX, Op.getOperand(0));
2386 SDValue Y = DAG.getNode(ISD::BITCAST, DL, TyY, Op.getOperand(1));
2388 if (HasExtractInsert) {
2389 // ext E, Y, width(Y) - 1, 1 ; extract bit width(Y)-1 of Y
2390 // ins X, E, width(X) - 1, 1 ; insert extracted bit at bit width(X)-1 of X
2391 SDValue E = DAG.getNode(MipsISD::Ext, DL, TyY, Y,
2392 DAG.getConstant(WidthY - 1, DL, MVT::i32), Const1);
2394 if (WidthX > WidthY)
2395 E = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, E);
2396 else if (WidthY > WidthX)
2397 E = DAG.getNode(ISD::TRUNCATE, DL, TyX, E);
2399 SDValue I = DAG.getNode(MipsISD::Ins, DL, TyX, E,
2400 DAG.getConstant(WidthX - 1, DL, MVT::i32), Const1,
2402 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), I);
2405 // (d)sll SllX, X, 1
2406 // (d)srl SrlX, SllX, 1
2407 // (d)srl SrlY, Y, width(Y)-1
2408 // (d)sll SllY, SrlX, width(Y)-1
2409 // or Or, SrlX, SllY
2410 SDValue SllX = DAG.getNode(ISD::SHL, DL, TyX, X, Const1);
2411 SDValue SrlX = DAG.getNode(ISD::SRL, DL, TyX, SllX, Const1);
2412 SDValue SrlY = DAG.getNode(ISD::SRL, DL, TyY, Y,
2413 DAG.getConstant(WidthY - 1, DL, MVT::i32));
2415 if (WidthX > WidthY)
2416 SrlY = DAG.getNode(ISD::ZERO_EXTEND, DL, TyX, SrlY);
2417 else if (WidthY > WidthX)
2418 SrlY = DAG.getNode(ISD::TRUNCATE, DL, TyX, SrlY);
2420 SDValue SllY = DAG.getNode(ISD::SHL, DL, TyX, SrlY,
2421 DAG.getConstant(WidthX - 1, DL, MVT::i32));
2422 SDValue Or = DAG.getNode(ISD::OR, DL, TyX, SrlX, SllY);
2423 return DAG.getNode(ISD::BITCAST, DL, Op.getOperand(0).getValueType(), Or);
2427 MipsTargetLowering::lowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
2428 if (Subtarget.isGP64bit())
2429 return lowerFCOPYSIGN64(Op, DAG, Subtarget.hasExtractInsert());
2431 return lowerFCOPYSIGN32(Op, DAG, Subtarget.hasExtractInsert());
2434 static SDValue lowerFABS32(SDValue Op, SelectionDAG &DAG,
2435 bool HasExtractInsert) {
2437 SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);
2439 // If operand is of type f64, extract the upper 32-bit. Otherwise, bitcast it
2441 SDValue X = (Op.getValueType() == MVT::f32)
2442 ? DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op.getOperand(0))
2443 : DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
2444 Op.getOperand(0), Const1);
2447 if (HasExtractInsert)
2448 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i32,
2449 DAG.getRegister(Mips::ZERO, MVT::i32),
2450 DAG.getConstant(31, DL, MVT::i32), Const1, X);
2452 // TODO: Provide DAG patterns which transform (and x, cst)
2453 // back to a (shl (srl x (clz cst)) (clz cst)) sequence.
2454 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i32, X, Const1);
2455 Res = DAG.getNode(ISD::SRL, DL, MVT::i32, SllX, Const1);
2458 if (Op.getValueType() == MVT::f32)
2459 return DAG.getNode(ISD::BITCAST, DL, MVT::f32, Res);
2461 // FIXME: For mips32r2, the sequence of (BuildPairF64 (ins (ExtractElementF64
2462 // Op 1), $zero, 31 1) (ExtractElementF64 Op 0)) and the Op has one use, we
2463 // should be able to drop the usage of mfc1/mtc1 and rewrite the register in
2466 DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32, Op.getOperand(0),
2467 DAG.getConstant(0, DL, MVT::i32));
2468 return DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64, LowX, Res);
2471 static SDValue lowerFABS64(SDValue Op, SelectionDAG &DAG,
2472 bool HasExtractInsert) {
2474 SDValue Res, Const1 = DAG.getConstant(1, DL, MVT::i32);
2476 // Bitcast to integer node.
2477 SDValue X = DAG.getNode(ISD::BITCAST, DL, MVT::i64, Op.getOperand(0));
2480 if (HasExtractInsert)
2481 Res = DAG.getNode(MipsISD::Ins, DL, MVT::i64,
2482 DAG.getRegister(Mips::ZERO_64, MVT::i64),
2483 DAG.getConstant(63, DL, MVT::i32), Const1, X);
2485 SDValue SllX = DAG.getNode(ISD::SHL, DL, MVT::i64, X, Const1);
2486 Res = DAG.getNode(ISD::SRL, DL, MVT::i64, SllX, Const1);
2489 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, Res);
2492 SDValue MipsTargetLowering::lowerFABS(SDValue Op, SelectionDAG &DAG) const {
2493 if ((ABI.IsN32() || ABI.IsN64()) && (Op.getValueType() == MVT::f64))
2494 return lowerFABS64(Op, DAG, Subtarget.hasExtractInsert());
2496 return lowerFABS32(Op, DAG, Subtarget.hasExtractInsert());
2499 SDValue MipsTargetLowering::
2500 lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
2502 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
2503 DAG.getContext()->emitError(
2504 "return address can be determined only for current frame");
2508 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2509 MFI.setFrameAddressIsTaken(true);
2510 EVT VT = Op.getValueType();
2512 SDValue FrameAddr = DAG.getCopyFromReg(
2513 DAG.getEntryNode(), DL, ABI.IsN64() ? Mips::FP_64 : Mips::FP, VT);
2517 SDValue MipsTargetLowering::lowerRETURNADDR(SDValue Op,
2518 SelectionDAG &DAG) const {
2519 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
2523 if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() != 0) {
2524 DAG.getContext()->emitError(
2525 "return address can be determined only for current frame");
2529 MachineFunction &MF = DAG.getMachineFunction();
2530 MachineFrameInfo &MFI = MF.getFrameInfo();
2531 MVT VT = Op.getSimpleValueType();
2532 unsigned RA = ABI.IsN64() ? Mips::RA_64 : Mips::RA;
2533 MFI.setReturnAddressIsTaken(true);
2535 // Return RA, which contains the return address. Mark it an implicit live-in.
2536 unsigned Reg = MF.addLiveIn(RA, getRegClassFor(VT));
2537 return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(Op), Reg, VT);
2540 // An EH_RETURN is the result of lowering llvm.eh.return which in turn is
2541 // generated from __builtin_eh_return (offset, handler)
2542 // The effect of this is to adjust the stack pointer by "offset"
2543 // and then branch to "handler".
2544 SDValue MipsTargetLowering::lowerEH_RETURN(SDValue Op, SelectionDAG &DAG)
2546 MachineFunction &MF = DAG.getMachineFunction();
2547 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
2549 MipsFI->setCallsEhReturn();
2550 SDValue Chain = Op.getOperand(0);
2551 SDValue Offset = Op.getOperand(1);
2552 SDValue Handler = Op.getOperand(2);
2554 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
2556 // Store stack offset in V1, store jump target in V0. Glue CopyToReg and
2557 // EH_RETURN nodes, so that instructions are emitted back-to-back.
2558 unsigned OffsetReg = ABI.IsN64() ? Mips::V1_64 : Mips::V1;
2559 unsigned AddrReg = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
2560 Chain = DAG.getCopyToReg(Chain, DL, OffsetReg, Offset, SDValue());
2561 Chain = DAG.getCopyToReg(Chain, DL, AddrReg, Handler, Chain.getValue(1));
2562 return DAG.getNode(MipsISD::EH_RETURN, DL, MVT::Other, Chain,
2563 DAG.getRegister(OffsetReg, Ty),
2564 DAG.getRegister(AddrReg, getPointerTy(MF.getDataLayout())),
2568 SDValue MipsTargetLowering::lowerATOMIC_FENCE(SDValue Op,
2569 SelectionDAG &DAG) const {
2570 // FIXME: Need pseudo-fence for 'singlethread' fences
2571 // FIXME: Set SType for weaker fences where supported/appropriate.
2574 return DAG.getNode(MipsISD::Sync, DL, MVT::Other, Op.getOperand(0),
2575 DAG.getConstant(SType, DL, MVT::i32));
2578 SDValue MipsTargetLowering::lowerShiftLeftParts(SDValue Op,
2579 SelectionDAG &DAG) const {
2581 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2583 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2584 SDValue Shamt = Op.getOperand(2);
2585 // if shamt < (VT.bits):
2586 // lo = (shl lo, shamt)
2587 // hi = (or (shl hi, shamt) (srl (srl lo, 1), ~shamt))
2590 // hi = (shl lo, shamt[4:0])
2591 SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2592 DAG.getConstant(-1, DL, MVT::i32));
2593 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo,
2594 DAG.getConstant(1, DL, VT));
2595 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, Not);
2596 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
2597 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2598 SDValue ShiftLeftLo = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
2599 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2600 DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
2601 Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2602 DAG.getConstant(0, DL, VT), ShiftLeftLo);
2603 Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftLeftLo, Or);
2605 SDValue Ops[2] = {Lo, Hi};
2606 return DAG.getMergeValues(Ops, DL);
2609 SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
2612 SDValue Lo = Op.getOperand(0), Hi = Op.getOperand(1);
2613 SDValue Shamt = Op.getOperand(2);
2614 MVT VT = Subtarget.isGP64bit() ? MVT::i64 : MVT::i32;
2616 // if shamt < (VT.bits):
2617 // lo = (or (shl (shl hi, 1), ~shamt) (srl lo, shamt))
2619 // hi = (sra hi, shamt)
2621 // hi = (srl hi, shamt)
2624 // lo = (sra hi, shamt[4:0])
2625 // hi = (sra hi, 31)
2627 // lo = (srl hi, shamt[4:0])
2629 SDValue Not = DAG.getNode(ISD::XOR, DL, MVT::i32, Shamt,
2630 DAG.getConstant(-1, DL, MVT::i32));
2631 SDValue ShiftLeft1Hi = DAG.getNode(ISD::SHL, DL, VT, Hi,
2632 DAG.getConstant(1, DL, VT));
2633 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, ShiftLeft1Hi, Not);
2634 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
2635 SDValue Or = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
2636 SDValue ShiftRightHi = DAG.getNode(IsSRA ? ISD::SRA : ISD::SRL,
2638 SDValue Cond = DAG.getNode(ISD::AND, DL, MVT::i32, Shamt,
2639 DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32));
2640 SDValue Ext = DAG.getNode(ISD::SRA, DL, VT, Hi,
2641 DAG.getConstant(VT.getSizeInBits() - 1, DL, VT));
2643 if (!(Subtarget.hasMips4() || Subtarget.hasMips32())) {
2644 SDVTList VTList = DAG.getVTList(VT, VT);
2645 return DAG.getNode(Subtarget.isGP64bit() ? Mips::PseudoD_SELECT_I64
2646 : Mips::PseudoD_SELECT_I,
2647 DL, VTList, Cond, ShiftRightHi,
2648 IsSRA ? Ext : DAG.getConstant(0, DL, VT), Or,
2652 Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or);
2653 Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond,
2654 IsSRA ? Ext : DAG.getConstant(0, DL, VT), ShiftRightHi);
2656 SDValue Ops[2] = {Lo, Hi};
2657 return DAG.getMergeValues(Ops, DL);
2660 static SDValue createLoadLR(unsigned Opc, SelectionDAG &DAG, LoadSDNode *LD,
2661 SDValue Chain, SDValue Src, unsigned Offset) {
2662 SDValue Ptr = LD->getBasePtr();
2663 EVT VT = LD->getValueType(0), MemVT = LD->getMemoryVT();
2664 EVT BasePtrVT = Ptr.getValueType();
2666 SDVTList VTList = DAG.getVTList(VT, MVT::Other);
2669 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2670 DAG.getConstant(Offset, DL, BasePtrVT));
2672 SDValue Ops[] = { Chain, Ptr, Src };
2673 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2674 LD->getMemOperand());
2677 // Expand an unaligned 32 or 64-bit integer load node.
2678 SDValue MipsTargetLowering::lowerLOAD(SDValue Op, SelectionDAG &DAG) const {
2679 LoadSDNode *LD = cast<LoadSDNode>(Op);
2680 EVT MemVT = LD->getMemoryVT();
2682 if (Subtarget.systemSupportsUnalignedAccess())
2685 // Return if load is aligned or if MemVT is neither i32 nor i64.
2686 if ((LD->getAlignment() >= MemVT.getSizeInBits() / 8) ||
2687 ((MemVT != MVT::i32) && (MemVT != MVT::i64)))
2690 bool IsLittle = Subtarget.isLittle();
2691 EVT VT = Op.getValueType();
2692 ISD::LoadExtType ExtType = LD->getExtensionType();
2693 SDValue Chain = LD->getChain(), Undef = DAG.getUNDEF(VT);
2695 assert((VT == MVT::i32) || (VT == MVT::i64));
2698 // (set dst, (i64 (load baseptr)))
2700 // (set tmp, (ldl (add baseptr, 7), undef))
2701 // (set dst, (ldr baseptr, tmp))
2702 if ((VT == MVT::i64) && (ExtType == ISD::NON_EXTLOAD)) {
2703 SDValue LDL = createLoadLR(MipsISD::LDL, DAG, LD, Chain, Undef,
2705 return createLoadLR(MipsISD::LDR, DAG, LD, LDL.getValue(1), LDL,
2709 SDValue LWL = createLoadLR(MipsISD::LWL, DAG, LD, Chain, Undef,
2711 SDValue LWR = createLoadLR(MipsISD::LWR, DAG, LD, LWL.getValue(1), LWL,
2715 // (set dst, (i32 (load baseptr))) or
2716 // (set dst, (i64 (sextload baseptr))) or
2717 // (set dst, (i64 (extload baseptr)))
2719 // (set tmp, (lwl (add baseptr, 3), undef))
2720 // (set dst, (lwr baseptr, tmp))
2721 if ((VT == MVT::i32) || (ExtType == ISD::SEXTLOAD) ||
2722 (ExtType == ISD::EXTLOAD))
2725 assert((VT == MVT::i64) && (ExtType == ISD::ZEXTLOAD));
2728 // (set dst, (i64 (zextload baseptr)))
2730 // (set tmp0, (lwl (add baseptr, 3), undef))
2731 // (set tmp1, (lwr baseptr, tmp0))
2732 // (set tmp2, (shl tmp1, 32))
2733 // (set dst, (srl tmp2, 32))
2735 SDValue Const32 = DAG.getConstant(32, DL, MVT::i32);
2736 SDValue SLL = DAG.getNode(ISD::SHL, DL, MVT::i64, LWR, Const32);
2737 SDValue SRL = DAG.getNode(ISD::SRL, DL, MVT::i64, SLL, Const32);
2738 SDValue Ops[] = { SRL, LWR.getValue(1) };
2739 return DAG.getMergeValues(Ops, DL);
2742 static SDValue createStoreLR(unsigned Opc, SelectionDAG &DAG, StoreSDNode *SD,
2743 SDValue Chain, unsigned Offset) {
2744 SDValue Ptr = SD->getBasePtr(), Value = SD->getValue();
2745 EVT MemVT = SD->getMemoryVT(), BasePtrVT = Ptr.getValueType();
2747 SDVTList VTList = DAG.getVTList(MVT::Other);
2750 Ptr = DAG.getNode(ISD::ADD, DL, BasePtrVT, Ptr,
2751 DAG.getConstant(Offset, DL, BasePtrVT));
2753 SDValue Ops[] = { Chain, Value, Ptr };
2754 return DAG.getMemIntrinsicNode(Opc, DL, VTList, Ops, MemVT,
2755 SD->getMemOperand());
2758 // Expand an unaligned 32 or 64-bit integer store node.
2759 static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG,
2761 SDValue Value = SD->getValue(), Chain = SD->getChain();
2762 EVT VT = Value.getValueType();
2765 // (store val, baseptr) or
2766 // (truncstore val, baseptr)
2768 // (swl val, (add baseptr, 3))
2769 // (swr val, baseptr)
2770 if ((VT == MVT::i32) || SD->isTruncatingStore()) {
2771 SDValue SWL = createStoreLR(MipsISD::SWL, DAG, SD, Chain,
2773 return createStoreLR(MipsISD::SWR, DAG, SD, SWL, IsLittle ? 0 : 3);
2776 assert(VT == MVT::i64);
2779 // (store val, baseptr)
2781 // (sdl val, (add baseptr, 7))
2782 // (sdr val, baseptr)
2783 SDValue SDL = createStoreLR(MipsISD::SDL, DAG, SD, Chain, IsLittle ? 7 : 0);
2784 return createStoreLR(MipsISD::SDR, DAG, SD, SDL, IsLittle ? 0 : 7);
2787 // Lower (store (fp_to_sint $fp) $ptr) to (store (TruncIntFP $fp), $ptr).
2788 static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG,
2790 SDValue Val = SD->getValue();
2792 if (Val.getOpcode() != ISD::FP_TO_SINT ||
2793 (Val.getValueSizeInBits() > 32 && SingleFloat))
2796 EVT FPTy = EVT::getFloatingPointVT(Val.getValueSizeInBits());
2797 SDValue Tr = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Val), FPTy,
2799 return DAG.getStore(SD->getChain(), SDLoc(SD), Tr, SD->getBasePtr(),
2800 SD->getPointerInfo(), SD->getAlignment(),
2801 SD->getMemOperand()->getFlags());
2804 SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const {
2805 StoreSDNode *SD = cast<StoreSDNode>(Op);
2806 EVT MemVT = SD->getMemoryVT();
2808 // Lower unaligned integer stores.
2809 if (!Subtarget.systemSupportsUnalignedAccess() &&
2810 (SD->getAlignment() < MemVT.getSizeInBits() / 8) &&
2811 ((MemVT == MVT::i32) || (MemVT == MVT::i64)))
2812 return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle());
2814 return lowerFP_TO_SINT_STORE(SD, DAG, Subtarget.isSingleFloat());
2817 SDValue MipsTargetLowering::lowerEH_DWARF_CFA(SDValue Op,
2818 SelectionDAG &DAG) const {
2820 // Return a fixed StackObject with offset 0 which points to the old stack
2822 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
2823 EVT ValTy = Op->getValueType(0);
2824 int FI = MFI.CreateFixedObject(Op.getValueSizeInBits() / 8, 0, false);
2825 return DAG.getFrameIndex(FI, ValTy);
2828 SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op,
2829 SelectionDAG &DAG) const {
2830 if (Op.getValueSizeInBits() > 32 && Subtarget.isSingleFloat())
2833 EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits());
2834 SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy,
2836 return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op.getValueType(), Trunc);
2839 //===----------------------------------------------------------------------===//
2840 // Calling Convention Implementation
2841 //===----------------------------------------------------------------------===//
2843 //===----------------------------------------------------------------------===//
2844 // TODO: Implement a generic logic using tblgen that can support this.
2845 // Mips O32 ABI rules:
2847 // i32 - Passed in A0, A1, A2, A3 and stack
2848 // f32 - Only passed in f32 registers if no int reg has been used yet to hold
2849 // an argument. Otherwise, passed in A1, A2, A3 and stack.
2850 // f64 - Only passed in two aliased f32 registers if no int reg has been used
2851 // yet to hold an argument. Otherwise, use A2, A3 and stack. If A1 is
2852 // not used, it must be shadowed. If only A3 is available, shadow it and
2854 // vXiX - Received as scalarized i32s, passed in A0 - A3 and the stack.
2855 // vXf32 - Passed in either a pair of registers {A0, A1}, {A2, A3} or {A0 - A3}
2856 // with the remainder spilled to the stack.
2857 // vXf64 - Passed in either {A0, A1, A2, A3} or {A2, A3} and in both cases
2858 // spilling the remainder to the stack.
2860 // For vararg functions, all arguments are passed in A0, A1, A2, A3 and stack.
2861 //===----------------------------------------------------------------------===//
2863 static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
2864 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
2865 CCState &State, ArrayRef<MCPhysReg> F64Regs) {
2866 const MipsSubtarget &Subtarget = static_cast<const MipsSubtarget &>(
2867 State.getMachineFunction().getSubtarget());
2869 static const MCPhysReg IntRegs[] = { Mips::A0, Mips::A1, Mips::A2, Mips::A3 };
2871 const MipsCCState * MipsState = static_cast<MipsCCState *>(&State);
2873 static const MCPhysReg F32Regs[] = { Mips::F12, Mips::F14 };
2875 static const MCPhysReg FloatVectorIntRegs[] = { Mips::A0, Mips::A2 };
2877 // Do not process byval args here.
2878 if (ArgFlags.isByVal())
2881 // Promote i8 and i16
2882 if (ArgFlags.isInReg() && !Subtarget.isLittle()) {
2883 if (LocVT == MVT::i8 || LocVT == MVT::i16 || LocVT == MVT::i32) {
2885 if (ArgFlags.isSExt())
2886 LocInfo = CCValAssign::SExtUpper;
2887 else if (ArgFlags.isZExt())
2888 LocInfo = CCValAssign::ZExtUpper;
2890 LocInfo = CCValAssign::AExtUpper;
2894 // Promote i8 and i16
2895 if (LocVT == MVT::i8 || LocVT == MVT::i16) {
2897 if (ArgFlags.isSExt())
2898 LocInfo = CCValAssign::SExt;
2899 else if (ArgFlags.isZExt())
2900 LocInfo = CCValAssign::ZExt;
2902 LocInfo = CCValAssign::AExt;
2907 // f32 and f64 are allocated in A0, A1, A2, A3 when either of the following
2908 // is true: function is vararg, argument is 3rd or higher, there is previous
2909 // argument which is not f32 or f64.
2910 bool AllocateFloatsInIntReg = State.isVarArg() || ValNo > 1 ||
2911 State.getFirstUnallocated(F32Regs) != ValNo;
2912 Align OrigAlign = ArgFlags.getNonZeroOrigAlign();
2913 bool isI64 = (ValVT == MVT::i32 && OrigAlign == Align(8));
2914 bool isVectorFloat = MipsState->WasOriginalArgVectorFloat(ValNo);
2916 // The MIPS vector ABI for floats passes them in a pair of registers
2917 if (ValVT == MVT::i32 && isVectorFloat) {
2918 // This is the start of an vector that was scalarized into an unknown number
2919 // of components. It doesn't matter how many there are. Allocate one of the
2920 // notional 8 byte aligned registers which map onto the argument stack, and
2921 // shadow the register lost to alignment requirements.
2922 if (ArgFlags.isSplit()) {
2923 Reg = State.AllocateReg(FloatVectorIntRegs);
2924 if (Reg == Mips::A2)
2925 State.AllocateReg(Mips::A1);
2927 State.AllocateReg(Mips::A3);
2929 // If we're an intermediate component of the split, we can just attempt to
2930 // allocate a register directly.
2931 Reg = State.AllocateReg(IntRegs);
2933 } else if (ValVT == MVT::i32 ||
2934 (ValVT == MVT::f32 && AllocateFloatsInIntReg)) {
2935 Reg = State.AllocateReg(IntRegs);
2936 // If this is the first part of an i64 arg,
2937 // the allocated register must be either A0 or A2.
2938 if (isI64 && (Reg == Mips::A1 || Reg == Mips::A3))
2939 Reg = State.AllocateReg(IntRegs);
2941 } else if (ValVT == MVT::f64 && AllocateFloatsInIntReg) {
2942 // Allocate int register and shadow next int register. If first
2943 // available register is Mips::A1 or Mips::A3, shadow it too.
2944 Reg = State.AllocateReg(IntRegs);
2945 if (Reg == Mips::A1 || Reg == Mips::A3)
2946 Reg = State.AllocateReg(IntRegs);
2947 State.AllocateReg(IntRegs);
2949 } else if (ValVT.isFloatingPoint() && !AllocateFloatsInIntReg) {
2950 // we are guaranteed to find an available float register
2951 if (ValVT == MVT::f32) {
2952 Reg = State.AllocateReg(F32Regs);
2953 // Shadow int register
2954 State.AllocateReg(IntRegs);
2956 Reg = State.AllocateReg(F64Regs);
2957 // Shadow int registers
2958 unsigned Reg2 = State.AllocateReg(IntRegs);
2959 if (Reg2 == Mips::A1 || Reg2 == Mips::A3)
2960 State.AllocateReg(IntRegs);
2961 State.AllocateReg(IntRegs);
2964 llvm_unreachable("Cannot handle this ValVT.");
2967 unsigned Offset = State.AllocateStack(ValVT.getStoreSize(), OrigAlign);
2968 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
2970 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2975 static bool CC_MipsO32_FP32(unsigned ValNo, MVT ValVT,
2976 MVT LocVT, CCValAssign::LocInfo LocInfo,
2977 ISD::ArgFlagsTy ArgFlags, CCState &State) {
2978 static const MCPhysReg F64Regs[] = { Mips::D6, Mips::D7 };
2980 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
2983 static bool CC_MipsO32_FP64(unsigned ValNo, MVT ValVT,
2984 MVT LocVT, CCValAssign::LocInfo LocInfo,
2985 ISD::ArgFlagsTy ArgFlags, CCState &State) {
2986 static const MCPhysReg F64Regs[] = { Mips::D12_64, Mips::D14_64 };
2988 return CC_MipsO32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State, F64Regs);
2991 static bool CC_MipsO32(unsigned ValNo, MVT ValVT, MVT LocVT,
2992 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
2993 CCState &State) LLVM_ATTRIBUTE_UNUSED;
2995 #include "MipsGenCallingConv.inc"
2997 CCAssignFn *MipsTargetLowering::CCAssignFnForCall() const{
2998 return CC_Mips_FixedArg;
3001 CCAssignFn *MipsTargetLowering::CCAssignFnForReturn() const{
3004 //===----------------------------------------------------------------------===//
3005 // Call Calling Convention Implementation
3006 //===----------------------------------------------------------------------===//
3008 // Return next O32 integer argument register.
3009 static unsigned getNextIntArgReg(unsigned Reg) {
3010 assert((Reg == Mips::A0) || (Reg == Mips::A2));
3011 return (Reg == Mips::A0) ? Mips::A1 : Mips::A3;
3014 SDValue MipsTargetLowering::passArgOnStack(SDValue StackPtr, unsigned Offset,
3015 SDValue Chain, SDValue Arg,
3016 const SDLoc &DL, bool IsTailCall,
3017 SelectionDAG &DAG) const {
3020 DAG.getNode(ISD::ADD, DL, getPointerTy(DAG.getDataLayout()), StackPtr,
3021 DAG.getIntPtrConstant(Offset, DL));
3022 return DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo());
3025 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
3026 int FI = MFI.CreateFixedObject(Arg.getValueSizeInBits() / 8, Offset, false);
3027 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3028 return DAG.getStore(Chain, DL, Arg, FIN, MachinePointerInfo(),
3029 /* Alignment = */ 0, MachineMemOperand::MOVolatile);
3032 void MipsTargetLowering::
3033 getOpndList(SmallVectorImpl<SDValue> &Ops,
3034 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
3035 bool IsPICCall, bool GlobalOrExternal, bool InternalLinkage,
3036 bool IsCallReloc, CallLoweringInfo &CLI, SDValue Callee,
3037 SDValue Chain) const {
3038 // Insert node "GP copy globalreg" before call to function.
3040 // R_MIPS_CALL* operators (emitted when non-internal functions are called
3041 // in PIC mode) allow symbols to be resolved via lazy binding.
3042 // The lazy binding stub requires GP to point to the GOT.
3043 // Note that we don't need GP to point to the GOT for indirect calls
3044 // (when R_MIPS_CALL* is not used for the call) because Mips linker generates
3045 // lazy binding stub for a function only when R_MIPS_CALL* are the only relocs
3046 // used for the function (that is, Mips linker doesn't generate lazy binding
3047 // stub for a function whose address is taken in the program).
3048 if (IsPICCall && !InternalLinkage && IsCallReloc) {
3049 unsigned GPReg = ABI.IsN64() ? Mips::GP_64 : Mips::GP;
3050 EVT Ty = ABI.IsN64() ? MVT::i64 : MVT::i32;
3051 RegsToPass.push_back(std::make_pair(GPReg, getGlobalReg(CLI.DAG, Ty)));
3054 // Build a sequence of copy-to-reg nodes chained together with token
3055 // chain and flag operands which copy the outgoing args into registers.
3056 // The InFlag in necessary since all emitted instructions must be
3060 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3061 Chain = CLI.DAG.getCopyToReg(Chain, CLI.DL, RegsToPass[i].first,
3062 RegsToPass[i].second, InFlag);
3063 InFlag = Chain.getValue(1);
3066 // Add argument registers to the end of the list so that they are
3067 // known live into the call.
3068 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3069 Ops.push_back(CLI.DAG.getRegister(RegsToPass[i].first,
3070 RegsToPass[i].second.getValueType()));
3072 // Add a register mask operand representing the call-preserved registers.
3073 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
3074 const uint32_t *Mask =
3075 TRI->getCallPreservedMask(CLI.DAG.getMachineFunction(), CLI.CallConv);
3076 assert(Mask && "Missing call preserved mask for calling convention");
3077 if (Subtarget.inMips16HardFloat()) {
3078 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(CLI.Callee)) {
3079 StringRef Sym = G->getGlobal()->getName();
3080 Function *F = G->getGlobal()->getParent()->getFunction(Sym);
3081 if (F && F->hasFnAttribute("__Mips16RetHelper")) {
3082 Mask = MipsRegisterInfo::getMips16RetHelperMask();
3086 Ops.push_back(CLI.DAG.getRegisterMask(Mask));
3088 if (InFlag.getNode())
3089 Ops.push_back(InFlag);
3092 void MipsTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
3093 SDNode *Node) const {
3094 switch (MI.getOpcode()) {
3098 case Mips::JALRPseudo:
3100 case Mips::JALR64Pseudo:
3101 case Mips::JALR16_MM:
3102 case Mips::JALRC16_MMR6:
3103 case Mips::TAILCALLREG:
3104 case Mips::TAILCALLREG64:
3105 case Mips::TAILCALLR6REG:
3106 case Mips::TAILCALL64R6REG:
3107 case Mips::TAILCALLREG_MM:
3108 case Mips::TAILCALLREG_MMR6: {
3109 if (!EmitJalrReloc ||
3110 Subtarget.inMips16Mode() ||
3111 !isPositionIndependent() ||
3112 Node->getNumOperands() < 1 ||
3113 Node->getOperand(0).getNumOperands() < 2) {
3116 // We are after the callee address, set by LowerCall().
3117 // If added to MI, asm printer will emit .reloc R_MIPS_JALR for the
3119 const SDValue TargetAddr = Node->getOperand(0).getOperand(1);
3121 if (const GlobalAddressSDNode *G =
3122 dyn_cast_or_null<const GlobalAddressSDNode>(TargetAddr)) {
3123 // We must not emit the R_MIPS_JALR relocation against data symbols
3124 // since this will cause run-time crashes if the linker replaces the
3125 // call instruction with a relative branch to the data symbol.
3126 if (!isa<Function>(G->getGlobal())) {
3127 LLVM_DEBUG(dbgs() << "Not adding R_MIPS_JALR against data symbol "
3128 << G->getGlobal()->getName() << "\n");
3131 Sym = G->getGlobal()->getName();
3133 else if (const ExternalSymbolSDNode *ES =
3134 dyn_cast_or_null<const ExternalSymbolSDNode>(TargetAddr)) {
3135 Sym = ES->getSymbol();
3141 MachineFunction *MF = MI.getParent()->getParent();
3142 MCSymbol *S = MF->getContext().getOrCreateSymbol(Sym);
3143 LLVM_DEBUG(dbgs() << "Adding R_MIPS_JALR against " << Sym << "\n");
3144 MI.addOperand(MachineOperand::CreateMCSymbol(S, MipsII::MO_JALR));
3149 /// LowerCall - functions arguments are copied from virtual regs to
3150 /// (physical regs)/(stack frame), CALLSEQ_START and CALLSEQ_END are emitted.
3152 MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3153 SmallVectorImpl<SDValue> &InVals) const {
3154 SelectionDAG &DAG = CLI.DAG;
3156 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3157 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3158 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3159 SDValue Chain = CLI.Chain;
3160 SDValue Callee = CLI.Callee;
3161 bool &IsTailCall = CLI.IsTailCall;
3162 CallingConv::ID CallConv = CLI.CallConv;
3163 bool IsVarArg = CLI.IsVarArg;
3165 MachineFunction &MF = DAG.getMachineFunction();
3166 MachineFrameInfo &MFI = MF.getFrameInfo();
3167 const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
3168 MipsFunctionInfo *FuncInfo = MF.getInfo<MipsFunctionInfo>();
3169 bool IsPIC = isPositionIndependent();
3171 // Analyze operands of the call, assigning locations to each operand.
3172 SmallVector<CCValAssign, 16> ArgLocs;
3174 CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs, *DAG.getContext(),
3175 MipsCCState::getSpecialCallingConvForCallee(Callee.getNode(), Subtarget));
3177 const ExternalSymbolSDNode *ES =
3178 dyn_cast_or_null<const ExternalSymbolSDNode>(Callee.getNode());
3180 // There is one case where CALLSEQ_START..CALLSEQ_END can be nested, which
3181 // is during the lowering of a call with a byval argument which produces
3182 // a call to memcpy. For the O32 case, this causes the caller to allocate
3183 // stack space for the reserved argument area for the callee, then recursively
3184 // again for the memcpy call. In the NEWABI case, this doesn't occur as those
3185 // ABIs mandate that the callee allocates the reserved argument area. We do
3186 // still produce nested CALLSEQ_START..CALLSEQ_END with zero space though.
3188 // If the callee has a byval argument and memcpy is used, we are mandated
3189 // to already have produced a reserved argument area for the callee for O32.
3190 // Therefore, the reserved argument area can be reused for both calls.
3192 // Other cases of calling memcpy cannot have a chain with a CALLSEQ_START
3193 // present, as we have yet to hook that node onto the chain.
3195 // Hence, the CALLSEQ_START and CALLSEQ_END nodes can be eliminated in this
3196 // case. GCC does a similar trick, in that wherever possible, it calculates
3197 // the maximum out going argument area (including the reserved area), and
3198 // preallocates the stack space on entrance to the caller.
3200 // FIXME: We should do the same for efficiency and space.
3202 // Note: The check on the calling convention below must match
3203 // MipsABIInfo::GetCalleeAllocdArgSizeInBytes().
3204 bool MemcpyInByVal = ES &&
3205 StringRef(ES->getSymbol()) == StringRef("memcpy") &&
3206 CallConv != CallingConv::Fast &&
3207 Chain.getOpcode() == ISD::CALLSEQ_START;
3209 // Allocate the reserved argument area. It seems strange to do this from the
3210 // caller side but removing it breaks the frame size calculation.
3211 unsigned ReservedArgArea =
3212 MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
3213 CCInfo.AllocateStack(ReservedArgArea, Align(1));
3215 CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),
3216 ES ? ES->getSymbol() : nullptr);
3218 // Get a count of how many bytes are to be pushed on the stack.
3219 unsigned NextStackOffset = CCInfo.getNextStackOffset();
3221 // Call site info for function parameters tracking.
3222 MachineFunction::CallSiteInfo CSInfo;
3224 // Check if it's really possible to do a tail call. Restrict it to functions
3225 // that are part of this compilation unit.
3226 bool InternalLinkage = false;
3228 IsTailCall = isEligibleForTailCallOptimization(
3229 CCInfo, NextStackOffset, *MF.getInfo<MipsFunctionInfo>());
3230 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3231 InternalLinkage = G->getGlobal()->hasInternalLinkage();
3232 IsTailCall &= (InternalLinkage || G->getGlobal()->hasLocalLinkage() ||
3233 G->getGlobal()->hasPrivateLinkage() ||
3234 G->getGlobal()->hasHiddenVisibility() ||
3235 G->getGlobal()->hasProtectedVisibility());
3238 if (!IsTailCall && CLI.CB && CLI.CB->isMustTailCall())
3239 report_fatal_error("failed to perform tail call elimination on a call "
3240 "site marked musttail");
3245 // Chain is the output chain of the last Load/Store or CopyToReg node.
3246 // ByValChain is the output chain of the last Memcpy node created for copying
3247 // byval arguments to the stack.
3248 unsigned StackAlignment = TFL->getStackAlignment();
3249 NextStackOffset = alignTo(NextStackOffset, StackAlignment);
3250 SDValue NextStackOffsetVal = DAG.getIntPtrConstant(NextStackOffset, DL, true);
3252 if (!(IsTailCall || MemcpyInByVal))
3253 Chain = DAG.getCALLSEQ_START(Chain, NextStackOffset, 0, DL);
3256 DAG.getCopyFromReg(Chain, DL, ABI.IsN64() ? Mips::SP_64 : Mips::SP,
3257 getPointerTy(DAG.getDataLayout()));
3259 std::deque<std::pair<unsigned, SDValue>> RegsToPass;
3260 SmallVector<SDValue, 8> MemOpChains;
3262 CCInfo.rewindByValRegsInfo();
3264 // Walk the register/memloc assignments, inserting copies/loads.
3265 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3266 SDValue Arg = OutVals[i];
3267 CCValAssign &VA = ArgLocs[i];
3268 MVT ValVT = VA.getValVT(), LocVT = VA.getLocVT();
3269 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3270 bool UseUpperBits = false;
3273 if (Flags.isByVal()) {
3274 unsigned FirstByValReg, LastByValReg;
3275 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3276 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3278 assert(Flags.getByValSize() &&
3279 "ByVal args of size 0 should have been ignored by front-end.");
3280 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3281 assert(!IsTailCall &&
3282 "Do not tail-call optimize if there is a byval argument.");
3283 passByValArg(Chain, DL, RegsToPass, MemOpChains, StackPtr, MFI, DAG, Arg,
3284 FirstByValReg, LastByValReg, Flags, Subtarget.isLittle(),
3286 CCInfo.nextInRegsParam();
3290 // Promote the value if needed.
3291 switch (VA.getLocInfo()) {
3293 llvm_unreachable("Unknown loc info!");
3294 case CCValAssign::Full:
3295 if (VA.isRegLoc()) {
3296 if ((ValVT == MVT::f32 && LocVT == MVT::i32) ||
3297 (ValVT == MVT::f64 && LocVT == MVT::i64) ||
3298 (ValVT == MVT::i64 && LocVT == MVT::f64))
3299 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3300 else if (ValVT == MVT::f64 && LocVT == MVT::i32) {
3301 SDValue Lo = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
3302 Arg, DAG.getConstant(0, DL, MVT::i32));
3303 SDValue Hi = DAG.getNode(MipsISD::ExtractElementF64, DL, MVT::i32,
3304 Arg, DAG.getConstant(1, DL, MVT::i32));
3305 if (!Subtarget.isLittle())
3307 Register LocRegLo = VA.getLocReg();
3308 unsigned LocRegHigh = getNextIntArgReg(LocRegLo);
3309 RegsToPass.push_back(std::make_pair(LocRegLo, Lo));
3310 RegsToPass.push_back(std::make_pair(LocRegHigh, Hi));
3315 case CCValAssign::BCvt:
3316 Arg = DAG.getNode(ISD::BITCAST, DL, LocVT, Arg);
3318 case CCValAssign::SExtUpper:
3319 UseUpperBits = true;
3321 case CCValAssign::SExt:
3322 Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, LocVT, Arg);
3324 case CCValAssign::ZExtUpper:
3325 UseUpperBits = true;
3327 case CCValAssign::ZExt:
3328 Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, LocVT, Arg);
3330 case CCValAssign::AExtUpper:
3331 UseUpperBits = true;
3333 case CCValAssign::AExt:
3334 Arg = DAG.getNode(ISD::ANY_EXTEND, DL, LocVT, Arg);
3339 unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
3340 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3342 ISD::SHL, DL, VA.getLocVT(), Arg,
3343 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3346 // Arguments that can be passed on register must be kept at
3347 // RegsToPass vector
3348 if (VA.isRegLoc()) {
3349 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
3351 // If the parameter is passed through reg $D, which splits into
3352 // two physical registers, avoid creating call site info.
3353 if (Mips::AFGR64RegClass.contains(VA.getLocReg()))
3356 // Collect CSInfo about which register passes which parameter.
3357 const TargetOptions &Options = DAG.getTarget().Options;
3358 if (Options.SupportsDebugEntryValues)
3359 CSInfo.emplace_back(VA.getLocReg(), i);
3364 // Register can't get to this point...
3365 assert(VA.isMemLoc());
3367 // emit ISD::STORE whichs stores the
3368 // parameter value to a stack Location
3369 MemOpChains.push_back(passArgOnStack(StackPtr, VA.getLocMemOffset(),
3370 Chain, Arg, DL, IsTailCall, DAG));
3373 // Transform all store nodes into one single node because all store
3374 // nodes are independent of each other.
3375 if (!MemOpChains.empty())
3376 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
3378 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
3379 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
3380 // node so that legalize doesn't hack it.
3382 EVT Ty = Callee.getValueType();
3383 bool GlobalOrExternal = false, IsCallReloc = false;
3385 // The long-calls feature is ignored in case of PIC.
3386 // While we do not support -mshared / -mno-shared properly,
3387 // ignore long-calls in case of -mabicalls too.
3388 if (!Subtarget.isABICalls() && !IsPIC) {
3389 // If the function should be called using "long call",
3390 // get its address into a register to prevent using
3391 // of the `jal` instruction for the direct call.
3392 if (auto *N = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3393 if (Subtarget.useLongCalls())
3394 Callee = Subtarget.hasSym32()
3395 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3396 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3397 } else if (auto *N = dyn_cast<GlobalAddressSDNode>(Callee)) {
3398 bool UseLongCalls = Subtarget.useLongCalls();
3399 // If the function has long-call/far/near attribute
3400 // it overrides command line switch pased to the backend.
3401 if (auto *F = dyn_cast<Function>(N->getGlobal())) {
3402 if (F->hasFnAttribute("long-call"))
3403 UseLongCalls = true;
3404 else if (F->hasFnAttribute("short-call"))
3405 UseLongCalls = false;
3408 Callee = Subtarget.hasSym32()
3409 ? getAddrNonPIC(N, SDLoc(N), Ty, DAG)
3410 : getAddrNonPICSym64(N, SDLoc(N), Ty, DAG);
3414 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
3416 const GlobalValue *Val = G->getGlobal();
3417 InternalLinkage = Val->hasInternalLinkage();
3419 if (InternalLinkage)
3420 Callee = getAddrLocal(G, DL, Ty, DAG, ABI.IsN32() || ABI.IsN64());
3421 else if (Subtarget.useXGOT()) {
3422 Callee = getAddrGlobalLargeGOT(G, DL, Ty, DAG, MipsII::MO_CALL_HI16,
3423 MipsII::MO_CALL_LO16, Chain,
3424 FuncInfo->callPtrInfo(MF, Val));
3427 Callee = getAddrGlobal(G, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3428 FuncInfo->callPtrInfo(MF, Val));
3432 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL,
3433 getPointerTy(DAG.getDataLayout()), 0,
3434 MipsII::MO_NO_FLAG);
3435 GlobalOrExternal = true;
3437 else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3438 const char *Sym = S->getSymbol();
3440 if (!IsPIC) // static
3441 Callee = DAG.getTargetExternalSymbol(
3442 Sym, getPointerTy(DAG.getDataLayout()), MipsII::MO_NO_FLAG);
3443 else if (Subtarget.useXGOT()) {
3444 Callee = getAddrGlobalLargeGOT(S, DL, Ty, DAG, MipsII::MO_CALL_HI16,
3445 MipsII::MO_CALL_LO16, Chain,
3446 FuncInfo->callPtrInfo(MF, Sym));
3449 Callee = getAddrGlobal(S, DL, Ty, DAG, MipsII::MO_GOT_CALL, Chain,
3450 FuncInfo->callPtrInfo(MF, Sym));
3454 GlobalOrExternal = true;
3457 SmallVector<SDValue, 8> Ops(1, Chain);
3458 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3460 getOpndList(Ops, RegsToPass, IsPIC, GlobalOrExternal, InternalLinkage,
3461 IsCallReloc, CLI, Callee, Chain);
3464 MF.getFrameInfo().setHasTailCall();
3465 SDValue Ret = DAG.getNode(MipsISD::TailCall, DL, MVT::Other, Ops);
3466 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
3470 Chain = DAG.getNode(MipsISD::JmpLink, DL, NodeTys, Ops);
3471 SDValue InFlag = Chain.getValue(1);
3473 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
3475 // Create the CALLSEQ_END node in the case of where it is not a call to
3477 if (!(MemcpyInByVal)) {
3478 Chain = DAG.getCALLSEQ_END(Chain, NextStackOffsetVal,
3479 DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
3480 InFlag = Chain.getValue(1);
3483 // Handle result values, copying them out of physregs into vregs that we
3485 return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
3489 /// LowerCallResult - Lower the result values of a call into the
3490 /// appropriate copies out of appropriate physical registers.
3491 SDValue MipsTargetLowering::LowerCallResult(
3492 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool IsVarArg,
3493 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3494 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
3495 TargetLowering::CallLoweringInfo &CLI) const {
3496 // Assign locations to each value returned by this call.
3497 SmallVector<CCValAssign, 16> RVLocs;
3498 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
3501 const ExternalSymbolSDNode *ES =
3502 dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
3503 CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
3504 ES ? ES->getSymbol() : nullptr);
3506 // Copy all of the result registers out of their specified physreg.
3507 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3508 CCValAssign &VA = RVLocs[i];
3509 assert(VA.isRegLoc() && "Can only return in registers!");
3511 SDValue Val = DAG.getCopyFromReg(Chain, DL, RVLocs[i].getLocReg(),
3512 RVLocs[i].getLocVT(), InFlag);
3513 Chain = Val.getValue(1);
3514 InFlag = Val.getValue(2);
3516 if (VA.isUpperBitsInLoc()) {
3517 unsigned ValSizeInBits = Ins[i].ArgVT.getSizeInBits();
3518 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3520 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3522 Shift, DL, VA.getLocVT(), Val,
3523 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3526 switch (VA.getLocInfo()) {
3528 llvm_unreachable("Unknown loc info!");
3529 case CCValAssign::Full:
3531 case CCValAssign::BCvt:
3532 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
3534 case CCValAssign::AExt:
3535 case CCValAssign::AExtUpper:
3536 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3538 case CCValAssign::ZExt:
3539 case CCValAssign::ZExtUpper:
3540 Val = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Val,
3541 DAG.getValueType(VA.getValVT()));
3542 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3544 case CCValAssign::SExt:
3545 case CCValAssign::SExtUpper:
3546 Val = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Val,
3547 DAG.getValueType(VA.getValVT()));
3548 Val = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Val);
3552 InVals.push_back(Val);
3558 static SDValue UnpackFromArgumentSlot(SDValue Val, const CCValAssign &VA,
3559 EVT ArgVT, const SDLoc &DL,
3560 SelectionDAG &DAG) {
3561 MVT LocVT = VA.getLocVT();
3562 EVT ValVT = VA.getValVT();
3564 // Shift into the upper bits if necessary.
3565 switch (VA.getLocInfo()) {
3568 case CCValAssign::AExtUpper:
3569 case CCValAssign::SExtUpper:
3570 case CCValAssign::ZExtUpper: {
3571 unsigned ValSizeInBits = ArgVT.getSizeInBits();
3572 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3574 VA.getLocInfo() == CCValAssign::ZExtUpper ? ISD::SRL : ISD::SRA;
3576 Opcode, DL, VA.getLocVT(), Val,
3577 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3582 // If this is an value smaller than the argument slot size (32-bit for O32,
3583 // 64-bit for N32/N64), it has been promoted in some way to the argument slot
3584 // size. Extract the value and insert any appropriate assertions regarding
3585 // sign/zero extension.
3586 switch (VA.getLocInfo()) {
3588 llvm_unreachable("Unknown loc info!");
3589 case CCValAssign::Full:
3591 case CCValAssign::AExtUpper:
3592 case CCValAssign::AExt:
3593 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3595 case CCValAssign::SExtUpper:
3596 case CCValAssign::SExt:
3597 Val = DAG.getNode(ISD::AssertSext, DL, LocVT, Val, DAG.getValueType(ValVT));
3598 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3600 case CCValAssign::ZExtUpper:
3601 case CCValAssign::ZExt:
3602 Val = DAG.getNode(ISD::AssertZext, DL, LocVT, Val, DAG.getValueType(ValVT));
3603 Val = DAG.getNode(ISD::TRUNCATE, DL, ValVT, Val);
3605 case CCValAssign::BCvt:
3606 Val = DAG.getNode(ISD::BITCAST, DL, ValVT, Val);
3613 //===----------------------------------------------------------------------===//
3614 // Formal Arguments Calling Convention Implementation
3615 //===----------------------------------------------------------------------===//
3616 /// LowerFormalArguments - transform physical registers into virtual registers
3617 /// and generate load operations for arguments places on the stack.
3618 SDValue MipsTargetLowering::LowerFormalArguments(
3619 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
3620 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
3621 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3622 MachineFunction &MF = DAG.getMachineFunction();
3623 MachineFrameInfo &MFI = MF.getFrameInfo();
3624 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3626 MipsFI->setVarArgsFrameIndex(0);
3628 // Used with vargs to acumulate store chains.
3629 std::vector<SDValue> OutChains;
3631 // Assign locations to all of the incoming arguments.
3632 SmallVector<CCValAssign, 16> ArgLocs;
3633 MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
3635 CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), Align(1));
3636 const Function &Func = DAG.getMachineFunction().getFunction();
3637 Function::const_arg_iterator FuncArg = Func.arg_begin();
3639 if (Func.hasFnAttribute("interrupt") && !Func.arg_empty())
3641 "Functions with the interrupt attribute cannot have arguments!");
3643 CCInfo.AnalyzeFormalArguments(Ins, CC_Mips_FixedArg);
3644 MipsFI->setFormalArgInfo(CCInfo.getNextStackOffset(),
3645 CCInfo.getInRegsParamsCount() > 0);
3647 unsigned CurArgIdx = 0;
3648 CCInfo.rewindByValRegsInfo();
3650 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3651 CCValAssign &VA = ArgLocs[i];
3652 if (Ins[i].isOrigArg()) {
3653 std::advance(FuncArg, Ins[i].getOrigArgIndex() - CurArgIdx);
3654 CurArgIdx = Ins[i].getOrigArgIndex();
3656 EVT ValVT = VA.getValVT();
3657 ISD::ArgFlagsTy Flags = Ins[i].Flags;
3658 bool IsRegLoc = VA.isRegLoc();
3660 if (Flags.isByVal()) {
3661 assert(Ins[i].isOrigArg() && "Byval arguments cannot be implicit");
3662 unsigned FirstByValReg, LastByValReg;
3663 unsigned ByValIdx = CCInfo.getInRegsParamsProcessed();
3664 CCInfo.getInRegsParamInfo(ByValIdx, FirstByValReg, LastByValReg);
3666 assert(Flags.getByValSize() &&
3667 "ByVal args of size 0 should have been ignored by front-end.");
3668 assert(ByValIdx < CCInfo.getInRegsParamsCount());
3669 copyByValRegs(Chain, DL, OutChains, DAG, Flags, InVals, &*FuncArg,
3670 FirstByValReg, LastByValReg, VA, CCInfo);
3671 CCInfo.nextInRegsParam();
3675 // Arguments stored on registers
3677 MVT RegVT = VA.getLocVT();
3678 Register ArgReg = VA.getLocReg();
3679 const TargetRegisterClass *RC = getRegClassFor(RegVT);
3681 // Transform the arguments stored on
3682 // physical registers into virtual ones
3683 unsigned Reg = addLiveIn(DAG.getMachineFunction(), ArgReg, RC);
3684 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);
3686 ArgValue = UnpackFromArgumentSlot(ArgValue, VA, Ins[i].ArgVT, DL, DAG);
3688 // Handle floating point arguments passed in integer registers and
3689 // long double arguments passed in floating point registers.
3690 if ((RegVT == MVT::i32 && ValVT == MVT::f32) ||
3691 (RegVT == MVT::i64 && ValVT == MVT::f64) ||
3692 (RegVT == MVT::f64 && ValVT == MVT::i64))
3693 ArgValue = DAG.getNode(ISD::BITCAST, DL, ValVT, ArgValue);
3694 else if (ABI.IsO32() && RegVT == MVT::i32 &&
3695 ValVT == MVT::f64) {
3696 unsigned Reg2 = addLiveIn(DAG.getMachineFunction(),
3697 getNextIntArgReg(ArgReg), RC);
3698 SDValue ArgValue2 = DAG.getCopyFromReg(Chain, DL, Reg2, RegVT);
3699 if (!Subtarget.isLittle())
3700 std::swap(ArgValue, ArgValue2);
3701 ArgValue = DAG.getNode(MipsISD::BuildPairF64, DL, MVT::f64,
3702 ArgValue, ArgValue2);
3705 InVals.push_back(ArgValue);
3706 } else { // VA.isRegLoc()
3707 MVT LocVT = VA.getLocVT();
3710 // We ought to be able to use LocVT directly but O32 sets it to i32
3711 // when allocating floating point values to integer registers.
3712 // This shouldn't influence how we load the value into registers unless
3713 // we are targeting softfloat.
3714 if (VA.getValVT().isFloatingPoint() && !Subtarget.useSoftFloat())
3715 LocVT = VA.getValVT();
3719 assert(VA.isMemLoc());
3721 // The stack pointer offset is relative to the caller stack frame.
3722 int FI = MFI.CreateFixedObject(LocVT.getSizeInBits() / 8,
3723 VA.getLocMemOffset(), true);
3725 // Create load nodes to retrieve arguments from the stack
3726 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3727 SDValue ArgValue = DAG.getLoad(
3728 LocVT, DL, Chain, FIN,
3729 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3730 OutChains.push_back(ArgValue.getValue(1));
3732 ArgValue = UnpackFromArgumentSlot(ArgValue, VA, Ins[i].ArgVT, DL, DAG);
3734 InVals.push_back(ArgValue);
3738 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3739 // The mips ABIs for returning structs by value requires that we copy
3740 // the sret argument into $v0 for the return. Save the argument into
3741 // a virtual register so that we can access it from the return points.
3742 if (Ins[i].Flags.isSRet()) {
3743 unsigned Reg = MipsFI->getSRetReturnReg();
3745 Reg = MF.getRegInfo().createVirtualRegister(
3746 getRegClassFor(ABI.IsN64() ? MVT::i64 : MVT::i32));
3747 MipsFI->setSRetReturnReg(Reg);
3749 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), DL, Reg, InVals[i]);
3750 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Copy, Chain);
3756 writeVarArgRegs(OutChains, Chain, DL, DAG, CCInfo);
3758 // All stores are grouped in one node to allow the matching between
3759 // the size of Ins and InVals. This only happens when on varg functions
3760 if (!OutChains.empty()) {
3761 OutChains.push_back(Chain);
3762 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
3768 //===----------------------------------------------------------------------===//
3769 // Return Value Calling Convention Implementation
3770 //===----------------------------------------------------------------------===//
3773 MipsTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
3774 MachineFunction &MF, bool IsVarArg,
3775 const SmallVectorImpl<ISD::OutputArg> &Outs,
3776 LLVMContext &Context) const {
3777 SmallVector<CCValAssign, 16> RVLocs;
3778 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
3779 return CCInfo.CheckReturn(Outs, RetCC_Mips);
3782 bool MipsTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
3783 bool IsSigned) const {
3784 if ((ABI.IsN32() || ABI.IsN64()) && Type == MVT::i32)
3791 MipsTargetLowering::LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
3793 SelectionDAG &DAG) const {
3794 MachineFunction &MF = DAG.getMachineFunction();
3795 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3799 return DAG.getNode(MipsISD::ERet, DL, MVT::Other, RetOps);
3803 MipsTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
3805 const SmallVectorImpl<ISD::OutputArg> &Outs,
3806 const SmallVectorImpl<SDValue> &OutVals,
3807 const SDLoc &DL, SelectionDAG &DAG) const {
3808 // CCValAssign - represent the assignment of
3809 // the return value to a location
3810 SmallVector<CCValAssign, 16> RVLocs;
3811 MachineFunction &MF = DAG.getMachineFunction();
3813 // CCState - Info about the registers and stack slot.
3814 MipsCCState CCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
3816 // Analyze return values.
3817 CCInfo.AnalyzeReturn(Outs, RetCC_Mips);
3820 SmallVector<SDValue, 4> RetOps(1, Chain);
3822 // Copy the result values into the output registers.
3823 for (unsigned i = 0; i != RVLocs.size(); ++i) {
3824 SDValue Val = OutVals[i];
3825 CCValAssign &VA = RVLocs[i];
3826 assert(VA.isRegLoc() && "Can only return in registers!");
3827 bool UseUpperBits = false;
3829 switch (VA.getLocInfo()) {
3831 llvm_unreachable("Unknown loc info!");
3832 case CCValAssign::Full:
3834 case CCValAssign::BCvt:
3835 Val = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Val);
3837 case CCValAssign::AExtUpper:
3838 UseUpperBits = true;
3840 case CCValAssign::AExt:
3841 Val = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Val);
3843 case CCValAssign::ZExtUpper:
3844 UseUpperBits = true;
3846 case CCValAssign::ZExt:
3847 Val = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Val);
3849 case CCValAssign::SExtUpper:
3850 UseUpperBits = true;
3852 case CCValAssign::SExt:
3853 Val = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Val);
3858 unsigned ValSizeInBits = Outs[i].ArgVT.getSizeInBits();
3859 unsigned LocSizeInBits = VA.getLocVT().getSizeInBits();
3861 ISD::SHL, DL, VA.getLocVT(), Val,
3862 DAG.getConstant(LocSizeInBits - ValSizeInBits, DL, VA.getLocVT()));
3865 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Flag);
3867 // Guarantee that all emitted copies are stuck together with flags.
3868 Flag = Chain.getValue(1);
3869 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3872 // The mips ABIs for returning structs by value requires that we copy
3873 // the sret argument into $v0 for the return. We saved the argument into
3874 // a virtual register in the entry block, so now we copy the value out
3876 if (MF.getFunction().hasStructRetAttr()) {
3877 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
3878 unsigned Reg = MipsFI->getSRetReturnReg();
3881 llvm_unreachable("sret virtual register not created in the entry block");
3883 DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(DAG.getDataLayout()));
3884 unsigned V0 = ABI.IsN64() ? Mips::V0_64 : Mips::V0;
3886 Chain = DAG.getCopyToReg(Chain, DL, V0, Val, Flag);
3887 Flag = Chain.getValue(1);
3888 RetOps.push_back(DAG.getRegister(V0, getPointerTy(DAG.getDataLayout())));
3891 RetOps[0] = Chain; // Update chain.
3893 // Add the flag if we have it.
3895 RetOps.push_back(Flag);
3897 // ISRs must use "eret".
3898 if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt"))
3899 return LowerInterruptReturn(RetOps, DL, DAG);
3901 // Standard return on Mips is a "jr $ra"
3902 return DAG.getNode(MipsISD::Ret, DL, MVT::Other, RetOps);
3905 //===----------------------------------------------------------------------===//
3906 // Mips Inline Assembly Support
3907 //===----------------------------------------------------------------------===//
3909 /// getConstraintType - Given a constraint letter, return the type of
3910 /// constraint it is for this target.
3911 MipsTargetLowering::ConstraintType
3912 MipsTargetLowering::getConstraintType(StringRef Constraint) const {
3913 // Mips specific constraints
3914 // GCC config/mips/constraints.md
3916 // 'd' : An address register. Equivalent to r
3917 // unless generating MIPS16 code.
3918 // 'y' : Equivalent to r; retained for
3919 // backwards compatibility.
3920 // 'c' : A register suitable for use in an indirect
3921 // jump. This will always be $25 for -mabicalls.
3922 // 'l' : The lo register. 1 word storage.
3923 // 'x' : The hilo register pair. Double word storage.
3924 if (Constraint.size() == 1) {
3925 switch (Constraint[0]) {
3933 return C_RegisterClass;
3939 if (Constraint == "ZC")
3942 return TargetLowering::getConstraintType(Constraint);
3945 /// Examine constraint type and operand type and determine a weight value.
3946 /// This object must already have been set up with the operand type
3947 /// and the current alternative constraint selected.
3948 TargetLowering::ConstraintWeight
3949 MipsTargetLowering::getSingleConstraintMatchWeight(
3950 AsmOperandInfo &info, const char *constraint) const {
3951 ConstraintWeight weight = CW_Invalid;
3952 Value *CallOperandVal = info.CallOperandVal;
3953 // If we don't have a value, we can't do a match,
3954 // but allow it at the lowest weight.
3955 if (!CallOperandVal)
3957 Type *type = CallOperandVal->getType();
3958 // Look at the constraint type.
3959 switch (*constraint) {
3961 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
3965 if (type->isIntegerTy())
3966 weight = CW_Register;
3968 case 'f': // FPU or MSA register
3969 if (Subtarget.hasMSA() && type->isVectorTy() &&
3970 type->getPrimitiveSizeInBits().getFixedSize() == 128)
3971 weight = CW_Register;
3972 else if (type->isFloatTy())
3973 weight = CW_Register;
3975 case 'c': // $25 for indirect jumps
3976 case 'l': // lo register
3977 case 'x': // hilo register pair
3978 if (type->isIntegerTy())
3979 weight = CW_SpecificReg;
3981 case 'I': // signed 16 bit immediate
3982 case 'J': // integer zero
3983 case 'K': // unsigned 16 bit immediate
3984 case 'L': // signed 32 bit immediate where lower 16 bits are 0
3985 case 'N': // immediate in the range of -65535 to -1 (inclusive)
3986 case 'O': // signed 15 bit immediate (+- 16383)
3987 case 'P': // immediate in the range of 65535 to 1 (inclusive)
3988 if (isa<ConstantInt>(CallOperandVal))
3989 weight = CW_Constant;
3998 /// This is a helper function to parse a physical register string and split it
3999 /// into non-numeric and numeric parts (Prefix and Reg). The first boolean flag
4000 /// that is returned indicates whether parsing was successful. The second flag
4001 /// is true if the numeric part exists.
4002 static std::pair<bool, bool> parsePhysicalReg(StringRef C, StringRef &Prefix,
4003 unsigned long long &Reg) {
4004 if (C.front() != '{' || C.back() != '}')
4005 return std::make_pair(false, false);
4007 // Search for the first numeric character.
4008 StringRef::const_iterator I, B = C.begin() + 1, E = C.end() - 1;
4009 I = std::find_if(B, E, isdigit);
4011 Prefix = StringRef(B, I - B);
4013 // The second flag is set to false if no numeric characters were found.
4015 return std::make_pair(true, false);
4017 // Parse the numeric characters.
4018 return std::make_pair(!getAsUnsignedInteger(StringRef(I, E - I), 10, Reg),
4022 EVT MipsTargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
4023 ISD::NodeType) const {
4024 bool Cond = !Subtarget.isABI_O32() && VT.getSizeInBits() == 32;
4025 EVT MinVT = getRegisterType(Context, Cond ? MVT::i64 : MVT::i32);
4026 return VT.bitsLT(MinVT) ? MinVT : VT;
4029 std::pair<unsigned, const TargetRegisterClass *> MipsTargetLowering::
4030 parseRegForInlineAsmConstraint(StringRef C, MVT VT) const {
4031 const TargetRegisterInfo *TRI =
4032 Subtarget.getRegisterInfo();
4033 const TargetRegisterClass *RC;
4035 unsigned long long Reg;
4037 std::pair<bool, bool> R = parsePhysicalReg(C, Prefix, Reg);
4040 return std::make_pair(0U, nullptr);
4042 if ((Prefix == "hi" || Prefix == "lo")) { // Parse hi/lo.
4043 // No numeric characters follow "hi" or "lo".
4045 return std::make_pair(0U, nullptr);
4047 RC = TRI->getRegClass(Prefix == "hi" ?
4048 Mips::HI32RegClassID : Mips::LO32RegClassID);
4049 return std::make_pair(*(RC->begin()), RC);
4050 } else if (Prefix.startswith("$msa")) {
4051 // Parse $msa(ir|csr|access|save|modify|request|map|unmap)
4053 // No numeric characters follow the name.
4055 return std::make_pair(0U, nullptr);
4057 Reg = StringSwitch<unsigned long long>(Prefix)
4058 .Case("$msair", Mips::MSAIR)
4059 .Case("$msacsr", Mips::MSACSR)
4060 .Case("$msaaccess", Mips::MSAAccess)
4061 .Case("$msasave", Mips::MSASave)
4062 .Case("$msamodify", Mips::MSAModify)
4063 .Case("$msarequest", Mips::MSARequest)
4064 .Case("$msamap", Mips::MSAMap)
4065 .Case("$msaunmap", Mips::MSAUnmap)
4069 return std::make_pair(0U, nullptr);
4071 RC = TRI->getRegClass(Mips::MSACtrlRegClassID);
4072 return std::make_pair(Reg, RC);
4076 return std::make_pair(0U, nullptr);
4078 if (Prefix == "$f") { // Parse $f0-$f31.
4079 // If the size of FP registers is 64-bit or Reg is an even number, select
4080 // the 64-bit register class. Otherwise, select the 32-bit register class.
4081 if (VT == MVT::Other)
4082 VT = (Subtarget.isFP64bit() || !(Reg % 2)) ? MVT::f64 : MVT::f32;
4084 RC = getRegClassFor(VT);
4086 if (RC == &Mips::AFGR64RegClass) {
4087 assert(Reg % 2 == 0);
4090 } else if (Prefix == "$fcc") // Parse $fcc0-$fcc7.
4091 RC = TRI->getRegClass(Mips::FCCRegClassID);
4092 else if (Prefix == "$w") { // Parse $w0-$w31.
4093 RC = getRegClassFor((VT == MVT::Other) ? MVT::v16i8 : VT);
4094 } else { // Parse $0-$31.
4095 assert(Prefix == "$");
4096 RC = getRegClassFor((VT == MVT::Other) ? MVT::i32 : VT);
4099 assert(Reg < RC->getNumRegs());
4100 return std::make_pair(*(RC->begin() + Reg), RC);
4103 /// Given a register class constraint, like 'r', if this corresponds directly
4104 /// to an LLVM register class, return a register of 0 and the register class
4106 std::pair<unsigned, const TargetRegisterClass *>
4107 MipsTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
4108 StringRef Constraint,
4110 if (Constraint.size() == 1) {
4111 switch (Constraint[0]) {
4112 case 'd': // Address register. Same as 'r' unless generating MIPS16 code.
4113 case 'y': // Same as 'r'. Exists for compatibility.
4115 if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
4116 if (Subtarget.inMips16Mode())
4117 return std::make_pair(0U, &Mips::CPU16RegsRegClass);
4118 return std::make_pair(0U, &Mips::GPR32RegClass);
4120 if (VT == MVT::i64 && !Subtarget.isGP64bit())
4121 return std::make_pair(0U, &Mips::GPR32RegClass);
4122 if (VT == MVT::i64 && Subtarget.isGP64bit())
4123 return std::make_pair(0U, &Mips::GPR64RegClass);
4124 // This will generate an error message
4125 return std::make_pair(0U, nullptr);
4126 case 'f': // FPU or MSA register
4127 if (VT == MVT::v16i8)
4128 return std::make_pair(0U, &Mips::MSA128BRegClass);
4129 else if (VT == MVT::v8i16 || VT == MVT::v8f16)
4130 return std::make_pair(0U, &Mips::MSA128HRegClass);
4131 else if (VT == MVT::v4i32 || VT == MVT::v4f32)
4132 return std::make_pair(0U, &Mips::MSA128WRegClass);
4133 else if (VT == MVT::v2i64 || VT == MVT::v2f64)
4134 return std::make_pair(0U, &Mips::MSA128DRegClass);
4135 else if (VT == MVT::f32)
4136 return std::make_pair(0U, &Mips::FGR32RegClass);
4137 else if ((VT == MVT::f64) && (!Subtarget.isSingleFloat())) {
4138 if (Subtarget.isFP64bit())
4139 return std::make_pair(0U, &Mips::FGR64RegClass);
4140 return std::make_pair(0U, &Mips::AFGR64RegClass);
4143 case 'c': // register suitable for indirect jump
4145 return std::make_pair((unsigned)Mips::T9, &Mips::GPR32RegClass);
4147 return std::make_pair((unsigned)Mips::T9_64, &Mips::GPR64RegClass);
4148 // This will generate an error message
4149 return std::make_pair(0U, nullptr);
4150 case 'l': // use the `lo` register to store values
4151 // that are no bigger than a word
4152 if (VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8)
4153 return std::make_pair((unsigned)Mips::LO0, &Mips::LO32RegClass);
4154 return std::make_pair((unsigned)Mips::LO0_64, &Mips::LO64RegClass);
4155 case 'x': // use the concatenated `hi` and `lo` registers
4156 // to store doubleword values
4157 // Fixme: Not triggering the use of both hi and low
4158 // This will generate an error message
4159 return std::make_pair(0U, nullptr);
4163 if (!Constraint.empty()) {
4164 std::pair<unsigned, const TargetRegisterClass *> R;
4165 R = parseRegForInlineAsmConstraint(Constraint, VT);
4171 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
4174 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
4175 /// vector. If it is invalid, don't add anything to Ops.
4176 void MipsTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
4177 std::string &Constraint,
4178 std::vector<SDValue>&Ops,
4179 SelectionDAG &DAG) const {
4183 // Only support length 1 constraints for now.
4184 if (Constraint.length() > 1) return;
4186 char ConstraintLetter = Constraint[0];
4187 switch (ConstraintLetter) {
4188 default: break; // This will fall through to the generic implementation
4189 case 'I': // Signed 16 bit constant
4190 // If this fails, the parent routine will give an error
4191 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4192 EVT Type = Op.getValueType();
4193 int64_t Val = C->getSExtValue();
4194 if (isInt<16>(Val)) {
4195 Result = DAG.getTargetConstant(Val, DL, Type);
4200 case 'J': // integer zero
4201 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4202 EVT Type = Op.getValueType();
4203 int64_t Val = C->getZExtValue();
4205 Result = DAG.getTargetConstant(0, DL, Type);
4210 case 'K': // unsigned 16 bit immediate
4211 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4212 EVT Type = Op.getValueType();
4213 uint64_t Val = (uint64_t)C->getZExtValue();
4214 if (isUInt<16>(Val)) {
4215 Result = DAG.getTargetConstant(Val, DL, Type);
4220 case 'L': // signed 32 bit immediate where lower 16 bits are 0
4221 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4222 EVT Type = Op.getValueType();
4223 int64_t Val = C->getSExtValue();
4224 if ((isInt<32>(Val)) && ((Val & 0xffff) == 0)){
4225 Result = DAG.getTargetConstant(Val, DL, Type);
4230 case 'N': // immediate in the range of -65535 to -1 (inclusive)
4231 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4232 EVT Type = Op.getValueType();
4233 int64_t Val = C->getSExtValue();
4234 if ((Val >= -65535) && (Val <= -1)) {
4235 Result = DAG.getTargetConstant(Val, DL, Type);
4240 case 'O': // signed 15 bit immediate
4241 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4242 EVT Type = Op.getValueType();
4243 int64_t Val = C->getSExtValue();
4244 if ((isInt<15>(Val))) {
4245 Result = DAG.getTargetConstant(Val, DL, Type);
4250 case 'P': // immediate in the range of 1 to 65535 (inclusive)
4251 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
4252 EVT Type = Op.getValueType();
4253 int64_t Val = C->getSExtValue();
4254 if ((Val <= 65535) && (Val >= 1)) {
4255 Result = DAG.getTargetConstant(Val, DL, Type);
4262 if (Result.getNode()) {
4263 Ops.push_back(Result);
4267 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
4270 bool MipsTargetLowering::isLegalAddressingMode(const DataLayout &DL,
4271 const AddrMode &AM, Type *Ty,
4273 Instruction *I) const {
4274 // No global is ever allowed as a base.
4279 case 0: // "r+i" or just "i", depending on HasBaseReg.
4282 if (!AM.HasBaseReg) // allow "r+i".
4284 return false; // disallow "r+r" or "r+r+i".
4293 MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
4294 // The Mips target isn't yet aware of offsets.
4298 EVT MipsTargetLowering::getOptimalMemOpType(
4299 const MemOp &Op, const AttributeList &FuncAttributes) const {
4300 if (Subtarget.hasMips64())
4306 bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
4307 bool ForCodeSize) const {
4308 if (VT != MVT::f32 && VT != MVT::f64)
4310 if (Imm.isNegZero())
4312 return Imm.isZero();
4315 unsigned MipsTargetLowering::getJumpTableEncoding() const {
4317 // FIXME: For space reasons this should be: EK_GPRel32BlockAddress.
4318 if (ABI.IsN64() && isPositionIndependent())
4319 return MachineJumpTableInfo::EK_GPRel64BlockAddress;
4321 return TargetLowering::getJumpTableEncoding();
4324 bool MipsTargetLowering::useSoftFloat() const {
4325 return Subtarget.useSoftFloat();
4328 void MipsTargetLowering::copyByValRegs(
4329 SDValue Chain, const SDLoc &DL, std::vector<SDValue> &OutChains,
4330 SelectionDAG &DAG, const ISD::ArgFlagsTy &Flags,
4331 SmallVectorImpl<SDValue> &InVals, const Argument *FuncArg,
4332 unsigned FirstReg, unsigned LastReg, const CCValAssign &VA,
4333 MipsCCState &State) const {
4334 MachineFunction &MF = DAG.getMachineFunction();
4335 MachineFrameInfo &MFI = MF.getFrameInfo();
4336 unsigned GPRSizeInBytes = Subtarget.getGPRSizeInBytes();
4337 unsigned NumRegs = LastReg - FirstReg;
4338 unsigned RegAreaSize = NumRegs * GPRSizeInBytes;
4339 unsigned FrameObjSize = std::max(Flags.getByValSize(), RegAreaSize);
4341 ArrayRef<MCPhysReg> ByValArgRegs = ABI.GetByValArgRegs();
4345 (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
4346 (int)((ByValArgRegs.size() - FirstReg) * GPRSizeInBytes);
4348 FrameObjOffset = VA.getLocMemOffset();
4350 // Create frame object.
4351 EVT PtrTy = getPointerTy(DAG.getDataLayout());
4352 // Make the fixed object stored to mutable so that the load instructions
4353 // referencing it have their memory dependencies added.
4354 // Set the frame object as isAliased which clears the underlying objects
4355 // vector in ScheduleDAGInstrs::buildSchedGraph() resulting in addition of all
4356 // stores as dependencies for loads referencing this fixed object.
4357 int FI = MFI.CreateFixedObject(FrameObjSize, FrameObjOffset, false, true);
4358 SDValue FIN = DAG.getFrameIndex(FI, PtrTy);
4359 InVals.push_back(FIN);
4364 // Copy arg registers.
4365 MVT RegTy = MVT::getIntegerVT(GPRSizeInBytes * 8);
4366 const TargetRegisterClass *RC = getRegClassFor(RegTy);
4368 for (unsigned I = 0; I < NumRegs; ++I) {
4369 unsigned ArgReg = ByValArgRegs[FirstReg + I];
4370 unsigned VReg = addLiveIn(MF, ArgReg, RC);
4371 unsigned Offset = I * GPRSizeInBytes;
4372 SDValue StorePtr = DAG.getNode(ISD::ADD, DL, PtrTy, FIN,
4373 DAG.getConstant(Offset, DL, PtrTy));
4374 SDValue Store = DAG.getStore(Chain, DL, DAG.getRegister(VReg, RegTy),
4375 StorePtr, MachinePointerInfo(FuncArg, Offset));
4376 OutChains.push_back(Store);
4380 // Copy byVal arg to registers and stack.
4381 void MipsTargetLowering::passByValArg(
4382 SDValue Chain, const SDLoc &DL,
4383 std::deque<std::pair<unsigned, SDValue>> &RegsToPass,
4384 SmallVectorImpl<SDValue> &MemOpChains, SDValue StackPtr,
4385 MachineFrameInfo &MFI, SelectionDAG &DAG, SDValue Arg, unsigned FirstReg,
4386 unsigned LastReg, const ISD::ArgFlagsTy &Flags, bool isLittle,
4387 const CCValAssign &VA) const {
4388 unsigned ByValSizeInBytes = Flags.getByValSize();
4389 unsigned OffsetInBytes = 0; // From beginning of struct
4390 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4392 std::min(Flags.getNonZeroByValAlign(), Align(RegSizeInBytes));
4393 EVT PtrTy = getPointerTy(DAG.getDataLayout()),
4394 RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
4395 unsigned NumRegs = LastReg - FirstReg;
4398 ArrayRef<MCPhysReg> ArgRegs = ABI.GetByValArgRegs();
4399 bool LeftoverBytes = (NumRegs * RegSizeInBytes > ByValSizeInBytes);
4402 // Copy words to registers.
4403 for (; I < NumRegs - LeftoverBytes; ++I, OffsetInBytes += RegSizeInBytes) {
4404 SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
4405 DAG.getConstant(OffsetInBytes, DL, PtrTy));
4406 SDValue LoadVal = DAG.getLoad(RegTy, DL, Chain, LoadPtr,
4407 MachinePointerInfo(), Alignment.value());
4408 MemOpChains.push_back(LoadVal.getValue(1));
4409 unsigned ArgReg = ArgRegs[FirstReg + I];
4410 RegsToPass.push_back(std::make_pair(ArgReg, LoadVal));
4413 // Return if the struct has been fully copied.
4414 if (ByValSizeInBytes == OffsetInBytes)
4417 // Copy the remainder of the byval argument with sub-word loads and shifts.
4418 if (LeftoverBytes) {
4421 for (unsigned LoadSizeInBytes = RegSizeInBytes / 2, TotalBytesLoaded = 0;
4422 OffsetInBytes < ByValSizeInBytes; LoadSizeInBytes /= 2) {
4423 unsigned RemainingSizeInBytes = ByValSizeInBytes - OffsetInBytes;
4425 if (RemainingSizeInBytes < LoadSizeInBytes)
4429 SDValue LoadPtr = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
4430 DAG.getConstant(OffsetInBytes, DL,
4432 SDValue LoadVal = DAG.getExtLoad(
4433 ISD::ZEXTLOAD, DL, RegTy, Chain, LoadPtr, MachinePointerInfo(),
4434 MVT::getIntegerVT(LoadSizeInBytes * 8), Alignment.value());
4435 MemOpChains.push_back(LoadVal.getValue(1));
4437 // Shift the loaded value.
4441 Shamt = TotalBytesLoaded * 8;
4443 Shamt = (RegSizeInBytes - (TotalBytesLoaded + LoadSizeInBytes)) * 8;
4445 SDValue Shift = DAG.getNode(ISD::SHL, DL, RegTy, LoadVal,
4446 DAG.getConstant(Shamt, DL, MVT::i32));
4449 Val = DAG.getNode(ISD::OR, DL, RegTy, Val, Shift);
4453 OffsetInBytes += LoadSizeInBytes;
4454 TotalBytesLoaded += LoadSizeInBytes;
4455 Alignment = std::min(Alignment, Align(LoadSizeInBytes));
4458 unsigned ArgReg = ArgRegs[FirstReg + I];
4459 RegsToPass.push_back(std::make_pair(ArgReg, Val));
4464 // Copy remainder of byval arg to it with memcpy.
4465 unsigned MemCpySize = ByValSizeInBytes - OffsetInBytes;
4466 SDValue Src = DAG.getNode(ISD::ADD, DL, PtrTy, Arg,
4467 DAG.getConstant(OffsetInBytes, DL, PtrTy));
4468 SDValue Dst = DAG.getNode(ISD::ADD, DL, PtrTy, StackPtr,
4469 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
4470 Chain = DAG.getMemcpy(
4471 Chain, DL, Dst, Src, DAG.getConstant(MemCpySize, DL, PtrTy),
4472 Align(Alignment), /*isVolatile=*/false, /*AlwaysInline=*/false,
4473 /*isTailCall=*/false, MachinePointerInfo(), MachinePointerInfo());
4474 MemOpChains.push_back(Chain);
4477 void MipsTargetLowering::writeVarArgRegs(std::vector<SDValue> &OutChains,
4478 SDValue Chain, const SDLoc &DL,
4480 CCState &State) const {
4481 ArrayRef<MCPhysReg> ArgRegs = ABI.GetVarArgRegs();
4482 unsigned Idx = State.getFirstUnallocated(ArgRegs);
4483 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4484 MVT RegTy = MVT::getIntegerVT(RegSizeInBytes * 8);
4485 const TargetRegisterClass *RC = getRegClassFor(RegTy);
4486 MachineFunction &MF = DAG.getMachineFunction();
4487 MachineFrameInfo &MFI = MF.getFrameInfo();
4488 MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
4490 // Offset of the first variable argument from stack pointer.
4493 if (ArgRegs.size() == Idx)
4494 VaArgOffset = alignTo(State.getNextStackOffset(), RegSizeInBytes);
4497 (int)ABI.GetCalleeAllocdArgSizeInBytes(State.getCallingConv()) -
4498 (int)(RegSizeInBytes * (ArgRegs.size() - Idx));
4501 // Record the frame index of the first variable argument
4502 // which is a value necessary to VASTART.
4503 int FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
4504 MipsFI->setVarArgsFrameIndex(FI);
4506 // Copy the integer registers that have not been used for argument passing
4507 // to the argument register save area. For O32, the save area is allocated
4508 // in the caller's stack frame, while for N32/64, it is allocated in the
4509 // callee's stack frame.
4510 for (unsigned I = Idx; I < ArgRegs.size();
4511 ++I, VaArgOffset += RegSizeInBytes) {
4512 unsigned Reg = addLiveIn(MF, ArgRegs[I], RC);
4513 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegTy);
4514 FI = MFI.CreateFixedObject(RegSizeInBytes, VaArgOffset, true);
4515 SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4517 DAG.getStore(Chain, DL, ArgValue, PtrOff, MachinePointerInfo());
4518 cast<StoreSDNode>(Store.getNode())->getMemOperand()->setValue(
4520 OutChains.push_back(Store);
4524 void MipsTargetLowering::HandleByVal(CCState *State, unsigned &Size,
4525 Align Alignment) const {
4526 const TargetFrameLowering *TFL = Subtarget.getFrameLowering();
4528 assert(Size && "Byval argument's size shouldn't be 0.");
4530 Alignment = std::min(Alignment, TFL->getStackAlign());
4532 unsigned FirstReg = 0;
4533 unsigned NumRegs = 0;
4535 if (State->getCallingConv() != CallingConv::Fast) {
4536 unsigned RegSizeInBytes = Subtarget.getGPRSizeInBytes();
4537 ArrayRef<MCPhysReg> IntArgRegs = ABI.GetByValArgRegs();
4538 // FIXME: The O32 case actually describes no shadow registers.
4539 const MCPhysReg *ShadowRegs =
4540 ABI.IsO32() ? IntArgRegs.data() : Mips64DPRegs;
4542 // We used to check the size as well but we can't do that anymore since
4543 // CCState::HandleByVal() rounds up the size after calling this function.
4545 Alignment >= Align(RegSizeInBytes) &&
4546 "Byval argument's alignment should be a multiple of RegSizeInBytes.");
4548 FirstReg = State->getFirstUnallocated(IntArgRegs);
4550 // If Alignment > RegSizeInBytes, the first arg register must be even.
4551 // FIXME: This condition happens to do the right thing but it's not the
4552 // right way to test it. We want to check that the stack frame offset
4553 // of the register is aligned.
4554 if ((Alignment > RegSizeInBytes) && (FirstReg % 2)) {
4555 State->AllocateReg(IntArgRegs[FirstReg], ShadowRegs[FirstReg]);
4559 // Mark the registers allocated.
4560 Size = alignTo(Size, RegSizeInBytes);
4561 for (unsigned I = FirstReg; Size > 0 && (I < IntArgRegs.size());
4562 Size -= RegSizeInBytes, ++I, ++NumRegs)
4563 State->AllocateReg(IntArgRegs[I], ShadowRegs[I]);
4566 State->addInRegsParamInfo(FirstReg, FirstReg + NumRegs);
4569 MachineBasicBlock *MipsTargetLowering::emitPseudoSELECT(MachineInstr &MI,
4570 MachineBasicBlock *BB,
4572 unsigned Opc) const {
4573 assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
4574 "Subtarget already supports SELECT nodes with the use of"
4575 "conditional-move instructions.");
4577 const TargetInstrInfo *TII =
4578 Subtarget.getInstrInfo();
4579 DebugLoc DL = MI.getDebugLoc();
4581 // To "insert" a SELECT instruction, we actually have to insert the
4582 // diamond control-flow pattern. The incoming instruction knows the
4583 // destination vreg to set, the condition code register to branch on, the
4584 // true/false values to select between, and a branch opcode to use.
4585 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4586 MachineFunction::iterator It = ++BB->getIterator();
4592 // bNE r1, r0, copy1MBB
4593 // fallthrough --> copy0MBB
4594 MachineBasicBlock *thisMBB = BB;
4595 MachineFunction *F = BB->getParent();
4596 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
4597 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
4598 F->insert(It, copy0MBB);
4599 F->insert(It, sinkMBB);
4601 // Transfer the remainder of BB and its successor edges to sinkMBB.
4602 sinkMBB->splice(sinkMBB->begin(), BB,
4603 std::next(MachineBasicBlock::iterator(MI)), BB->end());
4604 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
4606 // Next, add the true and fallthrough blocks as its successors.
4607 BB->addSuccessor(copy0MBB);
4608 BB->addSuccessor(sinkMBB);
4611 // bc1[tf] cc, sinkMBB
4612 BuildMI(BB, DL, TII->get(Opc))
4613 .addReg(MI.getOperand(1).getReg())
4616 // bne rs, $0, sinkMBB
4617 BuildMI(BB, DL, TII->get(Opc))
4618 .addReg(MI.getOperand(1).getReg())
4624 // %FalseValue = ...
4625 // # fallthrough to sinkMBB
4628 // Update machine-CFG edges
4629 BB->addSuccessor(sinkMBB);
4632 // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
4636 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
4637 .addReg(MI.getOperand(2).getReg())
4639 .addReg(MI.getOperand(3).getReg())
4642 MI.eraseFromParent(); // The pseudo instruction is gone now.
4648 MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI,
4649 MachineBasicBlock *BB) const {
4650 assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) &&
4651 "Subtarget already supports SELECT nodes with the use of"
4652 "conditional-move instructions.");
4654 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4655 DebugLoc DL = MI.getDebugLoc();
4657 // D_SELECT substitutes two SELECT nodes that goes one after another and
4658 // have the same condition operand. On machines which don't have
4659 // conditional-move instruction, it reduces unnecessary branch instructions
4660 // which are result of using two diamond patterns that are result of two
4661 // SELECT pseudo instructions.
4662 const BasicBlock *LLVM_BB = BB->getBasicBlock();
4663 MachineFunction::iterator It = ++BB->getIterator();
4669 // bNE r1, r0, copy1MBB
4670 // fallthrough --> copy0MBB
4671 MachineBasicBlock *thisMBB = BB;
4672 MachineFunction *F = BB->getParent();
4673 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
4674 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
4675 F->insert(It, copy0MBB);
4676 F->insert(It, sinkMBB);
4678 // Transfer the remainder of BB and its successor edges to sinkMBB.
4679 sinkMBB->splice(sinkMBB->begin(), BB,
4680 std::next(MachineBasicBlock::iterator(MI)), BB->end());
4681 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
4683 // Next, add the true and fallthrough blocks as its successors.
4684 BB->addSuccessor(copy0MBB);
4685 BB->addSuccessor(sinkMBB);
4687 // bne rs, $0, sinkMBB
4688 BuildMI(BB, DL, TII->get(Mips::BNE))
4689 .addReg(MI.getOperand(2).getReg())
4694 // %FalseValue = ...
4695 // # fallthrough to sinkMBB
4698 // Update machine-CFG edges
4699 BB->addSuccessor(sinkMBB);
4702 // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ]
4706 // Use two PHI nodes to select two reults
4707 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg())
4708 .addReg(MI.getOperand(3).getReg())
4710 .addReg(MI.getOperand(5).getReg())
4712 BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(1).getReg())
4713 .addReg(MI.getOperand(4).getReg())
4715 .addReg(MI.getOperand(6).getReg())
4718 MI.eraseFromParent(); // The pseudo instruction is gone now.
4723 // FIXME? Maybe this could be a TableGen attribute on some registers and
4724 // this table could be generated automatically from RegInfo.
4726 MipsTargetLowering::getRegisterByName(const char *RegName, LLT VT,
4727 const MachineFunction &MF) const {
4728 // Named registers is expected to be fairly rare. For now, just support $28
4729 // since the linux kernel uses it.
4730 if (Subtarget.isGP64bit()) {
4731 Register Reg = StringSwitch<Register>(RegName)
4732 .Case("$28", Mips::GP_64)
4733 .Default(Register());
4737 Register Reg = StringSwitch<Register>(RegName)
4738 .Case("$28", Mips::GP)
4739 .Default(Register());
4743 report_fatal_error("Invalid register name global variable");
4746 MachineBasicBlock *MipsTargetLowering::emitLDR_W(MachineInstr &MI,
4747 MachineBasicBlock *BB) const {
4748 MachineFunction *MF = BB->getParent();
4749 MachineRegisterInfo &MRI = MF->getRegInfo();
4750 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4751 const bool IsLittle = Subtarget.isLittle();
4752 DebugLoc DL = MI.getDebugLoc();
4754 Register Dest = MI.getOperand(0).getReg();
4755 Register Address = MI.getOperand(1).getReg();
4756 unsigned Imm = MI.getOperand(2).getImm();
4758 MachineBasicBlock::iterator I(MI);
4760 if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
4761 // Mips release 6 can load from adress that is not naturally-aligned.
4762 Register Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4763 BuildMI(*BB, I, DL, TII->get(Mips::LW))
4767 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Dest).addUse(Temp);
4769 // Mips release 5 needs to use instructions that can load from an unaligned
4771 Register LoadHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4772 Register LoadFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4773 Register Undef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4774 BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(Undef);
4775 BuildMI(*BB, I, DL, TII->get(Mips::LWR))
4778 .addImm(Imm + (IsLittle ? 0 : 3))
4780 BuildMI(*BB, I, DL, TII->get(Mips::LWL))
4783 .addImm(Imm + (IsLittle ? 3 : 0))
4785 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Dest).addUse(LoadFull);
4788 MI.eraseFromParent();
4792 MachineBasicBlock *MipsTargetLowering::emitLDR_D(MachineInstr &MI,
4793 MachineBasicBlock *BB) const {
4794 MachineFunction *MF = BB->getParent();
4795 MachineRegisterInfo &MRI = MF->getRegInfo();
4796 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4797 const bool IsLittle = Subtarget.isLittle();
4798 DebugLoc DL = MI.getDebugLoc();
4800 Register Dest = MI.getOperand(0).getReg();
4801 Register Address = MI.getOperand(1).getReg();
4802 unsigned Imm = MI.getOperand(2).getImm();
4804 MachineBasicBlock::iterator I(MI);
4806 if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
4807 // Mips release 6 can load from adress that is not naturally-aligned.
4808 if (Subtarget.isGP64bit()) {
4809 Register Temp = MRI.createVirtualRegister(&Mips::GPR64RegClass);
4810 BuildMI(*BB, I, DL, TII->get(Mips::LD))
4814 BuildMI(*BB, I, DL, TII->get(Mips::FILL_D)).addDef(Dest).addUse(Temp);
4816 Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
4817 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4818 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4819 BuildMI(*BB, I, DL, TII->get(Mips::LW))
4822 .addImm(Imm + (IsLittle ? 0 : 4));
4823 BuildMI(*BB, I, DL, TII->get(Mips::LW))
4826 .addImm(Imm + (IsLittle ? 4 : 0));
4827 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Wtemp).addUse(Lo);
4828 BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
4834 // Mips release 5 needs to use instructions that can load from an unaligned
4836 Register LoHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4837 Register LoFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4838 Register LoUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4839 Register HiHalf = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4840 Register HiFull = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4841 Register HiUndef = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4842 Register Wtemp = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
4843 BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(LoUndef);
4844 BuildMI(*BB, I, DL, TII->get(Mips::LWR))
4847 .addImm(Imm + (IsLittle ? 0 : 7))
4849 BuildMI(*BB, I, DL, TII->get(Mips::LWL))
4852 .addImm(Imm + (IsLittle ? 3 : 4))
4854 BuildMI(*BB, I, DL, TII->get(Mips::IMPLICIT_DEF)).addDef(HiUndef);
4855 BuildMI(*BB, I, DL, TII->get(Mips::LWR))
4858 .addImm(Imm + (IsLittle ? 4 : 3))
4860 BuildMI(*BB, I, DL, TII->get(Mips::LWL))
4863 .addImm(Imm + (IsLittle ? 7 : 0))
4865 BuildMI(*BB, I, DL, TII->get(Mips::FILL_W)).addDef(Wtemp).addUse(LoFull);
4866 BuildMI(*BB, I, DL, TII->get(Mips::INSERT_W), Dest)
4872 MI.eraseFromParent();
4876 MachineBasicBlock *MipsTargetLowering::emitSTR_W(MachineInstr &MI,
4877 MachineBasicBlock *BB) const {
4878 MachineFunction *MF = BB->getParent();
4879 MachineRegisterInfo &MRI = MF->getRegInfo();
4880 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4881 const bool IsLittle = Subtarget.isLittle();
4882 DebugLoc DL = MI.getDebugLoc();
4884 Register StoreVal = MI.getOperand(0).getReg();
4885 Register Address = MI.getOperand(1).getReg();
4886 unsigned Imm = MI.getOperand(2).getImm();
4888 MachineBasicBlock::iterator I(MI);
4890 if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
4891 // Mips release 6 can store to adress that is not naturally-aligned.
4892 Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
4893 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4894 BuildMI(*BB, I, DL, TII->get(Mips::COPY)).addDef(BitcastW).addUse(StoreVal);
4895 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
4899 BuildMI(*BB, I, DL, TII->get(Mips::SW))
4904 // Mips release 5 needs to use instructions that can store to an unaligned
4906 Register Tmp = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4907 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
4911 BuildMI(*BB, I, DL, TII->get(Mips::SWR))
4914 .addImm(Imm + (IsLittle ? 0 : 3));
4915 BuildMI(*BB, I, DL, TII->get(Mips::SWL))
4918 .addImm(Imm + (IsLittle ? 3 : 0));
4921 MI.eraseFromParent();
4926 MachineBasicBlock *MipsTargetLowering::emitSTR_D(MachineInstr &MI,
4927 MachineBasicBlock *BB) const {
4928 MachineFunction *MF = BB->getParent();
4929 MachineRegisterInfo &MRI = MF->getRegInfo();
4930 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
4931 const bool IsLittle = Subtarget.isLittle();
4932 DebugLoc DL = MI.getDebugLoc();
4934 Register StoreVal = MI.getOperand(0).getReg();
4935 Register Address = MI.getOperand(1).getReg();
4936 unsigned Imm = MI.getOperand(2).getImm();
4938 MachineBasicBlock::iterator I(MI);
4940 if (Subtarget.hasMips32r6() || Subtarget.hasMips64r6()) {
4941 // Mips release 6 can store to adress that is not naturally-aligned.
4942 if (Subtarget.isGP64bit()) {
4943 Register BitcastD = MRI.createVirtualRegister(&Mips::MSA128DRegClass);
4944 Register Lo = MRI.createVirtualRegister(&Mips::GPR64RegClass);
4945 BuildMI(*BB, I, DL, TII->get(Mips::COPY))
4948 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_D))
4952 BuildMI(*BB, I, DL, TII->get(Mips::SD))
4957 Register BitcastW = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
4958 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4959 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4960 BuildMI(*BB, I, DL, TII->get(Mips::COPY))
4963 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
4967 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
4971 BuildMI(*BB, I, DL, TII->get(Mips::SW))
4974 .addImm(Imm + (IsLittle ? 0 : 4));
4975 BuildMI(*BB, I, DL, TII->get(Mips::SW))
4978 .addImm(Imm + (IsLittle ? 4 : 0));
4981 // Mips release 5 needs to use instructions that can store to an unaligned
4983 Register Bitcast = MRI.createVirtualRegister(&Mips::MSA128WRegClass);
4984 Register Lo = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4985 Register Hi = MRI.createVirtualRegister(&Mips::GPR32RegClass);
4986 BuildMI(*BB, I, DL, TII->get(Mips::COPY)).addDef(Bitcast).addUse(StoreVal);
4987 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
4991 BuildMI(*BB, I, DL, TII->get(Mips::COPY_S_W))
4995 BuildMI(*BB, I, DL, TII->get(Mips::SWR))
4998 .addImm(Imm + (IsLittle ? 0 : 3));
4999 BuildMI(*BB, I, DL, TII->get(Mips::SWL))
5002 .addImm(Imm + (IsLittle ? 3 : 0));
5003 BuildMI(*BB, I, DL, TII->get(Mips::SWR))
5006 .addImm(Imm + (IsLittle ? 4 : 7));
5007 BuildMI(*BB, I, DL, TII->get(Mips::SWL))
5010 .addImm(Imm + (IsLittle ? 7 : 4));
5013 MI.eraseFromParent();