//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "RISCVISelLowering.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "Utils/RISCVMatInt.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());
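
  // X2 is the stack pointer (sp) in the RISC-V calling convention.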
  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  }

  if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, MVT::i32, Custom);
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);
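
  // Double-XLEN shifts are custom-lowered via the branchless expansions in
  // lowerShiftLeftParts/lowerShiftRightParts below.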
  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  if (!(Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp())) {
    setOperationAction(ISD::ROTL, XLenVT, Expand);
    setOperationAction(ISD::ROTR, XLenVT, Expand);
  }

  if (!Subtarget.hasStdExtZbp())
    setOperationAction(ISD::BSWAP, XLenVT, Expand);

  if (!Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::CTTZ, XLenVT, Expand);
    setOperationAction(ISD::CTLZ, XLenVT, Expand);
    setOperationAction(ISD::CTPOP, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp())
    setOperationAction(ISD::BITREVERSE, XLenVT, Legal);

  if (Subtarget.hasStdExtZbt()) {
    setOperationAction(ISD::FSHL, XLenVT, Legal);
    setOperationAction(ISD::FSHR, XLenVT, Legal);
  }

  ISD::CondCode FPCCToExtend[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE};

  ISD::NodeType FPOpToExtend[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW, ISD::FREM, ISD::FP16_TO_FP,
      ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    for (auto CC : FPCCToExtend)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExtend)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    for (auto CC : FPCCToExtend)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExtend)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit() &&
      !(Subtarget.hasStdExtD() || Subtarget.hasStdExtF())) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
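
  // With the A extension, atomics are supported natively up to XLEN bits and
  // sub-32-bit RMW/cmpxchg operations are expanded to the masked 32-bit forms
  // (the riscv_masked_* intrinsics handled in getTgtMemIntrinsic below).
  // Without it, every atomic operation becomes an __atomic_* libcall.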
  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  // Effectively disable jump table generation.
  setMinimumJumpTableEntries(INT_MAX);

  // Jumps are expensive, compared to logic.
  setJumpIsExpensive();

  // We can use any register for comparisons.
  setHasMultipleConditionRegisters();
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  return VT.changeVectorElementTypeToInteger();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32: {
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false; // disallow "2*r", "r+2*r" and similar.
  }

  return true;
}
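
// RISC-V add-immediate and compare-with-immediate instructions (ADDI, SLTI)
// encode a 12-bit signed immediate, hence the isInt<12> checks below.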
bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}
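
// RV64 keeps 32-bit values in registers in sign-extended form, so extending
// i32 to i64 with a sign extension is a single instruction (and often folds
// away), while a zero extension requires two shifts.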
bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
    return false;
  if (Imm.isNegZero())
    return false;
  return Imm.isZero();
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
  case ISD::BITCAST: {
    assert(Subtarget.is64Bit() && Subtarget.hasStdExtF() &&
           "Unexpected custom legalisation");
    SDLoc DL(Op);
    SDValue Op0 = Op.getOperand(0);
    if (Op.getValueType() != MVT::f32 || Op0.getValueType() != MVT::i32)
      return SDValue();
    SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
    SDValue FPConv = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
    return FPConv;
  }
  case ISD::INTRINSIC_WO_CHAIN:
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}

static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
}

static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
                                   Flags);
}

static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);
}

template <class NodeTy>
SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                     bool IsLocal) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());

  if (isPositionIndependent()) {
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    if (IsLocal)
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
      // %pcrel_lo(auipc)).
      return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);

    // Use PC-relative addressing to access the GOT for this symbol, then load
    // the address from the GOT. This generates the pattern (PseudoLA sym),
    // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
    return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
  }

  switch (getTargetMachine().getCodeModel()) {
  default:
    report_fatal_error("Unsupported code model for lowering");
  case CodeModel::Small: {
    // Generate a sequence for accessing addresses within the first 2 GiB of
    // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
    SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
    SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
    return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
  }
  case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2 GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
  }
  }
}

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  const GlobalValue *GV = N->getGlobal();
  bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  SDValue Addr = getAddr(N, DAG, IsLocal);

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));

  return Addr;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);

  return getAddr(N, DAG);
}

SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
                                              SelectionDAG &DAG,
                                              bool UseGOT) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  const GlobalValue *GV = N->getGlobal();
  MVT XLenVT = Subtarget.getXLenVT();

  if (UseGOT) {
    // Use PC-relative addressing to access the GOT for this TLS symbol, then
    // load the address from the GOT and add the thread pointer. This generates
    // the pattern (PseudoLA_TLS_IE sym), which expands to
    // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
    SDValue Load =
        SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);

    // Add the thread pointer.
    SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
    return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
  }

  // Generate a sequence for accessing the address relative to the thread
  // pointer, with the appropriate adjustment for the thread pointer offset.
  // This generates the pattern
  // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
  SDValue AddrHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
  SDValue AddrAdd =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
  SDValue AddrLo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);

  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
  SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
  SDValue MNAdd = SDValue(
      DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
      0);
  return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
}

SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
                                               SelectionDAG &DAG) const {
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
  const GlobalValue *GV = N->getGlobal();

  // Use a PC-relative addressing mode to access the global dynamic GOT
  // address. This generates the pattern (PseudoLA_TLS_GD sym), which expands
  // to (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
  SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
  SDValue Load =
      SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);

  // Prepare argument list to generate call.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Load;
  Entry.Ty = CallTy;
  Args.push_back(Entry);

  // Setup call to __tls_get_addr.
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::C, CallTy,
                    DAG.getExternalSymbol("__tls_get_addr", Ty),
                    std::move(Args));

  return LowerCallTo(CLI).first;
}

SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();
  MVT XLenVT = Subtarget.getXLenVT();

  TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());

  SDValue Addr;
  switch (Model) {
  case TLSModel::LocalExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
    break;
  case TLSModel::InitialExec:
    Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
    break;
  case TLSModel::LocalDynamic:
  case TLSModel::GeneralDynamic:
    Addr = getDynamicTLSAddr(N, DAG);
    break;
  }

  // In order to maximise the opportunity for common subexpression elimination,
  // emit a separate ADD node for the global address offset instead of folding
  // it in the global address node. Later peephole optimisations may choose to
  // fold it back in when profitable.
  if (Offset != 0)
    return DAG.getNode(ISD::ADD, DL, Ty, Addr,
                       DAG.getConstant(Offset, DL, XLenVT));

  return Addr;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
}
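
// For example, (select (setcc a, b, setlt), t, f) lowers to
// (RISCVISD::SELECT_CC a, b, setlt, t, f), which emitSelectPseudo later
// expands into a BLT-based branch-and-phi triangle.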
SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout()));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  Register FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
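  // Each iteration follows the frame-pointer chain: the caller's frame
  // pointer is spilled two XLEN-sized slots below the current frame pointer
  // (the return address occupies the slot directly below it).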
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
                              DAG.getIntPtrConstant(Offset, DL));
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}

SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  MVT XLenVT = Subtarget.getXLenVT();
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    int Off = -XLenInBytes;
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(Off, DL, VT);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
  Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
}

SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = Lo << Shamt
  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
  // else:
  //   Lo = 0
  //   Hi = Lo << (Shamt-XLEN)

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
  SDValue ShiftRightLo =
      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                  bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // SRA expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>s Shamt
  //   else:
  //     Lo = Hi >>s (Shamt-XLEN);
  //     Hi = Hi >>s (XLEN-1)
  //
  // SRL expansion:
  //   if Shamt-XLEN < 0: // Shamt < XLEN
  //     Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //     Hi = Hi >>u Shamt
  //   else:
  //     Lo = Hi >>u (Shamt-XLEN);
  //     Hi = 0;

  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
  SDValue ShiftLeftHi =
      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
  SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
  SDValue HiFalse =
      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    EVT PtrVT = getPointerTy(DAG.getDataLayout());
    return DAG.getRegister(RISCV::X4, PtrVT);
  }
  }
}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case ISD::SHL:
    return RISCVISD::SLLW;
  case ISD::SRA:
    return RISCVISD::SRAW;
  case ISD::SRL:
    return RISCVISD::SRLW;
  case ISD::SDIV:
    return RISCVISD::DIVW;
  case ISD::UDIV:
    return RISCVISD::DIVUW;
  case ISD::UREM:
    return RISCVISD::REMUW;
  }
}

// Converts the given 32-bit operation to a target-specific SelectionDAG node.
// Because i32 isn't a legal type for RV64, these operations would otherwise
// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
// instructions later on, because the fact that the operation was originally of
// type i32 is lost.
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
  // ReplaceNodeResults requires we maintain the same type for the return
  // value.
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

// Converts the given 32-bit operation to an i64 operation with a
// sign-extended result, reducing the number of sign-extension instructions
// required.
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
  SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
                               DAG.getValueType(MVT::i32));
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  SDLoc DL(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom type legalize this operation!");
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsStrict = N->isStrictFPOpcode();
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
    RTLIB::Libcall LC;
    if (N->getOpcode() == ISD::FP_TO_SINT ||
        N->getOpcode() == ISD::STRICT_FP_TO_SINT)
      LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
    else
      LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
    MakeLibCallOptions CallOptions;
    EVT OpVT = Op0.getValueType();
    CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
    SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
    SDValue Result;
    std::tie(Result, Chain) =
        makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
    Results.push_back(Result);
    if (IsStrict)
      Results.push_back(Chain);
    break;
  }
  case ISD::READCYCLECOUNTER: {
    assert(!Subtarget.is64Bit() &&
           "READCYCLECOUNTER only has custom type legalization on riscv32");

    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RCW =
        DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));

    Results.push_back(
        DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
    Results.push_back(RCW.getValue(2));
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::MUL:
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::UREM:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtM() && "Unexpected custom legalisation");
    if (N->getOperand(0).getOpcode() == ISD::Constant ||
        N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::BITCAST: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtF() && "Unexpected custom legalisation");
    SDValue Op0 = N->getOperand(0);
    if (Op0.getValueType() != MVT::f32)
      return;
    SDValue FPConv =
        DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
    break;
  }
  }
}

SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  switch (N->getOpcode()) {
  default:
    break;
  case RISCVISD::SplitF64: {
    SDValue Op0 = N->getOperand(0);
    // If the input to SplitF64 is just BuildPairF64 then the operation is
    // redundant. Instead, use BuildPairF64's operands directly.
    if (Op0->getOpcode() == RISCVISD::BuildPairF64)
      return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));

    SDLoc DL(N);

    // It's cheaper to materialise two 32-bit integers than to load a double
    // from the constant pool and transfer it to integer registers through the
    // stack.
    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
      APInt V = C->getValueAPF().bitcastToAPInt();
      SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
      SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
      return DCI.CombineTo(N, Lo, Hi);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewSplitF64 =
        DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
                    Op0.getOperand(0));
    SDValue Lo = NewSplitF64.getValue(0);
    SDValue Hi = NewSplitF64.getValue(1);
    APInt SignBit = APInt::getSignMask(32);
    if (Op0.getOpcode() == ISD::FNEG) {
      SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
                                  DAG.getConstant(SignBit, DL, MVT::i32));
      return DCI.CombineTo(N, Lo, NewHi);
    }
    assert(Op0.getOpcode() == ISD::FABS);
    SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
                                DAG.getConstant(~SignBit, DL, MVT::i32));
    return DCI.CombineTo(N, Lo, NewHi);
  }
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW: {
    // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
    APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
    if ((SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)))
      return SDValue();
    break;
  }
  case RISCVISD::FMV_X_ANYEXTW_RV64: {
    SDLoc DL(N);
    SDValue Op0 = N->getOperand(0);
    // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
    // conversion is unnecessary and can be replaced with an ANY_EXTEND
    // of the FMV_W_X_RV64 operand.
    if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
      SDValue AExtOp =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0.getOperand(0));
      return DCI.CombineTo(N, AExtOp);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
                                 Op0.getOperand(0));
    APInt SignBit = APInt::getSignMask(32).sext(64);
    if (Op0.getOpcode() == ISD::FNEG) {
      return DCI.CombineTo(N,
                           DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
                                       DAG.getConstant(SignBit, DL, MVT::i64)));
    }
    assert(Op0.getOpcode() == ISD::FABS);
    return DCI.CombineTo(N,
                         DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
                                     DAG.getConstant(~SignBit, DL, MVT::i64)));
  }
  }

  return SDValue();
}
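
// Illustrative example for the hook below: in (shl (add x, 2047), 3), c1 =
// 2047 fits an ADDI immediate but c1 << c2 = 16376 does not, so commuting is
// rejected and the cheaper constant materialisation is kept.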
bool RISCVTargetLowering::isDesirableToCommuteWithShift(
    const SDNode *N, CombineLevel Level) const {
  // The following folds are only desirable if `(OP _, c1 << c2)` can be
  // materialised in fewer instructions than `(OP _, c1)`:
  //
  //   (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  //   (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
  SDValue N0 = N->getOperand(0);
  EVT Ty = N0.getValueType();
  if (Ty.isScalarInteger() &&
      (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
    auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
    auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
    if (C1 && C2) {
      APInt C1Int = C1->getAPIntValue();
      APInt ShiftedC1Int = C1Int << C2->getAPIntValue();

      // We can materialise `c1 << c2` into an add immediate, so it's "free",
      // and the combine should happen, to potentially allow further combines
      // later.
      if (ShiftedC1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
        return true;

      // We can materialise `c1` in an add immediate, so it's "free", and the
      // combine should be prevented.
      if (C1Int.getMinSignedBits() <= 64 &&
          isLegalAddImmediate(C1Int.getSExtValue()))
        return false;

      // Neither constant will fit into an immediate, so find materialisation
      // costs.
      int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
                                              Subtarget.is64Bit());
      int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
          ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());

      // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
      // combine should be prevented.
      if (C1Cost < ShiftedC1Cost)
        return false;
    }
  }
  return true;
}

unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
    unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW:
  case RISCVISD::DIVW:
  case RISCVISD::DIVUW:
  case RISCVISD::REMUW:
    // TODO: As the result is sign-extended, this is conservatively correct. A
    // more precise answer could be calculated for SRAW depending on known
    // bits in the shift amount.
    return 33;
  }

  return 1;
}

static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
                                                  MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");

  // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
  // Should the count have wrapped while it was being read, we need to try
  // again:
  //
  // read:
  //   rdcycleh x3 # load high word of cycle
  //   rdcycle  x2 # load low word of cycle
  //   rdcycleh x4 # load high word of cycle
  //   bne x3, x4, read # check if high word reads match, otherwise try again

  MachineFunction &MF = *BB->getParent();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = ++BB->getIterator();

  MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MF.insert(It, LoopMBB);

  MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
  MF.insert(It, DoneMBB);

  // Transfer the remainder of BB and its successor edges to DoneMBB.
  DoneMBB->splice(DoneMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  DoneMBB->transferSuccessorsAndUpdatePHIs(BB);

  BB->addSuccessor(LoopMBB);

  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
  Register LoReg = MI.getOperand(0).getReg();
  Register HiReg = MI.getOperand(1).getReg();
  DebugLoc DL = MI.getDebugLoc();

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
      .addReg(RISCV::X0);
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
      .addReg(RISCV::X0);
  BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
      .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
      .addReg(RISCV::X0);

  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(HiReg)
      .addReg(ReadAgainReg)
      .addMBB(LoopMBB);

  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);

  MI.eraseFromParent();

  return DoneMBB;
}
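
// RV32D has no instruction that moves directly between a 64-bit FPR and a
// pair of 32-bit GPRs, so the SplitF64/BuildPairF64 pseudos below bounce the
// value through a stack slot.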
static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
                                             MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");

  MachineFunction &MF = *BB->getParent();
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  Register LoReg = MI.getOperand(0).getReg();
  Register HiReg = MI.getOperand(1).getReg();
  Register SrcReg = MI.getOperand(2).getReg();
  const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);

  TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
                          RI);
  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                              MachineMemOperand::MOLoad, 8, Align(8));
  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMO);
  BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
      .addFrameIndex(FI)
      .addImm(4)
      .addMemOperand(MMO);
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
                                                 MachineBasicBlock *BB) {
  assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
         "Unexpected instruction");

  MachineFunction &MF = *BB->getParent();
  DebugLoc DL = MI.getDebugLoc();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
  Register DstReg = MI.getOperand(0).getReg();
  Register LoReg = MI.getOperand(1).getReg();
  Register HiReg = MI.getOperand(2).getReg();
  const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
  int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);

  MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                              MachineMemOperand::MOStore, 8, Align(8));
  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
      .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
      .addFrameIndex(FI)
      .addImm(0)
      .addMemOperand(MMO);
  BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
      .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
      .addFrameIndex(FI)
      .addImm(4)
      .addMemOperand(MMO);
  TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
  MI.eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

static bool isSelectPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  default:
    return false;
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
    return true;
  }
}

static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
                                           MachineBasicBlock *BB) {
  // To "insert" Select_* instructions, we actually have to insert the triangle
  // control-flow pattern. The incoming instructions know the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and the condcode to use to select the appropriate branch.
  //
  // We produce the following control flow:
  //     HeadMBB
  //     |  \
  //     |  IfFalseMBB
  //     | /
  //    TailMBB
  //
  // When we find a sequence of selects we attempt to optimize their emission
  // by sharing the control flow. Currently we only handle cases where we have
  // multiple selects with the exact same condition (same LHS, RHS and CC).
  // The selects may be interleaved with other instructions if the other
  // instructions meet some requirements we deem safe:
  // - They are debug instructions. Otherwise,
  // - They do not have side-effects, do not access memory and their inputs do
  //   not depend on the results of the select pseudo-instructions.
  // The TrueV/FalseV operands of the selects cannot depend on the result of
  // previous selects in the sequence.
  // These conditions could be further relaxed. See the X86 target for a
  // related approach and more information.
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());

  SmallVector<MachineInstr *, 4> SelectDebugValues;
  SmallSet<Register, 4> SelectDests;
  SelectDests.insert(MI.getOperand(0).getReg());

  MachineInstr *LastSelectPseudo = &MI;

  for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
       SequenceMBBI != E; ++SequenceMBBI) {
    if (SequenceMBBI->isDebugInstr())
      continue;
    else if (isSelectPseudo(*SequenceMBBI)) {
      if (SequenceMBBI->getOperand(1).getReg() != LHS ||
          SequenceMBBI->getOperand(2).getReg() != RHS ||
          SequenceMBBI->getOperand(3).getImm() != CC ||
          SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
          SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
        break;
      LastSelectPseudo = &*SequenceMBBI;
      SequenceMBBI->collectDebugValues(SelectDebugValues);
      SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
    } else {
      if (SequenceMBBI->hasUnmodeledSideEffects() ||
          SequenceMBBI->mayLoadOrStore())
        break;
      if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
            return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
          }))
        break;
    }
  }

  const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction::iterator I = ++BB->getIterator();

  MachineBasicBlock *HeadMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);

  F->insert(I, IfFalseMBB);
  F->insert(I, TailMBB);

  // Transfer debug instructions associated with the selects to TailMBB.
  for (MachineInstr *DebugInstr : SelectDebugValues) {
    TailMBB->push_back(DebugInstr->removeFromParent());
  }

  // Move all instructions after the sequence to TailMBB.
  TailMBB->splice(TailMBB->end(), HeadMBB,
                  std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
  // Update machine-CFG edges by transferring all successors of the current
  // block to the new block which will contain the Phi nodes for the selects.
  TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
  // Set the successors for HeadMBB.
  HeadMBB->addSuccessor(IfFalseMBB);
  HeadMBB->addSuccessor(TailMBB);

  // Insert appropriate branch.
  unsigned Opcode = getBranchOpcodeForIntCondCode(CC);

  BuildMI(HeadMBB, DL, TII.get(Opcode))
      .addReg(LHS)
      .addReg(RHS)
      .addMBB(TailMBB);

  // IfFalseMBB just falls through to TailMBB.
  IfFalseMBB->addSuccessor(TailMBB);

  // Create PHIs for all of the select pseudo-instructions.
  auto SelectMBBI = MI.getIterator();
  auto SelectEnd = std::next(LastSelectPseudo->getIterator());
  auto InsertionPoint = TailMBB->begin();
  while (SelectMBBI != SelectEnd) {
    auto Next = std::next(SelectMBBI);
    if (isSelectPseudo(*SelectMBBI)) {
      // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
      BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
              TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
          .addReg(SelectMBBI->getOperand(4).getReg())
          .addMBB(HeadMBB)
          .addReg(SelectMBBI->getOperand(5).getReg())
          .addMBB(IfFalseMBB);
      SelectMBBI->eraseFromParent();
    }
    SelectMBBI = Next;
  }

  F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
  return TailMBB;
}

MachineBasicBlock *
RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
    llvm_unreachable("Unexpected instr type to insert");
  case RISCV::ReadCycleWide:
    assert(!Subtarget.is64Bit() &&
           "ReadCycleWide is only to be used on riscv32");
    return emitReadCycleWidePseudo(MI, BB);
  case RISCV::Select_GPR_Using_CC_GPR:
  case RISCV::Select_FPR32_Using_CC_GPR:
  case RISCV::Select_FPR64_Using_CC_GPR:
    return emitSelectPseudo(MI, BB);
  case RISCV::BuildPairF64Pseudo:
    return emitBuildPairF64Pseudo(MI, BB);
  case RISCV::SplitF64Pseudo:
    return emitSplitF64Pseudo(MI, BB);
  }
}

// Calling Convention Implementation.
// The expectations for frontend ABI lowering vary from target to target.
// Ideally, an LLVM frontend would be able to avoid worrying about many ABI
// details, but this is a longer term goal. For now, we simply try to keep the
// role of the frontend as simple and well-defined as possible. The rules can
// be summarised as:
// * Never split up large scalar arguments. We handle them here.
// * If a hardfloat calling convention is being used, and the struct may be
//   passed in a pair of registers (fp+fp, int+fp), and both registers are
//   available, then pass as two separate arguments. If either the GPRs or FPRs
//   are exhausted, then pass according to the rule below.
// * If a struct could never be passed in registers or directly in a stack
//   slot (as it is larger than 2*XLEN and the floating point rules don't
//   apply), then pass it using a pointer with the byval attribute.
// * If a struct is less than 2*XLEN, then coerce to either a two-element
//   word-sized array or a 2*XLEN scalar (depending on alignment).
// * The frontend can determine whether a struct is returned by reference or
//   not based on its size and fields. If it will be returned by reference, the
//   frontend must modify the prototype so a pointer with the sret annotation
//   is passed as the first argument. This is not necessary for large scalar
//   returns.
// * Struct return values and varargs should be coerced to structs containing
//   register-size fields in the same situations they would be for fixed
//   arguments.

static const MCPhysReg ArgGPRs[] = {
  RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
  RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
};
static const MCPhysReg ArgFPR32s[] = {
  RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
  RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
};
static const MCPhysReg ArgFPR64s[] = {
  RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
  RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
};

// Pass a 2*XLEN argument that has been split into two XLEN values through
// registers or the stack as necessary.
static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
                                ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
                                MVT ValVT2, MVT LocVT2,
                                ISD::ArgFlagsTy ArgFlags2) {
  unsigned XLenInBytes = XLen / 8;
  if (Register Reg = State.AllocateReg(ArgGPRs)) {
    // At least one half can be passed via register.
    State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
                                     VA1.getLocVT(), CCValAssign::Full));
  } else {
    // Both halves must be passed on the stack, with proper alignment.
    Align StackAlign =
        std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
    State.addLoc(
        CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
                            State.AllocateStack(XLenInBytes, StackAlign),
                            VA1.getLocVT(), CCValAssign::Full));
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
    return false;
  }

  if (Register Reg = State.AllocateReg(ArgGPRs)) {
    // The second half can also be passed via register.
    State.addLoc(
        CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
  } else {
    // The second half is passed via the stack, without additional alignment.
    State.addLoc(CCValAssign::getMem(
        ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
        LocVT2, CCValAssign::Full));
  }

  return false;
}
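
// For example, an i64 argument on RV32: both halves go in GPRs if two
// argument registers remain, the value is split between the last GPR and the
// stack if only one remains, and both halves go on the stack otherwise.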
// Implements the RISC-V calling convention. Returns true upon failure.
static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
                     MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
                     ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
                     bool IsRet, Type *OrigTy) {
  unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
  assert(XLen == 32 || XLen == 64);
  MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;

  // Any return value split in to more than two values can't be returned
  // directly.
  if (IsRet && ValNo > 1)
    return true;

  // UseGPRForF32 if targeting one of the soft-float ABIs, if passing a
  // variadic argument, or if no F32 argument registers are available.
  bool UseGPRForF32 = true;
  // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
  // variadic argument, or if no F64 argument registers are available.
  bool UseGPRForF64 = true;

  switch (ABI) {
  default:
    llvm_unreachable("Unexpected ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_LP64:
    break;
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_LP64F:
    UseGPRForF32 = !IsFixed;
    break;
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64D:
    UseGPRForF32 = !IsFixed;
    UseGPRForF64 = !IsFixed;
    break;
  }

  if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s))
    UseGPRForF32 = true;
  if (State.getFirstUnallocated(ArgFPR64s) == array_lengthof(ArgFPR64s))
    UseGPRForF64 = true;

  // From this point on, rely on UseGPRForF32, UseGPRForF64 and similar local
  // variables rather than directly checking against the target ABI.

  if (UseGPRForF32 && ValVT == MVT::f32) {
    LocVT = XLenVT;
    LocInfo = CCValAssign::BCvt;
  } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
    LocVT = MVT::i64;
    LocInfo = CCValAssign::BCvt;
  }

  // If this is a variadic argument, the RISC-V calling convention requires
  // that it is assigned an 'even' or 'aligned' register if it has 8-byte
  // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
  // be used regardless of whether the original argument was split during
  // legalisation or not. The argument will not be passed by registers if the
  // original type is larger than 2*XLEN, so the register alignment rule does
  // not apply.
  unsigned TwoXLenInBytes = (2 * XLen) / 8;
  if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
      DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
    unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
    // Skip 'odd' register if necessary.
    if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
      State.AllocateReg(ArgGPRs);
  }

  SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
  SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
      State.getPendingArgFlags();

  assert(PendingLocs.size() == PendingArgFlags.size() &&
         "PendingLocs and PendingArgFlags out of sync");

  // Handle passing f64 on RV32D with a soft float ABI or when floating point
  // registers are exhausted.
  if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
    assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
           "Can't lower f64 if it is split");
    // Depending on available argument GPRs, f64 may be passed in a pair of
    // GPRs, split between a GPR and the stack, or passed completely on the
    // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
    // cases.
    Register Reg = State.AllocateReg(ArgGPRs);
    LocVT = MVT::i32;
    if (!Reg) {
      unsigned StackOffset = State.AllocateStack(8, Align(8));
      State.addLoc(
          CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
      return false;
    }
    if (!State.AllocateReg(ArgGPRs))
      State.AllocateStack(4, Align(4));
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

1643 // Split arguments might be passed indirectly, so keep track of the pending
1644 // values.
1645 if (ArgFlags.isSplit() || !PendingLocs.empty()) {
1646 LocVT = XLenVT;
1647 LocInfo = CCValAssign::Indirect;
1648 PendingLocs.push_back(
1649 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
1650 PendingArgFlags.push_back(ArgFlags);
1651 if (!ArgFlags.isSplitEnd()) {
1652 return false;
1653 }
1654 }
1656 // If the split argument only had two elements, it should be passed directly
1657 // in registers or on the stack.
1658 if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
1659 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
1660 // Apply the normal calling convention rules to the first half of the
1661 // split argument.
1662 CCValAssign VA = PendingLocs[0];
1663 ISD::ArgFlagsTy AF = PendingArgFlags[0];
1664 PendingLocs.clear();
1665 PendingArgFlags.clear();
1666 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
1667 ArgFlags);
1668 }
1670 // Allocate to a register if possible, or else a stack slot.
1671 Register Reg;
1672 if (ValVT == MVT::f32 && !UseGPRForF32)
1673 Reg = State.AllocateReg(ArgFPR32s, ArgFPR64s);
1674 else if (ValVT == MVT::f64 && !UseGPRForF64)
1675 Reg = State.AllocateReg(ArgFPR64s, ArgFPR32s);
1676 else
1677 Reg = State.AllocateReg(ArgGPRs);
1678 unsigned StackOffset =
1679 Reg ? 0 : State.AllocateStack(XLen / 8, Align(XLen / 8));
1681 // If we reach this point and PendingLocs is non-empty, we must be at the
1682 // end of a split argument that must be passed indirectly.
1683 if (!PendingLocs.empty()) {
1684 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
1685 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
1687 for (auto &It : PendingLocs) {
1688 if (Reg)
1689 It.convertToReg(Reg);
1690 else
1691 It.convertToMem(StackOffset);
1692 State.addLoc(It);
1693 }
1694 PendingLocs.clear();
1695 PendingArgFlags.clear();
1696 return false;
1697 }
1699 assert((!UseGPRForF32 || !UseGPRForF64 || LocVT == XLenVT) &&
1700 "Expected an XLenVT at this stage");
1702 if (Reg) {
1703 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1704 return false;
1705 }
1707 // When an f32 or f64 is passed on the stack, no bit-conversion is needed.
1708 if (ValVT == MVT::f32 || ValVT == MVT::f64) {
1709 LocVT = ValVT;
1710 LocInfo = CCValAssign::Full;
1711 }
1712 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
1713 return false;
1714 }
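// Illustrative summary (an assumed example, not from this file): on ILP32D,
// `void f(int a, double b, long long c)` is assigned by CC_RISCV as a -> a0,
// b -> fa0, and c (split during legalisation) -> the GPR pair a1/a2 via
// CC_RISCVAssign2XLen.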
1716 void RISCVTargetLowering::analyzeInputArgs(
1717 MachineFunction &MF, CCState &CCInfo,
1718 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
1719 unsigned NumArgs = Ins.size();
1720 FunctionType *FType = MF.getFunction().getFunctionType();
1722 for (unsigned i = 0; i != NumArgs; ++i) {
1723 MVT ArgVT = Ins[i].VT;
1724 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
1726 Type *ArgTy = nullptr;
1727 if (IsRet)
1728 ArgTy = FType->getReturnType();
1729 else if (Ins[i].isOrigArg())
1730 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
1732 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
1733 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
1734 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy)) {
1735 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
1736 << EVT(ArgVT).getEVTString() << '\n');
1737 llvm_unreachable(nullptr);
1738 }
1739 }
1740 }
1742 void RISCVTargetLowering::analyzeOutputArgs(
1743 MachineFunction &MF, CCState &CCInfo,
1744 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
1745 CallLoweringInfo *CLI) const {
1746 unsigned NumArgs = Outs.size();
1748 for (unsigned i = 0; i != NumArgs; i++) {
1749 MVT ArgVT = Outs[i].VT;
1750 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
1751 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
1753 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
1754 if (CC_RISCV(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
1755 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy)) {
1756 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
1757 << EVT(ArgVT).getEVTString() << "\n");
1758 llvm_unreachable(nullptr);
1759 }
1760 }
1761 }
1763 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
1764 // values.
1765 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
1766 const CCValAssign &VA, const SDLoc &DL) {
1767 switch (VA.getLocInfo()) {
1768 default:
1769 llvm_unreachable("Unexpected CCValAssign::LocInfo");
1770 case CCValAssign::Full:
1771 break;
1772 case CCValAssign::BCvt:
1773 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
1774 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
1775 break;
1776 }
1777 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
1778 break;
1779 }
1780 return Val;
1781 }
1783 // The caller is responsible for loading the full value if the argument is
1784 // passed with CCValAssign::Indirect.
1785 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
1786 const CCValAssign &VA, const SDLoc &DL) {
1787 MachineFunction &MF = DAG.getMachineFunction();
1788 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1789 EVT LocVT = VA.getLocVT();
1790 SDValue Val;
1791 const TargetRegisterClass *RC;
1793 switch (LocVT.getSimpleVT().SimpleTy) {
1794 default:
1795 llvm_unreachable("Unexpected register type");
1796 case MVT::i32:
1797 case MVT::i64:
1798 RC = &RISCV::GPRRegClass;
1799 break;
1800 case MVT::f32:
1801 RC = &RISCV::FPR32RegClass;
1802 break;
1803 case MVT::f64:
1804 RC = &RISCV::FPR64RegClass;
1805 break;
1806 }
1808 Register VReg = RegInfo.createVirtualRegister(RC);
1809 RegInfo.addLiveIn(VA.getLocReg(), VReg);
1810 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
1812 if (VA.getLocInfo() == CCValAssign::Indirect)
1813 return Val;
1815 return convertLocVTToValVT(DAG, Val, VA, DL);
1816 }
1818 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
1819 const CCValAssign &VA, const SDLoc &DL) {
1820 EVT LocVT = VA.getLocVT();
1822 switch (VA.getLocInfo()) {
1824 llvm_unreachable("Unexpected CCValAssign::LocInfo");
1825 case CCValAssign::Full:
1827 case CCValAssign::BCvt:
1828 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
1829 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
1832 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
1838 // The caller is responsible for loading the full value if the argument is
1839 // passed with CCValAssign::Indirect.
1840 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
1841 const CCValAssign &VA, const SDLoc &DL) {
1842 MachineFunction &MF = DAG.getMachineFunction();
1843 MachineFrameInfo &MFI = MF.getFrameInfo();
1844 EVT LocVT = VA.getLocVT();
1845 EVT ValVT = VA.getValVT();
1846 EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
1847 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
1848 VA.getLocMemOffset(), /*Immutable=*/true);
1849 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
1851 SDValue Val;
1852 ISD::LoadExtType ExtType;
1853 switch (VA.getLocInfo()) {
1854 default:
1855 llvm_unreachable("Unexpected CCValAssign::LocInfo");
1856 case CCValAssign::Full:
1857 case CCValAssign::Indirect:
1858 case CCValAssign::BCvt:
1859 ExtType = ISD::NON_EXTLOAD;
1860 break;
1861 }
1862 Val = DAG.getExtLoad(
1863 ExtType, DL, LocVT, Chain, FIN,
1864 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
1865 return Val;
1866 }
1868 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
1869 const CCValAssign &VA, const SDLoc &DL) {
1870 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
1871 "Unexpected VA");
1872 MachineFunction &MF = DAG.getMachineFunction();
1873 MachineFrameInfo &MFI = MF.getFrameInfo();
1874 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1876 if (VA.isMemLoc()) {
1877 // f64 is passed on the stack.
1878 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
1879 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1880 return DAG.getLoad(MVT::f64, DL, Chain, FIN,
1881 MachinePointerInfo::getFixedStack(MF, FI));
1884 assert(VA.isRegLoc() && "Expected register VA assignment");
1886 Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1887 RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
1888 SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
1889 SDValue Hi;
1890 if (VA.getLocReg() == RISCV::X17) {
1891 // Second half of f64 is passed on the stack.
1892 int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
1893 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
1894 Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
1895 MachinePointerInfo::getFixedStack(MF, FI));
1896 } else {
1897 // Second half of f64 is passed in another GPR.
1898 Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1899 RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
1900 Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
1901 }
1902 return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
1903 }
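// Example of the register/stack split handled above (assumed signature): for
// `void f(int, int, int, int, int, int, int, double)` on ILP32, the seven
// ints occupy a0-a6, the double's low half lands in a7 (X17) and its high
// half on the stack, and BuildPairF64 reassembles the two halves.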
1905 // FastCC yields less than a 1% performance improvement on some particular
1906 // benchmarks, but it may theoretically benefit other cases.
1907 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
1908 CCValAssign::LocInfo LocInfo,
1909 ISD::ArgFlagsTy ArgFlags, CCState &State) {
1911 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
1912 // X5 and X6 might be used for save-restore libcall.
1913 static const MCPhysReg GPRList[] = {
1914 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
1915 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28,
1916 RISCV::X29, RISCV::X30, RISCV::X31};
1917 if (unsigned Reg = State.AllocateReg(GPRList)) {
1918 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1919 return false;
1920 }
1921 }
1923 if (LocVT == MVT::f32) {
1924 static const MCPhysReg FPR32List[] = {
1925 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
1926 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F,
1927 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F,
1928 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
1929 if (unsigned Reg = State.AllocateReg(FPR32List)) {
1930 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1931 return false;
1932 }
1933 }
1935 if (LocVT == MVT::f64) {
1936 static const MCPhysReg FPR64List[] = {
1937 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
1938 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D,
1939 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D,
1940 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
1941 if (unsigned Reg = State.AllocateReg(FPR64List)) {
1942 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1943 return false;
1944 }
1945 }
1947 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
1948 unsigned Offset4 = State.AllocateStack(4, Align(4));
1949 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
1950 return false;
1951 }
1953 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
1954 unsigned Offset5 = State.AllocateStack(8, Align(8));
1955 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
1956 return false;
1957 }
1959 return true; // CC didn't match.
1960 }
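// For illustration (derived from the lists above): a FastCC call with ten
// i32 arguments places the first eight in a0-a7 and the next two in the
// temporaries t2 (X7) and t3 (X28) before falling back to the stack.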
1962 // Transform physical registers into virtual registers.
1963 SDValue RISCVTargetLowering::LowerFormalArguments(
1964 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
1965 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
1966 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
1968 switch (CallConv) {
1969 default:
1970 report_fatal_error("Unsupported calling convention");
1971 case CallingConv::C:
1972 case CallingConv::Fast:
1973 break;
1974 }
1976 MachineFunction &MF = DAG.getMachineFunction();
1978 const Function &Func = MF.getFunction();
1979 if (Func.hasFnAttribute("interrupt")) {
1980 if (!Func.arg_empty())
1981 report_fatal_error(
1982 "Functions with the interrupt attribute cannot have arguments!");
1984 StringRef Kind =
1985 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
1987 if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
1988 report_fatal_error(
1989 "Function interrupt attribute argument not supported!");
1990 }
1992 EVT PtrVT = getPointerTy(DAG.getDataLayout());
1993 MVT XLenVT = Subtarget.getXLenVT();
1994 unsigned XLenInBytes = Subtarget.getXLen() / 8;
1995 // Used with varargs to accumulate store chains.
1996 std::vector<SDValue> OutChains;
1998 // Assign locations to all of the incoming arguments.
1999 SmallVector<CCValAssign, 16> ArgLocs;
2000 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2002 if (CallConv == CallingConv::Fast)
2003 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
2004 else
2005 analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
2007 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2008 CCValAssign &VA = ArgLocs[i];
2009 SDValue ArgValue;
2010 // Passing f64 on RV32D with a soft float ABI must be handled as a special
2011 // case.
2012 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
2013 ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
2014 else if (VA.isRegLoc())
2015 ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
2016 else
2017 ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
2019 if (VA.getLocInfo() == CCValAssign::Indirect) {
2020 // If the original argument was split and passed by reference (e.g. i128
2021 // on RV32), we need to load all parts of it here (using the same
2022 // address).
2023 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
2024 MachinePointerInfo()));
2025 unsigned ArgIndex = Ins[i].OrigArgIndex;
2026 assert(Ins[i].PartOffset == 0);
2027 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
2028 CCValAssign &PartVA = ArgLocs[i + 1];
2029 unsigned PartOffset = Ins[i + 1].PartOffset;
2030 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue,
2031 DAG.getIntPtrConstant(PartOffset, DL));
2032 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
2033 MachinePointerInfo()));
2034 ++i;
2035 }
2036 continue;
2037 }
2038 InVals.push_back(ArgValue);
2039 }
2041 if (IsVarArg) {
2042 ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
2043 unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
2044 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
2045 MachineFrameInfo &MFI = MF.getFrameInfo();
2046 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2047 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
2049 // Offset of the first variable argument from stack pointer, and size of
2050 // the vararg save area. For now, the varargs save area is either zero or
2051 // large enough to hold a0-a7.
2052 int VaArgOffset, VarArgsSaveSize;
2054 // If all registers are allocated, then all varargs must be passed on the
2055 // stack and we don't need to save any argregs.
2056 if (ArgRegs.size() == Idx) {
2057 VaArgOffset = CCInfo.getNextStackOffset();
2058 VarArgsSaveSize = 0;
2059 } else {
2060 VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
2061 VaArgOffset = -VarArgsSaveSize;
2062 }
2064 // Record the frame index of the first variable argument
2065 // which is a value necessary to VASTART.
2066 int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2067 RVFI->setVarArgsFrameIndex(FI);
2069 // If saving an odd number of registers then create an extra stack slot to
2070 // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
2071 // offsets to even-numbered registers remain 2*XLEN-aligned.
2072 if (Idx % 2) {
2073 MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
2074 VarArgsSaveSize += XLenInBytes;
2075 }
2077 // Copy the integer registers that may have been used for passing varargs
2078 // to the vararg save area.
2079 for (unsigned I = Idx; I < ArgRegs.size();
2080 ++I, VaArgOffset += XLenInBytes) {
2081 const Register Reg = RegInfo.createVirtualRegister(RC);
2082 RegInfo.addLiveIn(ArgRegs[I], Reg);
2083 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
2084 FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2085 SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2086 SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
2087 MachinePointerInfo::getFixedStack(MF, FI));
2088 cast<StoreSDNode>(Store.getNode())
2089 ->getMemOperand()
2090 ->setValue((Value *)nullptr);
2091 OutChains.push_back(Store);
2092 }
2093 RVFI->setVarArgsSaveSize(VarArgsSaveSize);
2094 }
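// Worked example (assumed callee): an RV32 vararg function with three fixed
// integer arguments has Idx == 3, so a3-a7 are saved (VarArgsSaveSize == 20,
// VaArgOffset == -20); because Idx is odd, the extra slot above grows the
// save area to 24 bytes, keeping it 2*XLEN-aligned.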
2096 // All stores are grouped in one node to allow the matching between
2097 // the size of Ins and InVals. This only happens for vararg functions.
2098 if (!OutChains.empty()) {
2099 OutChains.push_back(Chain);
2100 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
2101 }
2103 return Chain;
2104 }
2106 /// isEligibleForTailCallOptimization - Check whether the call is eligible
2107 /// for tail call optimization.
2108 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
2109 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
2110 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
2111 const SmallVector<CCValAssign, 16> &ArgLocs) const {
2113 auto &Callee = CLI.Callee;
2114 auto CalleeCC = CLI.CallConv;
2115 auto &Outs = CLI.Outs;
2116 auto &Caller = MF.getFunction();
2117 auto CallerCC = Caller.getCallingConv();
2119 // Exception-handling functions need a special set of instructions to
2120 // indicate a return to the hardware. Tail-calling another function would
2121 // probably break this.
2122 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
2123 // should be expanded as new function attributes are introduced.
2124 if (Caller.hasFnAttribute("interrupt"))
2125 return false;
2127 // Do not tail call opt if the stack is used to pass parameters.
2128 if (CCInfo.getNextStackOffset() != 0)
2129 return false;
2131 // Do not tail call opt if any parameters need to be passed indirectly.
2132 // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
2133 // passed indirectly. So the address of the value will be passed in a
2134 // register, or if not available, then the address is put on the stack. In
2135 // order to pass indirectly, space on the stack often needs to be allocated
2136 // in order to store the value. In this case the CCInfo.getNextStackOffset()
2137 // != 0 check is not enough and we need to check if any CCValAssign ArgLocs
2138 // are passed CCValAssign::Indirect.
2139 for (auto &VA : ArgLocs)
2140 if (VA.getLocInfo() == CCValAssign::Indirect)
2141 return false;
2143 // Do not tail call opt if either caller or callee uses struct return
2144 // semantics.
2145 auto IsCallerStructRet = Caller.hasStructRetAttr();
2146 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
2147 if (IsCallerStructRet || IsCalleeStructRet)
2148 return false;
2150 // Externally-defined functions with weak linkage should not be
2151 // tail-called. The behaviour of branch instructions in this situation (as
2152 // used for tail calls) is implementation-defined, so we cannot rely on the
2153 // linker replacing the tail call with a return.
2154 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2155 const GlobalValue *GV = G->getGlobal();
2156 if (GV->hasExternalWeakLinkage())
2157 return false;
2158 }
2160 // The callee has to preserve all registers the caller needs to preserve.
2161 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
2162 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2163 if (CalleeCC != CallerCC) {
2164 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2165 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2166 return false;
2167 }
2169 // Byval parameters hand the function a pointer directly into the stack area
2170 // we want to reuse during a tail call. Working around this *is* possible
2171 // but less efficient and uglier in LowerCall.
2172 for (auto &Arg : Outs)
2173 if (Arg.Flags.isByVal())
2174 return false;
2176 return true;
2177 }
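// For example (hypothetical callers): `int f(int x) { return g(x); }` is
// eligible when f and g share a calling convention and all arguments fit in
// registers, whereas tail-calling `g(long double)` on RV32 is rejected above
// because fp128 is passed CCValAssign::Indirect.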
2179 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
2180 // and output parameter nodes.
2181 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
2182 SmallVectorImpl<SDValue> &InVals) const {
2183 SelectionDAG &DAG = CLI.DAG;
2184 SDLoc &DL = CLI.DL;
2185 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2186 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2187 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2188 SDValue Chain = CLI.Chain;
2189 SDValue Callee = CLI.Callee;
2190 bool &IsTailCall = CLI.IsTailCall;
2191 CallingConv::ID CallConv = CLI.CallConv;
2192 bool IsVarArg = CLI.IsVarArg;
2193 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2194 MVT XLenVT = Subtarget.getXLenVT();
2196 MachineFunction &MF = DAG.getMachineFunction();
2198 // Analyze the operands of the call, assigning locations to each operand.
2199 SmallVector<CCValAssign, 16> ArgLocs;
2200 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2202 if (CallConv == CallingConv::Fast)
2203 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
2204 else
2205 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
2207 // Check if it's really possible to do a tail call.
2208 if (IsTailCall)
2209 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
2211 if (IsTailCall)
2212 ++NumTailCalls;
2213 else if (CLI.CB && CLI.CB->isMustTailCall())
2214 report_fatal_error("failed to perform tail call elimination on a call "
2215 "site marked musttail");
2217 // Get a count of how many bytes are to be pushed on the stack.
2218 unsigned NumBytes = ArgCCInfo.getNextStackOffset();
2220 // Create local copies for byval args
2221 SmallVector<SDValue, 8> ByValArgs;
2222 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
2223 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2224 if (!Flags.isByVal())
2225 continue;
2227 SDValue Arg = OutVals[i];
2228 unsigned Size = Flags.getByValSize();
2229 Align Alignment = Flags.getNonZeroByValAlign();
2231 int FI =
2232 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
2233 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
2234 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
2236 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
2237 /*IsVolatile=*/false,
2238 /*AlwaysInline=*/false, IsTailCall,
2239 MachinePointerInfo(), MachinePointerInfo());
2240 ByValArgs.push_back(FIPtr);
2241 }
2243 if (!IsTailCall)
2244 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
2246 // Copy argument values to their designated locations.
2247 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
2248 SmallVector<SDValue, 8> MemOpChains;
2249 SDValue StackPtr;
2250 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
2251 CCValAssign &VA = ArgLocs[i];
2252 SDValue ArgValue = OutVals[i];
2253 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2255 // Handle passing f64 on RV32D with a soft float ABI as a special case.
2256 bool IsF64OnRV32DSoftABI =
2257 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
2258 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
2259 SDValue SplitF64 = DAG.getNode(
2260 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
2261 SDValue Lo = SplitF64.getValue(0);
2262 SDValue Hi = SplitF64.getValue(1);
2264 Register RegLo = VA.getLocReg();
2265 RegsToPass.push_back(std::make_pair(RegLo, Lo));
2267 if (RegLo == RISCV::X17) {
2268 // Second half of f64 is passed on the stack.
2269 // Work out the address of the stack slot.
2270 if (!StackPtr.getNode())
2271 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
2272 // Emit the store.
2273 MemOpChains.push_back(
2274 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
2275 } else {
2276 // Second half of f64 is passed in another GPR.
2277 assert(RegLo < RISCV::X31 && "Invalid register pair");
2278 Register RegHigh = RegLo + 1;
2279 RegsToPass.push_back(std::make_pair(RegHigh, Hi));
2280 }
2281 continue;
2282 }
2284 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
2285 // as any other MemLoc.
2287 // Promote the value if needed.
2288 // For now, only handle fully promoted and indirect arguments.
2289 if (VA.getLocInfo() == CCValAssign::Indirect) {
2290 // Store the argument in a stack slot and pass its address.
2291 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
2292 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2293 MemOpChains.push_back(
2294 DAG.getStore(Chain, DL, ArgValue, SpillSlot,
2295 MachinePointerInfo::getFixedStack(MF, FI)));
2296 // If the original argument was split (e.g. i128), we need
2297 // to store all parts of it here (and pass just one address).
2298 unsigned ArgIndex = Outs[i].OrigArgIndex;
2299 assert(Outs[i].PartOffset == 0);
2300 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
2301 SDValue PartValue = OutVals[i + 1];
2302 unsigned PartOffset = Outs[i + 1].PartOffset;
2303 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot,
2304 DAG.getIntPtrConstant(PartOffset, DL));
2305 MemOpChains.push_back(
2306 DAG.getStore(Chain, DL, PartValue, Address,
2307 MachinePointerInfo::getFixedStack(MF, FI)));
2308 ++i;
2309 }
2310 ArgValue = SpillSlot;
2311 } else {
2312 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
2313 }
2315 // Use local copy if it is a byval arg.
2316 if (Flags.isByVal())
2317 ArgValue = ByValArgs[j++];
2319 if (VA.isRegLoc()) {
2320 // Queue up the argument copies and emit them at the end.
2321 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
2322 } else {
2323 assert(VA.isMemLoc() && "Argument not register or memory");
2324 assert(!IsTailCall && "Tail call not allowed if stack is used "
2325 "for passing parameters");
2327 // Work out the address of the stack slot.
2328 if (!StackPtr.getNode())
2329 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
2330 SDValue Address =
2331 DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
2332 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
2334 // Emit the store.
2335 MemOpChains.push_back(
2336 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
2337 }
2338 }
2340 // Join the stores, which are independent of one another.
2341 if (!MemOpChains.empty())
2342 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2344 SDValue Glue;
2346 // Build a sequence of copy-to-reg nodes, chained and glued together.
2347 for (auto &Reg : RegsToPass) {
2348 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
2349 Glue = Chain.getValue(1);
2350 }
2352 // Validate that none of the argument registers have been marked as
2353 // reserved; if so, report an error. Do the same for the return address if
2354 // this is not a tailcall.
2355 validateCCReservedRegs(RegsToPass, MF);
2356 if (!IsTailCall &&
2357 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
2358 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2359 MF.getFunction(),
2360 "Return address register required, but has been reserved."});
2362 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
2363 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
2364 // split it and then direct call can be matched by PseudoCALL.
2365 if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
2366 const GlobalValue *GV = S->getGlobal();
2368 unsigned OpFlags = RISCVII::MO_CALL;
2369 if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
2370 OpFlags = RISCVII::MO_PLT;
2372 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
2373 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2374 unsigned OpFlags = RISCVII::MO_CALL;
2376 if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
2377 nullptr))
2378 OpFlags = RISCVII::MO_PLT;
2380 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
2381 }
2383 // The first call operand is the chain and the second is the target address.
2384 SmallVector<SDValue, 8> Ops;
2385 Ops.push_back(Chain);
2386 Ops.push_back(Callee);
2388 // Add argument registers to the end of the list so that they are
2389 // known live into the call.
2390 for (auto &Reg : RegsToPass)
2391 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
2393 if (!IsTailCall) {
2394 // Add a register mask operand representing the call-preserved registers.
2395 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2396 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
2397 assert(Mask && "Missing call preserved mask for calling convention");
2398 Ops.push_back(DAG.getRegisterMask(Mask));
2399 }
2401 // Glue the call to the argument copies, if any.
2402 if (Glue.getNode())
2403 Ops.push_back(Glue);
2406 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2408 if (IsTailCall) {
2409 MF.getFrameInfo().setHasTailCall();
2410 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
2411 }
2413 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
2414 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
2415 Glue = Chain.getValue(1);
2417 // Mark the end of the call, which is glued to the call itself.
2418 Chain = DAG.getCALLSEQ_END(Chain,
2419 DAG.getConstant(NumBytes, DL, PtrVT, true),
2420 DAG.getConstant(0, DL, PtrVT, true),
2421 Glue, DL);
2422 Glue = Chain.getValue(1);
2424 // Assign locations to each value returned by this call.
2425 SmallVector<CCValAssign, 16> RVLocs;
2426 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
2427 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);
2429 // Copy all of the result registers out of their specified physreg.
2430 for (auto &VA : RVLocs) {
2431 // Copy the value out
2432 SDValue RetValue =
2433 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
2434 // Glue the RetValue to the end of the call sequence
2435 Chain = RetValue.getValue(1);
2436 Glue = RetValue.getValue(2);
2438 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
2439 assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
2440 SDValue RetValue2 =
2441 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
2442 Chain = RetValue2.getValue(1);
2443 Glue = RetValue2.getValue(2);
2444 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
2445 RetValue2);
2446 } else
2448 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);
2450 InVals.push_back(RetValue);
2451 }
2453 return Chain;
2454 }
2456 bool RISCVTargetLowering::CanLowerReturn(
2457 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
2458 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2459 SmallVector<CCValAssign, 16> RVLocs;
2460 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
2461 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
2462 MVT VT = Outs[i].VT;
2463 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
2464 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
2465 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
2466 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr))
2467 return false;
2468 }
2470 return true;
2471 }
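// For instance (assumed types): returning i64 on RV32 uses a0 and a1 and is
// accepted, while returning i128 would need more than two GPRs, so CC_RISCV
// reports failure and the return value is demoted to an sret pointer
// argument instead.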
2472 SDValue
2473 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2474 bool IsVarArg,
2475 const SmallVectorImpl<ISD::OutputArg> &Outs,
2476 const SmallVectorImpl<SDValue> &OutVals,
2477 const SDLoc &DL, SelectionDAG &DAG) const {
2478 const MachineFunction &MF = DAG.getMachineFunction();
2479 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
2481 // Stores the assignment of the return value to a location.
2482 SmallVector<CCValAssign, 16> RVLocs;
2484 // Info about the registers and stack slot.
2485 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
2486 *DAG.getContext());
2488 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
2489 nullptr);
2491 SDValue Glue;
2492 SmallVector<SDValue, 4> RetOps(1, Chain);
2494 // Copy the result values into the output registers.
2495 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
2496 SDValue Val = OutVals[i];
2497 CCValAssign &VA = RVLocs[i];
2498 assert(VA.isRegLoc() && "Can only return in registers!");
2500 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
2501 // Handle returning f64 on RV32D with a soft float ABI.
2502 assert(VA.isRegLoc() && "Expected return via registers");
2503 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
2504 DAG.getVTList(MVT::i32, MVT::i32), Val);
2505 SDValue Lo = SplitF64.getValue(0);
2506 SDValue Hi = SplitF64.getValue(1);
2507 Register RegLo = VA.getLocReg();
2508 assert(RegLo < RISCV::X31 && "Invalid register pair");
2509 Register RegHi = RegLo + 1;
2511 if (STI.isRegisterReservedByUser(RegLo) ||
2512 STI.isRegisterReservedByUser(RegHi))
2513 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2514 MF.getFunction(),
2515 "Return value register required, but has been reserved."});
2517 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
2518 Glue = Chain.getValue(1);
2519 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
2520 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
2521 Glue = Chain.getValue(1);
2522 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
2523 } else {
2524 // Handle a 'normal' return.
2525 Val = convertValVTToLocVT(DAG, Val, VA, DL);
2526 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
2528 if (STI.isRegisterReservedByUser(VA.getLocReg()))
2529 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2531 "Return value register required, but has been reserved."});
2533 // Guarantee that all emitted copies are stuck together.
2534 Glue = Chain.getValue(1);
2535 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2536 }
2537 }
2539 RetOps[0] = Chain; // Update chain.
2541 // Add the glue node if we have it.
2542 if (Glue.getNode()) {
2543 RetOps.push_back(Glue);
2544 }
2546 // Interrupt service routines use different return instructions.
2547 const Function &Func = DAG.getMachineFunction().getFunction();
2548 if (Func.hasFnAttribute("interrupt")) {
2549 if (!Func.getReturnType()->isVoidTy())
2550 report_fatal_error(
2551 "Functions with the interrupt attribute must have void return type!");
2553 MachineFunction &MF = DAG.getMachineFunction();
2554 StringRef Kind =
2555 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
2557 unsigned RetOpc;
2558 if (Kind == "user")
2559 RetOpc = RISCVISD::URET_FLAG;
2560 else if (Kind == "supervisor")
2561 RetOpc = RISCVISD::SRET_FLAG;
2562 else
2563 RetOpc = RISCVISD::MRET_FLAG;
2565 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
2566 }
2568 return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
2569 }
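// Usage note (assumed frontend syntax): Clang accepts, e.g.,
// __attribute__((interrupt("machine"))) on a handler, which selects
// MRET_FLAG above so the function returns with mret instead of ret.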
2571 void RISCVTargetLowering::validateCCReservedRegs(
2572 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
2573 MachineFunction &MF) const {
2574 const Function &F = MF.getFunction();
2575 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
2577 if (std::any_of(std::begin(Regs), std::end(Regs), [&STI](auto Reg) {
2578 return STI.isRegisterReservedByUser(Reg.first);
2579 }))
2580 F.getContext().diagnose(DiagnosticInfoUnsupported{
2581 F, "Argument register required, but has been reserved."});
2582 }
2584 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
2585 return CI->isTailCall();
2586 }
2588 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
2589 switch ((RISCVISD::NodeType)Opcode) {
2590 case RISCVISD::FIRST_NUMBER:
2591 break;
2592 case RISCVISD::RET_FLAG:
2593 return "RISCVISD::RET_FLAG";
2594 case RISCVISD::URET_FLAG:
2595 return "RISCVISD::URET_FLAG";
2596 case RISCVISD::SRET_FLAG:
2597 return "RISCVISD::SRET_FLAG";
2598 case RISCVISD::MRET_FLAG:
2599 return "RISCVISD::MRET_FLAG";
2600 case RISCVISD::CALL:
2601 return "RISCVISD::CALL";
2602 case RISCVISD::SELECT_CC:
2603 return "RISCVISD::SELECT_CC";
2604 case RISCVISD::BuildPairF64:
2605 return "RISCVISD::BuildPairF64";
2606 case RISCVISD::SplitF64:
2607 return "RISCVISD::SplitF64";
2608 case RISCVISD::TAIL:
2609 return "RISCVISD::TAIL";
2610 case RISCVISD::SLLW:
2611 return "RISCVISD::SLLW";
2612 case RISCVISD::SRAW:
2613 return "RISCVISD::SRAW";
2614 case RISCVISD::SRLW:
2615 return "RISCVISD::SRLW";
2616 case RISCVISD::DIVW:
2617 return "RISCVISD::DIVW";
2618 case RISCVISD::DIVUW:
2619 return "RISCVISD::DIVUW";
2620 case RISCVISD::REMUW:
2621 return "RISCVISD::REMUW";
2622 case RISCVISD::FMV_W_X_RV64:
2623 return "RISCVISD::FMV_W_X_RV64";
2624 case RISCVISD::FMV_X_ANYEXTW_RV64:
2625 return "RISCVISD::FMV_X_ANYEXTW_RV64";
2626 case RISCVISD::READ_CYCLE_WIDE:
2627 return "RISCVISD::READ_CYCLE_WIDE";
2632 /// getConstraintType - Given a constraint letter, return the type of
2633 /// constraint it is for this target.
2634 RISCVTargetLowering::ConstraintType
2635 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
2636 if (Constraint.size() == 1) {
2637 switch (Constraint[0]) {
2638 default:
2639 break;
2640 case 'f':
2641 return C_RegisterClass;
2642 case 'I':
2643 case 'J':
2644 case 'K':
2645 return C_Immediate;
2646 case 'A':
2647 return C_Memory;
2648 }
2649 }
2650 return TargetLowering::getConstraintType(Constraint);
2651 }
2653 std::pair<unsigned, const TargetRegisterClass *>
2654 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
2655 StringRef Constraint,
2656 MVT VT) const {
2657 // First, see if this is a constraint that directly corresponds to a
2658 // RISCV register class.
2659 if (Constraint.size() == 1) {
2660 switch (Constraint[0]) {
2661 case 'r':
2662 return std::make_pair(0U, &RISCV::GPRRegClass);
2663 case 'f':
2664 if (Subtarget.hasStdExtF() && VT == MVT::f32)
2665 return std::make_pair(0U, &RISCV::FPR32RegClass);
2666 if (Subtarget.hasStdExtD() && VT == MVT::f64)
2667 return std::make_pair(0U, &RISCV::FPR64RegClass);
2668 break;
2669 default:
2670 break;
2671 }
2672 }
2674 // Clang will correctly decode the usage of register name aliases into their
2675 // official names. However, other frontends like `rustc` do not. This allows
2676 // users of these frontends to use the ABI names for registers in LLVM-style
2677 // register constraints.
2678 Register XRegFromAlias = StringSwitch<Register>(Constraint.lower())
2679 .Case("{zero}", RISCV::X0)
2680 .Case("{ra}", RISCV::X1)
2681 .Case("{sp}", RISCV::X2)
2682 .Case("{gp}", RISCV::X3)
2683 .Case("{tp}", RISCV::X4)
2684 .Case("{t0}", RISCV::X5)
2685 .Case("{t1}", RISCV::X6)
2686 .Case("{t2}", RISCV::X7)
2687 .Cases("{s0}", "{fp}", RISCV::X8)
2688 .Case("{s1}", RISCV::X9)
2689 .Case("{a0}", RISCV::X10)
2690 .Case("{a1}", RISCV::X11)
2691 .Case("{a2}", RISCV::X12)
2692 .Case("{a3}", RISCV::X13)
2693 .Case("{a4}", RISCV::X14)
2694 .Case("{a5}", RISCV::X15)
2695 .Case("{a6}", RISCV::X16)
2696 .Case("{a7}", RISCV::X17)
2697 .Case("{s2}", RISCV::X18)
2698 .Case("{s3}", RISCV::X19)
2699 .Case("{s4}", RISCV::X20)
2700 .Case("{s5}", RISCV::X21)
2701 .Case("{s6}", RISCV::X22)
2702 .Case("{s7}", RISCV::X23)
2703 .Case("{s8}", RISCV::X24)
2704 .Case("{s9}", RISCV::X25)
2705 .Case("{s10}", RISCV::X26)
2706 .Case("{s11}", RISCV::X27)
2707 .Case("{t3}", RISCV::X28)
2708 .Case("{t4}", RISCV::X29)
2709 .Case("{t5}", RISCV::X30)
2710 .Case("{t6}", RISCV::X31)
2711 .Default(RISCV::NoRegister);
2712 if (XRegFromAlias != RISCV::NoRegister)
2713 return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
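// Illustrative example (assumed IR, not from this file): both of the
// following inline asm constraints resolve to X10, the first via the alias
// table above and the second via the generic TableGen-name lookup below:
//   call void asm sideeffect "", "{a0}"(i32 %val)
//   call void asm sideeffect "", "{x10}"(i32 %val)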
2715 // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
2716 // TableGen record rather than the AsmName to choose registers for InlineAsm
2717 // constraints, plus we want to match those names to the widest floating point
2718 // register type available, manually select floating point registers here.
2720 // The second case is the ABI name of the register, so that frontends can also
2721 // use the ABI names in register constraint lists.
2722 if (Subtarget.hasStdExtF() || Subtarget.hasStdExtD()) {
2723 std::pair<Register, Register> FReg =
2724 StringSwitch<std::pair<Register, Register>>(Constraint.lower())
2725 .Cases("{f0}", "{ft0}", {RISCV::F0_F, RISCV::F0_D})
2726 .Cases("{f1}", "{ft1}", {RISCV::F1_F, RISCV::F1_D})
2727 .Cases("{f2}", "{ft2}", {RISCV::F2_F, RISCV::F2_D})
2728 .Cases("{f3}", "{ft3}", {RISCV::F3_F, RISCV::F3_D})
2729 .Cases("{f4}", "{ft4}", {RISCV::F4_F, RISCV::F4_D})
2730 .Cases("{f5}", "{ft5}", {RISCV::F5_F, RISCV::F5_D})
2731 .Cases("{f6}", "{ft6}", {RISCV::F6_F, RISCV::F6_D})
2732 .Cases("{f7}", "{ft7}", {RISCV::F7_F, RISCV::F7_D})
2733 .Cases("{f8}", "{fs0}", {RISCV::F8_F, RISCV::F8_D})
2734 .Cases("{f9}", "{fs1}", {RISCV::F9_F, RISCV::F9_D})
2735 .Cases("{f10}", "{fa0}", {RISCV::F10_F, RISCV::F10_D})
2736 .Cases("{f11}", "{fa1}", {RISCV::F11_F, RISCV::F11_D})
2737 .Cases("{f12}", "{fa2}", {RISCV::F12_F, RISCV::F12_D})
2738 .Cases("{f13}", "{fa3}", {RISCV::F13_F, RISCV::F13_D})
2739 .Cases("{f14}", "{fa4}", {RISCV::F14_F, RISCV::F14_D})
2740 .Cases("{f15}", "{fa5}", {RISCV::F15_F, RISCV::F15_D})
2741 .Cases("{f16}", "{fa6}", {RISCV::F16_F, RISCV::F16_D})
2742 .Cases("{f17}", "{fa7}", {RISCV::F17_F, RISCV::F17_D})
2743 .Cases("{f18}", "{fs2}", {RISCV::F18_F, RISCV::F18_D})
2744 .Cases("{f19}", "{fs3}", {RISCV::F19_F, RISCV::F19_D})
2745 .Cases("{f20}", "{fs4}", {RISCV::F20_F, RISCV::F20_D})
2746 .Cases("{f21}", "{fs5}", {RISCV::F21_F, RISCV::F21_D})
2747 .Cases("{f22}", "{fs6}", {RISCV::F22_F, RISCV::F22_D})
2748 .Cases("{f23}", "{fs7}", {RISCV::F23_F, RISCV::F23_D})
2749 .Cases("{f24}", "{fs8}", {RISCV::F24_F, RISCV::F24_D})
2750 .Cases("{f25}", "{fs9}", {RISCV::F25_F, RISCV::F25_D})
2751 .Cases("{f26}", "{fs10}", {RISCV::F26_F, RISCV::F26_D})
2752 .Cases("{f27}", "{fs11}", {RISCV::F27_F, RISCV::F27_D})
2753 .Cases("{f28}", "{ft8}", {RISCV::F28_F, RISCV::F28_D})
2754 .Cases("{f29}", "{ft9}", {RISCV::F29_F, RISCV::F29_D})
2755 .Cases("{f30}", "{ft10}", {RISCV::F30_F, RISCV::F30_D})
2756 .Cases("{f31}", "{ft11}", {RISCV::F31_F, RISCV::F31_D})
2757 .Default({RISCV::NoRegister, RISCV::NoRegister});
2758 if (FReg.first != RISCV::NoRegister)
2759 return Subtarget.hasStdExtD()
2760 ? std::make_pair(FReg.second, &RISCV::FPR64RegClass)
2761 : std::make_pair(FReg.first, &RISCV::FPR32RegClass);
2762 }
2764 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
2765 }
2767 unsigned
2768 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
2769 // Currently only support length 1 constraints.
2770 if (ConstraintCode.size() == 1) {
2771 switch (ConstraintCode[0]) {
2772 case 'A':
2773 return InlineAsm::Constraint_A;
2774 default:
2775 break;
2776 }
2777 }
2779 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
2780 }
2782 void RISCVTargetLowering::LowerAsmOperandForConstraint(
2783 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
2784 SelectionDAG &DAG) const {
2785 // Currently only support length 1 constraints.
2786 if (Constraint.length() == 1) {
2787 switch (Constraint[0]) {
2788 case 'I':
2789 // Validate & create a 12-bit signed immediate operand.
2790 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2791 uint64_t CVal = C->getSExtValue();
2792 if (isInt<12>(CVal))
2793 Ops.push_back(
2794 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
2795 }
2796 return;
2797 case 'J':
2798 // Validate & create an integer zero operand.
2799 if (auto *C = dyn_cast<ConstantSDNode>(Op))
2800 if (C->getZExtValue() == 0)
2801 Ops.push_back(
2802 DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
2803 return;
2804 case 'K':
2805 // Validate & create a 5-bit unsigned immediate operand.
2806 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
2807 uint64_t CVal = C->getZExtValue();
2808 if (isUInt<5>(CVal))
2809 Ops.push_back(
2810 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
2811 }
2812 return;
2813 default:
2814 break;
2815 }
2816 }
2817 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
2818 }
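// Example usage from C (a sketch, assuming standard GCC-style inline asm):
//   int r, a = 5;
//   __asm__("addi %0, %1, %2" : "=r"(r) : "r"(a), "I"(12));
// The 'I' operand is validated by the code above to fit in a 12-bit signed
// immediate; an out-of-range constant produces no operand and the constraint
// fails.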
2820 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
2821 Instruction *Inst,
2822 AtomicOrdering Ord) const {
2823 if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
2824 return Builder.CreateFence(Ord);
2825 if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
2826 return Builder.CreateFence(AtomicOrdering::Release);
2827 return nullptr;
2828 }
2830 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
2831 Instruction *Inst,
2832 AtomicOrdering Ord) const {
2833 if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
2834 return Builder.CreateFence(AtomicOrdering::Acquire);
2835 return nullptr;
2836 }
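// Mapping sketch (following the RISC-V ISA's recommended fence-based
// mapping; the exact assembly is an assumption, not emitted here): a seq_cst
// load becomes "fence rw,rw; lw; fence r,rw", with the leading fence from
// emitLeadingFence above and the trailing fence from emitTrailingFence.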
2838 TargetLowering::AtomicExpansionKind
2839 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
2840 // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
2841 // point operations can't be used in an lr/sc sequence without breaking the
2842 // forward-progress guarantee.
2843 if (AI->isFloatingPointOperation())
2844 return AtomicExpansionKind::CmpXChg;
2846 unsigned Size = AI->getType()->getPrimitiveSizeInBits();
2847 if (Size == 8 || Size == 16)
2848 return AtomicExpansionKind::MaskedIntrinsic;
2849 return AtomicExpansionKind::None;
2850 }
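// For example (assumed IR): `atomicrmw add i8* %p, i8 1 monotonic` is 8 bits
// wide, so AtomicExpandPass rewrites it into a call to the corresponding
// llvm.riscv.masked.atomicrmw.add intrinsic on the containing aligned word,
// emitted by emitMaskedAtomicRMWIntrinsic below.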
2852 static Intrinsic::ID
2853 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
2854 if (XLen == 32) {
2855 switch (BinOp) {
2856 default:
2857 llvm_unreachable("Unexpected AtomicRMW BinOp");
2858 case AtomicRMWInst::Xchg:
2859 return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
2860 case AtomicRMWInst::Add:
2861 return Intrinsic::riscv_masked_atomicrmw_add_i32;
2862 case AtomicRMWInst::Sub:
2863 return Intrinsic::riscv_masked_atomicrmw_sub_i32;
2864 case AtomicRMWInst::Nand:
2865 return Intrinsic::riscv_masked_atomicrmw_nand_i32;
2866 case AtomicRMWInst::Max:
2867 return Intrinsic::riscv_masked_atomicrmw_max_i32;
2868 case AtomicRMWInst::Min:
2869 return Intrinsic::riscv_masked_atomicrmw_min_i32;
2870 case AtomicRMWInst::UMax:
2871 return Intrinsic::riscv_masked_atomicrmw_umax_i32;
2872 case AtomicRMWInst::UMin:
2873 return Intrinsic::riscv_masked_atomicrmw_umin_i32;
2874 }
2875 }
2877 if (XLen == 64) {
2878 switch (BinOp) {
2879 default:
2880 llvm_unreachable("Unexpected AtomicRMW BinOp");
2881 case AtomicRMWInst::Xchg:
2882 return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
2883 case AtomicRMWInst::Add:
2884 return Intrinsic::riscv_masked_atomicrmw_add_i64;
2885 case AtomicRMWInst::Sub:
2886 return Intrinsic::riscv_masked_atomicrmw_sub_i64;
2887 case AtomicRMWInst::Nand:
2888 return Intrinsic::riscv_masked_atomicrmw_nand_i64;
2889 case AtomicRMWInst::Max:
2890 return Intrinsic::riscv_masked_atomicrmw_max_i64;
2891 case AtomicRMWInst::Min:
2892 return Intrinsic::riscv_masked_atomicrmw_min_i64;
2893 case AtomicRMWInst::UMax:
2894 return Intrinsic::riscv_masked_atomicrmw_umax_i64;
2895 case AtomicRMWInst::UMin:
2896 return Intrinsic::riscv_masked_atomicrmw_umin_i64;
2897 }
2898 }
2900 llvm_unreachable("Unexpected XLen\n");
2901 }
2903 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
2904 IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
2905 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
2906 unsigned XLen = Subtarget.getXLen();
2907 Value *Ordering =
2908 Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
2909 Type *Tys[] = {AlignedAddr->getType()};
2910 Function *LrwOpScwLoop = Intrinsic::getDeclaration(
2911 AI->getModule(),
2912 getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
2914 if (XLen == 64) {
2915 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
2916 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
2917 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
2918 }
2920 Value *Result;
2922 // Must pass the shift amount needed to sign extend the loaded value prior
2923 // to performing a signed comparison for min/max. ShiftAmt is the number of
2924 // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
2925 // is the number of bits to left+right shift the value in order to
2926 // sign-extend.
2927 if (AI->getOperation() == AtomicRMWInst::Min ||
2928 AI->getOperation() == AtomicRMWInst::Max) {
2929 const DataLayout &DL = AI->getModule()->getDataLayout();
2930 unsigned ValWidth =
2931 DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
2932 Value *SextShamt =
2933 Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
2934 Result = Builder.CreateCall(LrwOpScwLoop,
2935 {AlignedAddr, Incr, Mask, SextShamt, Ordering});
2936 } else {
2937 Result =
2938 Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
2939 }
2941 if (XLen == 64)
2942 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
2944 return Result;
2945 }
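// Worked example (assumed values): for an i8 atomicrmw max on RV32 with the
// byte at bit offset 16, ValWidth = 8 and ShiftAmt = 16, so SextShamt =
// (32 - 8) - 16 = 8; shifting the loaded word left and then arithmetically
// right by 8 sign-extends the byte in place for the signed comparison.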
2946 TargetLowering::AtomicExpansionKind
2947 RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
2948 AtomicCmpXchgInst *CI) const {
2949 unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
2950 if (Size == 8 || Size == 16)
2951 return AtomicExpansionKind::MaskedIntrinsic;
2952 return AtomicExpansionKind::None;
2955 Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
2956 IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
2957 Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
2958 unsigned XLen = Subtarget.getXLen();
2959 Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
2960 Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
2961 if (XLen == 64) {
2962 CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
2963 NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
2964 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
2965 CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
2966 }
2967 Type *Tys[] = {AlignedAddr->getType()};
2968 Function *MaskedCmpXchg =
2969 Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
2970 Value *Result = Builder.CreateCall(
2971 MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
2972 if (XLen == 64)
2973 Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
2975 return Result;
2976 }
2977 Register RISCVTargetLowering::getExceptionPointerRegister(
2978 const Constant *PersonalityFn) const {
2979 return RISCV::X10;
2980 }
2982 Register RISCVTargetLowering::getExceptionSelectorRegister(
2983 const Constant *PersonalityFn) const {
2984 return RISCV::X11;
2985 }
2987 bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
2988 // Return false to suppress the unnecessary extensions if the LibCall
2989 // arguments or return value is an f32 type for the LP64 ABI.
2990 RISCVABI::ABI ABI = Subtarget.getTargetABI();
2991 if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
2992 return false;
2994 return true;
2995 }
2997 bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
2998 SDValue C) const {
2999 // Check integral scalar types.
3000 if (VT.isScalarInteger()) {
3001 // Do not perform the transformation on riscv32 with the M extension.
3002 if (!Subtarget.is64Bit() && Subtarget.hasStdExtM())
3003 return false;
3004 if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
3005 if (ConstNode->getAPIntValue().getBitWidth() > 8 * sizeof(int64_t))
3006 return false;
3007 int64_t Imm = ConstNode->getSExtValue();
3008 if (isPowerOf2_64(Imm + 1) || isPowerOf2_64(Imm - 1) ||
3009 isPowerOf2_64(1 - Imm) || isPowerOf2_64(-1 - Imm))
3010 return true;
3011 }
3012 }
3014 return false;
3015 }
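// For example (assumed inputs): multiplying by 5 qualifies because
// 5 - 1 = 4 is a power of two, so x * 5 becomes (x << 2) + x; likewise
// x * -3 can become x - (x << 2), since 1 - (-3) = 4 is a power of two.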
3017 #define GET_REGISTER_MATCHER
3018 #include "RISCVGenAsmMatcher.inc"
3020 Register
3021 RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
3022 const MachineFunction &MF) const {
3023 Register Reg = MatchRegisterAltName(RegName);
3024 if (Reg == RISCV::NoRegister)
3025 Reg = MatchRegisterName(RegName);
3026 if (Reg == RISCV::NoRegister)
3027 report_fatal_error(
3028 Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
3029 BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
3030 if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
3031 report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
3032 StringRef(RegName) + "\"."));
3033 return Reg;
3034 }