//===-- RISCVISelLowering.h - RISCV DAG Lowering Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H

#include "RISCV.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
class RISCVSubtarget;
struct RISCVRegisterInfo;

namespace RISCVISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  /// Select with condition operator - This selects between a true value and
  /// a false value (ops #3 and #4) based on the boolean result of comparing
  /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
  /// condition code in op #2, an XLenVT constant from the ISD::CondCode enum.
  /// The lhs and rhs are XLenVT integers. The true and false values can be
  /// integer or floating point.
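  //
  // Illustrative DAG shape (a sketch; the enumerator name SELECT_CC is
  // assumed from the in-tree definition):
  //   (select (setlt a, b), x, y)  ->  (SELECT_CC a, b, setlt, x, y)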
  // Multiply high for signed x unsigned operands.
  // RV64I shifts, directly matching the semantics of the named RISC-V
  // instructions.
  // 32-bit operations from RV64M that can't be simply matched with a pattern
  // at instruction selection time. These have undefined behavior for division
  // by 0 or overflow (divw) like their target-independent counterparts.
  // RV64IB rotates, directly matching the semantics of the named RISC-V
  // instructions.
  // RV64IZbb bit counting instructions directly matching the semantics of the
  // named RISC-V instructions.
  // RV64IB/RV32IB funnel shifts, with the semantics of the named RISC-V
  // instructions, but the same operand order as fshl/fshr intrinsics.
  // RV64IB funnel shifts, with the semantics of the named RISC-V instructions,
  // but the same operand order as fshl/fshr intrinsics.
  // FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as
  // XLEN is the only legal integer width.
  //
  // FMV_H_X matches the semantics of FMV.H.X.
  // FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result.
  // FMV_W_X_RV64 matches the semantics of FMV.W.X.
  // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.
  //
  // The any-extended results make it more convenient to write dagcombines
  // that remove unnecessary GPR->FPR->GPR moves.
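  //
  // Illustrative example (a sketch, not part of the original comments): on
  // RV64F, a bitcast of f32 to i32 goes through FMV_X_ANYEXTW_RV64; because
  // i32 is not a legal type on RV64, the node produces an i64 whose upper 32
  // bits are left unspecified rather than forced to the sign bit.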
  // FP to XLen int conversions. Corresponds to fcvt.l(u).s/d/h on RV64 and
  // fcvt.w(u).s/d/h on RV32. Unlike FP_TO_S/UINT these saturate out of
  // range inputs. These are used for FP_TO_S/UINT_SAT lowering.
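  //
  // Illustrative use (a sketch): on RV64, i64 (fp_to_sint_sat f64 %x) is
  // lowered through one of these nodes, matching the clamping behavior of
  // fcvt.l.d for out-of-range inputs; NaN inputs are handled by a separate
  // select in the lowering.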
  // FP to 32-bit int conversions for RV64. These are used to keep track of the
  // result being sign-extended to 64 bits. These saturate out of range inputs.
  // Used for FP_TO_S/UINT and FP_TO_S/UINT_SAT lowering.
  // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
  // (returns (Lo, Hi)). It takes a chain operand.
  // Generalized Reverse and Generalized Or-Combine - directly matching the
  // semantics of the named RISC-V instructions. Lowered as custom nodes as
  // TableGen chokes when faced with commutative permutations in deeply-nested
  // DAGs. Each node takes an input operand and a control operand and outputs a
  // bit-manipulated version of the input. All operands are i32 or XLenVT.
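  //
  // Illustrative control values (a sketch, assuming the usual Zbp encodings):
  // on a 32-bit input, a generalized reverse with control 24 swaps the bytes
  // (the rev8 pattern) and control 31 reverses every bit, while a generalized
  // or-combine with control 7 ORs each bit across its byte (the orc.b
  // pattern).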
  // Bit Compress/Decompress implement the generic bit extract and bit deposit
  // functions. This operation is also referred to as bit gather/scatter, bit
  // pack/unpack, parallel extract/deposit, compress/expand, or right
  // compress/right expand.
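  //
  // Worked example (illustrative): with control 0b1010, compress packs input
  // bits 3 and 1 into result bits 1 and 0; decompress is the inverse,
  // scattering input bits 1 and 0 to result bits 3 and 1 and zeroing the
  // remaining bits.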
  // VMV_V_X_VL matches the semantics of vmv.v.x but includes an extra operand
  // for the VL value to be used for the operation.
  // VFMV_V_F_VL matches the semantics of vfmv.v.f but includes an extra operand
  // for the VL value to be used for the operation.
  // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT sign
  // extended from the vector element size.
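  //
  // For example (illustrative): reading element 0 of an i8 vector through
  // this node yields an XLenVT value sign-extended from 8 bits, exactly as
  // vmv.x.s would produce it.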
  // VMV_S_X_VL matches the semantics of vmv.s.x. It carries a VL operand.
  // VFMV_S_F_VL matches the semantics of vfmv.s.f. It carries a VL operand.
  // Splats an i64 scalar to a vector type (with element type i64) where the
  // scalar is a sign-extended i32.
  // Splats a 64-bit value that has been split into two i32 parts. This is
  // expanded late to two scalar stores and a stride 0 vector load.
  SPLAT_VECTOR_SPLIT_I64_VL,
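  //
  // Expansion sketch (illustrative): the two i32 halves are stored to a
  // 64-bit stack slot and broadcast back with a zero-stride vector load
  // (vlse64 with rs2 = x0), avoiding an i64 GPR-pair-to-vector transfer on
  // RV32.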
  // Truncates an RVV integer vector by one power-of-two. Carries both an extra
  // mask and VL operand.
  // Matches the semantics of vslideup/vslidedown. The first operand is the
  // pass-thru operand, the second is the source vector, the third is the
  // XLenVT index (either constant or non-constant), the fourth is the mask,
  // and the fifth is the VL.
  // Matches the semantics of vslide1up/vslide1down. The first operand is the
  // source vector, the second is the XLenVT scalar value. The third and fourth
  // operands are the mask and VL operands.
  // Matches the semantics of the vid.v instruction, with a mask and VL
  // operand.
  // Matches the semantics of the vfncvt.rod instruction (convert double-width
  // float to single-width float, rounding towards odd). Takes a double-width
  // float vector and produces a single-width float vector. Also has a mask and
  // VL operand.
  // These nodes match the semantics of the corresponding RVV vector reduction
  // instructions. They produce a vector result which is the reduction
  // performed over the second vector operand plus the first element of the
  // third vector operand. The first operand is the pass-thru operand. The
  // second operand is an unconstrained vector type, and the result, first, and
  // third operand's types are expected to be the corresponding full-width
  // LMUL=1 type for the second operand:
  //   nxv8i8 = vecreduce_add nxv8i8, nxv32i8, nxv8i8
  //   nxv2i32 = vecreduce_add nxv2i32, nxv8i32, nxv2i32
  // The difference in types does introduce extra vsetvli instructions, but it
  // also reduces the number of registers consumed per reduction.
  // Also has a mask and VL operand.
  VECREDUCE_SEQ_FADD_VL,
  // Vector binary and unary ops with a mask as a third operand, and VL as a
  // fourth operand.
  // FIXME: Can we replace these with ISD::VP_*?
  // Widening instructions
  // Vector compare producing a mask. Fourth operand is input mask. Fifth
  // operand is VL.
  // Vector select with an additional VL operand. This operation is unmasked.
  // Mask binary operators.
  // Set mask vector to all zeros or ones.
  // Matches the semantics of vrgather.vx and vrgather.vv with an extra operand
  // for VL.
  // Vector sign/zero extend with additional mask & VL operands.
  // vcpop.m with additional mask and VL operands.
  // Reads value of CSR.
  // The first operand is a chain pointer. The second specifies the address of
  // the required CSR. Two results are produced, the read value and the new
  // chain pointer.
  // Write value to CSR.
  // The first operand is a chain pointer, the second specifies the address of
  // the required CSR and the third is the value to write. The result is the
  // new chain pointer.
  // Read and write value of CSR.
  // The first operand is a chain pointer, the second specifies the address of
  // the required CSR and the third is the value to write. Two results are
  // produced, the value read before the modification and the new chain
  // pointer.
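  //
  // Illustrative node shapes (a sketch, assuming the in-tree enumerator names
  // READ_CSR, WRITE_CSR and SWAP_CSR):
  //   {XLenVT, ch} = READ_CSR  ch, csr-addr
  //   {ch}         = WRITE_CSR ch, csr-addr, value
  //   {XLenVT, ch} = SWAP_CSR  ch, csr-addr, value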
  // Memory opcodes start here.
  VLE_VL = ISD::FIRST_TARGET_MEMORY_OPCODE,
  // WARNING: Do not add anything at the end unless you want the node to
  // have a memop! In fact, starting from FIRST_TARGET_MEMORY_OPCODE, all
  // opcodes will be treated as target memory ops!
};
} // namespace RISCVISD

class RISCVTargetLowering : public TargetLowering {
  const RISCVSubtarget &Subtarget;

public:
  explicit RISCVTargetLowering(const TargetMachine &TM,
                               const RISCVSubtarget &STI);

  const RISCVSubtarget &getSubtarget() const { return Subtarget; }

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;
  bool hasAndNot(SDValue Y) const override;
  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  bool softPromoteHalfType() const override { return true; }

  /// Return the register type for a given MVT, ensuring vectors are treated
  /// as a series of gpr sized integers.
  MVT getRegisterTypeForCallingConv(LLVMContext &Context, CallingConv::ID CC,
                                    EVT VT) const override;

  /// Return the number of registers for a given MVT, ensuring vectors are
  /// treated as a series of gpr sized integers.
  unsigned getNumRegistersForCallingConv(LLVMContext &Context,
                                         CallingConv::ID CC,
                                         EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override;
  bool
  shouldExpandBuildVectorWithShuffles(EVT VT,
                                      unsigned DefinedValues) const override;

  // Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;
  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  // This method returns the name of a target-specific DAG node.
  const char *getTargetNodeName(unsigned Opcode) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return isa<LoadInst>(I) || isa<StoreInst>(I);
  }
  Instruction *emitLeadingFence(IRBuilderBase &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilderBase &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::SIGN_EXTEND;
  }

  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::SIGN_EXTEND;
  }

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
    if (DAG.getMachineFunction().getFunction().hasMinSize())
      return false;
    return true;
  }

  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool shouldExtendTypeInLibCall(EVT Type) const override;
  bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override;

  /// Returns the register with the specified architectural or ABI name. This
  /// method is necessary to lower the llvm.read_register.* and
  /// llvm.write_register.* intrinsics. Allocatable registers must be reserved
  /// with the clang -ffixed-xX flag for access to be allowed.
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;
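
  // Illustrative IR usage (a sketch, not from the original header):
  //   %sp = call i64 @llvm.read_register.i64(metadata !0)
  //   !0 = !{!"sp"}
  // Reading an allocatable register such as x18 would additionally require
  // compiling with -ffixed-x18.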

  // Lower incoming arguments, copy physregs into vregs.
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override {
    return true;
  }
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
                              SDValue C) const override;

  bool isMulAddWithConstProfitable(const SDValue &AddNode,
                                   const SDValue &ConstNode) const override;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  Value *emitMaskedAtomicRMWIntrinsic(IRBuilderBase &Builder, AtomicRMWInst *AI,
                                      Value *AlignedAddr, Value *Incr,
                                      Value *Mask, Value *ShiftAmt,
                                      AtomicOrdering Ord) const override;
  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
  Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilderBase &Builder,
                                          AtomicCmpXchgInst *CI,
                                          Value *AlignedAddr, Value *CmpVal,
                                          Value *NewVal, Value *Mask,
                                          AtomicOrdering Ord) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(
      EVT VT, unsigned AddrSpace = 0, Align Alignment = Align(1),
      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
      bool *Fast = nullptr) const override;

  bool splitValueIntoRegisterParts(SelectionDAG &DAG, const SDLoc &DL,
                                   SDValue Val, SDValue *Parts,
                                   unsigned NumParts, MVT PartVT,
                                   Optional<CallingConv::ID> CC) const override;

  SDValue
  joinRegisterPartsIntoValue(SelectionDAG &DAG, const SDLoc &DL,
                             const SDValue *Parts, unsigned NumParts,
                             MVT PartVT, EVT ValueVT,
                             Optional<CallingConv::ID> CC) const override;

  static RISCVII::VLMUL getLMUL(MVT VT);
  static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);
  static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
  static unsigned getRegClassIDForVecVT(MVT VT);
  static std::pair<unsigned, unsigned>
  decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT,
                                           unsigned InsertExtractIdx,
                                           const RISCVRegisterInfo *TRI);
  MVT getContainerForFixedLengthVector(MVT VT) const;

  bool shouldRemoveExtendFromGSIndex(EVT VT) const override;

  bool isLegalElementTypeForRVV(Type *ScalarTy) const;

  bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;

private:
  /// RISCVCCAssignFn - This target-specific function extends the default
  /// CCValAssign with additional information used to lower RISC-V calling
  /// conventions.
  typedef bool RISCVCCAssignFn(const DataLayout &DL, RISCVABI::ABI,
                               unsigned ValNo, MVT ValVT, MVT LocVT,
                               CCValAssign::LocInfo LocInfo,
                               ISD::ArgFlagsTy ArgFlags, CCState &State,
                               bool IsFixed, bool IsRet, Type *OrigTy,
                               const RISCVTargetLowering &TLI,
                               Optional<unsigned> FirstMaskArgument);

  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                        const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
                        RISCVCCAssignFn Fn) const;
  void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                         bool IsRet, CallLoweringInfo *CLI,
                         RISCVCCAssignFn Fn) const;

  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;

  SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                           bool UseGOT) const;
  SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;

  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
  SDValue lowerSPLAT_VECTOR_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskSplat(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                             int64_t ExtTrueVal) const;
  SDValue lowerVectorMaskTrunc(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVPREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskVecReduction(SDValue Op, SelectionDAG &DAG,
                                      bool IsVP) const;
  SDValue lowerFPVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSTEP_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVECTOR_REVERSE(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerABS(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedStore(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorFCOPYSIGNToRVV(SDValue Op,
                                               SelectionDAG &DAG) const;
  SDValue lowerMaskedGather(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerMaskedScatter(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLoadToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorStoreToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSetccToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorLogicOpToRVV(SDValue Op, SelectionDAG &DAG,
                                             unsigned MaskOpc,
                                             unsigned VecOpc) const;
  SDValue lowerFixedLengthVectorShiftToRVV(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
                                            SelectionDAG &DAG) const;
  SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG, unsigned NewOpc,
                            bool HasMask = true) const;
  SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG, unsigned RISCVISDOpc) const;
  SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
                                            unsigned ExtendOpc) const;
  SDValue lowerGET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSET_ROUNDING(SDValue Op, SelectionDAG &DAG) const;

  SDValue expandUnalignedRVVLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue expandUnalignedRVVStore(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
      const SmallVector<CCValAssign, 16> &ArgLocs) const;

  /// Generate error diagnostics if any register used by CC has been marked
  /// as reserved.
  void validateCCReservedRegs(
      const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
      MachineFunction &MF) const;

  bool useRVVForFixedLengthVectorVT(MVT VT) const;

  MVT getVPExplicitVectorLengthTy() const override;

  /// RVV code generation for fixed length vectors does not lower all
  /// BUILD_VECTORs. This makes BUILD_VECTOR legalisation a source of stores to
  /// merge. However, merging them creates a BUILD_VECTOR that is just as
  /// illegal as the original, thus leading to an infinite legalisation loop.
  /// NOTE: Once BUILD_VECTOR can be custom lowered for all legal vector types,
  /// this override can be removed.
  bool mergeStoresAfterLegalization(EVT VT) const override;

  /// Disable normalizing
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)).
  /// RISCV doesn't have flags so it's better to perform the and/or in a GPR.
  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override {
    return false;
  }

  // We use 64 bits as the known part in the scalable vector types.
  static constexpr unsigned RVVBitsPerBlock = 64;
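
  // For example (illustrative): MVT::nxv2i32 occupies one 64-bit block, so
  // its known minimum size is 64 bits and vscale corresponds to VLEN / 64.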
};

namespace RISCVVIntrinsicsTable {

struct RISCVVIntrinsicInfo {
  unsigned IntrinsicID;
  uint8_t SplatOperand;
};

using namespace RISCV;

#define GET_RISCVVIntrinsicsTable_DECL
#include "RISCVGenSearchableTables.inc"

} // end namespace RISCVVIntrinsicsTable

} // end namespace llvm

#endif // LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H