1 //===-- RISCVInstrInfo.td - Target Description for RISCV ---*- tablegen -*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file describes the RISC-V instructions in TableGen format.
11 //===----------------------------------------------------------------------===//
13 //===----------------------------------------------------------------------===//
14 // RISC-V specific DAG Nodes.
15 //===----------------------------------------------------------------------===//
17 // Target-independent type requirements, but with target-specific formats.
18 def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
20 def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i32>,
23 // Target-dependent type requirements.
24 def SDT_RISCVCall : SDTypeProfile<0, -1, [SDTCisVT<0, XLenVT>]>;
25 def SDT_RISCVSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>,
29 // Target-independent nodes, but with target-specific formats.
30 def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
31 [SDNPHasChain, SDNPOutGlue]>;
32 def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
33 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
35 // Target-dependent nodes.
36 def riscv_call : SDNode<"RISCVISD::CALL", SDT_RISCVCall,
37 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
39 def riscv_ret_flag : SDNode<"RISCVISD::RET_FLAG", SDTNone,
40 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
41 def riscv_uret_flag : SDNode<"RISCVISD::URET_FLAG", SDTNone,
42 [SDNPHasChain, SDNPOptInGlue]>;
43 def riscv_sret_flag : SDNode<"RISCVISD::SRET_FLAG", SDTNone,
44 [SDNPHasChain, SDNPOptInGlue]>;
45 def riscv_mret_flag : SDNode<"RISCVISD::MRET_FLAG", SDTNone,
46 [SDNPHasChain, SDNPOptInGlue]>;
47 def riscv_selectcc : SDNode<"RISCVISD::SELECT_CC", SDT_RISCVSelectCC,
49 def riscv_tail : SDNode<"RISCVISD::TAIL", SDT_RISCVCall,
50 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
// Custom SDNodes for the word-sized (32-bit) shifts, using the standard
// integer-shift type profile. NOTE(review): the sign-extended-result
// semantics are implied by the node names and the ALUW selection below —
// confirm against RISCVISelLowering where these nodes are created.
52 def riscv_sllw : SDNode<"RISCVISD::SLLW", SDTIntShiftOp>;
53 def riscv_sraw : SDNode<"RISCVISD::SRAW", SDTIntShiftOp>;
54 def riscv_srlw : SDNode<"RISCVISD::SRLW", SDTIntShiftOp>;
56 //===----------------------------------------------------------------------===//
57 // Operand and SDNode transformation definitions.
58 //===----------------------------------------------------------------------===//
60 class ImmXLenAsmOperand<string prefix, string suffix = ""> : AsmOperandClass {
61 let Name = prefix # "ImmXLen" # suffix;
62 let RenderMethod = "addImmOperands";
63 let DiagnosticType = !strconcat("Invalid", Name);
66 class ImmAsmOperand<string prefix, int width, string suffix> : AsmOperandClass {
67 let Name = prefix # "Imm" # width # suffix;
68 let RenderMethod = "addImmOperands";
69 let DiagnosticType = !strconcat("Invalid", Name);
72 def ImmZeroAsmOperand : AsmOperandClass {
74 let RenderMethod = "addImmOperands";
75 let DiagnosticType = !strconcat("Invalid", Name);
78 class SImmAsmOperand<int width, string suffix = "">
79 : ImmAsmOperand<"S", width, suffix> {
82 class UImmAsmOperand<int width, string suffix = "">
83 : ImmAsmOperand<"U", width, suffix> {
86 def FenceArg : AsmOperandClass {
87 let Name = "FenceArg";
88 let RenderMethod = "addFenceArgOperands";
89 let DiagnosticType = "InvalidFenceArg";
92 def fencearg : Operand<XLenVT> {
93 let ParserMatchClass = FenceArg;
94 let PrintMethod = "printFenceArg";
95 let DecoderMethod = "decodeUImmOperand<4>";
96 let OperandType = "OPERAND_UIMM4";
97 let OperandNamespace = "RISCVOp";
100 def UImmLog2XLenAsmOperand : AsmOperandClass {
101 let Name = "UImmLog2XLen";
102 let RenderMethod = "addImmOperands";
103 let DiagnosticType = "InvalidUImmLog2XLen";
106 def uimmlog2xlen : Operand<XLenVT>, ImmLeaf<XLenVT, [{
107 if (Subtarget->is64Bit())
108 return isUInt<6>(Imm);
109 return isUInt<5>(Imm);
111 let ParserMatchClass = UImmLog2XLenAsmOperand;
112 // TODO: should ensure invalid shamt is rejected when decoding.
113 let DecoderMethod = "decodeUImmOperand<6>";
114 let MCOperandPredicate = [{
116 if (!MCOp.evaluateAsConstantImm(Imm))
118 if (STI.getTargetTriple().isArch64Bit())
119 return isUInt<6>(Imm);
120 return isUInt<5>(Imm);
122 let OperandType = "OPERAND_UIMMLOG2XLEN";
123 let OperandNamespace = "RISCVOp";
126 def uimm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isUInt<5>(Imm);}]> {
127 let ParserMatchClass = UImmAsmOperand<5>;
128 let DecoderMethod = "decodeUImmOperand<5>";
129 let OperandType = "OPERAND_UIMM5";
130 let OperandNamespace = "RISCVOp";
133 def simm12 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<12>(Imm);}]> {
134 let ParserMatchClass = SImmAsmOperand<12>;
135 let EncoderMethod = "getImmOpValue";
136 let DecoderMethod = "decodeSImmOperand<12>";
137 let MCOperandPredicate = [{
139 if (MCOp.evaluateAsConstantImm(Imm))
140 return isInt<12>(Imm);
141 return MCOp.isBareSymbolRef();
143 let OperandType = "OPERAND_SIMM12";
144 let OperandNamespace = "RISCVOp";
147 // A 12-bit signed immediate plus one where the imm range will be -2047~2048.
148 def simm12_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
149 [{return (isInt<12>(Imm) && Imm != -2048) || Imm == 2048;}]> {
150 let ParserMatchClass = SImmAsmOperand<12>;
151 let EncoderMethod = "getImmOpValue";
152 let DecoderMethod = "decodeSImmOperand<12>";
153 let MCOperandPredicate = [{
155 if (MCOp.evaluateAsConstantImm(Imm))
156 return (isInt<12>(Imm) && Imm != -2048) || Imm == 2048;
157 return MCOp.isBareSymbolRef();
161 // A 13-bit signed immediate where the least significant bit is zero.
162 def simm13_lsb0 : Operand<OtherVT> {
163 let ParserMatchClass = SImmAsmOperand<13, "Lsb0">;
164 let EncoderMethod = "getImmOpValueAsr1";
165 let DecoderMethod = "decodeSImmOperandAndLsl1<13>";
166 let MCOperandPredicate = [{
168 if (MCOp.evaluateAsConstantImm(Imm))
169 return isShiftedInt<12, 1>(Imm);
170 return MCOp.isBareSymbolRef();
172 let OperandType = "OPERAND_SIMM13_LSB0";
173 let OperandNamespace = "RISCVOp";
176 class UImm20Operand : Operand<XLenVT> {
177 let EncoderMethod = "getImmOpValue";
178 let DecoderMethod = "decodeUImmOperand<20>";
179 let MCOperandPredicate = [{
181 if (MCOp.evaluateAsConstantImm(Imm))
182 return isUInt<20>(Imm);
183 return MCOp.isBareSymbolRef();
185 let OperandType = "OPERAND_UIMM20";
186 let OperandNamespace = "RISCVOp";
189 def uimm20_lui : UImm20Operand {
190 let ParserMatchClass = UImmAsmOperand<20, "LUI">;
192 def uimm20_auipc : UImm20Operand {
193 let ParserMatchClass = UImmAsmOperand<20, "AUIPC">;
196 def Simm21Lsb0JALAsmOperand : SImmAsmOperand<21, "Lsb0JAL"> {
197 let ParserMethod = "parseJALOffset";
200 // A 21-bit signed immediate where the least significant bit is zero.
201 def simm21_lsb0_jal : Operand<OtherVT> {
202 let ParserMatchClass = Simm21Lsb0JALAsmOperand;
203 let EncoderMethod = "getImmOpValueAsr1";
204 let DecoderMethod = "decodeSImmOperandAndLsl1<21>";
205 let MCOperandPredicate = [{
207 if (MCOp.evaluateAsConstantImm(Imm))
208 return isShiftedInt<20, 1>(Imm);
209 return MCOp.isBareSymbolRef();
211 let OperandType = "OPERAND_SIMM21_LSB0";
212 let OperandNamespace = "RISCVOp";
215 def BareSymbol : AsmOperandClass {
216 let Name = "BareSymbol";
217 let RenderMethod = "addImmOperands";
218 let DiagnosticType = "InvalidBareSymbol";
219 let ParserMethod = "parseBareSymbol";
223 def bare_symbol : Operand<XLenVT> {
224 let ParserMatchClass = BareSymbol;
227 def CallSymbol : AsmOperandClass {
228 let Name = "CallSymbol";
229 let RenderMethod = "addImmOperands";
230 let DiagnosticType = "InvalidCallSymbol";
231 let ParserMethod = "parseCallSymbol";
234 // A bare symbol used in call/tail only.
235 def call_symbol : Operand<XLenVT> {
236 let ParserMatchClass = CallSymbol;
239 def PseudoJumpSymbol : AsmOperandClass {
240 let Name = "PseudoJumpSymbol";
241 let RenderMethod = "addImmOperands";
242 let DiagnosticType = "InvalidPseudoJumpSymbol";
243 let ParserMethod = "parsePseudoJumpSymbol";
246 // A bare symbol used for pseudo jumps only.
247 def pseudo_jump_symbol : Operand<XLenVT> {
248 let ParserMatchClass = PseudoJumpSymbol;
251 def TPRelAddSymbol : AsmOperandClass {
252 let Name = "TPRelAddSymbol";
253 let RenderMethod = "addImmOperands";
254 let DiagnosticType = "InvalidTPRelAddSymbol";
255 let ParserMethod = "parseOperandWithModifier";
258 // A bare symbol with the %tprel_add variant.
259 def tprel_add_symbol : Operand<XLenVT> {
260 let ParserMatchClass = TPRelAddSymbol;
263 def CSRSystemRegister : AsmOperandClass {
264 let Name = "CSRSystemRegister";
265 let ParserMethod = "parseCSRSystemRegister";
266 let DiagnosticType = "InvalidCSRSystemRegister";
269 def csr_sysreg : Operand<XLenVT> {
270 let ParserMatchClass = CSRSystemRegister;
271 let PrintMethod = "printCSRSystemRegister";
272 let DecoderMethod = "decodeUImmOperand<12>";
273 let OperandType = "OPERAND_UIMM12";
274 let OperandNamespace = "RISCVOp";
277 // A parameterized register class alternative to i32imm/i64imm from Target.td.
278 def ixlenimm : Operand<XLenVT>;
280 def ixlenimm_li : Operand<XLenVT> {
281 let ParserMatchClass = ImmXLenAsmOperand<"", "LI">;
284 // Standalone (codegen-only) immleaf patterns.
285 def simm32 : ImmLeaf<XLenVT, [{return isInt<32>(Imm);}]>;
286 def simm32hi20 : ImmLeaf<XLenVT, [{return isShiftedInt<20, 12>(Imm);}]>;
287 // A mask value that won't affect significant shift bits.
288 def immbottomxlenset : ImmLeaf<XLenVT, [{
289 if (Subtarget->is64Bit())
290 return countTrailingOnes<uint64_t>(Imm) >= 6;
291 return countTrailingOnes<uint64_t>(Imm) >= 5;
295 // Necessary because a frameindex can't be matched directly in a pattern.
296 def AddrFI : ComplexPattern<iPTR, 1, "SelectAddrFI", [frameindex], []>;
298 // Extract least significant 12 bits from an immediate value and sign extend
300 def LO12Sext : SDNodeXForm<imm, [{
301 return CurDAG->getTargetConstant(SignExtend64<12>(N->getZExtValue()),
302 SDLoc(N), N->getValueType(0));
305 // Extract the most significant 20 bits from an immediate value. Add 1 if bit
306 // 11 is 1, to compensate for the low 12 bits in the matching immediate addi
307 // or ld/st being negative.
308 def HI20 : SDNodeXForm<imm, [{
309 return CurDAG->getTargetConstant(((N->getZExtValue()+0x800) >> 12) & 0xfffff,
310 SDLoc(N), N->getValueType(0));
313 // Return the negation of an immediate value.
314 def NegImm : SDNodeXForm<imm, [{
315 return CurDAG->getTargetConstant(-N->getSExtValue(), SDLoc(N),
319 //===----------------------------------------------------------------------===//
320 // Instruction Formats
321 //===----------------------------------------------------------------------===//
323 include "RISCVInstrFormats.td"
325 //===----------------------------------------------------------------------===//
326 // Instruction Class Templates
327 //===----------------------------------------------------------------------===//
329 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
330 class BranchCC_rri<bits<3> funct3, string opcodestr>
331 : RVInstB<funct3, OPC_BRANCH, (outs),
332 (ins GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12),
333 opcodestr, "$rs1, $rs2, $imm12">,
334 Sched<[WriteJmp, ReadJmp, ReadJmp]> {
336 let isTerminator = 1;
339 let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in
340 class Load_ri<bits<3> funct3, string opcodestr>
341 : RVInstI<funct3, OPC_LOAD, (outs GPR:$rd), (ins GPR:$rs1, simm12:$imm12),
342 opcodestr, "$rd, ${imm12}(${rs1})">;
344 // Operands for stores are in the order srcreg, base, offset rather than
345 // reflecting the order these fields are specified in the instruction encoding.
347 let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in
348 class Store_rri<bits<3> funct3, string opcodestr>
349 : RVInstS<funct3, OPC_STORE, (outs),
350 (ins GPR:$rs2, GPR:$rs1, simm12:$imm12),
351 opcodestr, "$rs2, ${imm12}(${rs1})">;
353 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
354 class ALU_ri<bits<3> funct3, string opcodestr>
355 : RVInstI<funct3, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1, simm12:$imm12),
356 opcodestr, "$rd, $rs1, $imm12">,
357 Sched<[WriteIALU, ReadIALU]>;
359 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
360 class Shift_ri<bit arithshift, bits<3> funct3, string opcodestr>
361 : RVInstIShift<arithshift, funct3, OPC_OP_IMM, (outs GPR:$rd),
362 (ins GPR:$rs1, uimmlog2xlen:$shamt), opcodestr,
363 "$rd, $rs1, $shamt">,
364 Sched<[WriteShift, ReadShift]>;
366 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
367 class ALU_rr<bits<7> funct7, bits<3> funct3, string opcodestr>
368 : RVInstR<funct7, funct3, OPC_OP, (outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
369 opcodestr, "$rd, $rs1, $rs2">;
371 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
372 class CSR_ir<bits<3> funct3, string opcodestr>
373 : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd), (ins csr_sysreg:$imm12, GPR:$rs1),
374 opcodestr, "$rd, $imm12, $rs1">, Sched<[WriteCSR, ReadCSR]>;
376 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
377 class CSR_ii<bits<3> funct3, string opcodestr>
378 : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd),
379 (ins csr_sysreg:$imm12, uimm5:$rs1),
380 opcodestr, "$rd, $imm12, $rs1">, Sched<[WriteCSR]>;
382 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
383 class ShiftW_ri<bit arithshift, bits<3> funct3, string opcodestr>
384 : RVInstIShiftW<arithshift, funct3, OPC_OP_IMM_32, (outs GPR:$rd),
385 (ins GPR:$rs1, uimm5:$shamt), opcodestr,
386 "$rd, $rs1, $shamt">,
387 Sched<[WriteShift32, ReadShift32]>;
389 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
390 class ALUW_rr<bits<7> funct7, bits<3> funct3, string opcodestr>
391 : RVInstR<funct7, funct3, OPC_OP_32, (outs GPR:$rd),
392 (ins GPR:$rs1, GPR:$rs2), opcodestr, "$rd, $rs1, $rs2">;
394 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
395 class Priv<string opcodestr, bits<7> funct7>
396 : RVInstR<funct7, 0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1, GPR:$rs2),
399 //===----------------------------------------------------------------------===//
401 //===----------------------------------------------------------------------===//
403 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
404 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
405 def LUI : RVInstU<OPC_LUI, (outs GPR:$rd), (ins uimm20_lui:$imm20),
406 "lui", "$rd, $imm20">, Sched<[WriteIALU]>;
408 def AUIPC : RVInstU<OPC_AUIPC, (outs GPR:$rd), (ins uimm20_auipc:$imm20),
409 "auipc", "$rd, $imm20">, Sched<[WriteIALU]>;
412 def JAL : RVInstJ<OPC_JAL, (outs GPR:$rd), (ins simm21_lsb0_jal:$imm20),
413 "jal", "$rd, $imm20">, Sched<[WriteJal]>;
416 def JALR : RVInstI<0b000, OPC_JALR, (outs GPR:$rd),
417 (ins GPR:$rs1, simm12:$imm12),
418 "jalr", "$rd, ${imm12}(${rs1})">,
419 Sched<[WriteJalr, ReadJalr]>;
420 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
// Conditional branches (B-type), one per funct3 comparison: equality
// (beq/bne), signed (blt/bge) and unsigned (bltu/bgeu) ordering.
422 def BEQ : BranchCC_rri<0b000, "beq">;
423 def BNE : BranchCC_rri<0b001, "bne">;
424 def BLT : BranchCC_rri<0b100, "blt">;
425 def BGE : BranchCC_rri<0b101, "bge">;
426 def BLTU : BranchCC_rri<0b110, "bltu">;
427 def BGEU : BranchCC_rri<0b111, "bgeu">;
// Loads (I-type): rd <- mem[rs1 + simm12]. The "U" variants differ only in
// funct3; per the ISA they zero-extend rather than sign-extend the result.
429 def LB : Load_ri<0b000, "lb">, Sched<[WriteLDB, ReadMemBase]>;
430 def LH : Load_ri<0b001, "lh">, Sched<[WriteLDH, ReadMemBase]>;
431 def LW : Load_ri<0b010, "lw">, Sched<[WriteLDW, ReadMemBase]>;
432 def LBU : Load_ri<0b100, "lbu">, Sched<[WriteLDB, ReadMemBase]>;
433 def LHU : Load_ri<0b101, "lhu">, Sched<[WriteLDH, ReadMemBase]>;
// Stores (S-type): mem[rs1 + simm12] <- rs2. Operand order is src, base,
// offset (see the Store_rri comment above).
435 def SB : Store_rri<0b000, "sb">, Sched<[WriteSTB, ReadStoreData, ReadMemBase]>;
436 def SH : Store_rri<0b001, "sh">, Sched<[WriteSTH, ReadStoreData, ReadMemBase]>;
437 def SW : Store_rri<0b010, "sw">, Sched<[WriteSTW, ReadStoreData, ReadMemBase]>;
439 // ADDI isn't always rematerializable, but isReMaterializable will be used as
440 // a hint which is verified in isReallyTriviallyReMaterializable.
441 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
442 def ADDI : ALU_ri<0b000, "addi">;
444 def SLTI : ALU_ri<0b010, "slti">;
445 def SLTIU : ALU_ri<0b011, "sltiu">;
447 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
448 def XORI : ALU_ri<0b100, "xori">;
449 def ORI : ALU_ri<0b110, "ori">;
452 def ANDI : ALU_ri<0b111, "andi">;
454 def SLLI : Shift_ri<0, 0b001, "slli">;
455 def SRLI : Shift_ri<0, 0b101, "srli">;
456 def SRAI : Shift_ri<1, 0b101, "srai">;
// Register-register ALU operations (R-type, OPC_OP). SUB and SRA are
// distinguished from ADD and SRL only by funct7 bit 5 (0b0100000).
458 def ADD : ALU_rr<0b0000000, 0b000, "add">, Sched<[WriteIALU, ReadIALU, ReadIALU]>;
459 def SUB : ALU_rr<0b0100000, 0b000, "sub">, Sched<[WriteIALU, ReadIALU, ReadIALU]>;
460 def SLL : ALU_rr<0b0000000, 0b001, "sll">, Sched<[WriteIALU, ReadIALU, ReadIALU]>;
461 def SLT : ALU_rr<0b0000000, 0b010, "slt">, Sched<[WriteIALU, ReadIALU, ReadIALU]>;
462 def SLTU : ALU_rr<0b0000000, 0b011, "sltu">, Sched<[WriteIALU, ReadIALU, ReadIALU]>;
463 def XOR : ALU_rr<0b0000000, 0b100, "xor">, Sched<[WriteIALU, ReadIALU, ReadIALU]>;
464 def SRL : ALU_rr<0b0000000, 0b101, "srl">, Sched<[WriteIALU, ReadIALU, ReadIALU]>;
465 def SRA : ALU_rr<0b0100000, 0b101, "sra">, Sched<[WriteIALU, ReadIALU, ReadIALU]>;
466 def OR : ALU_rr<0b0000000, 0b110, "or">, Sched<[WriteIALU, ReadIALU, ReadIALU]>;
467 def AND : ALU_rr<0b0000000, 0b111, "and">, Sched<[WriteIALU, ReadIALU, ReadIALU]>;
469 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
470 def FENCE : RVInstI<0b000, OPC_MISC_MEM, (outs),
471 (ins fencearg:$pred, fencearg:$succ),
472 "fence", "$pred, $succ">, Sched<[]> {
478 let imm12 = {0b0000,pred,succ};
481 def FENCE_TSO : RVInstI<0b000, OPC_MISC_MEM, (outs), (ins), "fence.tso", "">, Sched<[]> {
484 let imm12 = {0b1000,0b0011,0b0011};
487 def FENCE_I : RVInstI<0b001, OPC_MISC_MEM, (outs), (ins), "fence.i", "">, Sched<[]> {
493 def ECALL : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ecall", "">, Sched<[WriteJmp]> {
499 def EBREAK : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ebreak", "">,
506 // This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
507 // instruction (i.e., it should always trap, if your implementation has invalid
508 // instruction traps).
509 def UNIMP : RVInstI<0b001, OPC_SYSTEM, (outs), (ins), "unimp", "">,
513 let imm12 = 0b110000000000;
515 } // hasSideEffects = 1, mayLoad = 0, mayStore = 0
// CSR access, register-operand forms (rs1 supplies the write/set/clear value).
517 def CSRRW : CSR_ir<0b001, "csrrw">;
518 def CSRRS : CSR_ir<0b010, "csrrs">;
519 def CSRRC : CSR_ir<0b011, "csrrc">;
// CSR access, immediate forms: the rs1 field instead encodes a uimm5 value
// (see the CSR_ii class above).
521 def CSRRWI : CSR_ii<0b101, "csrrwi">;
522 def CSRRSI : CSR_ii<0b110, "csrrsi">;
523 def CSRRCI : CSR_ii<0b111, "csrrci">;
525 /// RV64I instructions
527 let Predicates = [IsRV64] in {
// 64-bit load/store and the 32-bit zero-extending load.
528 def LWU : Load_ri<0b110, "lwu">, Sched<[WriteLDWU, ReadMemBase]>;
529 def LD : Load_ri<0b011, "ld">, Sched<[WriteLDD, ReadMemBase]>;
530 def SD : Store_rri<0b011, "sd">, Sched<[WriteSTD, ReadStoreData, ReadMemBase]>;
// addiw: add-immediate on the low 32 bits (OPC_OP_IMM_32). Defined directly
// rather than via ALU_ri because it uses a different major opcode.
532 let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
533 def ADDIW : RVInstI<0b000, OPC_OP_IMM_32, (outs GPR:$rd),
534 (ins GPR:$rs1, simm12:$imm12),
535 "addiw", "$rd, $rs1, $imm12">,
536 Sched<[WriteIALU32, ReadIALU32]>;
// Word-sized shift-immediate instructions (uimm5 shamt, see ShiftW_ri).
538 def SLLIW : ShiftW_ri<0, 0b001, "slliw">;
539 def SRLIW : ShiftW_ri<0, 0b101, "srliw">;
540 def SRAIW : ShiftW_ri<1, 0b101, "sraiw">;
// Word-sized register-register ALU operations (OPC_OP_32).
542 def ADDW : ALUW_rr<0b0000000, 0b000, "addw">,
543 Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
544 def SUBW : ALUW_rr<0b0100000, 0b000, "subw">,
545 Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
546 def SLLW : ALUW_rr<0b0000000, 0b001, "sllw">,
547 Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
548 def SRLW : ALUW_rr<0b0000000, 0b101, "srlw">,
549 Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
550 def SRAW : ALUW_rr<0b0100000, 0b101, "sraw">,
551 Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
552 } // Predicates = [IsRV64]
554 //===----------------------------------------------------------------------===//
555 // Privileged instructions
556 //===----------------------------------------------------------------------===//
558 let isBarrier = 1, isReturn = 1, isTerminator = 1 in {
559 def URET : Priv<"uret", 0b0000000>, Sched<[]> {
565 def SRET : Priv<"sret", 0b0001000>, Sched<[]> {
571 def MRET : Priv<"mret", 0b0011000>, Sched<[]> {
576 } // isBarrier = 1, isReturn = 1, isTerminator = 1
578 def WFI : Priv<"wfi", 0b0001000>, Sched<[]> {
584 let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
585 def SFENCE_VMA : RVInstR<0b0001001, 0b000, OPC_SYSTEM, (outs),
586 (ins GPR:$rs1, GPR:$rs2),
587 "sfence.vma", "$rs1, $rs2">, Sched<[]> {
591 //===----------------------------------------------------------------------===//
592 // Debug instructions
593 //===----------------------------------------------------------------------===//
595 let isBarrier = 1, isReturn = 1, isTerminator = 1 in {
596 def DRET : Priv<"dret", 0b0111101>, Sched<[]> {
601 } // isBarrier = 1, isReturn = 1, isTerminator = 1
603 //===----------------------------------------------------------------------===//
604 // Assembler Pseudo Instructions (User-Level ISA, Version 2.2, Chapter 20)
605 //===----------------------------------------------------------------------===//
607 def : InstAlias<"nop", (ADDI X0, X0, 0)>;
609 // Note that the size is 32 because up to 8 32-bit instructions are needed to
610 // generate an arbitrary 64-bit immediate. However, the size does not really
611 // matter since PseudoLI is currently only used in the AsmParser where it gets
612 // expanded to real instructions immediately.
613 let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32,
614 isCodeGenOnly = 0, isAsmParserOnly = 1 in
615 def PseudoLI : Pseudo<(outs GPR:$rd), (ins ixlenimm_li:$imm), [],
// Assembler pseudo loads/stores taking a symbol operand. NOTE(review): the
// PseudoLoad/PseudoStore classes are defined elsewhere in this file;
// presumably these expand to an auipc + load/store pair — confirm there.
618 def PseudoLB : PseudoLoad<"lb">;
619 def PseudoLBU : PseudoLoad<"lbu">;
620 def PseudoLH : PseudoLoad<"lh">;
621 def PseudoLHU : PseudoLoad<"lhu">;
622 def PseudoLW : PseudoLoad<"lw">;
624 def PseudoSB : PseudoStore<"sb">;
625 def PseudoSH : PseudoStore<"sh">;
626 def PseudoSW : PseudoStore<"sw">;
// RV64-only pseudo loads/stores for the wider access widths.
628 let Predicates = [IsRV64] in {
629 def PseudoLWU : PseudoLoad<"lwu">;
630 def PseudoLD : PseudoLoad<"ld">;
631 def PseudoSD : PseudoStore<"sd">;
632 } // Predicates = [IsRV64]
634 def : InstAlias<"mv $rd, $rs", (ADDI GPR:$rd, GPR:$rs, 0)>;
635 def : InstAlias<"not $rd, $rs", (XORI GPR:$rd, GPR:$rs, -1)>;
636 def : InstAlias<"neg $rd, $rs", (SUB GPR:$rd, X0, GPR:$rs)>;
638 let Predicates = [IsRV64] in {
639 def : InstAlias<"negw $rd, $rs", (SUBW GPR:$rd, X0, GPR:$rs)>;
640 def : InstAlias<"sext.w $rd, $rs", (ADDIW GPR:$rd, GPR:$rs, 0)>;
641 } // Predicates = [IsRV64]
643 def : InstAlias<"seqz $rd, $rs", (SLTIU GPR:$rd, GPR:$rs, 1)>;
644 def : InstAlias<"snez $rd, $rs", (SLTU GPR:$rd, X0, GPR:$rs)>;
645 def : InstAlias<"sltz $rd, $rs", (SLT GPR:$rd, GPR:$rs, X0)>;
646 def : InstAlias<"sgtz $rd, $rs", (SLT GPR:$rd, X0, GPR:$rs)>;
648 // sgt/sgtu are recognised by the GNU assembler but the canonical slt/sltu
649 // form will always be printed. Therefore, set a zero weight.
650 def : InstAlias<"sgt $rd, $rs, $rt", (SLT GPR:$rd, GPR:$rt, GPR:$rs), 0>;
651 def : InstAlias<"sgtu $rd, $rs, $rt", (SLTU GPR:$rd, GPR:$rt, GPR:$rs), 0>;
653 def : InstAlias<"beqz $rs, $offset",
654 (BEQ GPR:$rs, X0, simm13_lsb0:$offset)>;
655 def : InstAlias<"bnez $rs, $offset",
656 (BNE GPR:$rs, X0, simm13_lsb0:$offset)>;
657 def : InstAlias<"blez $rs, $offset",
658 (BGE X0, GPR:$rs, simm13_lsb0:$offset)>;
659 def : InstAlias<"bgez $rs, $offset",
660 (BGE GPR:$rs, X0, simm13_lsb0:$offset)>;
661 def : InstAlias<"bltz $rs, $offset",
662 (BLT GPR:$rs, X0, simm13_lsb0:$offset)>;
663 def : InstAlias<"bgtz $rs, $offset",
664 (BLT X0, GPR:$rs, simm13_lsb0:$offset)>;
666 // Always output the canonical mnemonic for the pseudo branch instructions.
667 // The GNU tools emit the canonical mnemonic for the branch pseudo instructions
668 // as well (e.g. "bgt" will be recognised by the assembler but never printed by
669 // objdump). Match this behaviour by setting a zero weight.
670 def : InstAlias<"bgt $rs, $rt, $offset",
671 (BLT GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
672 def : InstAlias<"ble $rs, $rt, $offset",
673 (BGE GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
674 def : InstAlias<"bgtu $rs, $rt, $offset",
675 (BLTU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
676 def : InstAlias<"bleu $rs, $rt, $offset",
677 (BGEU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>;
679 def : InstAlias<"j $offset", (JAL X0, simm21_lsb0_jal:$offset)>;
680 def : InstAlias<"jal $offset", (JAL X1, simm21_lsb0_jal:$offset)>;
682 // Non-zero offset aliases of "jalr" are the lowest weight, followed by the
683 // two-register form, then the one-register forms and finally "ret".
684 def : InstAlias<"jr $rs", (JALR X0, GPR:$rs, 0), 3>;
685 def : InstAlias<"jr ${offset}(${rs})", (JALR X0, GPR:$rs, simm12:$offset)>;
686 def : InstAlias<"jalr $rs", (JALR X1, GPR:$rs, 0), 3>;
687 def : InstAlias<"jalr ${offset}(${rs})", (JALR X1, GPR:$rs, simm12:$offset)>;
688 def : InstAlias<"jalr $rd, $rs", (JALR GPR:$rd, GPR:$rs, 0), 2>;
689 def : InstAlias<"ret", (JALR X0, X1, 0), 4>;
691 // Non-canonical forms for jump targets also accepted by the assembler.
692 def : InstAlias<"jr $rs, $offset", (JALR X0, GPR:$rs, simm12:$offset), 0>;
693 def : InstAlias<"jalr $rs, $offset", (JALR X1, GPR:$rs, simm12:$offset), 0>;
694 def : InstAlias<"jalr $rd, $rs, $offset", (JALR GPR:$rd, GPR:$rs, simm12:$offset), 0>;
696 def : InstAlias<"fence", (FENCE 0xF, 0xF)>; // 0xF == iorw
698 def : InstAlias<"rdinstret $rd", (CSRRS GPR:$rd, INSTRET.Encoding, X0)>;
699 def : InstAlias<"rdcycle $rd", (CSRRS GPR:$rd, CYCLE.Encoding, X0)>;
700 def : InstAlias<"rdtime $rd", (CSRRS GPR:$rd, TIME.Encoding, X0)>;
702 let Predicates = [IsRV32] in {
703 def : InstAlias<"rdinstreth $rd", (CSRRS GPR:$rd, INSTRETH.Encoding, X0)>;
704 def : InstAlias<"rdcycleh $rd", (CSRRS GPR:$rd, CYCLEH.Encoding, X0)>;
705 def : InstAlias<"rdtimeh $rd", (CSRRS GPR:$rd, TIMEH.Encoding, X0)>;
706 } // Predicates = [IsRV32]
708 def : InstAlias<"csrr $rd, $csr", (CSRRS GPR:$rd, csr_sysreg:$csr, X0)>;
709 def : InstAlias<"csrw $csr, $rs", (CSRRW X0, csr_sysreg:$csr, GPR:$rs)>;
710 def : InstAlias<"csrs $csr, $rs", (CSRRS X0, csr_sysreg:$csr, GPR:$rs)>;
711 def : InstAlias<"csrc $csr, $rs", (CSRRC X0, csr_sysreg:$csr, GPR:$rs)>;
713 def : InstAlias<"csrwi $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>;
714 def : InstAlias<"csrsi $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>;
715 def : InstAlias<"csrci $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>;
717 let EmitPriority = 0 in {
718 def : InstAlias<"csrw $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>;
719 def : InstAlias<"csrs $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>;
720 def : InstAlias<"csrc $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>;
722 def : InstAlias<"csrrw $rd, $csr, $imm", (CSRRWI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
723 def : InstAlias<"csrrs $rd, $csr, $imm", (CSRRSI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
724 def : InstAlias<"csrrc $rd, $csr, $imm", (CSRRCI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>;
727 def : InstAlias<"sfence.vma", (SFENCE_VMA X0, X0)>;
728 def : InstAlias<"sfence.vma $rs", (SFENCE_VMA GPR:$rs, X0)>;
730 let EmitPriority = 0 in {
731 def : InstAlias<"lb $rd, (${rs1})",
732 (LB GPR:$rd, GPR:$rs1, 0)>;
733 def : InstAlias<"lh $rd, (${rs1})",
734 (LH GPR:$rd, GPR:$rs1, 0)>;
735 def : InstAlias<"lw $rd, (${rs1})",
736 (LW GPR:$rd, GPR:$rs1, 0)>;
737 def : InstAlias<"lbu $rd, (${rs1})",
738 (LBU GPR:$rd, GPR:$rs1, 0)>;
739 def : InstAlias<"lhu $rd, (${rs1})",
740 (LHU GPR:$rd, GPR:$rs1, 0)>;
742 def : InstAlias<"sb $rs2, (${rs1})",
743 (SB GPR:$rs2, GPR:$rs1, 0)>;
744 def : InstAlias<"sh $rs2, (${rs1})",
745 (SH GPR:$rs2, GPR:$rs1, 0)>;
746 def : InstAlias<"sw $rs2, (${rs1})",
747 (SW GPR:$rs2, GPR:$rs1, 0)>;
749 def : InstAlias<"add $rd, $rs1, $imm12",
750 (ADDI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
751 def : InstAlias<"and $rd, $rs1, $imm12",
752 (ANDI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
753 def : InstAlias<"xor $rd, $rs1, $imm12",
754 (XORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
755 def : InstAlias<"or $rd, $rs1, $imm12",
756 (ORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
757 def : InstAlias<"sll $rd, $rs1, $shamt",
758 (SLLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
759 def : InstAlias<"srl $rd, $rs1, $shamt",
760 (SRLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
761 def : InstAlias<"sra $rd, $rs1, $shamt",
762 (SRAI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
763 let Predicates = [IsRV64] in {
764 def : InstAlias<"lwu $rd, (${rs1})",
765 (LWU GPR:$rd, GPR:$rs1, 0)>;
766 def : InstAlias<"ld $rd, (${rs1})",
767 (LD GPR:$rd, GPR:$rs1, 0)>;
768 def : InstAlias<"sd $rs2, (${rs1})",
769 (SD GPR:$rs2, GPR:$rs1, 0)>;
771 def : InstAlias<"addw $rd, $rs1, $imm12",
772 (ADDIW GPR:$rd, GPR:$rs1, simm12:$imm12)>;
773 def : InstAlias<"sllw $rd, $rs1, $shamt",
774 (SLLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
775 def : InstAlias<"srlw $rd, $rs1, $shamt",
776 (SRLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
777 def : InstAlias<"sraw $rd, $rs1, $shamt",
778 (SRAIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
779 } // Predicates = [IsRV64]
780 def : InstAlias<"slt $rd, $rs1, $imm12",
781 (SLTI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
782 def : InstAlias<"sltu $rd, $rs1, $imm12",
783 (SLTIU GPR:$rd, GPR:$rs1, simm12:$imm12)>;
786 def : MnemonicAlias<"move", "mv">;
788 // The SCALL and SBREAK instructions were renamed to ECALL and EBREAK in
789 // version 2.1 of the user-level ISA. Like the GNU toolchain, we still accept
790 // the old name for backwards compatibility.
791 def : MnemonicAlias<"scall", "ecall">;
792 def : MnemonicAlias<"sbreak", "ebreak">;
794 //===----------------------------------------------------------------------===//
795 // Pseudo-instructions and codegen patterns
797 // Naming convention: For 'generic' pattern classes, we use the naming
798 // convention PatTy1Ty2. For pattern classes which offer a more complex
799 // expansion, prefix the class name, e.g. BccPat.
800 //===----------------------------------------------------------------------===//
802 /// Generic pattern classes
// Select a two-register node to a two-register instruction.
804 class PatGprGpr<SDPatternOperator OpNode, RVInst Inst>
805 : Pat<(OpNode GPR:$rs1, GPR:$rs2), (Inst GPR:$rs1, GPR:$rs2)>;
// Select a register + 12-bit signed immediate node to an I-type instruction.
806 class PatGprSimm12<SDPatternOperator OpNode, RVInstI Inst>
807 : Pat<(OpNode GPR:$rs1, simm12:$imm12), (Inst GPR:$rs1, simm12:$imm12)>;
// Select a register + shift-amount node to a shift-immediate instruction;
// the shamt width is 5 or 6 bits depending on XLEN (see uimmlog2xlen).
808 class PatGprUimmLog2XLen<SDPatternOperator OpNode, RVInstIShift Inst>
809 : Pat<(OpNode GPR:$rs1, uimmlog2xlen:$shamt),
810 (Inst GPR:$rs1, uimmlog2xlen:$shamt)>;
814 def IsOrAdd: PatFrag<(ops node:$A, node:$B), (or node:$A, node:$B), [{
815 return isOrEquivalentToAdd(N);
817 def assertsexti32 : PatFrag<(ops node:$src), (assertsext node:$src), [{
818 return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32;
// Matches a value known to be sign-extended from i32: either an explicit
// sext_inreg to i32, or an AssertSext i32 (via assertsexti32 above).
820 def sexti32 : PatFrags<(ops node:$src),
821 [(sext_inreg node:$src, i32),
822 (assertsexti32 node:$src)]>;
823 def assertzexti32 : PatFrag<(ops node:$src), (assertzext node:$src), [{
824 return cast<VTSDNode>(N->getOperand(1))->getVT() == MVT::i32;
// Matches a value known to be zero-extended from i32: either an explicit
// mask with 0xffffffff, or an AssertZext i32 (via assertzexti32 above).
826 def zexti32 : PatFrags<(ops node:$src),
827 [(and node:$src, 0xffffffff),
828 (assertzexti32 node:$src)]>;
832 def : Pat<(simm12:$imm), (ADDI X0, simm12:$imm)>;
833 def : Pat<(simm32hi20:$imm), (LUI (HI20 imm:$imm))>;
834 def : Pat<(simm32:$imm), (ADDI (LUI (HI20 imm:$imm)), (LO12Sext imm:$imm))>,
837 /// Simple arithmetic operations
// Register-register and register-immediate selection for the base ALU
// operations. There is no subtract-immediate pattern here; only register
// subtraction is matched directly.
839 def : PatGprGpr<add, ADD>;
840 def : PatGprSimm12<add, ADDI>;
841 def : PatGprGpr<sub, SUB>;
842 def : PatGprGpr<or, OR>;
843 def : PatGprSimm12<or, ORI>;
844 def : PatGprGpr<and, AND>;
845 def : PatGprSimm12<and, ANDI>;
846 def : PatGprGpr<xor, XOR>;
847 def : PatGprSimm12<xor, XORI>;
// Shift-by-immediate, with an XLEN-dependent shamt width (uimmlog2xlen).
848 def : PatGprUimmLog2XLen<shl, SLLI>;
849 def : PatGprUimmLog2XLen<srl, SRLI>;
850 def : PatGprUimmLog2XLen<sra, SRAI>;
852 // Match both a plain shift and one where the shift amount is masked (this is
853 // typically introduced when the legalizer promotes the shift amount and
854 // zero-extends it). For RISC-V, the mask is unnecessary as shifts in the base
855 // ISA only read the least significant 5 bits (RV32I) or 6 bits (RV64I).
856 class shiftop<SDPatternOperator operator>
857 : PatFrags<(ops node:$val, node:$count),
858 [(operator node:$val, node:$count),
// The masked form: the (and count, mask) is safe to drop because
// immbottomxlenset (above) only matches masks whose low log2(XLEN) bits
// are all set, which the shift instruction implicitly honors anyway.
859 (operator node:$val, (and node:$count, immbottomxlenset))]>;
861 def : PatGprGpr<shiftop<shl>, SLL>;
862 def : PatGprGpr<shiftop<srl>, SRL>;
863 def : PatGprGpr<shiftop<sra>, SRA>;
// This is a special case of the ADD instruction used to facilitate the use of a
// fourth operand to emit a relocation on a symbol relating to this instruction.
// The relocation does not affect any bits of the instruction itself but is used
// as a hint to the linker.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0 in
def PseudoAddTPRel : Pseudo<(outs GPR:$rd),
                            (ins GPR:$rs1, GPR:$rs2, tprel_add_symbol:$src), [],
                            "add", "$rd, $rs1, $rs2, $src">;

/// FrameIndex calculations

def : Pat<(add (i32 AddrFI:$Rs), simm12:$imm12),
          (ADDI (i32 AddrFI:$Rs), simm12:$imm12)>;
// An 'or' of a FrameIndex base and an offset is an add when the bits are
// disjoint (see IsOrAdd).
def : Pat<(IsOrAdd (i32 AddrFI:$Rs), simm12:$imm12),
          (ADDI (i32 AddrFI:$Rs), simm12:$imm12)>;

/// Setcc

// setlt/setult map directly onto the set-less-than instructions.
def : PatGprGpr<setlt, SLT>;
def : PatGprSimm12<setlt, SLTI>;
def : PatGprGpr<setult, SLTU>;
def : PatGprSimm12<setult, SLTIU>;

// Define pattern expansions for setcc operations that aren't directly
// handled by a RISC-V instruction.
// seteq X, 0  ->  X <u 1
def : Pat<(seteq GPR:$rs1, 0), (SLTIU GPR:$rs1, 1)>;
def : Pat<(seteq GPR:$rs1, GPR:$rs2), (SLTIU (XOR GPR:$rs1, GPR:$rs2), 1)>;
// seteq against a constant C: add -C and compare the result against zero.
// simm12_plus1 ensures -C still fits in a 12-bit signed immediate.
def : Pat<(seteq GPR:$rs1, simm12_plus1:$imm12),
          (SLTIU (ADDI GPR:$rs1, (NegImm simm12_plus1:$imm12)), 1)>;
// setne X, 0  ->  0 <u X
def : Pat<(setne GPR:$rs1, 0), (SLTU X0, GPR:$rs1)>;
def : Pat<(setne GPR:$rs1, GPR:$rs2), (SLTU X0, (XOR GPR:$rs1, GPR:$rs2))>;
def : Pat<(setne GPR:$rs1, simm12_plus1:$imm12),
          (SLTU X0, (ADDI GPR:$rs1, (NegImm simm12_plus1:$imm12)))>;
// The remaining comparisons are derived by swapping operands and/or
// inverting the result of SLT/SLTU with XORI 1.
def : Pat<(setugt GPR:$rs1, GPR:$rs2), (SLTU GPR:$rs2, GPR:$rs1)>;
def : Pat<(setuge GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs1, GPR:$rs2), 1)>;
def : Pat<(setule GPR:$rs1, GPR:$rs2), (XORI (SLTU GPR:$rs2, GPR:$rs1), 1)>;
def : Pat<(setgt GPR:$rs1, GPR:$rs2), (SLT GPR:$rs2, GPR:$rs1)>;
def : Pat<(setge GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs1, GPR:$rs2), 1)>;
def : Pat<(setle GPR:$rs1, GPR:$rs2), (XORI (SLT GPR:$rs2, GPR:$rs1), 1)>;
// Conditional select pseudo. The base ISA has no conditional move, so this is
// expanded by the custom inserter into a compare-and-branch diamond.
let usesCustomInserter = 1 in
class SelectCC_rrirr<RegisterClass valty, RegisterClass cmpty>
    : Pseudo<(outs valty:$dst),
             (ins cmpty:$lhs, cmpty:$rhs, ixlenimm:$imm,
              valty:$truev, valty:$falsev),
             [(set valty:$dst, (riscv_selectcc cmpty:$lhs, cmpty:$rhs,
              (XLenVT imm:$imm), valty:$truev, valty:$falsev))]>;

def Select_GPR_Using_CC_GPR : SelectCC_rrirr<GPR, GPR>;

/// Branches and jumps

// Match `(brcond (CondOp ..), ..)` and lower to the appropriate RISC-V branch
// instruction.
class BccPat<PatFrag CondOp, RVInstB Inst>
    : Pat<(brcond (XLenVT (CondOp GPR:$rs1, GPR:$rs2)), bb:$imm12),
          (Inst GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12)>;

def : BccPat<seteq, BEQ>;
def : BccPat<setne, BNE>;
def : BccPat<setlt, BLT>;
def : BccPat<setge, BGE>;
def : BccPat<setult, BLTU>;
def : BccPat<setuge, BGEU>;

// As BccPat, but the branch is emitted with its two source operands swapped.
class BccSwapPat<PatFrag CondOp, RVInst InstBcc>
    : Pat<(brcond (XLenVT (CondOp GPR:$rs1, GPR:$rs2)), bb:$imm12),
          (InstBcc GPR:$rs2, GPR:$rs1, bb:$imm12)>;

// Condition codes that don't have matching RISC-V branch instructions, but
// are trivially supported by swapping the two input operands
def : BccSwapPat<setgt, BLT>;
def : BccSwapPat<setle, BGE>;
def : BccSwapPat<setugt, BLTU>;
def : BccSwapPat<setule, BGEU>;

// An extra pattern is needed for a brcond without a setcc (i.e. where the
// condition was calculated elsewhere).
def : Pat<(brcond GPR:$cond, bb:$imm12), (BNE GPR:$cond, X0, bb:$imm12)>;
// Unconditional direct branch: expands to `jal x0, offset` (no link written).
let isBarrier = 1, isBranch = 1, isTerminator = 1 in
def PseudoBR : Pseudo<(outs), (ins simm21_lsb0_jal:$imm20), [(br bb:$imm20)]>,
               PseudoInstExpansion<(JAL X0, simm21_lsb0_jal:$imm20)>;
// Indirect branch: expands to `jalr x0, $rs1, $imm12`. With rd = X0 no link
// register is written, so this is a plain indirect branch — it must not be
// marked as a call and clobbers no registers (the stray
// `isCall = 1, Defs=[X1]` wrapper was wrong and has been removed).
let isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in
def PseudoBRIND : Pseudo<(outs), (ins GPR:$rs1, simm12:$imm12), []>,
                  PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>;
def : Pat<(brind GPR:$rs1), (PseudoBRIND GPR:$rs1, 0)>;
// Fold an add of a base register and a small constant into the jalr offset.
def : Pat<(brind (add GPR:$rs1, simm12:$imm12)),
          (PseudoBRIND GPR:$rs1, simm12:$imm12)>;
// PseudoCALLReg is a generic pseudo instruction for calls which will eventually
// expand to auipc and jalr while encoding, with any given register used as the
// destination register.
// Define AsmString to print "call" when compile with -S flag.
// Define isCodeGenOnly = 0 to support parsing assembly "call" instruction.
// Generic call pseudo with an explicit destination (link) register; expands
// to auipc+jalr. See the comment above for the AsmString/isCodeGenOnly
// rationale.
let isCall = 1, isBarrier = 1, isCodeGenOnly = 0, hasSideEffects = 0,
    mayStore = 0, mayLoad = 0 in
def PseudoCALLReg : Pseudo<(outs GPR:$rd), (ins call_symbol:$func), []> {
  let AsmString = "call\t$rd, $func";
}
969 // PseudoCALL is a pseudo instruction which will eventually expand to auipc
970 // and jalr while encoding. This is desirable, as an auipc+jalr pair with
971 // R_RISCV_CALL and R_RISCV_RELAX relocations can be be relaxed by the linker
972 // if the offset fits in a signed 21-bit immediate.
973 // Define AsmString to print "call" when compile with -S flag.
974 // Define isCodeGenOnly = 0 to support parsing assembly "call" instruction.
// Standard call pseudo: expands to auipc+jalr, linking through X1.
let isCall = 1, Defs = [X1], isCodeGenOnly = 0 in
def PseudoCALL : Pseudo<(outs), (ins call_symbol:$func), []> {
  let AsmString = "call\t$func";
}
def : Pat<(riscv_call tglobaladdr:$func), (PseudoCALL tglobaladdr:$func)>;
def : Pat<(riscv_call texternalsym:$func), (PseudoCALL texternalsym:$func)>;

// Privileged-mode returns lower directly to the corresponding xRET.
def : Pat<(riscv_uret_flag), (URET X0, X0)>;
def : Pat<(riscv_sret_flag), (SRET X0, X0)>;
def : Pat<(riscv_mret_flag), (MRET X0, X0)>;

// Indirect call: `jalr x1, $rs1, 0`; writes the return address to X1.
let isCall = 1, Defs = [X1] in
def PseudoCALLIndirect : Pseudo<(outs), (ins GPR:$rs1),
                                [(riscv_call GPR:$rs1)]>,
                         PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>;

// Function return: `jalr x0, x1, 0`.
let isBarrier = 1, isReturn = 1, isTerminator = 1 in
def PseudoRET : Pseudo<(outs), (ins), [(riscv_ret_flag)]>,
                PseudoInstExpansion<(JALR X0, X1, 0)>;
996 // PseudoTAIL is a pseudo instruction similar to PseudoCALL and will eventually
997 // expand to auipc and jalr while encoding.
998 // Define AsmString to print "tail" when compile with -S flag.
// Tail-call pseudo: expands to auipc+jalr (no link). Uses = [X2] because the
// stack pointer must remain valid across the transfer.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2],
    isCodeGenOnly = 0 in
def PseudoTAIL : Pseudo<(outs), (ins call_symbol:$dst), []> {
  let AsmString = "tail\t$dst";
}
// Indirect tail call: `jalr x0, $rs1, 0`. The callee address is constrained
// to the GPRTC register class (see its definition for which registers are
// permitted to hold it).
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2] in
def PseudoTAILIndirect : Pseudo<(outs), (ins GPRTC:$rs1),
                                [(riscv_tail GPRTC:$rs1)]>,
                         PseudoInstExpansion<(JALR X0, GPR:$rs1, 0)>;
// Tail-call address patterns. The emitted operand kind must match the matched
// one: a tglobaladdr input is re-emitted as a tglobaladdr (the previous code
// emitted texternalsym for both, losing the global-address distinction).
def : Pat<(riscv_tail (iPTR tglobaladdr:$dst)),
          (PseudoTAIL tglobaladdr:$dst)>;
def : Pat<(riscv_tail (iPTR texternalsym:$dst)),
          (PseudoTAIL texternalsym:$dst)>;
// Assembler "jump" pseudo: a long unconditional jump that may clobber $rd as
// a scratch register; explicitly not a call (isCall = 0).
let isCall = 0, isBarrier = 1, isBranch = 1, isTerminator = 1,
    isCodeGenOnly = 0, hasSideEffects = 0, mayStore = 0, mayLoad = 0 in
def PseudoJump : Pseudo<(outs GPR:$rd), (ins pseudo_jump_symbol:$target), []> {
  let AsmString = "jump\t$target, $rd";
}
// "lla": load local address. PC-relative address computation only, so no
// memory access flags are set.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                       "lla", "$dst, $src">;

// "la": load address. mayLoad = 1 because the expansion may read the address
// from the GOT (PIC).
let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
               "la", "$dst, $src">;

// "la.tls.ie": initial-exec TLS address; loads the thread-pointer offset
// from the GOT, hence mayLoad = 1.
let hasSideEffects = 0, mayLoad = 1, mayStore = 0, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.tls.ie", "$dst, $src">;
// "la.tls.gd": general-dynamic TLS. Expands to auipc+addi — a pure
// PC-relative address computation with no GOT load (unlike la.tls.ie), so
// mayLoad must be 0 (it was incorrectly 1).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.tls.gd", "$dst, $src">;
/// Loads

// Expand load patterns for one load type into the matching I-type load,
// covering the bare-register, frame-index, reg+imm, FI+imm and
// FI-or-imm (or-equivalent-to-add) addressing forms.
multiclass LdPat<PatFrag LoadOp, RVInst Inst> {
  def : Pat<(LoadOp GPR:$rs1), (Inst GPR:$rs1, 0)>;
  def : Pat<(LoadOp AddrFI:$rs1), (Inst AddrFI:$rs1, 0)>;
  def : Pat<(LoadOp (add GPR:$rs1, simm12:$imm12)),
            (Inst GPR:$rs1, simm12:$imm12)>;
  def : Pat<(LoadOp (add AddrFI:$rs1, simm12:$imm12)),
            (Inst AddrFI:$rs1, simm12:$imm12)>;
  def : Pat<(LoadOp (IsOrAdd AddrFI:$rs1, simm12:$imm12)),
            (Inst AddrFI:$rs1, simm12:$imm12)>;
}
// 8/16-bit loads; anyext loads are selected as the sign-extending form.
defm : LdPat<sextloadi8, LB>;
defm : LdPat<extloadi8, LB>;
defm : LdPat<sextloadi16, LH>;
defm : LdPat<extloadi16, LH>;
// XLen-sized loads use LW on RV32 only; the RV64 block below selects LW/LD.
defm : LdPat<load, LW>, Requires<[IsRV32]>;
defm : LdPat<zextloadi8, LBU>;
defm : LdPat<zextloadi16, LHU>;
/// Stores

// Expand store patterns for one store type into the matching S-type store,
// mirroring the addressing forms handled by LdPat.
multiclass StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy> {
  def : Pat<(StoreOp StTy:$rs2, GPR:$rs1), (Inst StTy:$rs2, GPR:$rs1, 0)>;
  def : Pat<(StoreOp StTy:$rs2, AddrFI:$rs1), (Inst StTy:$rs2, AddrFI:$rs1, 0)>;
  def : Pat<(StoreOp StTy:$rs2, (add GPR:$rs1, simm12:$imm12)),
            (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp StTy:$rs2, (add AddrFI:$rs1, simm12:$imm12)),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
  def : Pat<(StoreOp StTy:$rs2, (IsOrAdd AddrFI:$rs1, simm12:$imm12)),
            (Inst StTy:$rs2, AddrFI:$rs1, simm12:$imm12)>;
}
defm : StPat<truncstorei8, SB, GPR>;
defm : StPat<truncstorei16, SH, GPR>;
// XLen-sized stores use SW on RV32 only; the RV64 block below selects SW/SD.
defm : StPat<store, SW, GPR>, Requires<[IsRV32]>;

/// Fences

// Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
// Manual: Volume I. The XLenVT constants are AtomicOrdering values.

// fence acquire -> fence r, rw
def : Pat<(atomic_fence (XLenVT 4), (timm)), (FENCE 0b10, 0b11)>;
// fence release -> fence rw, w
def : Pat<(atomic_fence (XLenVT 5), (timm)), (FENCE 0b11, 0b1)>;
// fence acq_rel -> fence.tso
def : Pat<(atomic_fence (XLenVT 6), (timm)), (FENCE_TSO)>;
// fence seq_cst -> fence rw, rw
def : Pat<(atomic_fence (XLenVT 7), (timm)), (FENCE 0b11, 0b11)>;

// Lowering for atomic load and store is defined in RISCVInstrInfoA.td.
// Although these are lowered to fence+load/store instructions defined in the
// base RV32I/RV64I ISA, this lowering is only used when the A extension is
// present. This is necessary as it isn't valid to mix __atomic_* libcalls
// with inline atomic operations for the same object.

/// Other pseudo-instructions

// Pessimistically assume the stack pointer will be clobbered
let Defs = [X2], Uses = [X2] in {
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(callseq_start timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(callseq_end timm:$amt1, timm:$amt2)]>;
} // Defs = [X2], Uses = [X2]
/// RV64 patterns

let Predicates = [IsRV64] in {

/// sext and zext

def : Pat<(sext_inreg GPR:$rs1, i32), (ADDIW GPR:$rs1, 0)>;
// Zero-extend i32 with a shift pair; base RV64I has no single instruction
// for this.
def : Pat<(and GPR:$rs1, 0xffffffff), (SRLI (SLLI GPR:$rs1, 32), 32)>;

/// ALU operations

// Select the *W forms when the result is immediately sign-extended from i32,
// folding the extension into the operation.
def : Pat<(sext_inreg (add GPR:$rs1, GPR:$rs2), i32),
          (ADDW GPR:$rs1, GPR:$rs2)>;
def : Pat<(sext_inreg (add GPR:$rs1, simm12:$imm12), i32),
          (ADDIW GPR:$rs1, simm12:$imm12)>;
def : Pat<(sext_inreg (sub GPR:$rs1, GPR:$rs2), i32),
          (SUBW GPR:$rs1, GPR:$rs2)>;
def : Pat<(sext_inreg (shl GPR:$rs1, uimm5:$shamt), i32),
          (SLLIW GPR:$rs1, uimm5:$shamt)>;
// (srl (zexti32 ...), uimm5:$shamt) is matched with custom code due to the
// need to undo manipulation of the mask value performed by DAGCombine.
def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
          (SRAIW GPR:$rs1, uimm5:$shamt)>;

// Variable-amount 32-bit shifts, via the target-specific RISCVISD nodes.
def : PatGprGpr<riscv_sllw, SLLW>;
def : PatGprGpr<riscv_srlw, SRLW>;
def : PatGprGpr<riscv_sraw, SRAW>;

/// Loads

defm : LdPat<sextloadi32, LW>;
defm : LdPat<extloadi32, LW>;
defm : LdPat<zextloadi32, LWU>;
defm : LdPat<load, LD>;

/// Stores

defm : StPat<truncstorei32, SW, GPR>;
defm : StPat<store, SD, GPR>;
} // Predicates = [IsRV64]

/// readcyclecounter
// On RV64, we can directly read the 64-bit "cycle" CSR.
let Predicates = [IsRV64] in
def : Pat<(readcyclecounter), (CSRRS CYCLE.Encoding, X0)>;
// On RV32, ReadCycleWide will be expanded to the suggested loop reading both
// halves of the 64-bit "cycle" CSR.
let Predicates = [IsRV32], usesCustomInserter = 1, hasSideEffects = 0,
    mayLoad = 0, mayStore = 0, hasNoSchedulingInfo = 1 in
def ReadCycleWide : Pseudo<(outs GPR:$lo, GPR:$hi), (ins), [], "", "">;

// We lower `trap` to `unimp`, as this causes a hard exception on nearly all
// implementations.
def : Pat<(trap), (UNIMP)>;

// We lower `debugtrap` to `ebreak`, as this will get the attention of the
// debugger if possible.
def : Pat<(debugtrap), (EBREAK)>;
1170 //===----------------------------------------------------------------------===//
1171 // Standard extensions
1172 //===----------------------------------------------------------------------===//
1174 include "RISCVInstrInfoM.td"
1175 include "RISCVInstrInfoA.td"
1176 include "RISCVInstrInfoF.td"
1177 include "RISCVInstrInfoD.td"
1178 include "RISCVInstrInfoC.td"
1179 include "RISCVInstrInfoB.td"
1180 include "RISCVInstrInfoV.td"