//===-- RISCVInstrInfoV.td - RISC-V 'V' instructions -------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// This file describes the RISC-V instructions from the standard 'V' Vector
/// extension, version 1.0.
//
//===----------------------------------------------------------------------===//

include "RISCVInstrFormatsV.td"

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//
// AsmOperandClass for the vtypei immediate of vsetvli/vsetivli; VTypeINum is
// the bit-width of the encoded field (10 for vsetivli, 11 for vsetvli).
class VTypeIAsmOperand<int VTypeINum> : AsmOperandClass {
  let Name = "VTypeI" # VTypeINum;
  let ParserMethod = "parseVTypeI";
  let DiagnosticType = "InvalidVTypeI";
  let RenderMethod = "addVTypeIOperands";
}

class VTypeIOp<int VTypeINum> : Operand<XLenVT> {
  let ParserMatchClass = VTypeIAsmOperand<VTypeINum>;
  let PrintMethod = "printVTypeI";
  let DecoderMethod = "decodeUImmOperand<"#VTypeINum#">";
}

def VTypeIOp10 : VTypeIOp<10>;
def VTypeIOp11 : VTypeIOp<11>;
// Mask operand (`, v0.t`): optional in assembly; when omitted the parser
// substitutes the "unmasked" default via defaultMaskRegOp.
def VMaskAsmOperand : AsmOperandClass {
  let Name = "RVVMaskRegOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isV0Reg";
  let ParserMethod = "parseMaskReg";
  let IsOptional = 1;
  let DefaultMethod = "defaultMaskRegOp";
  let DiagnosticType = "InvalidVMaskRegister";
}

def VMaskOp : RegisterOperand<VMV0> {
  let ParserMatchClass = VMaskAsmOperand;
  let PrintMethod = "printVMaskReg";
  let EncoderMethod = "getVMaskReg";
  let DecoderMethod = "decodeVMaskReg";
}
def simm5 : Operand<XLenVT>, ImmLeaf<XLenVT, [{return isInt<5>(Imm);}]> {
  let ParserMatchClass = SImmAsmOperand<5>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<5>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<5>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def SImm5Plus1AsmOperand : AsmOperandClass {
  let Name = "SImm5Plus1";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidSImm5Plus1";
}

// [-15, 16]: simm5 shifted up by one, used by pseudo-expansions that encode
// imm-1 (e.g. vmsge[u] aliases).
def simm5_plus1 : Operand<XLenVT>, ImmLeaf<XLenVT,
  [{return (isInt<5>(Imm) && Imm != -16) || Imm == 16;}]> {
  let ParserMatchClass = SImm5Plus1AsmOperand;
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return (isInt<5>(Imm) && Imm != -16) || Imm == 16;
    return MCOp.isBareSymbolRef();
  }];
}

def simm5_plus1_nonzero : ImmLeaf<XLenVT,
  [{return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);}]>;
//===----------------------------------------------------------------------===//
// Scheduling definitions.
//===----------------------------------------------------------------------===//

class VMVRSched<int n>: Sched <[!cast<SchedReadWrite>("WriteVMov" # n # "V"),
                                !cast<SchedReadWrite>("ReadVMov" # n # "V")]>;

class VLESched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDE" # n),
                                ReadVLDX, ReadVMask]>;

class VSESched<int n> : Sched <[!cast<SchedReadWrite>("WriteVSTE" # n),
                                !cast<SchedReadWrite>("ReadVSTE" # n # "V"),
                                ReadVSTX, ReadVMask]>;

class VLSSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDS" # n),
                                ReadVLDX, ReadVLDSX, ReadVMask]>;

class VSSSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVSTS" # n),
                                !cast<SchedReadWrite>("ReadVSTS" # n # "V"),
                                ReadVSTX, ReadVSTSX, ReadVMask]>;

// o is "U" (unordered) or "O" (ordered) for indexed accesses.
class VLXSched<int n, string o> :
  Sched <[!cast<SchedReadWrite>("WriteVLD" # o # "X" # n),
          ReadVLDX, !cast<SchedReadWrite>("ReadVLD" # o # "XV"), ReadVMask]>;

class VSXSched<int n, string o> :
  Sched <[!cast<SchedReadWrite>("WriteVST" # o # "X" # n),
          !cast<SchedReadWrite>("ReadVST" # o # "X" # n),
          ReadVSTX, !cast<SchedReadWrite>("ReadVST" # o # "XV"), ReadVMask]>;

class VLFSched<int n> : Sched <[!cast<SchedReadWrite>("WriteVLDFF" # n),
                                ReadVLDX, ReadVMask]>;
//===----------------------------------------------------------------------===//
// Instruction class templates
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
// unit-stride load vd, (rs1), vm
class VUnitStrideLoad<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

let vm = 1, RVVConstraint = NoConstraint in {
// unit-stride whole register load vl<nf>r.v vd, (rs1)
class VWholeLoad<bits<3> nf, RISCVWidth width, string opcodestr, RegisterClass VRC>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideWholeReg,
                width.Value{2-0}, (outs VRC:$vd), (ins GPR:$rs1),
                opcodestr, "$vd, (${rs1})"> {
  // Whole-register loads are VL/VTYPE independent.
  let Uses = [];
}

// unit-stride mask load vd, (rs1)
class VUnitStrideLoadMask<string opcodestr>
    : RVInstVLU<0b000, LSWidth8.Value{3}, LUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1), opcodestr, "$vd, (${rs1})">;
} // vm = 1, RVVConstraint = NoConstraint

// unit-stride fault-only-first load vd, (rs1), vm
class VUnitStrideLoadFF<RISCVWidth width, string opcodestr>
    : RVInstVLU<0b000, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// strided load vd, (rs1), rs2, vm
class VStridedLoad<RISCVWidth width, string opcodestr>
    : RVInstVLS<0b000, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// indexed load vd, (rs1), vs2, vm
class VIndexedLoad<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVLX<0b000, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;

// unit-stride segment load vd, (rs1), vm
class VUnitStrideSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStride, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// segment fault-only-first load vd, (rs1), vm
class VUnitStrideSegmentLoadFF<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLU<nf, width.Value{3}, LUMOPUnitStrideFF, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VMaskOp:$vm), opcodestr, "$vd, (${rs1})$vm">;

// strided segment load vd, (rs1), rs2, vm
class VStridedSegmentLoad<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVLS<nf, width.Value{3}, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, GPR:$rs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $rs2$vm">;

// indexed segment load vd, (rs1), vs2, vm
class VIndexedSegmentLoad<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                          string opcodestr>
    : RVInstVLX<nf, width.Value{3}, mop, width.Value{2-0},
                (outs VR:$vd),
                (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm), opcodestr,
                "$vd, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 1, mayStore = 0
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
// unit-stride store vd, vs3, (rs1), vm
class VUnitStrideStore<RISCVWidth width, string opcodestr>
    : RVInstVSU<0b000, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

let vm = 1 in {
// vs<nf>r.v vd, (rs1)
class VWholeStore<bits<3> nf, string opcodestr, RegisterClass VRC>
    : RVInstVSU<nf, 0, SUMOPUnitStrideWholeReg,
                0b000, (outs), (ins VRC:$vs3, GPR:$rs1),
                opcodestr, "$vs3, (${rs1})"> {
  // Whole-register stores are VL/VTYPE independent.
  let Uses = [];
}

// unit-stride mask store vd, vs3, (rs1)
class VUnitStrideStoreMask<string opcodestr>
    : RVInstVSU<0b000, LSWidth8.Value{3}, SUMOPUnitStrideMask, LSWidth8.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1), opcodestr,
                "$vs3, (${rs1})">;
} // vm = 1

// strided store vd, vs3, (rs1), rs2, vm
class VStridedStore<RISCVWidth width, string opcodestr>
    : RVInstVSS<0b000, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// indexed store vd, vs3, (rs1), vs2, vm
class VIndexedStore<RISCVMOP mop, RISCVWidth width, string opcodestr>
    : RVInstVSX<0b000, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;

// segment store vd, vs3, (rs1), vm
class VUnitStrideSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSU<nf, width.Value{3}, SUMOPUnitStride, width.Value{2-0},
                (outs), (ins VR:$vs3, GPR:$rs1, VMaskOp:$vm), opcodestr,
                "$vs3, (${rs1})$vm">;

// segment store vd, vs3, (rs1), rs2, vm
class VStridedSegmentStore<bits<3> nf, RISCVWidth width, string opcodestr>
    : RVInstVSS<nf, width.Value{3}, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, GPR:$rs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $rs2$vm">;

// segment store vd, vs3, (rs1), vs2, vm
class VIndexedSegmentStore<bits<3> nf, RISCVMOP mop, RISCVWidth width,
                           string opcodestr>
    : RVInstVSX<nf, width.Value{3}, mop, width.Value{2-0}, (outs),
                (ins VR:$vs3, GPR:$rs1, VR:$vs2, VMaskOp:$vm),
                opcodestr, "$vs3, (${rs1}), $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 1
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
// op vd, vs2, vs1, vm
class VALUVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1, VMaskOp:$vm),
               opcodestr, "$vd, $vs2, $vs1$vm">;

// op vd, vs2, vs1, v0 (without mask, use v0 as carry input)
class VALUmVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1, VMV0:$v0),
               opcodestr, "$vd, $vs2, $vs1, v0"> {
  let vm = 0;
}

// op vd, vs1, vs2, vm (reverse the order of vs1 and vs2)
class VALUrVV<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs1, VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $vs1, $vs2$vm">;

// op vd, vs2, vs1
class VALUVVNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVV<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, VR:$vs1),
               opcodestr, "$vd, $vs2, $vs1"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm
class VALUVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
               opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, vs2, rs1, v0 (without mask, use v0 as carry input)
class VALUmVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1, VMV0:$v0),
               opcodestr, "$vd, $vs2, $rs1, v0"> {
  let vm = 0;
}

// op vd, rs1, vs2, vm (reverse the order of rs1 and vs2)
class VALUrVX<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins GPR:$rs1, VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, rs1
class VALUVXNoVm<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, GPR:$rs1),
               opcodestr, "$vd, $vs2, $rs1"> {
  let vm = 1;
}

// op vd, vs2, imm, vm
class VALUVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMaskOp:$vm),
                opcodestr, "$vd, $vs2, $imm$vm">;

// op vd, vs2, imm, v0 (without mask, use v0 as carry input)
class VALUmVI<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm, VMV0:$v0),
                opcodestr, "$vd, $vs2, $imm, v0"> {
  let vm = 0;
}

// op vd, vs2, imm, vm
class VALUVINoVm<bits<6> funct6, string opcodestr, Operand optype = simm5>
    : RVInstIVI<funct6, (outs VR:$vd),
                (ins VR:$vs2, optype:$imm),
                opcodestr, "$vd, $vs2, $imm"> {
  let vm = 1;
}

// op vd, vs2, rs1, vm (Float)
class VALUVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins VR:$vs2, FPR32:$rs1, VMaskOp:$vm),
               opcodestr, "$vd, $vs2, $rs1$vm">;

// op vd, rs1, vs2, vm (Float) (with mask, reverse the order of rs1 and vs2)
class VALUrVF<bits<6> funct6, RISCVVFormat opv, string opcodestr>
    : RVInstVX<funct6, opv, (outs VR:$vd),
               (ins FPR32:$rs1, VR:$vs2, VMaskOp:$vm),
               opcodestr, "$vd, $rs1, $vs2$vm">;

// op vd, vs2, vm (use vs1 as instruction encoding)
class VALUVs2<bits<6> funct6, bits<5> vs1, RISCVVFormat opv, string opcodestr>
    : RVInstV<funct6, vs1, opv, (outs VR:$vd),
              (ins VR:$vs2, VMaskOp:$vm),
              opcodestr, "$vd, $vs2$vm">;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
//===----------------------------------------------------------------------===//
// Combination of instruction classes.
// Use these multiclasses to define instructions more easily.
//===----------------------------------------------------------------------===//

// Unordered/ordered indexed loads and stores for each EEW in EEWList.
multiclass VIndexLoadStore<list<int> EEWList> {
  foreach n = EEWList in {
    defvar w = !cast<RISCVWidth>("LSWidth" # n);

    def VLUXEI # n # _V :
      VIndexedLoad<MOPLDIndexedUnord, w, "vluxei" # n # ".v">,
      VLXSched<n, "U">;
    def VLOXEI # n # _V :
      VIndexedLoad<MOPLDIndexedOrder, w, "vloxei" # n # ".v">,
      VLXSched<n, "O">;

    def VSUXEI # n # _V :
      VIndexedStore<MOPSTIndexedUnord, w, "vsuxei" # n # ".v">,
      VSXSched<n, "U">;
    def VSOXEI # n # _V :
      VIndexedStore<MOPSTIndexedOrder, w, "vsoxei" # n # ".v">,
      VSXSched<n, "O">;
  }
}
multiclass VALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVIALUI, ReadVIALUV, ReadVMask]>;
}

multiclass VALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUV, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIALUX, ReadVIALUV, ReadVIALUX, ReadVMask]>;
}

multiclass VALU_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIALUV, ReadVIALUV, ReadVIALUX, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVIALUI, ReadVIALUV, ReadVMask]>;
}

multiclass VALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIWALUV, ReadVIWALUV, ReadVIWALUV, ReadVMask]>;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWALUX, ReadVIWALUV, ReadVIWALUX, ReadVMask]>;
}

multiclass VMAC_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIMulAddV, ReadVIMulAddV, ReadVIMulAddV, ReadVMask]>;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIMulAddX, ReadVIMulAddV, ReadVIMulAddX, ReadVMask]>;
}

multiclass VWMAC_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIWMulAddV, ReadVIWMulAddV, ReadVIWMulAddV, ReadVMask]>;
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWMulAddX, ReadVIWMulAddV, ReadVIWMulAddX, ReadVMask]>;
}

multiclass VWMAC_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUrVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWMulAddX, ReadVIWMulAddV, ReadVIWMulAddX, ReadVMask]>;
}

multiclass VALU_MV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
}
multiclass VALUm_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVICALUI, ReadVIALUCV, ReadVMask]>;
}

multiclass VMRG_IV_V_X_I<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVIMergeV, ReadVIMergeV, ReadVIMergeV, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVIMergeX, ReadVIMergeV, ReadVIMergeX, ReadVMask]>;
  def IM : VALUmVI<funct6, opcodestr # ".vim">,
           Sched<[WriteVIMergeI, ReadVIMergeV, ReadVMask]>;
}

multiclass VALUm_IV_V_X<string opcodestr, bits<6> funct6> {
  def VM : VALUmVV<funct6, OPIVV, opcodestr # ".vvm">,
           Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV, ReadVMask]>;
  def XM : VALUmVX<funct6, OPIVX, opcodestr # ".vxm">,
           Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX, ReadVMask]>;
}

multiclass VALUNoVm_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV]>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX]>;
  def I : VALUVINoVm<funct6, opcodestr # ".vi", optype>,
          Sched<[WriteVICALUI, ReadVIALUCV]>;
}

multiclass VALUNoVm_IV_V_X<string opcodestr, bits<6> funct6> {
  def V : VALUVVNoVm<funct6, OPIVV, opcodestr # ".vv">,
          Sched<[WriteVICALUV, ReadVIALUCV, ReadVIALUCV]>;
  def X : VALUVXNoVm<funct6, OPIVX, opcodestr # ".vx">,
          Sched<[WriteVICALUX, ReadVIALUCV, ReadVIALUCX]>;
}
multiclass VALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFALUV, ReadVFALUV, ReadVFALUV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>;
}

multiclass VALU_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFALUF, ReadVFALUV, ReadVFALUF, ReadVMask]>;
}

multiclass VWALU_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWALUV, ReadVFWALUV, ReadVFWALUV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWALUF, ReadVFWALUV, ReadVFWALUF, ReadVMask]>;
}

multiclass VMUL_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFMulV, ReadVFMulV, ReadVFMulV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFMulF, ReadVFMulV, ReadVFMulF, ReadVMask]>;
}

multiclass VDIV_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFDivV, ReadVFDivV, ReadVFDivV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>;
}

multiclass VRDIV_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFDivF, ReadVFDivV, ReadVFDivF, ReadVMask]>;
}

multiclass VWMUL_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWMulV, ReadVFWMulV, ReadVFWMulV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWMulF, ReadVFWMulV, ReadVFWMulF, ReadVMask]>;
}

multiclass VMAC_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFMulAddV, ReadVFMulAddV, ReadVFMulAddV, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFMulAddF, ReadVFMulAddV, ReadVFMulAddF, ReadVMask]>;
}

multiclass VWMAC_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUrVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFWMulAddV, ReadVFWMulAddV, ReadVFWMulAddV, ReadVMask]>;
  def F : VALUrVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFWMulAddF, ReadVFWMulAddV, ReadVFWMulAddF, ReadVMask]>;
}

multiclass VSQR_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
}

multiclass VRCP_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
}
multiclass VCMP_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFCmpV, ReadVFCmpV, ReadVFCmpV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
}

multiclass VCMP_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFCmpF, ReadVFCmpV, ReadVFCmpF, ReadVMask]>;
}

multiclass VSGNJ_FV_V_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPFVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVFSgnjV, ReadVFSgnjV, ReadVFSgnjV, ReadVMask]>;
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFSgnjF, ReadVFSgnjV, ReadVFSgnjF, ReadVMask]>;
}

multiclass VCLS_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
}

multiclass VCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFCvtIToFV, ReadVFCvtIToFV, ReadVMask]>;
}

multiclass VCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFCvtFToIV, ReadVFCvtFToIV, ReadVMask]>;
}

multiclass VWCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtIToFV, ReadVFWCvtIToFV, ReadVMask]>;
}

multiclass VWCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtFToIV, ReadVFWCvtFToIV, ReadVMask]>;
}

multiclass VWCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFWCvtFToFV, ReadVFWCvtFToFV, ReadVMask]>;
}

multiclass VNCVTF_IV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtIToFV, ReadVFNCvtIToFV, ReadVMask]>;
}

multiclass VNCVTI_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToIV, ReadVFNCvtFToIV, ReadVMask]>;
}

multiclass VNCVTF_FV_VS2<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPFVV, opcodestr>,
           Sched<[WriteVFNCvtFToFV, ReadVFNCvtFToFV, ReadVMask]>;
}
multiclass VRED_MV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPMVV, opcodestr # ".vs">,
            Sched<[WriteVIRedV, ReadVIRedV, ReadVIRedV0, ReadVMask]>;
}

multiclass VWRED_IV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPIVV, opcodestr # ".vs">,
            Sched<[WriteVIWRedV, ReadVIWRedV, ReadVIWRedV0, ReadVMask]>;
}

multiclass VRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedV, ReadVFRedV, ReadVFRedV0, ReadVMask]>;
}

multiclass VREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFRedOV, ReadVFRedOV, ReadVFRedOV0, ReadVMask]>;
}

multiclass VWRED_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedV, ReadVFWRedV, ReadVFWRedV0, ReadVMask]>;
}

multiclass VWREDO_FV_V<string opcodestr, bits<6> funct6> {
  def _VS : VALUVV<funct6, OPFVV, opcodestr # ".vs">,
            Sched<[WriteVFWRedOV, ReadVFWRedOV, ReadVFWRedOV0, ReadVMask]>;
}

multiclass VMALU_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">,
          Sched<[WriteVMALUV, ReadVMALUV, ReadVMALUV]>;
}

multiclass VMSFS_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMSFSV, ReadVMSFSV, ReadVMask]>;
}

multiclass VMIOT_MV_V<string opcodestr, bits<6> funct6, bits<5> vs1> {
  def "" : VALUVs2<funct6, vs1, OPMVV, opcodestr>,
           Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
}
multiclass VSHT_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVShiftV, ReadVShiftV, ReadVShiftV, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVShiftX, ReadVShiftV, ReadVShiftX, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVShiftI, ReadVShiftV, ReadVMask]>;
}

multiclass VNSHT_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVNShiftV, ReadVNShiftV, ReadVNShiftV, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVNShiftX, ReadVNShiftV, ReadVNShiftX, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVNShiftI, ReadVNShiftV, ReadVMask]>;
}

multiclass VCMP_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVICmpI, ReadVICmpV, ReadVMask]>;
}

multiclass VCMP_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpX, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVICmpI, ReadVICmpV, ReadVMask]>;
}

multiclass VCMP_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVICmpV, ReadVICmpV, ReadVICmpV, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVICmpX, ReadVICmpV, ReadVICmpX, ReadVMask]>;
}

multiclass VMUL_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIMulV, ReadVIMulV, ReadVIMulV, ReadVMask]>;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIMulX, ReadVIMulV, ReadVIMulX, ReadVMask]>;
}

multiclass VWMUL_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIWMulV, ReadVIWMulV, ReadVIWMulV, ReadVMask]>;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIWMulX, ReadVIWMulV, ReadVIWMulX, ReadVMask]>;
}

multiclass VDIV_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVIDivV, ReadVIDivV, ReadVIDivV, ReadVMask]>;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVIDivX, ReadVIDivV, ReadVIDivX, ReadVMask]>;
}
multiclass VSALU_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVSALUX, ReadVSALUV, ReadVSALUX, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVSALUI, ReadVSALUV, ReadVMask]>;
}

multiclass VSALU_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVSALUV, ReadVSALUV, ReadVSALUV, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVSALUX, ReadVSALUV, ReadVSALUX, ReadVMask]>;
}

multiclass VAALU_MV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPMVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVAALUV, ReadVAALUV, ReadVAALUV, ReadVMask]>;
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVAALUX, ReadVAALUV, ReadVAALUX, ReadVMask]>;
}

multiclass VSMUL_IV_V_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVSMulV, ReadVSMulV, ReadVSMulV, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVSMulX, ReadVSMulV, ReadVSMulX, ReadVMask]>;
}

multiclass VSSHF_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVSShiftV, ReadVSShiftV, ReadVSShiftV, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVSShiftX, ReadVSShiftV, ReadVSShiftX, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVSShiftI, ReadVSShiftV, ReadVMask]>;
}

multiclass VNCLP_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVNClipV, ReadVNClipV, ReadVNClipV, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVNClipX, ReadVNClipV, ReadVNClipX, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVNClipI, ReadVNClipV, ReadVMask]>;
}

multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVISlideX, ReadVISlideV, ReadVISlideX, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVISlideI, ReadVISlideV, ReadVMask]>;
}

multiclass VSLD1_MV_X<string opcodestr, bits<6> funct6, string vw = "v"> {
  def X : VALUVX<funct6, OPMVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVISlide1X, ReadVISlideV, ReadVISlideX, ReadVMask]>;
}

multiclass VSLD1_FV_F<string opcodestr, bits<6> funct6, string vw = "v"> {
  def F : VALUVF<funct6, OPFVF, opcodestr # "." # vw # "f">,
          Sched<[WriteVFSlide1F, ReadVFSlideV, ReadVFSlideF, ReadVMask]>;
}

multiclass VGTR_IV_V_X_I<string opcodestr, bits<6> funct6, Operand optype = simm5, string vw = "v"> {
  def V : VALUVV<funct6, OPIVV, opcodestr # "." # vw # "v">,
          Sched<[WriteVGatherV, ReadVGatherV, ReadVGatherV, ReadVMask]>;
  def X : VALUVX<funct6, OPIVX, opcodestr # "." # vw # "x">,
          Sched<[WriteVGatherX, ReadVGatherV, ReadVGatherX, ReadVMask]>;
  def I : VALUVI<funct6, opcodestr # "." # vw # "i", optype>,
          Sched<[WriteVGatherI, ReadVGatherV, ReadVMask]>;
}

multiclass VCPR_MV_Mask<string opcodestr, bits<6> funct6, string vm = "v"> {
  def M : VALUVVNoVm<funct6, OPMVV, opcodestr # "." # vm # "m">,
          Sched<[WriteVCompressV, ReadVCompressV, ReadVCompressV]>;
}
// Whole-register loads vl<nf+1>re<l>.v for EEW 8/16/32. `nf` is the encoded
// NF field, i.e. register count minus one; the matching SchedWrite is looked
// up by name.
multiclass VWholeLoadN<bits<3> nf, string opcodestr, RegisterClass VRC> {
  foreach l = [8, 16, 32] in {
    defvar w = !cast<RISCVWidth>("LSWidth" # l);
    defvar s = !cast<SchedWrite>("WriteVLD" # !add(nf, 1) # "R" # l);

    def E # l # _V : VWholeLoad<nf, w, opcodestr # "e" # l # ".v", VRC>,
                     Sched<[s, ReadVLDX]>;
// EEW=64 whole-register load variant; split out from VWholeLoadN so it can be
// gated on HasVInstructionsI64 and take its SchedWrite explicitly.
multiclass VWholeLoadEEW64<bits<3> nf, string opcodestr, RegisterClass VRC, SchedReadWrite schedrw> {
  def E64_V : VWholeLoad<nf, LSWidth64, opcodestr # "e64.v", VRC>,
              Sched<[schedrw, ReadVLDX]>;
//===----------------------------------------------------------------------===//
// Instruction Definitions
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
// Configuration-setting instructions write VL/VTYPE; modeled as side effects.
let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
def VSETVLI : RVInstSetVLi<(outs GPR:$rd), (ins GPR:$rs1, VTypeIOp11:$vtypei),
                           "vsetvli", "$rd, $rs1, $vtypei">;

def VSETIVLI : RVInstSetiVLi<(outs GPR:$rd), (ins uimm5:$uimm, VTypeIOp10:$vtypei),
                             "vsetivli", "$rd, $uimm, $vtypei">;

def VSETVL : RVInstSetVL<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
                         "vsetvl", "$rd, $rs1, $rs2">;
} // hasSideEffects = 1, mayLoad = 0, mayStore = 0
foreach eew = [8, 16, 32] in {
  defvar w = !cast<RISCVWidth>("LSWidth" # eew);

  // Vector Unit-Stride Instructions
  def VLE#eew#_V : VUnitStrideLoad<w, "vle"#eew#".v">, VLESched<eew>;
  def VSE#eew#_V : VUnitStrideStore<w, "vse"#eew#".v">, VSESched<eew>;

  // Vector Unit-Stride Fault-only-First Loads
  def VLE#eew#FF_V : VUnitStrideLoadFF<w, "vle"#eew#"ff.v">, VLFSched<eew>;

  // Vector Strided Instructions
  def VLSE#eew#_V : VStridedLoad<w, "vlse"#eew#".v">, VLSSched<eew>;
  def VSSE#eew#_V : VStridedStore<w, "vsse"#eew#".v">, VSSSched<eew>;
}

// Vector Indexed Instructions
defm "" : VIndexLoadStore<[8, 16, 32]>;
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructions] in {
// Vector mask load/store (always EEW=8).
def VLM_V : VUnitStrideLoadMask<"vlm.v">,
            Sched<[WriteVLDM, ReadVLDX]>;
def VSM_V : VUnitStrideStoreMask<"vsm.v">,
            Sched<[WriteVSTM, ReadVSTM, ReadVSTX]>;
// vle1.v/vse1.v are the pre-1.0 spellings of vlm.v/vsm.v.
def : InstAlias<"vle1.v $vd, (${rs1})",
                (VLM_V VR:$vd, GPR:$rs1), 0>;
def : InstAlias<"vse1.v $vs3, (${rs1})",
                (VSM_V VR:$vs3, GPR:$rs1), 0>;

// Whole-register loads; the NF argument is register count minus one.
defm VL1R : VWholeLoadN<0, "vl1r", VR>;
defm VL2R : VWholeLoadN<1, "vl2r", VRM2>;
defm VL4R : VWholeLoadN<3, "vl4r", VRM4>;
defm VL8R : VWholeLoadN<7, "vl8r", VRM8>;

def VS1R_V : VWholeStore<0, "vs1r.v", VR>,
             Sched<[WriteVST1R, ReadVST1R, ReadVSTX]>;
def VS2R_V : VWholeStore<1, "vs2r.v", VRM2>,
             Sched<[WriteVST2R, ReadVST2R, ReadVSTX]>;
def VS4R_V : VWholeStore<3, "vs4r.v", VRM4>,
             Sched<[WriteVST4R, ReadVST4R, ReadVSTX]>;
def VS8R_V : VWholeStore<7, "vs8r.v", VRM8>,
             Sched<[WriteVST8R, ReadVST8R, ReadVSTX]>;

// vl<n>r.v defaults to the EEW=8 encoding.
def : InstAlias<"vl1r.v $vd, (${rs1})", (VL1RE8_V VR:$vd, GPR:$rs1)>;
def : InstAlias<"vl2r.v $vd, (${rs1})", (VL2RE8_V VRM2:$vd, GPR:$rs1)>;
def : InstAlias<"vl4r.v $vd, (${rs1})", (VL4RE8_V VRM4:$vd, GPR:$rs1)>;
def : InstAlias<"vl8r.v $vd, (${rs1})", (VL8RE8_V VRM8:$vd, GPR:$rs1)>;
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsI64] in {
// Vector Unit-Stride Instructions
def VLE64_V : VUnitStrideLoad<LSWidth64, "vle64.v">,
              VLESched<64>;

def VLE64FF_V : VUnitStrideLoadFF<LSWidth64, "vle64ff.v">,
                VLFSched<64>;

def VSE64_V : VUnitStrideStore<LSWidth64, "vse64.v">,
              VSESched<64>;
// Vector Strided Instructions
def VLSE64_V : VStridedLoad<LSWidth64, "vlse64.v">,
               VLSSched<64>;

def VSSE64_V : VStridedStore<LSWidth64, "vsse64.v">,
               VSSSched<64>;

// Whole-register loads need the explicit EEW=64 SchedWrites.
defm VL1R: VWholeLoadEEW64<0, "vl1r", VR, WriteVLD1R64>;
defm VL2R: VWholeLoadEEW64<1, "vl2r", VRM2, WriteVLD2R64>;
defm VL4R: VWholeLoadEEW64<3, "vl4r", VRM4, WriteVLD4R64>;
defm VL8R: VWholeLoadEEW64<7, "vl8r", VRM8, WriteVLD8R64>;
} // Predicates = [HasVInstructionsI64]
// EEW=64 index type additionally requires RV64.
let Predicates = [IsRV64, HasVInstructionsI64] in {
// Vector Indexed Instructions
defm "" : VIndexLoadStore<[64]>;
} // [IsRV64, HasVInstructionsI64]
let Predicates = [HasVInstructions] in {
// Vector Single-Width Integer Add and Subtract
defm VADD_V : VALU_IV_V_X_I<"vadd", 0b000000>;
defm VSUB_V : VALU_IV_V_X<"vsub", 0b000010>;
defm VRSUB_V : VALU_IV_X_I<"vrsub", 0b000011>;

// vneg.v is a reverse-subtract from x0.
def : InstAlias<"vneg.v $vd, $vs$vm", (VRSUB_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vneg.v $vd, $vs", (VRSUB_VX VR:$vd, VR:$vs, X0, zero_reg)>;

// Vector Widening Integer Add/Subtract
// Refer to 11.2 Widening Vector Arithmetic Instructions
// The destination vector register group cannot overlap a source vector
// register group of a different element width (including the mask register
// if masked), otherwise an illegal instruction exception is raised.
let Constraints = "@earlyclobber $vd" in {
let RVVConstraint = WidenV in {
defm VWADDU_V : VALU_MV_V_X<"vwaddu", 0b110000>;
defm VWSUBU_V : VALU_MV_V_X<"vwsubu", 0b110010>;
defm VWADD_V : VALU_MV_V_X<"vwadd", 0b110001>;
defm VWSUB_V : VALU_MV_V_X<"vwsub", 0b110011>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VWADDU_W : VALU_MV_V_X<"vwaddu", 0b110100, "w">;
defm VWSUBU_W : VALU_MV_V_X<"vwsubu", 0b110110, "w">;
defm VWADD_W : VALU_MV_V_X<"vwadd", 0b110101, "w">;
defm VWSUB_W : VALU_MV_V_X<"vwsub", 0b110111, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd"

// vwcvt[u].x.x.v is a widening add of x0 (sign-/zero-extend each element).
def : InstAlias<"vwcvt.x.x.v $vd, $vs$vm",
                (VWADD_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvt.x.x.v $vd, $vs",
                (VWADD_VX VR:$vd, VR:$vs, X0, zero_reg)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs$vm",
                (VWADDU_VX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vwcvtu.x.x.v $vd, $vs",
                (VWADDU_VX VR:$vd, VR:$vs, X0, zero_reg)>;
// Vector Integer Extension
defm VZEXT_VF8 : VALU_MV_VS2<"vzext.vf8", 0b010010, 0b00010>;
defm VSEXT_VF8 : VALU_MV_VS2<"vsext.vf8", 0b010010, 0b00011>;
defm VZEXT_VF4 : VALU_MV_VS2<"vzext.vf4", 0b010010, 0b00100>;
defm VSEXT_VF4 : VALU_MV_VS2<"vsext.vf4", 0b010010, 0b00101>;
defm VZEXT_VF2 : VALU_MV_VS2<"vzext.vf2", 0b010010, 0b00110>;
defm VSEXT_VF2 : VALU_MV_VS2<"vsext.vf2", 0b010010, 0b00111>;

// Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
defm VADC_V : VALUm_IV_V_X_I<"vadc", 0b010000>;
// vmadc/vmsbc produce a mask (carry/borrow-out) result; both the masked-in
// and no-carry-in encodings are defined under the same defm name.
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMADC_V : VALUm_IV_V_X_I<"vmadc", 0b010001>;
defm VMADC_V : VALUNoVm_IV_V_X_I<"vmadc", 0b010001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
defm VSBC_V : VALUm_IV_V_X<"vsbc", 0b010010>;
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
defm VMSBC_V : VALUm_IV_V_X<"vmsbc", 0b010011>;
defm VMSBC_V : VALUNoVm_IV_V_X<"vmsbc", 0b010011>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint
// Vector Bitwise Logical Instructions
defm VAND_V : VALU_IV_V_X_I<"vand", 0b001001>;
defm VOR_V : VALU_IV_V_X_I<"vor", 0b001010>;
defm VXOR_V : VALU_IV_V_X_I<"vxor", 0b001011>;

// vnot.v is an XOR with the all-ones immediate.
def : InstAlias<"vnot.v $vd, $vs$vm",
                (VXOR_VI VR:$vd, VR:$vs, -1, VMaskOp:$vm)>;
def : InstAlias<"vnot.v $vd, $vs",
                (VXOR_VI VR:$vd, VR:$vs, -1, zero_reg)>;

// Vector Single-Width Bit Shift Instructions
defm VSLL_V : VSHT_IV_V_X_I<"vsll", 0b100101, uimm5>;
defm VSRL_V : VSHT_IV_V_X_I<"vsrl", 0b101000, uimm5>;
defm VSRA_V : VSHT_IV_V_X_I<"vsra", 0b101001, uimm5>;

// Vector Narrowing Integer Right Shift Instructions
// Refer to 11.3. Narrowing Vector Arithmetic Instructions
// The destination vector register group cannot overlap the first source
// vector register group (specified by vs2). The destination vector register
// group cannot overlap the mask register if used, unless LMUL=1.
let Constraints = "@earlyclobber $vd" in {
defm VNSRL_W : VNSHT_IV_V_X_I<"vnsrl", 0b101100, uimm5, "w">;
defm VNSRA_W : VNSHT_IV_V_X_I<"vnsra", 0b101101, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"

// vncvt.x.x.w is a narrowing right shift by zero (truncate).
def : InstAlias<"vncvt.x.x.w $vd, $vs$vm",
                (VNSRL_WX VR:$vd, VR:$vs, X0, VMaskOp:$vm)>;
def : InstAlias<"vncvt.x.x.w $vd, $vs",
                (VNSRL_WX VR:$vd, VR:$vs, X0, zero_reg)>;
// Vector Integer Comparison Instructions
let RVVConstraint = NoConstraint in {
defm VMSEQ_V : VCMP_IV_V_X_I<"vmseq", 0b011000>;
defm VMSNE_V : VCMP_IV_V_X_I<"vmsne", 0b011001>;
defm VMSLTU_V : VCMP_IV_V_X<"vmsltu", 0b011010>;
defm VMSLT_V : VCMP_IV_V_X<"vmslt", 0b011011>;
defm VMSLEU_V : VCMP_IV_V_X_I<"vmsleu", 0b011100>;
defm VMSLE_V : VCMP_IV_V_X_I<"vmsle", 0b011101>;
defm VMSGTU_V : VCMP_IV_X_I<"vmsgtu", 0b011110>;
defm VMSGT_V : VCMP_IV_X_I<"vmsgt", 0b011111>;
} // RVVConstraint = NoConstraint

// .vv greater-than(-or-equal) forms are operand-swapped less-than aliases.
def : InstAlias<"vmsgtu.vv $vd, $va, $vb$vm",
                (VMSLTU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgt.vv $vd, $va, $vb$vm",
                (VMSLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsgeu.vv $vd, $va, $vb$vm",
                (VMSLEU_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmsge.vv $vd, $va, $vb$vm",
                (VMSLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
// For unsigned comparisons we need to special case 0 immediate to maintain
// the always true/false semantics we would invert if we just decremented the
// immediate like we do for signed. To match the GNU assembler we will use
// vmseq/vmsne.vv with the same register for both operands which we can't do
// from an InstAlias.
def PseudoVMSGEU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsgeu.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLTU_VI : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                             [], "vmsltu.vi", "$vd, $vs2, $imm$vm">;
// Handle signed with pseudos as well for more consistency in the
// assembler.
def PseudoVMSGE_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmsge.vi", "$vd, $vs2, $imm$vm">;
def PseudoVMSLT_VI : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, simm5_plus1:$imm, VMaskOp:$vm),
                            [], "vmslt.vi", "$vd, $vs2, $imm$vm">;
// vmsge[u].vx has no direct encoding; these isAsmParserOnly pseudos are
// NOTE(review): presumably expanded by the asm parser into vmslt[u].vx plus
// mask ops — confirm in RISCVAsmParser.
let isCodeGenOnly = 0, isAsmParserOnly = 1, hasSideEffects = 0, mayLoad = 0,
    mayStore = 0 in {
def PseudoVMSGEU_VX : Pseudo<(outs VR:$vd),
                             (ins VR:$vs2, GPR:$rs1),
                             [], "vmsgeu.vx", "$vd, $vs2, $rs1">;
def PseudoVMSGE_VX : Pseudo<(outs VR:$vd),
                            (ins VR:$vs2, GPR:$rs1),
                            [], "vmsge.vx", "$vd, $vs2, $rs1">;
// Masked forms; destination may not be v0.
def PseudoVMSGEU_VX_M : Pseudo<(outs VRNoV0:$vd),
                               (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                               [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm">;
def PseudoVMSGE_VX_M : Pseudo<(outs VRNoV0:$vd),
                              (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                              [], "vmsge.vx", "$vd, $vs2, $rs1$vm">;
// Masked forms with an explicit scratch register (for vd == v0).
def PseudoVMSGEU_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                 (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                 [], "vmsgeu.vx", "$vd, $vs2, $rs1$vm, $scratch">;
def PseudoVMSGE_VX_M_T : Pseudo<(outs VR:$vd, VRNoV0:$scratch),
                                (ins VR:$vs2, GPR:$rs1, VMaskOp:$vm),
                                [], "vmsge.vx", "$vd, $vs2, $rs1$vm, $scratch">;
// Vector Integer Min/Max Instructions
defm VMINU_V : VCMP_IV_V_X<"vminu", 0b000100>;
defm VMIN_V : VCMP_IV_V_X<"vmin", 0b000101>;
defm VMAXU_V : VCMP_IV_V_X<"vmaxu", 0b000110>;
defm VMAX_V : VCMP_IV_V_X<"vmax", 0b000111>;

// Vector Single-Width Integer Multiply Instructions
defm VMUL_V : VMUL_MV_V_X<"vmul", 0b100101>;
defm VMULH_V : VMUL_MV_V_X<"vmulh", 0b100111>;
defm VMULHU_V : VMUL_MV_V_X<"vmulhu", 0b100100>;
defm VMULHSU_V : VMUL_MV_V_X<"vmulhsu", 0b100110>;

// Vector Integer Divide Instructions
defm VDIVU_V : VDIV_MV_V_X<"vdivu", 0b100000>;
defm VDIV_V : VDIV_MV_V_X<"vdiv", 0b100001>;
defm VREMU_V : VDIV_MV_V_X<"vremu", 0b100010>;
defm VREM_V : VDIV_MV_V_X<"vrem", 0b100011>;

// Vector Widening Integer Multiply Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMUL_V : VWMUL_MV_V_X<"vwmul", 0b111011>;
defm VWMULU_V : VWMUL_MV_V_X<"vwmulu", 0b111000>;
defm VWMULSU_V : VWMUL_MV_V_X<"vwmulsu", 0b111010>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Single-Width Integer Multiply-Add Instructions
defm VMACC_V : VMAC_MV_V_X<"vmacc", 0b101101>;
defm VNMSAC_V : VMAC_MV_V_X<"vnmsac", 0b101111>;
defm VMADD_V : VMAC_MV_V_X<"vmadd", 0b101001>;
defm VNMSUB_V : VMAC_MV_V_X<"vnmsub", 0b101011>;

// Vector Widening Integer Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV in {
defm VWMACCU_V : VWMAC_MV_V_X<"vwmaccu", 0b111100>;
defm VWMACC_V : VWMAC_MV_V_X<"vwmacc", 0b111101>;
defm VWMACCSU_V : VWMAC_MV_V_X<"vwmaccsu", 0b111111>;
// vwmaccus has only a .vx form (no .vv).
defm VWMACCUS_V : VWMAC_MV_X<"vwmaccus", 0b111110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV

// Vector Integer Merge Instructions
defm VMERGE_V : VMRG_IV_V_X_I<"vmerge", 0b010111>;
// Vector Integer Move Instructions
// Encoded as unmasked vmerge (same funct6) with the vs2 field forced to 0.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vs2 = 0, vm = 1,
    RVVConstraint = NoConstraint in {

def VMV_V_V : RVInstVV<0b010111, OPIVV, (outs VR:$vd),
                       (ins VR:$vs1), "vmv.v.v", "$vd, $vs1">,
              Sched<[WriteVIMovV, ReadVIMovV]>;

def VMV_V_X : RVInstVX<0b010111, OPIVX, (outs VR:$vd),
                       (ins GPR:$rs1), "vmv.v.x", "$vd, $rs1">,
              Sched<[WriteVIMovX, ReadVIMovX]>;

def VMV_V_I : RVInstIVI<0b010111, (outs VR:$vd),
                        (ins simm5:$imm), "vmv.v.i", "$vd, $imm">,
              Sched<[WriteVIMovI]>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
// Vector Fixed-Point Arithmetic Instructions
defm VSADDU_V : VSALU_IV_V_X_I<"vsaddu", 0b100000>;
defm VSADD_V : VSALU_IV_V_X_I<"vsadd", 0b100001>;
defm VSSUBU_V : VSALU_IV_V_X<"vssubu", 0b100010>;
defm VSSUB_V : VSALU_IV_V_X<"vssub", 0b100011>;

// Vector Single-Width Averaging Add and Subtract
defm VAADDU_V : VAALU_MV_V_X<"vaaddu", 0b001000>;
defm VAADD_V : VAALU_MV_V_X<"vaadd", 0b001001>;
defm VASUBU_V : VAALU_MV_V_X<"vasubu", 0b001010>;
defm VASUB_V : VAALU_MV_V_X<"vasub", 0b001011>;

// Vector Single-Width Fractional Multiply with Rounding and Saturation
defm VSMUL_V : VSMUL_IV_V_X<"vsmul", 0b100111>;

// Vector Single-Width Scaling Shift Instructions
defm VSSRL_V : VSSHF_IV_V_X_I<"vssrl", 0b101010, uimm5>;
defm VSSRA_V : VSSHF_IV_V_X_I<"vssra", 0b101011, uimm5>;

// Vector Narrowing Fixed-Point Clip Instructions
let Constraints = "@earlyclobber $vd" in {
defm VNCLIPU_W : VNCLP_IV_V_X_I<"vnclipu", 0b101110, uimm5, "w">;
defm VNCLIP_W : VNCLP_IV_V_X_I<"vnclip", 0b101111, uimm5, "w">;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Add/Subtract Instructions
// These read the dynamic rounding mode (FRM) and can raise FP exceptions.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFADD_V : VALU_FV_V_F<"vfadd", 0b000000>;
defm VFSUB_V : VALU_FV_V_F<"vfsub", 0b000010>;
defm VFRSUB_V : VALU_FV_F<"vfrsub", 0b100111>;
}

// Vector Widening Floating-Point Add/Subtract Instructions
let Constraints = "@earlyclobber $vd",
    Uses = [FRM],
    mayRaiseFPException = true in {
let RVVConstraint = WidenV in {
defm VFWADD_V : VWALU_FV_V_F<"vfwadd", 0b110000>;
defm VFWSUB_V : VWALU_FV_V_F<"vfwsub", 0b110010>;
} // RVVConstraint = WidenV
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let RVVConstraint = WidenW in {
defm VFWADD_W : VWALU_FV_V_F<"vfwadd", 0b110100, "w">;
defm VFWSUB_W : VWALU_FV_V_F<"vfwsub", 0b110110, "w">;
} // RVVConstraint = WidenW
} // Constraints = "@earlyclobber $vd", Uses = [FRM], mayRaiseFPException = true
// Vector Single-Width Floating-Point Multiply/Divide Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFMUL_V : VMUL_FV_V_F<"vfmul", 0b100100>;
defm VFDIV_V : VDIV_FV_V_F<"vfdiv", 0b100000>;
defm VFRDIV_V : VRDIV_FV_F<"vfrdiv", 0b100001>;
}

// Vector Widening Floating-Point Multiply
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
    Uses = [FRM], mayRaiseFPException = true in {
defm VFWMUL_V : VWMUL_FV_V_F<"vfwmul", 0b111000>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true

// Vector Single-Width Floating-Point Fused Multiply-Add Instructions
let Uses = [FRM], mayRaiseFPException = true in {
defm VFMACC_V : VMAC_FV_V_F<"vfmacc", 0b101100>;
defm VFNMACC_V : VMAC_FV_V_F<"vfnmacc", 0b101101>;
defm VFMSAC_V : VMAC_FV_V_F<"vfmsac", 0b101110>;
defm VFNMSAC_V : VMAC_FV_V_F<"vfnmsac", 0b101111>;
defm VFMADD_V : VMAC_FV_V_F<"vfmadd", 0b101000>;
defm VFNMADD_V : VMAC_FV_V_F<"vfnmadd", 0b101001>;
defm VFMSUB_V : VMAC_FV_V_F<"vfmsub", 0b101010>;
defm VFNMSUB_V : VMAC_FV_V_F<"vfnmsub", 0b101011>;
}

// Vector Widening Floating-Point Fused Multiply-Add Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenV,
    Uses = [FRM], mayRaiseFPException = true in {
defm VFWMACC_V : VWMAC_FV_V_F<"vfwmacc", 0b111100>;
defm VFWNMACC_V : VWMAC_FV_V_F<"vfwnmacc", 0b111101>;
defm VFWMSAC_V : VWMAC_FV_V_F<"vfwmsac", 0b111110>;
defm VFWNMSAC_V : VWMAC_FV_V_F<"vfwnmsac", 0b111111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenV, Uses = [FRM], mayRaiseFPException = true
// Vector Floating-Point Square-Root Instruction
let Uses = [FRM], mayRaiseFPException = true in {
defm VFSQRT_V : VSQR_FV_VS2<"vfsqrt.v", 0b010011, 0b00000>;
defm VFREC7_V : VRCP_FV_VS2<"vfrec7.v", 0b010011, 0b00101>;
}

// vfrsqrt7 does not read FRM (no Uses = [FRM] here).
let mayRaiseFPException = true in
defm VFRSQRT7_V : VRCP_FV_VS2<"vfrsqrt7.v", 0b010011, 0b00100>;

// Vector Floating-Point MIN/MAX Instructions
let mayRaiseFPException = true in {
defm VFMIN_V : VCMP_FV_V_F<"vfmin", 0b000100>;
defm VFMAX_V : VCMP_FV_V_F<"vfmax", 0b000110>;
}

// Vector Floating-Point Sign-Injection Instructions
defm VFSGNJ_V : VSGNJ_FV_V_F<"vfsgnj", 0b001000>;
defm VFSGNJN_V : VSGNJ_FV_V_F<"vfsgnjn", 0b001001>;
defm VFSGNJX_V : VSGNJ_FV_V_F<"vfsgnjx", 0b001010>;

// vfneg/vfabs are sign-injection with both sources equal.
def : InstAlias<"vfneg.v $vd, $vs$vm",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfneg.v $vd, $vs",
                (VFSGNJN_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
def : InstAlias<"vfabs.v $vd, $vs$vm",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, VMaskOp:$vm)>;
def : InstAlias<"vfabs.v $vd, $vs",
                (VFSGNJX_VV VR:$vd, VR:$vs, VR:$vs, zero_reg)>;
// Vector Floating-Point Compare Instructions
let RVVConstraint = NoConstraint, mayRaiseFPException = true in {
defm VMFEQ_V : VCMP_FV_V_F<"vmfeq", 0b011000>;
defm VMFNE_V : VCMP_FV_V_F<"vmfne", 0b011100>;
defm VMFLT_V : VCMP_FV_V_F<"vmflt", 0b011011>;
defm VMFLE_V : VCMP_FV_V_F<"vmfle", 0b011001>;
defm VMFGT_V : VCMP_FV_F<"vmfgt", 0b011101>;
defm VMFGE_V : VCMP_FV_F<"vmfge", 0b011111>;
} // RVVConstraint = NoConstraint, mayRaiseFPException = true

// .vv greater-than forms are operand-swapped less-than aliases.
def : InstAlias<"vmfgt.vv $vd, $va, $vb$vm",
                (VMFLT_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;
def : InstAlias<"vmfge.vv $vd, $va, $vb$vm",
                (VMFLE_VV VR:$vd, VR:$vb, VR:$va, VMaskOp:$vm), 0>;

// Vector Floating-Point Classify Instruction
defm VFCLASS_V : VCLS_FV_VS2<"vfclass.v", 0b010011, 0b10000>;
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {

// Vector Floating-Point Merge Instruction
// Always reads v0 as the selector; printed with a literal "v0" operand.
def VFMERGE_VFM : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                           (ins VR:$vs2, FPR32:$rs1, VMV0:$v0),
                           "vfmerge.vfm", "$vd, $vs2, $rs1, v0">,
                  Sched<[WriteVFMergeV, ReadVFMergeV, ReadVFMergeF, ReadVMask]>;

// Vector Floating-Point Move Instruction
// Unmasked encoding with vs2 field forced to 0 (splat of the FP scalar).
let RVVConstraint = NoConstraint in
let vm = 1, vs2 = 0 in
def VFMV_V_F : RVInstVX<0b010111, OPFVF, (outs VR:$vd),
                        (ins FPR32:$rs1), "vfmv.v.f", "$vd, $rs1">,
               Sched<[WriteVFMovV, ReadVFMovF]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
// Single-Width Floating-Point/Integer Type-Convert Instructions
defm VFCVT_XU_F_V : VCVTI_FV_VS2<"vfcvt.xu.f.v", 0b010010, 0b00000>;
defm VFCVT_X_F_V : VCVTI_FV_VS2<"vfcvt.x.f.v", 0b010010, 0b00001>;
defm VFCVT_RTZ_XU_F_V : VCVTI_FV_VS2<"vfcvt.rtz.xu.f.v", 0b010010, 0b00110>;
defm VFCVT_RTZ_X_F_V : VCVTI_FV_VS2<"vfcvt.rtz.x.f.v", 0b010010, 0b00111>;
defm VFCVT_F_XU_V : VCVTF_IV_VS2<"vfcvt.f.xu.v", 0b010010, 0b00010>;
defm VFCVT_F_X_V : VCVTF_IV_VS2<"vfcvt.f.x.v", 0b010010, 0b00011>;

// Widening Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt in {
defm VFWCVT_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.xu.f.v", 0b010010, 0b01000>;
defm VFWCVT_X_F_V : VWCVTI_FV_VS2<"vfwcvt.x.f.v", 0b010010, 0b01001>;
defm VFWCVT_RTZ_XU_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.xu.f.v", 0b010010, 0b01110>;
defm VFWCVT_RTZ_X_F_V : VWCVTI_FV_VS2<"vfwcvt.rtz.x.f.v", 0b010010, 0b01111>;
defm VFWCVT_F_XU_V : VWCVTF_IV_VS2<"vfwcvt.f.xu.v", 0b010010, 0b01010>;
defm VFWCVT_F_X_V : VWCVTF_IV_VS2<"vfwcvt.f.x.v", 0b010010, 0b01011>;
defm VFWCVT_F_F_V : VWCVTF_FV_VS2<"vfwcvt.f.f.v", 0b010010, 0b01100>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = WidenCvt

// Narrowing Floating-Point/Integer Type-Convert Instructions
let Constraints = "@earlyclobber $vd" in {
defm VFNCVT_XU_F_W : VNCVTI_FV_VS2<"vfncvt.xu.f.w", 0b010010, 0b10000>;
defm VFNCVT_X_F_W : VNCVTI_FV_VS2<"vfncvt.x.f.w", 0b010010, 0b10001>;
defm VFNCVT_RTZ_XU_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.xu.f.w", 0b010010, 0b10110>;
defm VFNCVT_RTZ_X_F_W : VNCVTI_FV_VS2<"vfncvt.rtz.x.f.w", 0b010010, 0b10111>;
defm VFNCVT_F_XU_W : VNCVTF_IV_VS2<"vfncvt.f.xu.w", 0b010010, 0b10010>;
defm VFNCVT_F_X_W : VNCVTF_IV_VS2<"vfncvt.f.x.w", 0b010010, 0b10011>;
defm VFNCVT_F_F_W : VNCVTF_FV_VS2<"vfncvt.f.f.w", 0b010010, 0b10100>;
defm VFNCVT_ROD_F_F_W : VNCVTF_FV_VS2<"vfncvt.rod.f.f.w", 0b010010, 0b10101>;
} // Constraints = "@earlyclobber $vd"
} // Predicates = [HasVInstructionsAnyF]
let Predicates = [HasVInstructions] in {

// Vector Single-Width Integer Reduction Instructions
let RVVConstraint = NoConstraint in {
defm VREDSUM : VRED_MV_V<"vredsum", 0b000000>;
defm VREDMAXU : VRED_MV_V<"vredmaxu", 0b000110>;
defm VREDMAX : VRED_MV_V<"vredmax", 0b000111>;
defm VREDMINU : VRED_MV_V<"vredminu", 0b000100>;
defm VREDMIN : VRED_MV_V<"vredmin", 0b000101>;
defm VREDAND : VRED_MV_V<"vredand", 0b000001>;
defm VREDOR : VRED_MV_V<"vredor", 0b000010>;
defm VREDXOR : VRED_MV_V<"vredxor", 0b000011>;
} // RVVConstraint = NoConstraint

// Vector Widening Integer Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
defm VWREDSUMU : VWRED_IV_V<"vwredsumu", 0b110000>;
defm VWREDSUM : VWRED_IV_V<"vwredsum", 0b110001>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsAnyF] in {
// Vector Single-Width Floating-Point Reduction Instructions
let RVVConstraint = NoConstraint in {
// The sums read the dynamic rounding mode; min/max do not.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFREDOSUM : VREDO_FV_V<"vfredosum", 0b000011>;
defm VFREDUSUM : VRED_FV_V<"vfredusum", 0b000001>;
}
let mayRaiseFPException = true in {
defm VFREDMAX : VRED_FV_V<"vfredmax", 0b000111>;
defm VFREDMIN : VRED_FV_V<"vfredmin", 0b000101>;
}
} // RVVConstraint = NoConstraint

// vfredsum is the pre-1.0 spelling of vfredusum.
def : InstAlias<"vfredsum.vs $vd, $vs2, $vs1$vm",
                (VFREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;

// Vector Widening Floating-Point Reduction Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint in {
// Set earlyclobber for following instructions for second and mask operands.
// This has the downside that the earlyclobber constraint is too coarse and
// will impose unnecessary restrictions by not allowing the destination to
// overlap with the first (wide) operand.
let Uses = [FRM], mayRaiseFPException = true in {
defm VFWREDOSUM : VWREDO_FV_V<"vfwredosum", 0b110011>;
defm VFWREDUSUM : VWRED_FV_V<"vfwredusum", 0b110001>;
}
} // Constraints = "@earlyclobber $vd", RVVConstraint = NoConstraint

// vfwredsum is the pre-1.0 spelling of vfwredusum.
def : InstAlias<"vfwredsum.vs $vd, $vs2, $vs1$vm",
                (VFWREDUSUM_VS VR:$vd, VR:$vs2, VR:$vs1, VMaskOp:$vm), 0>;
} // Predicates = [HasVInstructionsAnyF]
let Predicates = [HasVInstructions] in {
// Vector Mask-Register Logical Instructions
let RVVConstraint = NoConstraint in {
defm VMAND_M : VMALU_MV_Mask<"vmand", 0b011001, "m">;
defm VMNAND_M : VMALU_MV_Mask<"vmnand", 0b011101, "m">;
defm VMANDN_M : VMALU_MV_Mask<"vmandn", 0b011000, "m">;
defm VMXOR_M : VMALU_MV_Mask<"vmxor", 0b011011, "m">;
defm VMOR_M : VMALU_MV_Mask<"vmor", 0b011010, "m">;
defm VMNOR_M : VMALU_MV_Mask<"vmnor", 0b011110, "m">;
defm VMORN_M : VMALU_MV_Mask<"vmorn", 0b011100, "m">;
defm VMXNOR_M : VMALU_MV_Mask<"vmxnor", 0b011111, "m">;
}

// Mask copy/clear/set/invert are sugar over the logical ops.
def : InstAlias<"vmmv.m $vd, $vs",
                (VMAND_MM VR:$vd, VR:$vs, VR:$vs)>;
def : InstAlias<"vmclr.m $vd",
                (VMXOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmset.m $vd",
                (VMXNOR_MM VR:$vd, VR:$vd, VR:$vd)>;
def : InstAlias<"vmnot.m $vd, $vs",
                (VMNAND_MM VR:$vd, VR:$vs, VR:$vs)>;

// vmandnot/vmornot are the pre-1.0 spellings of vmandn/vmorn.
def : InstAlias<"vmandnot.mm $vd, $vs2, $vs1",
                (VMANDN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;
def : InstAlias<"vmornot.mm $vd, $vs2, $vs1",
                (VMORN_MM VR:$vd, VR:$vs2, VR:$vs1), 0>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {

// Vector mask population count vcpop
def VCPOP_M : RVInstV<0b010000, 0b10000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2, VMaskOp:$vm),
                      "vcpop.m", "$vd, $vs2$vm">,
              Sched<[WriteVMPopV, ReadVMPopV, ReadVMask]>;

// vfirst find-first-set mask bit
def VFIRST_M : RVInstV<0b010000, 0b10001, OPMVV, (outs GPR:$vd),
                       (ins VR:$vs2, VMaskOp:$vm),
                       "vfirst.m", "$vd, $vs2$vm">,
               Sched<[WriteVMFFSV, ReadVMFFSV, ReadVMask]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

// vpopc.m is the pre-1.0 spelling of vcpop.m.
def : InstAlias<"vpopc.m $vd, $vs2$vm",
                (VCPOP_M GPR:$vd, VR:$vs2, VMaskOp:$vm), 0>;

let Constraints = "@earlyclobber $vd", RVVConstraint = Iota in {

// vmsbf.m set-before-first mask bit
defm VMSBF_M : VMSFS_MV_V<"vmsbf.m", 0b010100, 0b00001>;
// vmsif.m set-including-first mask bit
defm VMSIF_M : VMSFS_MV_V<"vmsif.m", 0b010100, 0b00011>;
// vmsof.m set-only-first mask bit
defm VMSOF_M : VMSFS_MV_V<"vmsof.m", 0b010100, 0b00010>;
// Vector Iota Instruction
defm VIOTA_M : VMIOT_MV_V<"viota.m", 0b010100, 0b10000>;

} // Constraints = "@earlyclobber $vd", RVVConstraint = Iota

// Vector Element Index Instruction
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {

let vs2 = 0 in
def VID_V : RVInstV<0b010100, 0b10001, OPMVV, (outs VR:$vd),
                    (ins VMaskOp:$vm), "vid.v", "$vd$vm">,
            Sched<[WriteVMIdxV, ReadVMask]>;

// Integer Scalar Move Instructions
let vm = 1, RVVConstraint = NoConstraint in {
def VMV_X_S : RVInstV<0b010000, 0b00000, OPMVV, (outs GPR:$vd),
                      (ins VR:$vs2), "vmv.x.s", "$vd, $vs2">,
              Sched<[WriteVIMovVX, ReadVIMovVX]>;
// vmv.s.x only writes element 0; the rest of vd passes through via the
// tied operand.
let Constraints = "$vd = $vd_wb" in
def VMV_S_X : RVInstV2<0b010000, 0b00000, OPMVX, (outs VR:$vd_wb),
                       (ins VR:$vd, GPR:$rs1), "vmv.s.x", "$vd, $rs1">,
              Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>;
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsAnyF] in {

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1,
    RVVConstraint = NoConstraint in {
// Floating-Point Scalar Move Instructions
def VFMV_F_S : RVInstV<0b010000, 0b00000, OPFVV, (outs FPR32:$vd),
                       (ins VR:$vs2), "vfmv.f.s", "$vd, $vs2">,
               Sched<[WriteVFMovVF, ReadVFMovVF]>;
// vfmv.s.f only writes element 0; the rest of vd passes through via the
// tied operand.
let Constraints = "$vd = $vd_wb" in
def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
                        (ins VR:$vd, FPR32:$rs1), "vfmv.s.f", "$vd, $rs1">,
               Sched<[WriteVFMovFV, ReadVFMovFV, ReadVFMovFX]>;

} // hasSideEffects = 0, mayLoad = 0, mayStore = 0, vm = 1

} // Predicates = [HasVInstructionsAnyF]
let Predicates = [HasVInstructions] in {
// Vector Slide Instructions
// Slide-up destinations may not overlap the source (SlideUp constraint);
// slide-down carries no such restriction.
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110, uimm5>;
defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111, uimm5>;
defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructionsAnyF] in {
// FP slide1 variants mirror the integer ones above.
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
defm VFSLIDE1UP_V : VSLD1_FV_F<"vfslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
defm VFSLIDE1DOWN_V : VSLD1_FV_F<"vfslide1down", 0b001111>;
} // Predicates = [HasVInstructionsAnyF]
let Predicates = [HasVInstructions] in {
// Vector Register Gather Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather in {
defm VRGATHER_V : VGTR_IV_V_X_I<"vrgather", 0b001100, uimm5>;
// vrgatherei16 uses EEW=16 indices regardless of SEW.
def VRGATHEREI16_VV : VALUVV<0b001110, OPIVV, "vrgatherei16.vv">,
                      Sched<[WriteVGatherV, ReadVGatherV, ReadVGatherV]>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vrgather

// Vector Compress Instruction
let Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress in {
defm VCOMPRESS_V : VCPR_MV_Mask<"vcompress", 0b010111>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = Vcompress

// Whole Vector Register Move
let hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    RVVConstraint = NoConstraint in {
def VMV1R_V : RVInstV<0b100111, 0, OPIVI, (outs VR:$vd), (ins VR:$vs2),
                      "vmv1r.v", "$vd, $vs2">, VMVRSched<1> {
  let Uses = [VL, VTYPE];
}

// A future extension may relax the vector register alignment restrictions.
foreach n = [2, 4, 8] in {
  defvar vrc = !cast<VReg>("VRM"#n);
  // The immediate field encodes n-1 (register count minus one).
  def VMV#n#R_V : RVInstV<0b100111, !add(n, -1), OPIVI, (outs vrc:$vd),
                          (ins vrc:$vs2), "vmv" # n # "r.v", "$vd, $vs2">,
                  VMVRSched<n> {
    let Uses = [VL, VTYPE];
  }
}
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0
} // Predicates = [HasVInstructions]
let Predicates = [HasVInstructions] in {

foreach eew = [8, 16, 32] in {
  defvar w = !cast<RISCVWidth>("LSWidth"#eew);
  // nf = fields per segment; the NF field encodes nf-1.
  foreach nf=2-8 in {
    // Vector Unit-strided Segment Instructions
    def VLSEG#nf#E#eew#_V :
      VUnitStrideSegmentLoad<!add(nf, -1), w, "vlseg"#nf#"e"#eew#".v">;
    def VLSEG#nf#E#eew#FF_V :
      VUnitStrideSegmentLoadFF<!add(nf, -1), w, "vlseg"#nf#"e"#eew#"ff.v">;
    def VSSEG#nf#E#eew#_V :
      VUnitStrideSegmentStore<!add(nf, -1), w, "vsseg"#nf#"e"#eew#".v">;

    // Vector Strided Instructions
    def VLSSEG#nf#E#eew#_V :
      VStridedSegmentLoad<!add(nf, -1), w, "vlsseg"#nf#"e"#eew#".v">;
    def VSSSEG#nf#E#eew#_V :
      VStridedSegmentStore<!add(nf, -1), w, "vssseg"#nf#"e"#eew#".v">;

    // Vector Indexed Instructions
    def VLUXSEG#nf#EI#eew#_V :
      VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, w,
                          "vluxseg"#nf#"ei"#eew#".v">;
    def VLOXSEG#nf#EI#eew#_V :
      VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, w,
                          "vloxseg"#nf#"ei"#eew#".v">;
    def VSUXSEG#nf#EI#eew#_V :
      VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, w,
                          "vsuxseg"#nf#"ei"#eew#".v">;
    def VSOXSEG#nf#EI#eew#_V :
      VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, w,
                          "vsoxseg"#nf#"ei"#eew#".v">;
  }
}
} // Predicates = [HasVInstructions]
// EEW=64 segment loads/stores are split out under HasVInstructionsI64,
// since 64-bit elements require the Zve64x/V configurations.
// NOTE(review): `nf` is referenced but its "foreach nf = ..." header
// (upstream line ~1533) is missing from this listing, along with the
// matching closing brace near upstream line 1547.
1532 let Predicates = [HasVInstructionsI64] in {
1534 // Vector Unit-strided Segment Instructions
// NF field encodes nf-1 (!add(nf, -1)), same as the eew 8/16/32 defs above.
1535 def VLSEG#nf#E64_V :
1536 VUnitStrideSegmentLoad<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64.v">;
1537 def VLSEG#nf#E64FF_V :
1538 VUnitStrideSegmentLoadFF<!add(nf, -1), LSWidth64, "vlseg"#nf#"e64ff.v">;
1539 def VSSEG#nf#E64_V :
1540 VUnitStrideSegmentStore<!add(nf, -1), LSWidth64, "vsseg"#nf#"e64.v">;
1542 // Vector Strided Segment Instructions
1543 def VLSSEG#nf#E64_V :
1544 VStridedSegmentLoad<!add(nf, -1), LSWidth64, "vlsseg"#nf#"e64.v">;
1545 def VSSSEG#nf#E64_V :
1546 VStridedSegmentStore<!add(nf, -1), LSWidth64, "vssseg"#nf#"e64.v">;
1548 } // Predicates = [HasVInstructionsI64]
// EI64 indexed segment accesses additionally require IsRV64 (64-bit index
// EEW is not available on RV32 per the predicate pair below).
// NOTE(review): `nf` is referenced but its "foreach nf = ..." header
// (upstream line ~1550) is missing from this listing, along with the
// matching closing brace near upstream line 1564.
1549 let Predicates = [HasVInstructionsI64, IsRV64] in {
1551 // Vector Indexed Segment Instructions
// Unordered (vlux/vsux) vs. ordered (vlox/vsox) selected by the MOP operand;
// NF field encodes nf-1 via !add(nf, -1).
1552 def VLUXSEG#nf#EI64_V :
1553 VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord, LSWidth64,
1554 "vluxseg"#nf#"ei64.v">;
1555 def VLOXSEG#nf#EI64_V :
1556 VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder, LSWidth64,
1557 "vloxseg"#nf#"ei64.v">;
1558 def VSUXSEG#nf#EI64_V :
1559 VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord, LSWidth64,
1560 "vsuxseg"#nf#"ei64.v">;
1561 def VSOXSEG#nf#EI64_V :
1562 VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder, LSWidth64,
1563 "vsoxseg"#nf#"ei64.v">;
1565 } // Predicates = [HasVInstructionsI64, IsRV64]
1567 include "RISCVInstrInfoVPseudos.td"