1 //===-- VOP2Instructions.td - Vector Instruction Definitions --------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 //===----------------------------------------------------------------------===//
12 //===----------------------------------------------------------------------===//
// Standard 32-bit VOP2 machine encoding: src0 in bits 8-0, src1 in bits
// 16-9, vdst in bits 24-17; operand fields are emitted only when the
// profile declares them present.  NOTE(review): the operand field
// declarations and the opcode bits (presumably Inst{30-25} = op) are
// elided in this view — confirm against the full file.
14 class VOP2e <bits<6> op, VOPProfile P> : Enc32 {
19 let Inst{8-0} = !if(P.HasSrc0, src0, 0);
20 let Inst{16-9} = !if(P.HasSrc1, src1, 0);
21 let Inst{24-17} = !if(P.EmitDst, vdst, 0);
23 let Inst{31} = 0x0; // encoding: bit 31 = 0 selects the VOP2 format
// 64-bit MADMK/MADAK encoding: same low-32-bit layout as VOP2e, with a
// 32-bit literal constant carried in bits 63-32.
26 class VOP2_MADKe <bits<6> op, VOPProfile P> : Enc64 {
32 let Inst{8-0} = !if(P.HasSrc0, src0, 0);
33 let Inst{16-9} = !if(P.HasSrc1, src1, 0);
34 let Inst{24-17} = !if(P.EmitDst, vdst, 0);
36 let Inst{31} = 0x0; // encoding
37 let Inst{63-32} = imm; // inline 32-bit literal operand
// SDWA (sub-dword addressing) variant of the VOP2 encoding: the src0
// slot holds the fixed SDWA marker value 0xf9 instead of a register.
40 class VOP2_SDWAe <bits<6> op, VOPProfile P> : VOP_SDWAe <P> {
44 let Inst{8-0} = 0xf9; // sdwa marker in the src0 field
45 let Inst{16-9} = !if(P.HasSrc1, src1{7-0}, 0);
46 let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
48 let Inst{31} = 0x0; // encoding
// GFX9 SDWA variant.  Differs from VOP2_SDWAe in that src1 is 9 bits
// wide; bit 63 flags src1 as an SGPR.
51 class VOP2_SDWA9Ae <bits<6> op, VOPProfile P> : VOP_SDWA9Ae <P> {
55 let Inst{8-0} = 0xf9; // sdwa marker in the src0 field
56 let Inst{16-9} = !if(P.HasSrc1, src1{7-0}, 0);
57 let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
59 let Inst{31} = 0x0; // encoding
60 let Inst{63} = !if(P.HasSrc1, src1{8}, 0); // src1_sgpr
// CodeGen-only pseudo for the 32-bit VOP2 form; concrete per-subtarget
// encodings are attached later via VOP2_Real.  NOTE(review): additional
// flag assignments appear elided in this view.
63 class VOP2_Pseudo <string opName, VOPProfile P, list<dag> pattern=[], string suffix = "_e32"> :
64 VOP_Pseudo <opName, suffix, P, P.Outs32, P.Ins32, "", pattern> {
66 let AsmOperands = P.Asm32;
71 let hasSideEffects = 0;
72 let SubtargetPredicate = isGCN;
78 let AsmVariantName = AMDGPUAsmVariants.Default;
// Concrete (MC-emittable) counterpart of a VOP2_Pseudo: reuses the
// pseudo's operand lists and asm string, and copies the pseudo's flags
// so the real and pseudo forms stay in sync.
// Fix: Constraints and DisableEncoding were previously assigned twice
// with identical values; the redundant first pair has been dropped.
81 class VOP2_Real <VOP2_Pseudo ps, int EncodingFamily> :
82 InstSI <ps.OutOperandList, ps.InOperandList, ps.Mnemonic # ps.AsmOperands, []>,
83 SIMCInstr <ps.PseudoInstr, EncodingFamily> {
86 let isCodeGenOnly = 0;
91 // copy relevant pseudo op flags
92 let SubtargetPredicate = ps.SubtargetPredicate;
93 let AsmMatchConverter = ps.AsmMatchConverter;
94 let AsmVariantName = ps.AsmVariantName;
95 let Constraints = ps.Constraints;
96 let DisableEncoding = ps.DisableEncoding;
97 let TSFlags = ps.TSFlags;
98 let UseNamedOperandTable = ps.UseNamedOperandTable;
// SDWA and DPP pseudo wrappers for VOP2; the SDWA form selects the
// VOP2-specific assembler-match converter.
103 class VOP2_SDWA_Pseudo <string OpName, VOPProfile P, list<dag> pattern=[]> :
104 VOP_SDWA_Pseudo <OpName, P, pattern> {
105 let AsmMatchConverter = "cvtSdwaVOP2";
108 class VOP2_DPP_Pseudo <string OpName, VOPProfile P, list<dag> pattern=[]> :
109 VOP_DPP_Pseudo <OpName, P, pattern> {
// Selection pattern for the 64-bit (VOP3) form of a VOP2 op: if the
// profile has modifiers, match through VOP3Mods0/VOP3Mods wrappers,
// otherwise match the plain two-operand node.  NOTE(review): the head of
// the !if's true-branch dag appears elided in this view.
113 class getVOP2Pat64 <SDPatternOperator node, VOPProfile P> : LetDummies {
114 list<dag> ret = !if(P.HasModifiers,
118 (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
119 (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp))),
120 (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
121 [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))]);
// Defines only the 32-bit (_e32) pseudo for an op, tagged with its
// commutable-reverse counterpart (revOp).
124 multiclass VOP2Inst_e32<string opName,
126 SDPatternOperator node = null_frag,
127 string revOp = opName,
128 bit GFX9Renamed = 0> {
129 let renamedInGFX9 = GFX9Renamed in {
130 def _e32 : VOP2_Pseudo <opName, P, VOPPatOrNull<node,P>.ret>,
131 Commutable_REV<revOp#"_e32", !eq(revOp, opName)>;
132 } // End renamedInGFX9 = GFX9Renamed
// Defines only the 64-bit (_e64, VOP3-encoded) pseudo for an op.
135 multiclass VOP2Inst_e64<string opName,
137 SDPatternOperator node = null_frag,
138 string revOp = opName,
139 bit GFX9Renamed = 0> {
140 let renamedInGFX9 = GFX9Renamed in {
141 def _e64 : VOP3_Pseudo <opName, P, getVOP2Pat64<node, P>.ret>,
142 Commutable_REV<revOp#"_e64", !eq(revOp, opName)>;
143 } // End renamedInGFX9 = GFX9Renamed
// Defines only the SDWA pseudo for an op.
146 multiclass VOP2Inst_sdwa<string opName,
148 SDPatternOperator node = null_frag,
149 string revOp = opName,
150 bit GFX9Renamed = 0> {
151 let renamedInGFX9 = GFX9Renamed in {
152 def _sdwa : VOP2_SDWA_Pseudo <opName, P>;
153 } // End renamedInGFX9 = GFX9Renamed
// Convenience multiclass: instantiates the _e32, _e64 and _sdwa forms,
// plus a _dpp pseudo when the profile supports the DPP extension.
156 multiclass VOP2Inst<string opName,
158 SDPatternOperator node = null_frag,
159 string revOp = opName,
160 bit GFX9Renamed = 0> :
161 VOP2Inst_e32<opName, P, node, revOp, GFX9Renamed>,
162 VOP2Inst_e64<opName, P, node, revOp, GFX9Renamed>,
163 VOP2Inst_sdwa<opName, P, node, revOp, GFX9Renamed> {
164 let renamedInGFX9 = GFX9Renamed in {
165 foreach _ = BoolToList<P.HasExtDPP>.ret in
166 def _dpp : VOP2_DPP_Pseudo <opName, P>;
// Variant of VOP2Inst for carry-producing ops: the _e32/_sdwa/_dpp
// forms implicitly define VCC (Defs = [VCC]) and, when useSGPRInput is
// set (three source args), also implicitly use it.  NOTE(review): the
// GFX9Renamed parameter declaration is elided in this view.
170 multiclass VOP2bInst <string opName,
172 SDPatternOperator node = null_frag,
173 string revOp = opName,
175 bit useSGPRInput = !eq(P.NumSrcArgs, 3)> {
176 let renamedInGFX9 = GFX9Renamed in {
177 let SchedRW = [Write32Bit, WriteSALU] in {
178 let Uses = !if(useSGPRInput, [VCC, EXEC], [EXEC]), Defs = [VCC] in {
179 def _e32 : VOP2_Pseudo <opName, P, VOPPatOrNull<node,P>.ret>,
180 Commutable_REV<revOp#"_e32", !eq(revOp, opName)>;
182 def _sdwa : VOP2_SDWA_Pseudo <opName, P> {
183 let AsmMatchConverter = "cvtSdwaVOP2b";
185 foreach _ = BoolToList<P.HasExtDPP>.ret in
186 def _dpp : VOP2_DPP_Pseudo <opName, P>;
189 def _e64 : VOP3_Pseudo <opName, P, getVOP2Pat64<node, P>.ret>,
190 Commutable_REV<revOp#"_e64", !eq(revOp, opName)>;
// Variant for ops that read (but do not define) a carry input: implicit
// Uses of VCC when useSGPRInput is set, no Defs.
195 multiclass VOP2eInst <string opName,
197 SDPatternOperator node = null_frag,
198 string revOp = opName,
199 bit useSGPRInput = !eq(P.NumSrcArgs, 3)> {
201 let SchedRW = [Write32Bit] in {
202 let Uses = !if(useSGPRInput, [VCC, EXEC], [EXEC]) in {
203 def _e32 : VOP2_Pseudo <opName, P>,
204 Commutable_REV<revOp#"_e32", !eq(revOp, opName)>;
206 def _sdwa : VOP2_SDWA_Pseudo <opName, P> {
207 let AsmMatchConverter = "cvtSdwaVOP2b";
210 foreach _ = BoolToList<P.HasExtDPP>.ret in
211 def _dpp : VOP2_DPP_Pseudo <opName, P>;
214 def _e64 : VOP3_Pseudo <opName, P, getVOP2Pat64<node, P>.ret>,
215 Commutable_REV<revOp#"_e64", !eq(revOp, opName)>;
// Profile for v_madak_{f16,f32}: vdst = src0 * src1 + literal imm.
// The literal operand type is chosen by element size (f32kimm/f16kimm).
219 class VOP_MADAK <ValueType vt> : VOPProfile <[vt, vt, vt, vt]> {
220 field Operand ImmOpType = !if(!eq(vt.Size, 32), f32kimm, f16kimm);
221 field dag Ins32 = (ins VCSrc_f32:$src0, VGPR_32:$src1, ImmOpType:$imm);
222 field bit HasExt = 0;
224 // Hack to stop printing _e64
225 let DstRC = RegisterOperand<VGPR_32>;
226 field string Asm32 = " $vdst, $src0, $src1, $imm";
229 def VOP_MADAK_F16 : VOP_MADAK <f16>;
230 def VOP_MADAK_F32 : VOP_MADAK <f32>;
// Profile for v_madmk_{f16,f32}: like VOP_MADAK but the literal is the
// multiplicand (vdst = src0 * imm + src1), so imm precedes src1.
232 class VOP_MADMK <ValueType vt> : VOPProfile <[vt, vt, vt, vt]> {
233 field Operand ImmOpType = !if(!eq(vt.Size, 32), f32kimm, f16kimm);
234 field dag Ins32 = (ins VCSrc_f32:$src0, ImmOpType:$imm, VGPR_32:$src1);
235 field bit HasExt = 0;
237 // Hack to stop printing _e64
238 let DstRC = RegisterOperand<VGPR_32>;
239 field string Asm32 = " $vdst, $src0, $imm, $src1";
242 def VOP_MADMK_F16 : VOP_MADMK <f16>;
243 def VOP_MADMK_F32 : VOP_MADMK <f32>;
245 // FIXME: Remove src2_modifiers. It isn't used, so is wasting memory
246 // and processing time but it makes it easier to convert to mad.
// Profile for v_mac_{f16,f32}: src2 is tied to the destination, so it
// appears as an extra VGPR input (a stub in the DPP/SDWA forms).
247 class VOP_MAC <ValueType vt> : VOPProfile <[vt, vt, vt, vt]> {
248 let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1, VGPR_32:$src2);
249 let Ins64 = getIns64<Src0RC64, Src1RC64, RegisterOperand<VGPR_32>, 3,
250 0, HasModifiers, HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret;
251 let InsDPP = (ins Src0ModDPP:$src0_modifiers, Src0DPP:$src0,
252 Src1ModDPP:$src1_modifiers, Src1DPP:$src1,
253 VGPR_32:$src2, // stub argument
254 dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
255 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl);
257 let InsSDWA = (ins Src0ModSDWA:$src0_modifiers, Src0SDWA:$src0,
258 Src1ModSDWA:$src1_modifiers, Src1SDWA:$src1,
259 VGPR_32:$src2, // stub argument
260 clampmod:$clamp, omod:$omod,
261 dst_sel:$dst_sel, dst_unused:$dst_unused,
262 src0_sel:$src0_sel, src1_sel:$src1_sel);
263 let Asm32 = getAsm32<1, 2, vt>.ret;
264 let Asm64 = getAsm64<1, 2, 0, HasModifiers, HasOMod, vt>.ret;
265 let AsmDPP = getAsmDPP<1, 2, HasModifiers, vt>.ret;
266 let AsmSDWA = getAsmSDWA<1, 2, vt>.ret;
267 let AsmSDWA9 = getAsmSDWA9<1, 1, 2, vt>.ret;
277 def VOP_MAC_F16 : VOP_MAC <f16>;
278 def VOP_MAC_F32 : VOP_MAC <f32>;
280 // Write out to vcc or arbitrary SGPR.
// The 32-bit form implicitly writes vcc (printed literally in Asm32);
// the 64-bit form carries an explicit $sdst output.
281 def VOP2b_I32_I1_I32_I32 : VOPProfile<[i32, i32, i32, untyped]> {
282 let Asm32 = "$vdst, vcc, $src0, $src1";
283 let Asm64 = "$vdst, $sdst, $src0, $src1";
284 let AsmSDWA = "$vdst, vcc, $src0_modifiers, $src1_modifiers$clamp $dst_sel $dst_unused $src0_sel $src1_sel";
285 let AsmSDWA9 = "$vdst, vcc, $src0_modifiers, $src1_modifiers$clamp $dst_sel $dst_unused $src0_sel $src1_sel";
286 let AsmDPP = "$vdst, vcc, $src0, $src1 $dpp_ctrl$row_mask$bank_mask$bound_ctrl";
287 let Outs32 = (outs DstRC:$vdst);
288 let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
291 // Write out to vcc or arbitrary SGPR and read in from vcc or
// arbitrary SGPR (carry-in/carry-out profile, e.g. v_addc_u32).
293 def VOP2b_I32_I1_I32_I32_I1 : VOPProfile<[i32, i32, i32, i1]> {
294 // We use VCSrc_b32 to exclude literal constants, even though the
295 // encoding normally allows them since the implicit VCC use means
296 // using one would always violate the constant bus
297 // restriction. SGPRs are still allowed because it should
298 // technically be possible to use VCC again as src0.
299 let Src0RC32 = VCSrc_b32;
300 let Asm32 = "$vdst, vcc, $src0, $src1, vcc";
301 let Asm64 = "$vdst, $sdst, $src0, $src1, $src2";
302 let AsmSDWA = "$vdst, vcc, $src0_modifiers, $src1_modifiers, vcc $clamp $dst_sel $dst_unused $src0_sel $src1_sel";
303 let AsmSDWA9 = "$vdst, vcc, $src0_modifiers, $src1_modifiers, vcc $clamp $dst_sel $dst_unused $src0_sel $src1_sel";
304 let AsmDPP = "$vdst, vcc, $src0, $src1, vcc $dpp_ctrl$row_mask$bank_mask$bound_ctrl";
305 let Outs32 = (outs DstRC:$vdst);
306 let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
308 // Suppress src2 implied by type since the 32-bit encoding uses an
// implicit vcc operand instead.
310 let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1);
312 let InsSDWA = (ins Src0ModSDWA:$src0_modifiers, Src0SDWA:$src0,
313 Src1ModSDWA:$src1_modifiers, Src1SDWA:$src1,
315 dst_sel:$dst_sel, dst_unused:$dst_unused,
316 src0_sel:$src0_sel, src1_sel:$src1_sel);
318 let InsDPP = (ins DstRCDPP:$old,
321 dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
322 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl);
329 // Read in from vcc or arbitrary SGPR (carry-in only, e.g.
// v_cndmask_b32); no SGPR carry output in either encoding.
330 def VOP2e_I32_I32_I32_I1 : VOPProfile<[i32, i32, i32, i1]> {
331 let Src0RC32 = VCSrc_b32; // See comment in def VOP2b_I32_I1_I32_I32_I1 above.
332 let Asm32 = "$vdst, $src0, $src1, vcc";
333 let Asm64 = "$vdst, $src0, $src1, $src2";
334 let AsmSDWA = "$vdst, $src0_modifiers, $src1_modifiers, vcc $clamp $dst_sel $dst_unused $src0_sel $src1_sel";
335 let AsmSDWA9 = "$vdst, $src0_modifiers, $src1_modifiers, vcc $clamp $dst_sel $dst_unused $src0_sel $src1_sel";
336 let AsmDPP = "$vdst, $src0, $src1, vcc $dpp_ctrl$row_mask$bank_mask$bound_ctrl";
338 let Outs32 = (outs DstRC:$vdst);
339 let Outs64 = (outs DstRC:$vdst);
341 // Suppress src2 implied by type since the 32-bit encoding uses an
// implicit vcc operand instead.
343 let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1);
345 let InsSDWA = (ins Src0ModSDWA:$src0_modifiers, Src0SDWA:$src0,
346 Src1ModSDWA:$src1_modifiers, Src1SDWA:$src1,
348 dst_sel:$dst_sel, dst_unused:$dst_unused,
349 src0_sel:$src0_sel, src1_sel:$src1_sel);
351 let InsDPP = (ins DstRCDPP:$old,
354 dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
355 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl);
// v_readlane_b32: reads a VGPR lane into an SGPR destination.
362 def VOP_READLANE : VOPProfile<[i32, i32, i32]> {
363 let Outs32 = (outs SReg_32:$vdst);
365 let Ins32 = (ins VGPR_32:$src0, SCSrc_b32:$src1);
367 let Asm32 = " $vdst, $src0, $src1";
// v_writelane_b32: writes an SGPR value into one lane of a VGPR.
// $vdst_in ties the unwritten lanes of the destination to the input.
376 def VOP_WRITELANE : VOPProfile<[i32, i32, i32, i32]> {
377 let Outs32 = (outs VGPR_32:$vdst);
379 let Ins32 = (ins SCSrc_b32:$src0, SCSrc_b32:$src1, VGPR_32:$vdst_in);
381 let Asm32 = " $vdst, $src0, $src1";
392 //===----------------------------------------------------------------------===//
394 //===----------------------------------------------------------------------===//
// VOP2 instruction definitions common to all GCN subtargets.
396 let SubtargetPredicate = isGCN, Predicates = [isGCN] in {
398 defm V_CNDMASK_B32 : VOP2eInst <"v_cndmask_b32", VOP2e_I32_I32_I32_I1>;
399 def V_MADMK_F32 : VOP2_Pseudo <"v_madmk_f32", VOP_MADMK_F32, []>;
401 let isCommutable = 1 in {
402 defm V_ADD_F32 : VOP2Inst <"v_add_f32", VOP_F32_F32_F32, fadd>;
403 defm V_SUB_F32 : VOP2Inst <"v_sub_f32", VOP_F32_F32_F32, fsub>;
404 defm V_SUBREV_F32 : VOP2Inst <"v_subrev_f32", VOP_F32_F32_F32, null_frag, "v_sub_f32">;
405 defm V_MUL_LEGACY_F32 : VOP2Inst <"v_mul_legacy_f32", VOP_F32_F32_F32, AMDGPUfmul_legacy>;
406 defm V_MUL_F32 : VOP2Inst <"v_mul_f32", VOP_F32_F32_F32, fmul>;
407 defm V_MUL_I32_I24 : VOP2Inst <"v_mul_i32_i24", VOP_PAT_GEN<VOP_I32_I32_I32, 2>, AMDGPUmul_i24>;
408 defm V_MUL_HI_I32_I24 : VOP2Inst <"v_mul_hi_i32_i24", VOP_PAT_GEN<VOP_I32_I32_I32, 2>, AMDGPUmulhi_i24>;
409 defm V_MUL_U32_U24 : VOP2Inst <"v_mul_u32_u24", VOP_PAT_GEN<VOP_I32_I32_I32, 2>, AMDGPUmul_u24>;
410 defm V_MUL_HI_U32_U24 : VOP2Inst <"v_mul_hi_u32_u24", VOP_PAT_GEN<VOP_I32_I32_I32, 2>, AMDGPUmulhi_u24>;
411 defm V_MIN_F32 : VOP2Inst <"v_min_f32", VOP_F32_F32_F32, fminnum_like>;
412 defm V_MAX_F32 : VOP2Inst <"v_max_f32", VOP_F32_F32_F32, fmaxnum_like>;
413 defm V_MIN_I32 : VOP2Inst <"v_min_i32", VOP_PAT_GEN<VOP_I32_I32_I32>, smin>;
414 defm V_MAX_I32 : VOP2Inst <"v_max_i32", VOP_PAT_GEN<VOP_I32_I32_I32>, smax>;
415 defm V_MIN_U32 : VOP2Inst <"v_min_u32", VOP_PAT_GEN<VOP_I32_I32_I32>, umin>;
416 defm V_MAX_U32 : VOP2Inst <"v_max_u32", VOP_PAT_GEN<VOP_I32_I32_I32>, umax>;
417 defm V_LSHRREV_B32 : VOP2Inst <"v_lshrrev_b32", VOP_I32_I32_I32, null_frag, "v_lshr_b32">;
418 defm V_ASHRREV_I32 : VOP2Inst <"v_ashrrev_i32", VOP_I32_I32_I32, null_frag, "v_ashr_i32">;
419 defm V_LSHLREV_B32 : VOP2Inst <"v_lshlrev_b32", VOP_I32_I32_I32, null_frag, "v_lshl_b32">;
420 defm V_AND_B32 : VOP2Inst <"v_and_b32", VOP_PAT_GEN<VOP_I32_I32_I32>, and>;
421 defm V_OR_B32 : VOP2Inst <"v_or_b32", VOP_PAT_GEN<VOP_I32_I32_I32>, or>;
422 defm V_XOR_B32 : VOP2Inst <"v_xor_b32", VOP_PAT_GEN<VOP_I32_I32_I32>, xor>;
// V_MAC reads its accumulator through src2, which is tied to vdst.
424 let Constraints = "$vdst = $src2", DisableEncoding="$src2",
425 isConvertibleToThreeAddress = 1 in {
426 defm V_MAC_F32 : VOP2Inst <"v_mac_f32", VOP_MAC_F32>;
429 def V_MADAK_F32 : VOP2_Pseudo <"v_madak_f32", VOP_MADAK_F32, []>;
431 // No patterns so that the scalar instructions are always selected.
432 // The scalar versions will be replaced with vector when needed later.
434 // V_ADD_I32, V_SUB_I32, and V_SUBREV_I32 were renamed to *_U32 in VI,
435 // but the VI instructions behave the same as the SI versions.
436 defm V_ADD_I32 : VOP2bInst <"v_add_i32", VOP2b_I32_I1_I32_I32, null_frag, "v_add_i32", 1>;
437 defm V_SUB_I32 : VOP2bInst <"v_sub_i32", VOP2b_I32_I1_I32_I32, null_frag, "v_sub_i32", 1>;
438 defm V_SUBREV_I32 : VOP2bInst <"v_subrev_i32", VOP2b_I32_I1_I32_I32, null_frag, "v_sub_i32", 1>;
439 defm V_ADDC_U32 : VOP2bInst <"v_addc_u32", VOP2b_I32_I1_I32_I32_I1, null_frag, "v_addc_u32", 1>;
440 defm V_SUBB_U32 : VOP2bInst <"v_subb_u32", VOP2b_I32_I1_I32_I32_I1, null_frag, "v_subb_u32", 1>;
441 defm V_SUBBREV_U32 : VOP2bInst <"v_subbrev_u32", VOP2b_I32_I1_I32_I32_I1, null_frag, "v_subb_u32", 1>;
// Carry-less 32-bit add/sub, only on subtargets that have them.
444 let SubtargetPredicate = HasAddNoCarryInsts in {
445 defm V_ADD_U32 : VOP2Inst <"v_add_u32", VOP_I32_I32_I32, null_frag, "v_add_u32", 1>;
446 defm V_SUB_U32 : VOP2Inst <"v_sub_u32", VOP_I32_I32_I32, null_frag, "v_sub_u32", 1>;
447 defm V_SUBREV_U32 : VOP2Inst <"v_subrev_u32", VOP_I32_I32_I32, null_frag, "v_sub_u32", 1>;
450 } // End isCommutable = 1
452 // These are special and do not read the exec mask.
453 let isConvergent = 1, Uses = []<Register> in {
454 def V_READLANE_B32 : VOP2_Pseudo<"v_readlane_b32", VOP_READLANE,
455 [(set i32:$vdst, (int_amdgcn_readlane i32:$src0, i32:$src1))]>;
457 let Constraints = "$vdst = $vdst_in", DisableEncoding="$vdst_in" in {
458 def V_WRITELANE_B32 : VOP2_Pseudo<"v_writelane_b32", VOP_WRITELANE,
459 [(set i32:$vdst, (int_amdgcn_writelane i32:$src0, i32:$src1, i32:$vdst_in))]>;
460 } // End $vdst = $vdst_in, DisableEncoding $vdst_in
461 } // End isConvergent = 1
// Bit-manipulation and pack/convert VOP2 ops (no SDWA/DPP extension).
463 defm V_BFM_B32 : VOP2Inst <"v_bfm_b32", VOP_NO_EXT<VOP_I32_I32_I32>>;
464 defm V_BCNT_U32_B32 : VOP2Inst <"v_bcnt_u32_b32", VOP_NO_EXT<VOP_I32_I32_I32>>;
465 defm V_MBCNT_LO_U32_B32 : VOP2Inst <"v_mbcnt_lo_u32_b32", VOP_NO_EXT<VOP_I32_I32_I32>, int_amdgcn_mbcnt_lo>;
466 defm V_MBCNT_HI_U32_B32 : VOP2Inst <"v_mbcnt_hi_u32_b32", VOP_NO_EXT<VOP_I32_I32_I32>, int_amdgcn_mbcnt_hi>;
467 defm V_LDEXP_F32 : VOP2Inst <"v_ldexp_f32", VOP_NO_EXT<VOP_F32_F32_I32>, AMDGPUldexp>;
468 defm V_CVT_PKACCUM_U8_F32 : VOP2Inst <"v_cvt_pkaccum_u8_f32", VOP_NO_EXT<VOP_I32_F32_I32>>; // TODO: set "Uses = dst"
469 defm V_CVT_PKNORM_I16_F32 : VOP2Inst <"v_cvt_pknorm_i16_f32", VOP_NO_EXT<VOP_V2I16_F32_F32>, AMDGPUpknorm_i16_f32>;
470 defm V_CVT_PKNORM_U16_F32 : VOP2Inst <"v_cvt_pknorm_u16_f32", VOP_NO_EXT<VOP_V2I16_F32_F32>, AMDGPUpknorm_u16_f32>;
471 defm V_CVT_PKRTZ_F16_F32 : VOP2Inst <"v_cvt_pkrtz_f16_f32", VOP_NO_EXT<VOP_V2F16_F32_F32>, AMDGPUpkrtz_f16_f32>;
472 defm V_CVT_PK_U16_U32 : VOP2Inst <"v_cvt_pk_u16_u32", VOP_NO_EXT<VOP_V2I16_I32_I32>, AMDGPUpk_u16_u32>;
473 defm V_CVT_PK_I16_I32 : VOP2Inst <"v_cvt_pk_i16_i32", VOP_NO_EXT<VOP_V2I16_I32_I32>, AMDGPUpk_i16_i32>;
475 } // End SubtargetPredicate = isGCN, Predicates = [isGCN]
// Select add/sub-with-carry nodes to the e64 carry instructions.
// NOTE(review): the enclosing "def : GCNPat <" headers are elided in
// this view — confirm against the full file.
478 (AMDGPUadde i32:$src0, i32:$src1, i1:$src2),
479 (V_ADDC_U32_e64 $src0, $src1, $src2)
483 (AMDGPUsube i32:$src0, i32:$src1, i1:$src2),
484 (V_SUBB_U32_e64 $src0, $src1, $src2)
487 // These instructions only exist on SI and CI
488 let SubtargetPredicate = isSICI, Predicates = [isSICI] in {
490 defm V_MIN_LEGACY_F32 : VOP2Inst <"v_min_legacy_f32", VOP_F32_F32_F32, AMDGPUfmin_legacy>;
491 defm V_MAX_LEGACY_F32 : VOP2Inst <"v_max_legacy_f32", VOP_F32_F32_F32, AMDGPUfmax_legacy>;
493 let isCommutable = 1 in {
494 defm V_MAC_LEGACY_F32 : VOP2Inst <"v_mac_legacy_f32", VOP_F32_F32_F32>;
495 defm V_LSHR_B32 : VOP2Inst <"v_lshr_b32", VOP_PAT_GEN<VOP_I32_I32_I32>, srl>;
496 defm V_ASHR_I32 : VOP2Inst <"v_ashr_i32", VOP_PAT_GEN<VOP_I32_I32_I32>, sra>;
497 defm V_LSHL_B32 : VOP2Inst <"v_lshl_b32", VOP_PAT_GEN<VOP_I32_I32_I32>, shl>;
498 } // End isCommutable = 1
500 } // End SubtargetPredicate = isSICI, Predicates = [isSICI]
// Pattern class selecting the divergent form of a binary SDNode to a
// given VOP instruction; operand order depends on whether the target
// instruction is the original or the "rev" variant.  NOTE(review):
// parts of the pattern body are elided in this view.
502 class DivergentBinOp<SDPatternOperator Op, VOP_Pseudo Inst> :
504 (getDivergentFrag<Op>.ret Inst.Pfl.Src0VT:$src0, Inst.Pfl.Src1VT:$src1),
505 !if(!cast<Commutable_REV>(Inst).IsOrig,
// Prefer the e64 "rev" shift forms when available.
511 let AddedComplexity = 1 in {
512 def : DivergentBinOp<srl, V_LSHRREV_B32_e64>;
513 def : DivergentBinOp<sra, V_ASHRREV_I32_e64>;
514 def : DivergentBinOp<shl, V_LSHLREV_B32_e64>;
517 let SubtargetPredicate = HasAddNoCarryInsts in {
518 def : DivergentBinOp<add, V_ADD_U32_e32>;
519 def : DivergentBinOp<sub, V_SUB_U32_e32>;
520 def : DivergentBinOp<sub, V_SUBREV_U32_e32>;
524 def : DivergentBinOp<add, V_ADD_I32_e32>;
526 def : DivergentBinOp<add, V_ADD_I32_e64>;
527 def : DivergentBinOp<sub, V_SUB_I32_e32>;
529 def : DivergentBinOp<sub, V_SUBREV_I32_e32>;
531 def : DivergentBinOp<srl, V_LSHRREV_B32_e32>;
532 def : DivergentBinOp<sra, V_ASHRREV_I32_e32>;
533 def : DivergentBinOp<shl, V_LSHLREV_B32_e32>;
534 def : DivergentBinOp<adde, V_ADDC_U32_e32>;
535 def : DivergentBinOp<sube, V_SUBB_U32_e32>;
// Lowers a divergent 64-bit bitwise op to two 32-bit ops on the sub0
// and sub1 halves, recombined with REG_SEQUENCE.
537 class divergent_i64_BinOp <SDPatternOperator Op, Instruction Inst> :
539 (getDivergentFrag<Op>.ret i64:$src0, i64:$src1),
540 (REG_SEQUENCE VReg_64,
542 (i32 (EXTRACT_SUBREG $src0, sub0)),
543 (i32 (EXTRACT_SUBREG $src1, sub0))
546 (i32 (EXTRACT_SUBREG $src0, sub1)),
547 (i32 (EXTRACT_SUBREG $src1, sub1))
552 def : divergent_i64_BinOp <and, V_AND_B32_e32>;
553 def : divergent_i64_BinOp <or, V_OR_B32_e32>;
554 def : divergent_i64_BinOp <xor, V_XOR_B32_e32>;
// 16-bit VOP2 instructions (VI and later).
556 let SubtargetPredicate = Has16BitInsts in {
558 let FPDPRounding = 1 in {
559 def V_MADMK_F16 : VOP2_Pseudo <"v_madmk_f16", VOP_MADMK_F16, [], "">;
560 defm V_LDEXP_F16 : VOP2Inst <"v_ldexp_f16", VOP_F16_F16_I32, AMDGPUldexp>;
561 } // End FPDPRounding = 1
563 defm V_LSHLREV_B16 : VOP2Inst <"v_lshlrev_b16", VOP_I16_I16_I16>;
564 defm V_LSHRREV_B16 : VOP2Inst <"v_lshrrev_b16", VOP_I16_I16_I16>;
565 defm V_ASHRREV_I16 : VOP2Inst <"v_ashrrev_i16", VOP_I16_I16_I16>;
567 let isCommutable = 1 in {
568 let FPDPRounding = 1 in {
569 defm V_ADD_F16 : VOP2Inst <"v_add_f16", VOP_F16_F16_F16, fadd>;
570 defm V_SUB_F16 : VOP2Inst <"v_sub_f16", VOP_F16_F16_F16, fsub>;
571 defm V_SUBREV_F16 : VOP2Inst <"v_subrev_f16", VOP_F16_F16_F16, null_frag, "v_sub_f16">;
572 defm V_MUL_F16 : VOP2Inst <"v_mul_f16", VOP_F16_F16_F16, fmul>;
573 def V_MADAK_F16 : VOP2_Pseudo <"v_madak_f16", VOP_MADAK_F16, [], "">;
574 } // End FPDPRounding = 1
575 defm V_ADD_U16 : VOP2Inst <"v_add_u16", VOP_I16_I16_I16>;
576 defm V_SUB_U16 : VOP2Inst <"v_sub_u16" , VOP_I16_I16_I16>;
577 defm V_SUBREV_U16 : VOP2Inst <"v_subrev_u16", VOP_I16_I16_I16, null_frag, "v_sub_u16">;
578 defm V_MUL_LO_U16 : VOP2Inst <"v_mul_lo_u16", VOP_I16_I16_I16>;
579 defm V_MAX_F16 : VOP2Inst <"v_max_f16", VOP_F16_F16_F16, fmaxnum_like>;
580 defm V_MIN_F16 : VOP2Inst <"v_min_f16", VOP_F16_F16_F16, fminnum_like>;
581 defm V_MAX_U16 : VOP2Inst <"v_max_u16", VOP_I16_I16_I16>;
582 defm V_MAX_I16 : VOP2Inst <"v_max_i16", VOP_I16_I16_I16>;
583 defm V_MIN_U16 : VOP2Inst <"v_min_u16", VOP_I16_I16_I16>;
584 defm V_MIN_I16 : VOP2Inst <"v_min_i16", VOP_I16_I16_I16>;
// v_mac_f16: accumulator src2 tied to vdst, as for v_mac_f32 above.
586 let Constraints = "$vdst = $src2", DisableEncoding="$src2",
587 isConvertibleToThreeAddress = 1 in {
588 defm V_MAC_F16 : VOP2Inst <"v_mac_f16", VOP_MAC_F16>;
590 } // End isCommutable = 1
592 } // End SubtargetPredicate = Has16BitInsts
// Deep-learning instruction subset: xnor and fused-multiply-accumulate.
594 let SubtargetPredicate = HasDLInsts in {
596 defm V_XNOR_B32 : VOP2Inst <"v_xnor_b32", VOP_I32_I32_I32>;
598 let Constraints = "$vdst = $src2",
599 DisableEncoding="$src2",
600 isConvertibleToThreeAddress = 1,
601 isCommutable = 1 in {
602 defm V_FMAC_F32 : VOP2Inst <"v_fmac_f32", VOP_MAC_F32>;
605 } // End SubtargetPredicate = HasDLInsts
607 // Note: 16-bit instructions produce a 0 result in the high 16-bits.
// Patterns for i16 ops and their zext-to-i32/i64 forms (the zext is
// free because of the zeroed high bits).  NOTE(review): the GCNPat
// headers/result dags are partially elided in this view.
608 multiclass Arithmetic_i16_Pats <SDPatternOperator op, Instruction inst> {
611 (op i16:$src0, i16:$src1),
616 (i32 (zext (op i16:$src0, i16:$src1))),
621 (i64 (zext (op i16:$src0, i16:$src1))),
622 (REG_SEQUENCE VReg_64,
623 (inst $src0, $src1), sub0,
624 (V_MOV_B32_e32 (i32 0)), sub1)
// Like Arithmetic_i16_Pats but for the "rev" shift instructions, which
// take their operands in swapped order ($src1, $src0).
629 multiclass Bits_OpsRev_i16_Pats <SDPatternOperator op, Instruction inst> {
632 (op i16:$src0, i16:$src1),
637 (i32 (zext (op i16:$src0, i16:$src1))),
643 (i64 (zext (op i16:$src0, i16:$src1))),
644 (REG_SEQUENCE VReg_64,
645 (inst $src1, $src0), sub0,
646 (V_MOV_B32_e32 (i32 0)), sub1)
// Zero/any-extend an i1 to i16 by selecting 0 or 1 with v_cndmask.
650 class ZExt_i16_i1_Pat <SDNode ext> : GCNPat <
652 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src)
// i16 selection patterns, guarded on 16-bit instruction support.
655 let Predicates = [Has16BitInsts] in {
657 defm : Arithmetic_i16_Pats<add, V_ADD_U16_e64>;
658 defm : Arithmetic_i16_Pats<mul, V_MUL_LO_U16_e64>;
659 defm : Arithmetic_i16_Pats<sub, V_SUB_U16_e64>;
660 defm : Arithmetic_i16_Pats<smin, V_MIN_I16_e64>;
661 defm : Arithmetic_i16_Pats<smax, V_MAX_I16_e64>;
662 defm : Arithmetic_i16_Pats<umin, V_MIN_U16_e64>;
663 defm : Arithmetic_i16_Pats<umax, V_MAX_U16_e64>;
// i16 bitwise ops reuse the 32-bit instructions.
666 (and i16:$src0, i16:$src1),
667 (V_AND_B32_e64 $src0, $src1)
671 (or i16:$src0, i16:$src1),
672 (V_OR_B32_e64 $src0, $src1)
676 (xor i16:$src0, i16:$src1),
677 (V_XOR_B32_e64 $src0, $src1)
680 defm : Bits_OpsRev_i16_Pats<shl, V_LSHLREV_B16_e64>;
681 defm : Bits_OpsRev_i16_Pats<srl, V_LSHRREV_B16_e64>;
682 defm : Bits_OpsRev_i16_Pats<sra, V_ASHRREV_I16_e64>;
684 def : ZExt_i16_i1_Pat<zext>;
685 def : ZExt_i16_i1_Pat<anyext>;
// sext i1 -> i16: select 0 or all-ones.
688 (i16 (sext i1:$src)),
689 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src)
692 // Undo sub x, c -> add x, -c canonicalization since c is more likely
693 // an inline immediate than -c.
694 // TODO: Also do for 64-bit.
696 (add i16:$src0, (i16 NegSubInlineConst16:$src1)),
697 (V_SUB_U16_e64 $src0, NegSubInlineConst16:$src1)
700 } // End Predicates = [Has16BitInsts]
702 //===----------------------------------------------------------------------===//
704 //===----------------------------------------------------------------------===//
// Real-instruction multiclasses for SI/CI: attach the SI encodings to
// the pseudos defined above.
706 let AssemblerPredicates = [isSICI], DecoderNamespace = "SICI" in {
708 multiclass VOP2_Real_si <bits<6> op> {
710 VOP2_Real<!cast<VOP2_Pseudo>(NAME), SIEncodingFamily.SI>,
711 VOP2e<op{5-0}, !cast<VOP2_Pseudo>(NAME).Pfl>;
714 multiclass VOP2_Real_MADK_si <bits<6> op> {
715 def _si : VOP2_Real<!cast<VOP2_Pseudo>(NAME), SIEncodingFamily.SI>,
716 VOP2_MADKe<op{5-0}, !cast<VOP2_Pseudo>(NAME).Pfl>;
719 multiclass VOP2_Real_e32_si <bits<6> op> {
721 VOP2_Real<!cast<VOP2_Pseudo>(NAME#"_e32"), SIEncodingFamily.SI>,
722 VOP2e<op{5-0}, !cast<VOP2_Pseudo>(NAME#"_e32").Pfl>;
725 multiclass VOP2_Real_e32e64_si <bits<6> op> : VOP2_Real_e32_si<op> {
727 VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
728 VOP3e_si <{1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
731 multiclass VOP2be_Real_e32e64_si <bits<6> op> : VOP2_Real_e32_si<op> {
733 VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
734 VOP3be_si <{1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
737 } // End AssemblerPredicates = [isSICI], DecoderNamespace = "SICI"
// SI/CI opcode assignments.
739 defm V_CNDMASK_B32 : VOP2_Real_e32e64_si <0x0>;
740 defm V_ADD_F32 : VOP2_Real_e32e64_si <0x3>;
741 defm V_SUB_F32 : VOP2_Real_e32e64_si <0x4>;
742 defm V_SUBREV_F32 : VOP2_Real_e32e64_si <0x5>;
743 defm V_MUL_LEGACY_F32 : VOP2_Real_e32e64_si <0x7>;
744 defm V_MUL_F32 : VOP2_Real_e32e64_si <0x8>;
745 defm V_MUL_I32_I24 : VOP2_Real_e32e64_si <0x9>;
746 defm V_MUL_HI_I32_I24 : VOP2_Real_e32e64_si <0xa>;
747 defm V_MUL_U32_U24 : VOP2_Real_e32e64_si <0xb>;
748 defm V_MUL_HI_U32_U24 : VOP2_Real_e32e64_si <0xc>;
749 defm V_MIN_F32 : VOP2_Real_e32e64_si <0xf>;
750 defm V_MAX_F32 : VOP2_Real_e32e64_si <0x10>;
751 defm V_MIN_I32 : VOP2_Real_e32e64_si <0x11>;
752 defm V_MAX_I32 : VOP2_Real_e32e64_si <0x12>;
753 defm V_MIN_U32 : VOP2_Real_e32e64_si <0x13>;
754 defm V_MAX_U32 : VOP2_Real_e32e64_si <0x14>;
755 defm V_LSHRREV_B32 : VOP2_Real_e32e64_si <0x16>;
756 defm V_ASHRREV_I32 : VOP2_Real_e32e64_si <0x18>;
757 defm V_LSHLREV_B32 : VOP2_Real_e32e64_si <0x1a>;
758 defm V_AND_B32 : VOP2_Real_e32e64_si <0x1b>;
759 defm V_OR_B32 : VOP2_Real_e32e64_si <0x1c>;
760 defm V_XOR_B32 : VOP2_Real_e32e64_si <0x1d>;
761 defm V_MAC_F32 : VOP2_Real_e32e64_si <0x1f>;
762 defm V_MADMK_F32 : VOP2_Real_MADK_si <0x20>;
763 defm V_MADAK_F32 : VOP2_Real_MADK_si <0x21>;
764 defm V_ADD_I32 : VOP2be_Real_e32e64_si <0x25>;
765 defm V_SUB_I32 : VOP2be_Real_e32e64_si <0x26>;
766 defm V_SUBREV_I32 : VOP2be_Real_e32e64_si <0x27>;
767 defm V_ADDC_U32 : VOP2be_Real_e32e64_si <0x28>;
768 defm V_SUBB_U32 : VOP2be_Real_e32e64_si <0x29>;
769 defm V_SUBBREV_U32 : VOP2be_Real_e32e64_si <0x2a>;
771 defm V_READLANE_B32 : VOP2_Real_si <0x01>;
// v_writelane_b32 on SI uses an SSrc_b32 src0 operand list.
773 let InOperandList = (ins SSrc_b32:$src0, SCSrc_b32:$src1, VSrc_b32:$vdst_in) in {
774 defm V_WRITELANE_B32 : VOP2_Real_si <0x02>;
777 defm V_MAC_LEGACY_F32 : VOP2_Real_e32e64_si <0x6>;
778 defm V_MIN_LEGACY_F32 : VOP2_Real_e32e64_si <0xd>;
779 defm V_MAX_LEGACY_F32 : VOP2_Real_e32e64_si <0xe>;
780 defm V_LSHR_B32 : VOP2_Real_e32e64_si <0x15>;
781 defm V_ASHR_I32 : VOP2_Real_e32e64_si <0x17>;
782 defm V_LSHL_B32 : VOP2_Real_e32e64_si <0x19>;
784 defm V_BFM_B32 : VOP2_Real_e32e64_si <0x1e>;
785 defm V_BCNT_U32_B32 : VOP2_Real_e32e64_si <0x22>;
786 defm V_MBCNT_LO_U32_B32 : VOP2_Real_e32e64_si <0x23>;
787 defm V_MBCNT_HI_U32_B32 : VOP2_Real_e32e64_si <0x24>;
788 defm V_LDEXP_F32 : VOP2_Real_e32e64_si <0x2b>;
789 defm V_CVT_PKACCUM_U8_F32 : VOP2_Real_e32e64_si <0x2c>;
790 defm V_CVT_PKNORM_I16_F32 : VOP2_Real_e32e64_si <0x2d>;
791 defm V_CVT_PKNORM_U16_F32 : VOP2_Real_e32e64_si <0x2e>;
792 defm V_CVT_PKRTZ_F16_F32 : VOP2_Real_e32e64_si <0x2f>;
793 defm V_CVT_PK_U16_U32 : VOP2_Real_e32e64_si <0x30>;
794 defm V_CVT_PK_I16_I32 : VOP2_Real_e32e64_si <0x31>;
797 //===----------------------------------------------------------------------===//
799 //===----------------------------------------------------------------------===//
// DPP (data-parallel primitives) encoding for VOP2: the src0 slot holds
// the fixed DPP marker value 0xfa; the opcode sits in bits 30-25.
801 class VOP2_DPPe <bits<6> op, VOP2_DPP_Pseudo ps, VOPProfile P = ps.Pfl> :
805 let Inst{8-0} = 0xfa; // dpp marker in the src0 field
806 let Inst{16-9} = !if(P.HasSrc1, src1{7-0}, 0);
807 let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
808 let Inst{30-25} = op;
809 let Inst{31} = 0x0; // encoding
// Real-instruction multiclasses for VI.
812 let AssemblerPredicates = [isVI], DecoderNamespace = "VI" in {
814 multiclass VOP2_Real_MADK_vi <bits<6> op> {
815 def _vi : VOP2_Real<!cast<VOP2_Pseudo>(NAME), SIEncodingFamily.VI>,
816 VOP2_MADKe<op{5-0}, !cast<VOP2_Pseudo>(NAME).Pfl>;
819 multiclass VOP2_Real_e32_vi <bits<6> op> {
821 VOP2_Real<!cast<VOP2_Pseudo>(NAME#"_e32"), SIEncodingFamily.VI>,
822 VOP2e<op{5-0}, !cast<VOP2_Pseudo>(NAME#"_e32").Pfl>;
825 multiclass VOP2_Real_e64_vi <bits<10> op> {
827 VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
828 VOP3e_vi <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
831 multiclass VOP2_Real_e64only_vi <bits<10> op> {
833 VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
834 VOP3e_vi <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
835 // Hack to stop printing _e64
836 VOP3_Pseudo ps = !cast<VOP3_Pseudo>(NAME#"_e64");
837 let OutOperandList = (outs VGPR_32:$vdst);
838 let AsmString = ps.Mnemonic # " " # ps.AsmOperands;
842 multiclass Base_VOP2_Real_e32e64_vi <bits<6> op> :
843 VOP2_Real_e32_vi<op>,
844 VOP2_Real_e64_vi<{0, 1, 0, 0, op{5-0}}>;
846 } // End AssemblerPredicates = [isVI], DecoderNamespace = "VI"
// SDWA real encodings (VI form and GFX9 form).
848 multiclass VOP2_SDWA_Real <bits<6> op> {
850 VOP_SDWA_Real <!cast<VOP2_SDWA_Pseudo>(NAME#"_sdwa")>,
851 VOP2_SDWAe <op{5-0}, !cast<VOP2_SDWA_Pseudo>(NAME#"_sdwa").Pfl>;
854 multiclass VOP2_SDWA9_Real <bits<6> op> {
856 VOP_SDWA9_Real <!cast<VOP2_SDWA_Pseudo>(NAME#"_sdwa")>,
857 VOP2_SDWA9Ae <op{5-0}, !cast<VOP2_SDWA_Pseudo>(NAME#"_sdwa").Pfl>;
// VI-only renamed carry ops: AsmName overrides the pseudo's mnemonic.
860 let AssemblerPredicates = [isVIOnly] in {
862 multiclass VOP2be_Real_e32e64_vi_only <bits<6> op, string OpName, string AsmName> {
864 VOP2_Real<!cast<VOP2_Pseudo>(OpName#"_e32"), SIEncodingFamily.VI>,
865 VOP2e<op{5-0}, !cast<VOP2_Pseudo>(OpName#"_e32").Pfl> {
866 VOP2_Pseudo ps = !cast<VOP2_Pseudo>(OpName#"_e32");
867 let AsmString = AsmName # ps.AsmOperands;
868 let DecoderNamespace = "VI";
871 VOP3_Real<!cast<VOP3_Pseudo>(OpName#"_e64"), SIEncodingFamily.VI>,
872 VOP3be_vi <{0, 1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(OpName#"_e64").Pfl> {
873 VOP3_Pseudo ps = !cast<VOP3_Pseudo>(OpName#"_e64");
874 let AsmString = AsmName # ps.AsmOperands;
875 let DecoderNamespace = "VI";
878 VOP_SDWA_Real <!cast<VOP2_SDWA_Pseudo>(OpName#"_sdwa")>,
879 VOP2_SDWAe <op{5-0}, !cast<VOP2_SDWA_Pseudo>(OpName#"_sdwa").Pfl> {
880 VOP2_SDWA_Pseudo ps = !cast<VOP2_SDWA_Pseudo>(OpName#"_sdwa");
881 let AsmString = AsmName # ps.AsmOperands;
883 foreach _ = BoolToList<!cast<VOP2_Pseudo>(OpName#"_e32").Pfl.HasExtDPP>.ret in
885 VOP_DPP_Real<!cast<VOP2_DPP_Pseudo>(OpName#"_dpp"), SIEncodingFamily.VI>,
886 VOP2_DPPe<op, !cast<VOP2_DPP_Pseudo>(OpName#"_dpp")> {
887 VOP2_DPP_Pseudo ps = !cast<VOP2_DPP_Pseudo>(OpName#"_dpp");
888 let AsmString = AsmName # ps.AsmOperands;
// GFX9 real encodings; mirrors the VI-only multiclasses but with the
// GFX9 encoding family and decoder namespaces.
893 let AssemblerPredicates = [isGFX9] in {
895 multiclass VOP2be_Real_e32e64_gfx9 <bits<6> op, string OpName, string AsmName> {
897 VOP2_Real<!cast<VOP2_Pseudo>(OpName#"_e32"), SIEncodingFamily.GFX9>,
898 VOP2e<op{5-0}, !cast<VOP2_Pseudo>(OpName#"_e32").Pfl> {
899 VOP2_Pseudo ps = !cast<VOP2_Pseudo>(OpName#"_e32");
900 let AsmString = AsmName # ps.AsmOperands;
901 let DecoderNamespace = "GFX9";
904 VOP3_Real<!cast<VOP3_Pseudo>(OpName#"_e64"), SIEncodingFamily.GFX9>,
905 VOP3be_vi <{0, 1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(OpName#"_e64").Pfl> {
906 VOP3_Pseudo ps = !cast<VOP3_Pseudo>(OpName#"_e64");
907 let AsmString = AsmName # ps.AsmOperands;
908 let DecoderNamespace = "GFX9";
911 VOP_SDWA9_Real <!cast<VOP2_SDWA_Pseudo>(OpName#"_sdwa")>,
912 VOP2_SDWA9Ae <op{5-0}, !cast<VOP2_SDWA_Pseudo>(OpName#"_sdwa").Pfl> {
913 VOP2_SDWA_Pseudo ps = !cast<VOP2_SDWA_Pseudo>(OpName#"_sdwa");
914 let AsmString = AsmName # ps.AsmOperands;
916 foreach _ = BoolToList<!cast<VOP2_Pseudo>(OpName#"_e32").Pfl.HasExtDPP>.ret in
918 VOP_DPP_Real<!cast<VOP2_DPP_Pseudo>(OpName#"_dpp"), SIEncodingFamily.GFX9>,
919 VOP2_DPPe<op, !cast<VOP2_DPP_Pseudo>(OpName#"_dpp")> {
920 VOP2_DPP_Pseudo ps = !cast<VOP2_DPP_Pseudo>(OpName#"_dpp");
921 let AsmString = AsmName # ps.AsmOperands;
922 let DecoderNamespace = "SDWA9";
926 multiclass VOP2_Real_e32e64_gfx9 <bits<6> op> {
928 VOP2_Real<!cast<VOP2_Pseudo>(NAME#"_e32"), SIEncodingFamily.GFX9>,
929 VOP2e<op{5-0}, !cast<VOP2_Pseudo>(NAME#"_e32").Pfl>{
930 let DecoderNamespace = "GFX9";
933 VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.GFX9>,
934 VOP3e_vi <{0, 1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
935 let DecoderNamespace = "GFX9";
938 VOP_SDWA9_Real <!cast<VOP2_SDWA_Pseudo>(NAME#"_sdwa")>,
939 VOP2_SDWA9Ae <op{5-0}, !cast<VOP2_SDWA_Pseudo>(NAME#"_sdwa").Pfl> {
941 foreach _ = BoolToList<!cast<VOP2_Pseudo>(NAME#"_e32").Pfl.HasExtDPP>.ret in
943 VOP_DPP_Real<!cast<VOP2_DPP_Pseudo>(NAME#"_dpp"), SIEncodingFamily.GFX9>,
944 VOP2_DPPe<op, !cast<VOP2_DPP_Pseudo>(NAME#"_dpp")> {
945 let DecoderNamespace = "SDWA9";
949 } // AssemblerPredicates = [isGFX9]
// VI: composite multiclass combining the base e32/e64 real encodings with the
// SDWA and SDWA9 variants, plus a DPP variant when the e32 pseudo's profile
// sets HasExtDPP.
// NOTE(review): original line numbers jump (953, 955 missing) — some lines of
// this multiclass appear elided in this copy; verify against upstream.
951 multiclass VOP2_Real_e32e64_vi <bits<6> op> :
952   Base_VOP2_Real_e32e64_vi<op>, VOP2_SDWA_Real<op>, VOP2_SDWA9_Real<op> {
954   foreach _ = BoolToList<!cast<VOP2_Pseudo>(NAME#"_e32").Pfl.HasExtDPP>.ret in
956       VOP_DPP_Real<!cast<VOP2_DPP_Pseudo>(NAME#"_dpp"), SIEncodingFamily.VI>,
957       VOP2_DPPe<op, !cast<VOP2_DPP_Pseudo>(NAME#"_dpp")>;
// VI real definitions for the core 32-bit VOP2 instructions, opcodes 0x0-0x18.
960 defm V_CNDMASK_B32     : VOP2_Real_e32e64_vi <0x0>;
961 defm V_ADD_F32         : VOP2_Real_e32e64_vi <0x1>;
962 defm V_SUB_F32         : VOP2_Real_e32e64_vi <0x2>;
963 defm V_SUBREV_F32      : VOP2_Real_e32e64_vi <0x3>;
964 defm V_MUL_LEGACY_F32  : VOP2_Real_e32e64_vi <0x4>;
965 defm V_MUL_F32         : VOP2_Real_e32e64_vi <0x5>;
966 defm V_MUL_I32_I24     : VOP2_Real_e32e64_vi <0x6>;
967 defm V_MUL_HI_I32_I24  : VOP2_Real_e32e64_vi <0x7>;
968 defm V_MUL_U32_U24     : VOP2_Real_e32e64_vi <0x8>;
969 defm V_MUL_HI_U32_U24  : VOP2_Real_e32e64_vi <0x9>;
970 defm V_MIN_F32         : VOP2_Real_e32e64_vi <0xa>;
971 defm V_MAX_F32         : VOP2_Real_e32e64_vi <0xb>;
972 defm V_MIN_I32         : VOP2_Real_e32e64_vi <0xc>;
973 defm V_MAX_I32         : VOP2_Real_e32e64_vi <0xd>;
974 defm V_MIN_U32         : VOP2_Real_e32e64_vi <0xe>;
975 defm V_MAX_U32         : VOP2_Real_e32e64_vi <0xf>;
976 defm V_LSHRREV_B32     : VOP2_Real_e32e64_vi <0x10>;
977 defm V_ASHRREV_I32     : VOP2_Real_e32e64_vi <0x11>;
978 defm V_LSHLREV_B32     : VOP2_Real_e32e64_vi <0x12>;
979 defm V_AND_B32         : VOP2_Real_e32e64_vi <0x13>;
980 defm V_OR_B32          : VOP2_Real_e32e64_vi <0x14>;
981 defm V_XOR_B32         : VOP2_Real_e32e64_vi <0x15>;
982 defm V_MAC_F32         : VOP2_Real_e32e64_vi <0x16>;
// MADMK/MADAK carry a trailing 32-bit literal, so they use the MADK
// encoding path (64-bit VOP2_MADKe with imm in Inst{63-32}).
983 defm V_MADMK_F32       : VOP2_Real_MADK_vi <0x17>;
984 defm V_MADAK_F32       : VOP2_Real_MADK_vi <0x18>;
// VI-only names for the carry/borrow ops at opcodes 0x19-0x1e: the V_*_I32 /
// V_*_U32 pseudos assemble as the v_*_u32 mnemonics given here.
986 defm V_ADD_U32 : VOP2be_Real_e32e64_vi_only <0x19, "V_ADD_I32", "v_add_u32">;
987 defm V_SUB_U32 : VOP2be_Real_e32e64_vi_only <0x1a, "V_SUB_I32", "v_sub_u32">;
988 defm V_SUBREV_U32 : VOP2be_Real_e32e64_vi_only <0x1b, "V_SUBREV_I32", "v_subrev_u32">;
989 defm V_ADDC_U32 : VOP2be_Real_e32e64_vi_only <0x1c, "V_ADDC_U32", "v_addc_u32">;
990 defm V_SUBB_U32 : VOP2be_Real_e32e64_vi_only <0x1d, "V_SUBB_U32", "v_subb_u32">;
991 defm V_SUBBREV_U32 : VOP2be_Real_e32e64_vi_only <0x1e, "V_SUBBREV_U32", "v_subbrev_u32">;
// GFX9 renames the same carry/borrow opcodes (0x19-0x1e) to the *_co_* forms.
993 defm V_ADD_CO_U32 : VOP2be_Real_e32e64_gfx9 <0x19, "V_ADD_I32", "v_add_co_u32">;
994 defm V_SUB_CO_U32 : VOP2be_Real_e32e64_gfx9 <0x1a, "V_SUB_I32", "v_sub_co_u32">;
995 defm V_SUBREV_CO_U32 : VOP2be_Real_e32e64_gfx9 <0x1b, "V_SUBREV_I32", "v_subrev_co_u32">;
996 defm V_ADDC_CO_U32 : VOP2be_Real_e32e64_gfx9 <0x1c, "V_ADDC_U32", "v_addc_co_u32">;
997 defm V_SUBB_CO_U32 : VOP2be_Real_e32e64_gfx9 <0x1d, "V_SUBB_U32", "v_subb_co_u32">;
998 defm V_SUBBREV_CO_U32 : VOP2be_Real_e32e64_gfx9 <0x1e, "V_SUBBREV_U32", "v_subbrev_co_u32">;
// GFX9 reuses the V_ADD_U32/V_SUB_U32/V_SUBREV_U32 names at new opcodes
// 0x34-0x36 (distinct from the VI carry ops at 0x19-0x1b above).
1000 defm V_ADD_U32 : VOP2_Real_e32e64_gfx9 <0x34>;
1001 defm V_SUB_U32 : VOP2_Real_e32e64_gfx9 <0x35>;
1002 defm V_SUBREV_U32 : VOP2_Real_e32e64_gfx9 <0x36>;
// Instructions that only have the e64 (VOP3) encoding on VI; note the opcode
// values are full VOP3 opcodes (0x1f0, 0x28x, 0x29x), not 6-bit VOP2 ones.
1004 defm V_BFM_B32            : VOP2_Real_e64only_vi <0x293>;
1005 defm V_BCNT_U32_B32       : VOP2_Real_e64only_vi <0x28b>;
1006 defm V_MBCNT_LO_U32_B32   : VOP2_Real_e64only_vi <0x28c>;
1007 defm V_MBCNT_HI_U32_B32   : VOP2_Real_e64only_vi <0x28d>;
1008 defm V_LDEXP_F32          : VOP2_Real_e64only_vi <0x288>;
1009 defm V_CVT_PKACCUM_U8_F32 : VOP2_Real_e64only_vi <0x1f0>;
1010 defm V_CVT_PKNORM_I16_F32 : VOP2_Real_e64only_vi <0x294>;
1011 defm V_CVT_PKNORM_U16_F32 : VOP2_Real_e64only_vi <0x295>;
1012 defm V_CVT_PKRTZ_F16_F32  : VOP2_Real_e64only_vi <0x296>;
1013 defm V_CVT_PK_U16_U32     : VOP2_Real_e64only_vi <0x297>;
1014 defm V_CVT_PK_I16_I32     : VOP2_Real_e64only_vi <0x298>;
// 16-bit VOP2 instructions, opcodes 0x1f-0x33 (VI encoding).
1016 defm V_ADD_F16         : VOP2_Real_e32e64_vi <0x1f>;
1017 defm V_SUB_F16         : VOP2_Real_e32e64_vi <0x20>;
1018 defm V_SUBREV_F16      : VOP2_Real_e32e64_vi <0x21>;
1019 defm V_MUL_F16         : VOP2_Real_e32e64_vi <0x22>;
1020 defm V_MAC_F16         : VOP2_Real_e32e64_vi <0x23>;
// MADK forms take a 32-bit literal (VOP2_MADKe encoding).
1021 defm V_MADMK_F16       : VOP2_Real_MADK_vi <0x24>;
1022 defm V_MADAK_F16       : VOP2_Real_MADK_vi <0x25>;
1023 defm V_ADD_U16         : VOP2_Real_e32e64_vi <0x26>;
1024 defm V_SUB_U16         : VOP2_Real_e32e64_vi <0x27>;
1025 defm V_SUBREV_U16      : VOP2_Real_e32e64_vi <0x28>;
1026 defm V_MUL_LO_U16      : VOP2_Real_e32e64_vi <0x29>;
1027 defm V_LSHLREV_B16     : VOP2_Real_e32e64_vi <0x2a>;
1028 defm V_LSHRREV_B16     : VOP2_Real_e32e64_vi <0x2b>;
1029 defm V_ASHRREV_I16     : VOP2_Real_e32e64_vi <0x2c>;
1030 defm V_MAX_F16         : VOP2_Real_e32e64_vi <0x2d>;
1031 defm V_MIN_F16         : VOP2_Real_e32e64_vi <0x2e>;
1032 defm V_MAX_U16         : VOP2_Real_e32e64_vi <0x2f>;
1033 defm V_MAX_I16         : VOP2_Real_e32e64_vi <0x30>;
1034 defm V_MIN_U16         : VOP2_Real_e32e64_vi <0x31>;
1035 defm V_MIN_I16         : VOP2_Real_e32e64_vi <0x32>;
1036 defm V_LDEXP_F16       : VOP2_Real_e32e64_vi <0x33>;
1038 let SubtargetPredicate = isVI in {
1040 // Aliases to simplify matching of floating-point instructions that
1041 // are VOP2 on SI and VOP3 on VI.
// name: the SI-style two-source mnemonic to accept.
// inst: the VI _e64 real instruction the alias expands to.
// The !if selects between two result operand lists: the trailing extra 0 is
// appended only when the profile has HasOMod (presumably the omod operand —
// verify against the VOP3 operand order).
// NOTE(review): embedded line numbers jump after 1049 — the class's closing
// brace appears elided in this copy.
1042 class SI2_VI3Alias <string name, VOP3_Real inst> : InstAlias <
1043 name#" $dst, $src0, $src1",
1044 !if(inst.Pfl.HasOMod,
1045 (inst VGPR_32:$dst, 0, VCSrc_f32:$src0, 0, VCSrc_f32:$src1, 0, 0),
1046 (inst VGPR_32:$dst, 0, VCSrc_f32:$src0, 0, VCSrc_f32:$src1, 0))
1047 >, PredicateControl {
1048 let UseInstAsmMatchConverter = 0;
// Only match in the VOP3 assembler variant.
1049 let AsmVariantName = AMDGPUAsmVariants.VOP3;
1052 def : SI2_VI3Alias <"v_ldexp_f32", V_LDEXP_F32_e64_vi>;
1053 def : SI2_VI3Alias <"v_cvt_pkaccum_u8_f32", V_CVT_PKACCUM_U8_F32_e64_vi>;
1054 def : SI2_VI3Alias <"v_cvt_pknorm_i16_f32", V_CVT_PKNORM_I16_F32_e64_vi>;
1055 def : SI2_VI3Alias <"v_cvt_pknorm_u16_f32", V_CVT_PKNORM_U16_F32_e64_vi>;
1056 def : SI2_VI3Alias <"v_cvt_pkrtz_f16_f32", V_CVT_PKRTZ_F16_F32_e64_vi>;
1058 } // End SubtargetPredicate = isVI
// VOP2 instructions gated on the HasDLInsts subtarget feature.
1060 let SubtargetPredicate = HasDLInsts in {
1062 defm V_FMAC_F32 : VOP2_Real_e32e64_vi <0x3b>;
1063 defm V_XNOR_B32 : VOP2_Real_e32e64_vi <0x3d>;
1065 } // End SubtargetPredicate = HasDLInsts