1 //===-- VOP2Instructions.td - Vector Instruction Definitions --------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 //===----------------------------------------------------------------------===//
12 //===----------------------------------------------------------------------===//
// 32-bit VOP2 hardware encoding layout.  Operand fields are emitted only
// when the profile declares them; bit 31 = 0 selects the VOP2 encoding class.
14 class VOP2e <bits<6> op, VOPProfile P> : Enc32 {
19 let Inst{8-0} = !if(P.HasSrc0, src0, 0);
20 let Inst{16-9} = !if(P.HasSrc1, src1, 0);
21 let Inst{24-17} = !if(P.EmitDst, vdst, 0);
23 let Inst{31} = 0x0; //encoding
// 64-bit encoding used by the MADMK/MADAK forms: the low dword follows the
// VOP2 layout, the high dword (bits 63-32) carries a 32-bit literal constant.
26 class VOP2_MADKe <bits<6> op, VOPProfile P> : Enc64 {
32 let Inst{8-0} = !if(P.HasSrc0, src0, 0);
33 let Inst{16-9} = !if(P.HasSrc1, src1, 0);
34 let Inst{24-17} = !if(P.EmitDst, vdst, 0);
36 let Inst{31} = 0x0; // encoding
37 let Inst{63-32} = imm;
// SDWA (sub-dword addressing) encoding for VOP2: the src0 slot holds the
// 0xf9 SDWA marker; the real src0/selects come from the VOP_SDWAe base dword.
40 class VOP2_SDWAe <bits<6> op, VOPProfile P> : VOP_SDWAe <P> {
44 let Inst{8-0} = 0xf9; // sdwa
45 let Inst{16-9} = !if(P.HasSrc1, src1{7-0}, 0);
46 let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
48 let Inst{31} = 0x0; // encoding
// GFX9 SDWA encoding variant for VOP2.  Differs from VOP2_SDWAe in that
// src1 is 9 bits wide; bit 63 flags an SGPR src1.
51 class VOP2_SDWA9Ae <bits<6> op, VOPProfile P> : VOP_SDWA9Ae <P> {
55 let Inst{8-0} = 0xf9; // sdwa
56 let Inst{16-9} = !if(P.HasSrc1, src1{7-0}, 0);
57 let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
59 let Inst{31} = 0x0; // encoding
60 let Inst{63} = !if(P.HasSrc1, src1{8}, 0); // src1_sgpr
// Machine-independent pseudo form of a VOP2 instruction.  Carries the
// selection patterns and the shared 32-bit asm string; per-subtarget real
// encodings are attached later via VOP2_Real and matched up through
// SIMCInstr using opName#suffix.
63 class VOP2_Pseudo <string opName, VOPProfile P, list<dag> pattern=[], string suffix = "_e32"> :
64 InstSI <P.Outs32, P.Ins32, "", pattern>,
66 SIMCInstr <opName#suffix, SIEncodingFamily.NONE>,
67 MnemonicAlias<opName#suffix, opName> {
70 let isCodeGenOnly = 1;
71 let UseNamedOperandTable = 1;
73 string Mnemonic = opName;
74 string AsmOperands = P.Asm32;
79 let hasSideEffects = 0;
80 let SubtargetPredicate = isGCN;
86 let AsmVariantName = AMDGPUAsmVariants.Default;
// Real (encoded) VOP2 instruction built from its pseudo.  All assembler
// and codegen properties are copied from the pseudo so the two stay in
// sync.  Constraints and DisableEncoding were previously assigned twice
// (once before and once inside the "copy relevant pseudo op flags" group);
// the redundant first pair has been dropped.
91 class VOP2_Real <VOP2_Pseudo ps, int EncodingFamily> :
92 InstSI <ps.OutOperandList, ps.InOperandList, ps.Mnemonic # ps.AsmOperands, []>,
93 SIMCInstr <ps.PseudoInstr, EncodingFamily> {
96 let isCodeGenOnly = 0;
101 // copy relevant pseudo op flags
102 let SubtargetPredicate = ps.SubtargetPredicate;
103 let AsmMatchConverter = ps.AsmMatchConverter;
104 let AsmVariantName = ps.AsmVariantName;
105 let Constraints = ps.Constraints;
106 let DisableEncoding = ps.DisableEncoding;
107 let TSFlags = ps.TSFlags;
108 let UseNamedOperandTable = ps.UseNamedOperandTable;
// SDWA pseudo for a VOP2 op; only overrides the asm-match converter with
// the VOP2-specific SDWA conversion routine.
112 class VOP2_SDWA_Pseudo <string OpName, VOPProfile P, list<dag> pattern=[]> :
113 VOP_SDWA_Pseudo <OpName, P, pattern> {
114 let AsmMatchConverter = "cvtSdwaVOP2";
// Builds the _e64 (VOP3) selection pattern for a binary node: when the
// profile supports source modifiers the pattern matches VOP3Mods0/VOP3Mods
// wrapped sources, otherwise a plain (node src0, src1) pattern is used.
117 class getVOP2Pat64 <SDPatternOperator node, VOPProfile P> : LetDummies {
118 list<dag> ret = !if(P.HasModifiers,
122 (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omod),
123 (VOP3Mods0 P.Src0VT:$src0, i32:$src0_modifiers, i1:$clamp))),
124 (P.Src1VT (VOP3Mods P.Src1VT:$src1, i32:$src1_modifiers))))],
125 [(set P.DstVT:$vdst, (node P.Src0VT:$src0, P.Src1VT:$src1))]);
// Standard VOP2 instruction: declares the _e32, _e64 (VOP3 form, carrying
// the selection pattern) and _sdwa pseudos.  revOp names the
// operand-swapped counterpart used for commutation (e.g. v_subrev/v_sub).
128 multiclass VOP2Inst <string opName,
130 SDPatternOperator node = null_frag,
131 string revOp = opName> {
133 def _e32 : VOP2_Pseudo <opName, P>,
134 Commutable_REV<revOp#"_e32", !eq(revOp, opName)>;
136 def _e64 : VOP3_Pseudo <opName, P, getVOP2Pat64<node, P>.ret>,
137 Commutable_REV<revOp#"_e64", !eq(revOp, opName)>;
139 def _sdwa : VOP2_SDWA_Pseudo <opName, P>;
// VOP2b: instructions with an implicit carry-out in VCC (and a carry-in as
// well when the profile has three source operands), hence the VCC
// Uses/Defs and the extra SALU write resource in SchedRW.
142 multiclass VOP2bInst <string opName,
144 SDPatternOperator node = null_frag,
145 string revOp = opName,
146 bit useSGPRInput = !eq(P.NumSrcArgs, 3)> {
148 let SchedRW = [Write32Bit, WriteSALU] in {
149 let Uses = !if(useSGPRInput, [VCC, EXEC], [EXEC]), Defs = [VCC] in {
150 def _e32 : VOP2_Pseudo <opName, P>,
151 Commutable_REV<revOp#"_e32", !eq(revOp, opName)>;
153 def _sdwa : VOP2_SDWA_Pseudo <opName, P> {
154 let AsmMatchConverter = "cvtSdwaVOP2b"; // VOP2b-specific SDWA conversion
158 def _e64 : VOP3_Pseudo <opName, P, getVOP2Pat64<node, P>.ret>,
159 Commutable_REV<revOp#"_e64", !eq(revOp, opName)>;
// VOP2e: instructions that implicitly read VCC (e.g. v_cndmask_b32) but do
// not define it, so VCC appears in Uses only.
163 multiclass VOP2eInst <string opName,
165 SDPatternOperator node = null_frag,
166 string revOp = opName,
167 bit useSGPRInput = !eq(P.NumSrcArgs, 3)> {
169 let SchedRW = [Write32Bit] in {
170 let Uses = !if(useSGPRInput, [VCC, EXEC], [EXEC]) in {
171 def _e32 : VOP2_Pseudo <opName, P>,
172 Commutable_REV<revOp#"_e32", !eq(revOp, opName)>;
175 def _e64 : VOP3_Pseudo <opName, P, getVOP2Pat64<node, P>.ret>,
176 Commutable_REV<revOp#"_e64", !eq(revOp, opName)>;
// Profile for v_madak_*: vdst = src0 * src1 + K, with the constant K as a
// trailing 32-bit literal (f32kimm/f16kimm depending on the value type).
180 class VOP_MADAK <ValueType vt> : VOPProfile <[vt, vt, vt, vt]> {
181 field Operand ImmOpType = !if(!eq(vt.Size, 32), f32kimm, f16kimm);
// NOTE(review): src0 is VCSrc_f32 even for the f16 instantiation below --
// confirm this is intended.
182 field dag Ins32 = (ins VCSrc_f32:$src0, VGPR_32:$src1, ImmOpType:$imm);
183 field bit HasExt = 0;
185 // Hack to stop printing _e64
186 let DstRC = RegisterOperand<VGPR_32>;
187 field string Asm32 = " $vdst, $src0, $src1, $imm";
190 def VOP_MADAK_F16 : VOP_MADAK <f16>;
191 def VOP_MADAK_F32 : VOP_MADAK <f32>;
// Profile for v_madmk_*: vdst = src0 * K + src1; identical to VOP_MADAK
// except the literal K sits between the two register sources.
193 class VOP_MADMK <ValueType vt> : VOPProfile <[vt, vt, vt, vt]> {
194 field Operand ImmOpType = !if(!eq(vt.Size, 32), f32kimm, f16kimm);
// NOTE(review): src0 is VCSrc_f32 even for the f16 instantiation below --
// confirm this is intended.
195 field dag Ins32 = (ins VCSrc_f32:$src0, ImmOpType:$imm, VGPR_32:$src1);
196 field bit HasExt = 0;
198 // Hack to stop printing _e64
199 let DstRC = RegisterOperand<VGPR_32>;
200 field string Asm32 = " $vdst, $src0, $imm, $src1";
203 def VOP_MADMK_F16 : VOP_MADMK <f16>;
204 def VOP_MADMK_F32 : VOP_MADMK <f32>;
206 // FIXME: Remove src2_modifiers. It isn't used, so is wasting memory
207 // and processing time but it makes it easier to convert to mad.
// Profile for v_mac_*: vdst = src0 * src1 + vdst.  src2 is the accumulator
// that the instantiation sites tie to vdst via Constraints/DisableEncoding.
208 class VOP_MAC <ValueType vt> : VOPProfile <[vt, vt, vt, vt]> {
209 let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1, VGPR_32:$src2);
210 let Ins64 = getIns64<Src0RC64, Src1RC64, RegisterOperand<VGPR_32>, 3,
211 HasModifiers, HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret;
212 let InsDPP = (ins Src0ModDPP:$src0_modifiers, Src0DPP:$src0,
213 Src1ModDPP:$src1_modifiers, Src1DPP:$src1,
214 VGPR_32:$src2, // stub argument
215 dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
216 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl);
218 let InsSDWA = (ins Src0ModSDWA:$src0_modifiers, Src0SDWA:$src0,
219 Src1ModSDWA:$src1_modifiers, Src1SDWA:$src1,
220 VGPR_32:$src2, // stub argument
221 clampmod:$clamp, omod:$omod,
222 dst_sel:$dst_sel, dst_unused:$dst_unused,
223 src0_sel:$src0_sel, src1_sel:$src1_sel);
// Asm strings only print two sources; the tied accumulator is implicit.
224 let Asm32 = getAsm32<1, 2, vt>.ret;
225 let Asm64 = getAsm64<1, 2, HasModifiers, HasOMod, vt>.ret;
226 let AsmDPP = getAsmDPP<1, 2, HasModifiers, vt>.ret;
227 let AsmSDWA = getAsmSDWA<1, 2, vt>.ret;
228 let AsmSDWA9 = getAsmSDWA9<1, 1, 2, vt>.ret;
// Concrete f16/f32 MAC profiles; only re-state Asm64 (see FIXME below).
235 def VOP_MAC_F16 : VOP_MAC <f16> {
236 // FIXME: Move 'Asm64' definition to VOP_MAC, and use 'vt'. Currently it gives
237 // 'not a string initializer' error.
238 let Asm64 = getAsm64<1, 2, HasModifiers, HasOMod, f16>.ret;
241 def VOP_MAC_F32 : VOP_MAC <f32> {
242 // FIXME: Move 'Asm64' definition to VOP_MAC, and use 'vt'. Currently it gives
243 // 'not a string initializer' error.
244 let Asm64 = getAsm64<1, 2, HasModifiers, HasOMod, f32>.ret;
247 // Write out to vcc or arbitrary SGPR.
// Profile for carry-out add/sub: the 32-bit encoding writes VCC implicitly
// (asm prints "vcc"); the 64-bit encoding exposes the carry as $sdst.
248 def VOP2b_I32_I1_I32_I32 : VOPProfile<[i32, i32, i32, untyped]> {
249 let Asm32 = "$vdst, vcc, $src0, $src1";
250 let Asm64 = "$vdst, $sdst, $src0, $src1";
251 let AsmSDWA = "$vdst, vcc, $src0_modifiers, $src1_modifiers$clamp $dst_sel $dst_unused $src0_sel $src1_sel";
252 let AsmSDWA9 = "$vdst, vcc, $src0_modifiers, $src1_modifiers$clamp $dst_sel $dst_unused $src0_sel $src1_sel";
253 let AsmDPP = "$vdst, vcc, $src0, $src1 $dpp_ctrl$row_mask$bank_mask$bound_ctrl";
254 let Outs32 = (outs DstRC:$vdst);
255 let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
258 // Write out to vcc or arbitrary SGPR and read in from vcc or
// Carry-in variant of the profile above: also consumes a carry (implicit
// VCC in the 32-bit form, explicit $src2 SGPR pair in the 64-bit form).
260 def VOP2b_I32_I1_I32_I32_I1 : VOPProfile<[i32, i32, i32, i1]> {
261 // We use VCSrc_b32 to exclude literal constants, even though the
262 // encoding normally allows them since the implicit VCC use means
263 // using one would always violate the constant bus
264 // restriction. SGPRs are still allowed because it should
265 // technically be possible to use VCC again as src0.
266 let Src0RC32 = VCSrc_b32;
267 let Asm32 = "$vdst, vcc, $src0, $src1, vcc";
268 let Asm64 = "$vdst, $sdst, $src0, $src1, $src2";
269 let AsmSDWA = "$vdst, vcc, $src0_modifiers, $src1_modifiers, vcc $clamp $dst_sel $dst_unused $src0_sel $src1_sel";
270 let AsmSDWA9 = "$vdst, vcc, $src0_modifiers, $src1_modifiers, vcc $clamp $dst_sel $dst_unused $src0_sel $src1_sel";
271 let AsmDPP = "$vdst, vcc, $src0, $src1, vcc $dpp_ctrl$row_mask$bank_mask$bound_ctrl";
272 let Outs32 = (outs DstRC:$vdst);
273 let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
275 // Suppress src2 implied by type since the 32-bit encoding uses an
277 let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1);
279 let InsSDWA = (ins Src0ModSDWA:$src0_modifiers, Src0SDWA:$src0,
280 Src1ModSDWA:$src1_modifiers, Src1SDWA:$src1,
281 clampmod:$clamp, omod:$omod,
282 dst_sel:$dst_sel, dst_unused:$dst_unused,
283 src0_sel:$src0_sel, src1_sel:$src1_sel);
285 let InsDPP = (ins Src0Mod:$src0_modifiers, Src0DPP:$src0,
286 Src1Mod:$src1_modifiers, Src1DPP:$src1,
287 dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
288 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl);
293 // Read in from vcc or arbitrary SGPR
// Profile for v_cndmask-style ops: implicit VCC read in the 32-bit form,
// explicit $src2 in the 64-bit form; no carry-out.
294 def VOP2e_I32_I32_I32_I1 : VOPProfile<[i32, i32, i32, i1]> {
295 let Src0RC32 = VCSrc_b32; // See comment in def VOP2b_I32_I1_I32_I32_I1 above.
296 let Asm32 = "$vdst, $src0, $src1, vcc";
297 let Asm64 = "$vdst, $src0, $src1, $src2";
298 let Outs32 = (outs DstRC:$vdst);
299 let Outs64 = (outs DstRC:$vdst);
301 // Suppress src2 implied by type since the 32-bit encoding uses an
303 let Ins32 = (ins Src0RC32:$src0, Src1RC32:$src1);
// v_readlane_b32: result goes to an SGPR (SReg_32 dst), source lane value
// comes from a VGPR with a scalar lane-select.
306 def VOP_READLANE : VOPProfile<[i32, i32, i32]> {
307 let Outs32 = (outs SReg_32:$vdst);
309 let Ins32 = (ins VGPR_32:$src0, SCSrc_b32:$src1);
311 let Asm32 = " $vdst, $src0, $src1";
// v_writelane_b32: writes a scalar value into one lane of a VGPR; both
// the value and the lane select are scalar sources.
317 def VOP_WRITELANE : VOPProfile<[i32, i32, i32]> {
318 let Outs32 = (outs VGPR_32:$vdst);
320 let Ins32 = (ins SCSrc_b32:$src0, SCSrc_b32:$src1);
322 let Asm32 = " $vdst, $src0, $src1";
328 //===----------------------------------------------------------------------===//
330 //===----------------------------------------------------------------------===//
// Pseudo-instruction declarations common to all GCN subtargets.
332 let SubtargetPredicate = isGCN in {
334 defm V_CNDMASK_B32 : VOP2eInst <"v_cndmask_b32", VOP2e_I32_I32_I32_I1>;
335 def V_MADMK_F32 : VOP2_Pseudo <"v_madmk_f32", VOP_MADMK_F32, [], "">;
337 let isCommutable = 1 in {
338 defm V_ADD_F32 : VOP2Inst <"v_add_f32", VOP_F32_F32_F32, fadd>;
339 defm V_SUB_F32 : VOP2Inst <"v_sub_f32", VOP_F32_F32_F32, fsub>;
340 defm V_SUBREV_F32 : VOP2Inst <"v_subrev_f32", VOP_F32_F32_F32, null_frag, "v_sub_f32">;
341 defm V_MUL_LEGACY_F32 : VOP2Inst <"v_mul_legacy_f32", VOP_F32_F32_F32, AMDGPUfmul_legacy>;
342 defm V_MUL_F32 : VOP2Inst <"v_mul_f32", VOP_F32_F32_F32, fmul>;
343 defm V_MUL_I32_I24 : VOP2Inst <"v_mul_i32_i24", VOP_I32_I32_I32, AMDGPUmul_i24>;
344 defm V_MUL_HI_I32_I24 : VOP2Inst <"v_mul_hi_i32_i24", VOP_I32_I32_I32, AMDGPUmulhi_i24>;
345 defm V_MUL_U32_U24 : VOP2Inst <"v_mul_u32_u24", VOP_I32_I32_I32, AMDGPUmul_u24>;
346 defm V_MUL_HI_U32_U24 : VOP2Inst <"v_mul_hi_u32_u24", VOP_I32_I32_I32, AMDGPUmulhi_u24>;
347 defm V_MIN_F32 : VOP2Inst <"v_min_f32", VOP_F32_F32_F32, fminnum>;
348 defm V_MAX_F32 : VOP2Inst <"v_max_f32", VOP_F32_F32_F32, fmaxnum>;
349 defm V_MIN_I32 : VOP2Inst <"v_min_i32", VOP_I32_I32_I32>;
350 defm V_MAX_I32 : VOP2Inst <"v_max_i32", VOP_I32_I32_I32>;
351 defm V_MIN_U32 : VOP2Inst <"v_min_u32", VOP_I32_I32_I32>;
352 defm V_MAX_U32 : VOP2Inst <"v_max_u32", VOP_I32_I32_I32>;
353 defm V_LSHRREV_B32 : VOP2Inst <"v_lshrrev_b32", VOP_I32_I32_I32, null_frag, "v_lshr_b32">;
354 defm V_ASHRREV_I32 : VOP2Inst <"v_ashrrev_i32", VOP_I32_I32_I32, null_frag, "v_ashr_i32">;
355 defm V_LSHLREV_B32 : VOP2Inst <"v_lshlrev_b32", VOP_I32_I32_I32, null_frag, "v_lshl_b32">;
356 defm V_AND_B32 : VOP2Inst <"v_and_b32", VOP_I32_I32_I32>;
357 defm V_OR_B32 : VOP2Inst <"v_or_b32", VOP_I32_I32_I32>;
358 defm V_XOR_B32 : VOP2Inst <"v_xor_b32", VOP_I32_I32_I32>;
// MAC ties its accumulator (src2) to the destination register.
360 let Constraints = "$vdst = $src2", DisableEncoding="$src2",
361 isConvertibleToThreeAddress = 1 in {
362 defm V_MAC_F32 : VOP2Inst <"v_mac_f32", VOP_MAC_F32>;
365 def V_MADAK_F32 : VOP2_Pseudo <"v_madak_f32", VOP_MADAK_F32, [], "">;
367 // No patterns so that the scalar instructions are always selected.
368 // The scalar versions will be replaced with vector when needed later.
370 // V_ADD_I32, V_SUB_I32, and V_SUBREV_I32 where renamed to *_U32 in VI,
371 // but the VI instructions behave the same as the SI versions.
372 defm V_ADD_I32 : VOP2bInst <"v_add_i32", VOP2b_I32_I1_I32_I32>;
373 defm V_SUB_I32 : VOP2bInst <"v_sub_i32", VOP2b_I32_I1_I32_I32>;
374 defm V_SUBREV_I32 : VOP2bInst <"v_subrev_i32", VOP2b_I32_I1_I32_I32, null_frag, "v_sub_i32">;
375 defm V_ADDC_U32 : VOP2bInst <"v_addc_u32", VOP2b_I32_I1_I32_I32_I1>;
376 defm V_SUBB_U32 : VOP2bInst <"v_subb_u32", VOP2b_I32_I1_I32_I32_I1>;
377 defm V_SUBBREV_U32 : VOP2bInst <"v_subbrev_u32", VOP2b_I32_I1_I32_I32_I1, null_frag, "v_subb_u32">;
378 } // End isCommutable = 1
380 // These are special and do not read the exec mask.
381 let isConvergent = 1, Uses = []<Register> in {
382 def V_READLANE_B32 : VOP2_Pseudo<"v_readlane_b32", VOP_READLANE,
383 [(set i32:$vdst, (int_amdgcn_readlane i32:$src0, i32:$src1))], "">;
385 def V_WRITELANE_B32 : VOP2_Pseudo<"v_writelane_b32", VOP_WRITELANE, [], "">;
386 } // End isConvergent = 1
388 defm V_BFM_B32 : VOP2Inst <"v_bfm_b32", VOP_NO_EXT<VOP_I32_I32_I32>>;
389 defm V_BCNT_U32_B32 : VOP2Inst <"v_bcnt_u32_b32", VOP_NO_EXT<VOP_I32_I32_I32>>;
390 defm V_MBCNT_LO_U32_B32 : VOP2Inst <"v_mbcnt_lo_u32_b32", VOP_NO_EXT<VOP_I32_I32_I32>, int_amdgcn_mbcnt_lo>;
391 defm V_MBCNT_HI_U32_B32 : VOP2Inst <"v_mbcnt_hi_u32_b32", VOP_NO_EXT<VOP_I32_I32_I32>, int_amdgcn_mbcnt_hi>;
392 defm V_LDEXP_F32 : VOP2Inst <"v_ldexp_f32", VOP_NO_EXT<VOP_F32_F32_I32>, AMDGPUldexp>;
393 defm V_CVT_PKACCUM_U8_F32 : VOP2Inst <"v_cvt_pkaccum_u8_f32", VOP_NO_EXT<VOP_I32_F32_I32>>; // TODO: set "Uses = dst"
394 defm V_CVT_PKNORM_I16_F32 : VOP2Inst <"v_cvt_pknorm_i16_f32", VOP_NO_EXT<VOP_I32_F32_F32>>;
395 defm V_CVT_PKNORM_U16_F32 : VOP2Inst <"v_cvt_pknorm_u16_f32", VOP_NO_EXT<VOP_I32_F32_F32>>;
396 defm V_CVT_PKRTZ_F16_F32 : VOP2Inst <"v_cvt_pkrtz_f16_f32", VOP_NO_EXT<VOP_I32_F32_F32>, AMDGPUpkrtz_f16_f32>;
397 defm V_CVT_PK_U16_U32 : VOP2Inst <"v_cvt_pk_u16_u32", VOP_NO_EXT<VOP_I32_I32_I32>>;
398 defm V_CVT_PK_I16_I32 : VOP2Inst <"v_cvt_pk_i16_i32", VOP_NO_EXT<VOP_I32_I32_I32>>;
400 } // End SubtargetPredicate = isGCN
// Select carry-using add/sub nodes to the explicit-carry _e64 forms.
403 (AMDGPUadde i32:$src0, i32:$src1, i1:$src2),
404 (V_ADDC_U32_e64 $src0, $src1, $src2)
408 (AMDGPUsube i32:$src0, i32:$src1, i1:$src2),
409 (V_SUBB_U32_e64 $src0, $src1, $src2)
412 // These instructions only exist on SI and CI
413 let SubtargetPredicate = isSICI in {
415 defm V_MIN_LEGACY_F32 : VOP2Inst <"v_min_legacy_f32", VOP_F32_F32_F32, AMDGPUfmin_legacy>;
416 defm V_MAX_LEGACY_F32 : VOP2Inst <"v_max_legacy_f32", VOP_F32_F32_F32, AMDGPUfmax_legacy>;
418 let isCommutable = 1 in {
419 defm V_MAC_LEGACY_F32 : VOP2Inst <"v_mac_legacy_f32", VOP_F32_F32_F32>;
// Non-rev shift forms; VI only has the *rev variants declared above.
420 defm V_LSHR_B32 : VOP2Inst <"v_lshr_b32", VOP_I32_I32_I32>;
421 defm V_ASHR_I32 : VOP2Inst <"v_ashr_i32", VOP_I32_I32_I32>;
422 defm V_LSHL_B32 : VOP2Inst <"v_lshl_b32", VOP_I32_I32_I32>;
423 } // End isCommutable = 1
425 } // End let SubtargetPredicate = SICI
// 16-bit VOP2 pseudo instructions (VI and later).
427 let SubtargetPredicate = Has16BitInsts in {
429 def V_MADMK_F16 : VOP2_Pseudo <"v_madmk_f16", VOP_MADMK_F16, [], "">;
430 defm V_LSHLREV_B16 : VOP2Inst <"v_lshlrev_b16", VOP_I16_I16_I16>;
431 defm V_LSHRREV_B16 : VOP2Inst <"v_lshrrev_b16", VOP_I16_I16_I16>;
432 defm V_ASHRREV_I16 : VOP2Inst <"v_ashrrev_i16", VOP_I16_I16_I16>;
433 defm V_LDEXP_F16 : VOP2Inst <"v_ldexp_f16", VOP_F16_F16_I32, AMDGPUldexp>;
435 let isCommutable = 1 in {
436 defm V_ADD_F16 : VOP2Inst <"v_add_f16", VOP_F16_F16_F16, fadd>;
437 defm V_SUB_F16 : VOP2Inst <"v_sub_f16", VOP_F16_F16_F16, fsub>;
438 defm V_SUBREV_F16 : VOP2Inst <"v_subrev_f16", VOP_F16_F16_F16, null_frag, "v_sub_f16">;
439 defm V_MUL_F16 : VOP2Inst <"v_mul_f16", VOP_F16_F16_F16, fmul>;
440 def V_MADAK_F16 : VOP2_Pseudo <"v_madak_f16", VOP_MADAK_F16, [], "">;
441 defm V_ADD_U16 : VOP2Inst <"v_add_u16", VOP_I16_I16_I16>;
442 defm V_SUB_U16 : VOP2Inst <"v_sub_u16" , VOP_I16_I16_I16>;
443 defm V_SUBREV_U16 : VOP2Inst <"v_subrev_u16", VOP_I16_I16_I16, null_frag, "v_sub_u16">;
444 defm V_MUL_LO_U16 : VOP2Inst <"v_mul_lo_u16", VOP_I16_I16_I16>;
445 defm V_MAX_F16 : VOP2Inst <"v_max_f16", VOP_F16_F16_F16, fmaxnum>;
446 defm V_MIN_F16 : VOP2Inst <"v_min_f16", VOP_F16_F16_F16, fminnum>;
447 defm V_MAX_U16 : VOP2Inst <"v_max_u16", VOP_I16_I16_I16>;
448 defm V_MAX_I16 : VOP2Inst <"v_max_i16", VOP_I16_I16_I16>;
449 defm V_MIN_U16 : VOP2Inst <"v_min_u16", VOP_I16_I16_I16>;
450 defm V_MIN_I16 : VOP2Inst <"v_min_i16", VOP_I16_I16_I16>;
// MAC ties its accumulator (src2) to the destination register.
452 let Constraints = "$vdst = $src2", DisableEncoding="$src2",
453 isConvertibleToThreeAddress = 1 in {
454 defm V_MAC_F16 : VOP2Inst <"v_mac_f16", VOP_MAC_F16>;
456 } // End isCommutable = 1
458 } // End SubtargetPredicate = Has16BitInsts
460 // Note: 16-bit instructions produce a 0 result in the high 16-bits.
// Selection patterns mapping an i16 op, plus its zext-to-i32 and
// zext-to-i64 forms, onto one 16-bit VI instruction.
461 multiclass Arithmetic_i16_Pats <SDPatternOperator op, Instruction inst> {
// Plain i16 form.
464 (op i16:$src0, i16:$src1),
// zext to i32: the instruction already zeroes the high half (note above).
469 (i32 (zext (op i16:$src0, i16:$src1))),
// zext to i64: build the 64-bit result with an explicit zero high dword.
474 (i64 (zext (op i16:$src0, i16:$src1))),
475 (REG_SEQUENCE VReg_64,
476 (inst $src0, $src1), sub0,
477 (V_MOV_B32_e32 (i32 0)), sub1)
// Same idea as Arithmetic_i16_Pats but for the *rev shift instructions,
// which take their operands in reversed order (note $src1, $src0 below).
482 multiclass Bits_OpsRev_i16_Pats <SDPatternOperator op, Instruction inst> {
485 (op i16:$src0, i16:$src1),
490 (i32 (zext (op i16:$src0, i16:$src1))),
496 (i64 (zext (op i16:$src0, i16:$src1))),
497 (REG_SEQUENCE VReg_64,
498 (inst $src1, $src0), sub0,
499 (V_MOV_B32_e32 (i32 0)), sub1)
// Zero/any-extend an i1 to i16 by selecting 0/1 with v_cndmask.
503 class ZExt_i16_i1_Pat <SDNode ext> : Pat <
505 (V_CNDMASK_B32_e64 (i32 0), (i32 1), $src)
// Pattern instantiations for subtargets with 16-bit instructions.
508 let Predicates = [Has16BitInsts] in {
510 defm : Arithmetic_i16_Pats<add, V_ADD_U16_e64>;
511 defm : Arithmetic_i16_Pats<mul, V_MUL_LO_U16_e64>;
512 defm : Arithmetic_i16_Pats<sub, V_SUB_U16_e64>;
513 defm : Arithmetic_i16_Pats<smin, V_MIN_I16_e64>;
514 defm : Arithmetic_i16_Pats<smax, V_MAX_I16_e64>;
515 defm : Arithmetic_i16_Pats<umin, V_MIN_U16_e64>;
516 defm : Arithmetic_i16_Pats<umax, V_MAX_U16_e64>;
// 16-bit bitwise ops are handled by the 32-bit instructions directly.
519 (and i16:$src0, i16:$src1),
520 (V_AND_B32_e64 $src0, $src1)
524 (or i16:$src0, i16:$src1),
525 (V_OR_B32_e64 $src0, $src1)
529 (xor i16:$src0, i16:$src1),
530 (V_XOR_B32_e64 $src0, $src1)
533 defm : Bits_OpsRev_i16_Pats<shl, V_LSHLREV_B16_e64>;
534 defm : Bits_OpsRev_i16_Pats<srl, V_LSHRREV_B16_e64>;
535 defm : Bits_OpsRev_i16_Pats<sra, V_ASHRREV_I16_e64>;
537 def : ZExt_i16_i1_Pat<zext>;
538 def : ZExt_i16_i1_Pat<anyext>;
// sext of i1 to i16: select 0/-1.
541 (i16 (sext i1:$src)),
542 (V_CNDMASK_B32_e64 (i32 0), (i32 -1), $src)
545 // Undo sub x, c -> add x, -c canonicalization since c is more likely
546 // an inline immediate than -c.
547 // TODO: Also do for 64-bit.
549 (add i16:$src0, (i16 NegSubInlineConst16:$src1)),
550 (V_SUB_U16_e64 $src0, NegSubInlineConst16:$src1)
553 } // End Predicates = [Has16BitInsts]
555 //===----------------------------------------------------------------------===//
557 //===----------------------------------------------------------------------===//
// Helper multiclasses producing the real SI/CI encodings (suffix _si)
// from the pseudo instructions declared above.
559 let AssemblerPredicates = [isSICI], DecoderNamespace = "SICI" in {
561 multiclass VOP2_Real_si <bits<6> op> {
563 VOP2_Real<!cast<VOP2_Pseudo>(NAME), SIEncodingFamily.SI>,
564 VOP2e<op{5-0}, !cast<VOP2_Pseudo>(NAME).Pfl>;
// MADMK/MADAK forms use the 64-bit literal-carrying encoding.
567 multiclass VOP2_Real_MADK_si <bits<6> op> {
568 def _si : VOP2_Real<!cast<VOP2_Pseudo>(NAME), SIEncodingFamily.SI>,
569 VOP2_MADKe<op{5-0}, !cast<VOP2_Pseudo>(NAME).Pfl>;
572 multiclass VOP2_Real_e32_si <bits<6> op> {
574 VOP2_Real<!cast<VOP2_Pseudo>(NAME#"_e32"), SIEncodingFamily.SI>,
575 VOP2e<op{5-0}, !cast<VOP2_Pseudo>(NAME#"_e32").Pfl>;
// Emits both the _e32 (VOP2) and _e64 (VOP3) real encodings.
578 multiclass VOP2_Real_e32e64_si <bits<6> op> : VOP2_Real_e32_si<op> {
580 VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
581 VOP3e_si <{1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
// VOP2b flavor: the _e64 form uses the VOP3b (carry-out) encoding.
584 multiclass VOP2be_Real_e32e64_si <bits<6> op> : VOP2_Real_e32_si<op> {
586 VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.SI>,
587 VOP3be_si <{1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
590 } // End AssemblerPredicates = [isSICI], DecoderNamespace = "SICI"
// Real SI/CI encodings; the hex argument is the SI VOP2 opcode number.
592 defm V_CNDMASK_B32 : VOP2_Real_e32e64_si <0x0>;
593 defm V_ADD_F32 : VOP2_Real_e32e64_si <0x3>;
594 defm V_SUB_F32 : VOP2_Real_e32e64_si <0x4>;
595 defm V_SUBREV_F32 : VOP2_Real_e32e64_si <0x5>;
596 defm V_MUL_LEGACY_F32 : VOP2_Real_e32e64_si <0x7>;
597 defm V_MUL_F32 : VOP2_Real_e32e64_si <0x8>;
598 defm V_MUL_I32_I24 : VOP2_Real_e32e64_si <0x9>;
599 defm V_MUL_HI_I32_I24 : VOP2_Real_e32e64_si <0xa>;
600 defm V_MUL_U32_U24 : VOP2_Real_e32e64_si <0xb>;
601 defm V_MUL_HI_U32_U24 : VOP2_Real_e32e64_si <0xc>;
602 defm V_MIN_F32 : VOP2_Real_e32e64_si <0xf>;
603 defm V_MAX_F32 : VOP2_Real_e32e64_si <0x10>;
604 defm V_MIN_I32 : VOP2_Real_e32e64_si <0x11>;
605 defm V_MAX_I32 : VOP2_Real_e32e64_si <0x12>;
606 defm V_MIN_U32 : VOP2_Real_e32e64_si <0x13>;
607 defm V_MAX_U32 : VOP2_Real_e32e64_si <0x14>;
608 defm V_LSHRREV_B32 : VOP2_Real_e32e64_si <0x16>;
609 defm V_ASHRREV_I32 : VOP2_Real_e32e64_si <0x18>;
610 defm V_LSHLREV_B32 : VOP2_Real_e32e64_si <0x1a>;
611 defm V_AND_B32 : VOP2_Real_e32e64_si <0x1b>;
612 defm V_OR_B32 : VOP2_Real_e32e64_si <0x1c>;
613 defm V_XOR_B32 : VOP2_Real_e32e64_si <0x1d>;
614 defm V_MAC_F32 : VOP2_Real_e32e64_si <0x1f>;
615 defm V_MADMK_F32 : VOP2_Real_MADK_si <0x20>;
616 defm V_MADAK_F32 : VOP2_Real_MADK_si <0x21>;
// Carry ops use the VOP3b-encoded _e64 form.
617 defm V_ADD_I32 : VOP2be_Real_e32e64_si <0x25>;
618 defm V_SUB_I32 : VOP2be_Real_e32e64_si <0x26>;
619 defm V_SUBREV_I32 : VOP2be_Real_e32e64_si <0x27>;
620 defm V_ADDC_U32 : VOP2be_Real_e32e64_si <0x28>;
621 defm V_SUBB_U32 : VOP2be_Real_e32e64_si <0x29>;
622 defm V_SUBBREV_U32 : VOP2be_Real_e32e64_si <0x2a>;
624 defm V_READLANE_B32 : VOP2_Real_si <0x01>;
// v_writelane on SI accepts a wider src0 class for assembly purposes.
626 let InOperandList = (ins SSrc_b32:$src0, SCSrc_b32:$src1) in {
627 defm V_WRITELANE_B32 : VOP2_Real_si <0x02>;
630 defm V_MAC_LEGACY_F32 : VOP2_Real_e32e64_si <0x6>;
631 defm V_MIN_LEGACY_F32 : VOP2_Real_e32e64_si <0xd>;
632 defm V_MAX_LEGACY_F32 : VOP2_Real_e32e64_si <0xe>;
633 defm V_LSHR_B32 : VOP2_Real_e32e64_si <0x15>;
634 defm V_ASHR_I32 : VOP2_Real_e32e64_si <0x17>;
635 defm V_LSHL_B32 : VOP2_Real_e32e64_si <0x19>;
637 defm V_BFM_B32 : VOP2_Real_e32e64_si <0x1e>;
638 defm V_BCNT_U32_B32 : VOP2_Real_e32e64_si <0x22>;
639 defm V_MBCNT_LO_U32_B32 : VOP2_Real_e32e64_si <0x23>;
640 defm V_MBCNT_HI_U32_B32 : VOP2_Real_e32e64_si <0x24>;
641 defm V_LDEXP_F32 : VOP2_Real_e32e64_si <0x2b>;
642 defm V_CVT_PKACCUM_U8_F32 : VOP2_Real_e32e64_si <0x2c>;
643 defm V_CVT_PKNORM_I16_F32 : VOP2_Real_e32e64_si <0x2d>;
644 defm V_CVT_PKNORM_U16_F32 : VOP2_Real_e32e64_si <0x2e>;
645 defm V_CVT_PKRTZ_F16_F32 : VOP2_Real_e32e64_si <0x2f>;
646 defm V_CVT_PK_U16_U32 : VOP2_Real_e32e64_si <0x30>;
647 defm V_CVT_PK_I16_I32 : VOP2_Real_e32e64_si <0x31>;
650 //===----------------------------------------------------------------------===//
652 //===----------------------------------------------------------------------===//
// VOP2 instruction with a DPP (data-parallel primitives) control word
// appended; the 0xfa value in the src0 slot marks the DPP encoding.
654 class VOP2_DPP <bits<6> op, VOP2_Pseudo ps, VOPProfile P = ps.Pfl> :
655 VOP_DPP <ps.OpName, P> {
// Inherit scheduling/constraint properties from the pseudo.
658 let SchedRW = ps.SchedRW;
659 let hasSideEffects = ps.hasSideEffects;
660 let Constraints = ps.Constraints;
661 let DisableEncoding = ps.DisableEncoding;
665 let Inst{8-0} = 0xfa; //dpp
666 let Inst{16-9} = !if(P.HasSrc1, src1{7-0}, 0);
667 let Inst{24-17} = !if(P.EmitDst, vdst{7-0}, 0);
668 let Inst{30-25} = op;
669 let Inst{31} = 0x0; //encoding
// Helper multiclasses producing the real VI encodings (suffix _vi).
672 let AssemblerPredicates = [isVI], DecoderNamespace = "VI" in {
// VOP3-encoded 32-bit ops (readlane/writelane on VI) with a 10-bit opcode.
674 multiclass VOP32_Real_vi <bits<10> op> {
676 VOP2_Real<!cast<VOP2_Pseudo>(NAME), SIEncodingFamily.VI>,
677 VOP3e_vi<op, !cast<VOP2_Pseudo>(NAME).Pfl>;
680 multiclass VOP2_Real_MADK_vi <bits<6> op> {
681 def _vi : VOP2_Real<!cast<VOP2_Pseudo>(NAME), SIEncodingFamily.VI>,
682 VOP2_MADKe<op{5-0}, !cast<VOP2_Pseudo>(NAME).Pfl>;
685 multiclass VOP2_Real_e32_vi <bits<6> op> {
687 VOP2_Real<!cast<VOP2_Pseudo>(NAME#"_e32"), SIEncodingFamily.VI>,
688 VOP2e<op{5-0}, !cast<VOP2_Pseudo>(NAME#"_e32").Pfl>;
691 multiclass VOP2_Real_e64_vi <bits<10> op> {
693 VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
694 VOP3e_vi <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
// For ops that only exist in VOP3 form on VI (e.g. v_bfm_b32).
697 multiclass VOP2_Real_e64only_vi <bits<10> op> {
699 VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
700 VOP3e_vi <op, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl> {
701 // Hack to stop printing _e64
702 VOP3_Pseudo ps = !cast<VOP3_Pseudo>(NAME#"_e64");
703 let OutOperandList = (outs VGPR_32:$vdst);
704 let AsmString = ps.Mnemonic # " " # ps.AsmOperands;
// VOP2b flavor: the _e64 form uses the VOP3b (carry-out) encoding.
708 multiclass Base_VOP2be_Real_e32e64_vi <bits<6> op> : VOP2_Real_e32_vi<op> {
710 VOP3_Real<!cast<VOP3_Pseudo>(NAME#"_e64"), SIEncodingFamily.VI>,
711 VOP3be_vi <{0, 1, 0, 0, op{5-0}}, !cast<VOP3_Pseudo>(NAME#"_e64").Pfl>;
714 multiclass Base_VOP2_Real_e32e64_vi <bits<6> op> :
715 VOP2_Real_e32_vi<op>,
716 VOP2_Real_e64_vi<{0, 1, 0, 0, op{5-0}}>;
718 } // End AssemblerPredicates = [isVI], DecoderNamespace = "VI"
// Real SDWA encodings (VI flavor and the GFX9 SDWA9 flavor).
720 multiclass VOP2_SDWA_Real <bits<6> op> {
722 VOP_SDWA_Real <!cast<VOP2_SDWA_Pseudo>(NAME#"_sdwa")>,
723 VOP2_SDWAe <op{5-0}, !cast<VOP2_SDWA_Pseudo>(NAME#"_sdwa").Pfl>;
726 multiclass VOP2_SDWA9_Real <bits<6> op> {
728 VOP_SDWA9_Real <!cast<VOP2_SDWA_Pseudo>(NAME#"_sdwa")>,
729 VOP2_SDWA9Ae <op{5-0}, !cast<VOP2_SDWA_Pseudo>(NAME#"_sdwa").Pfl>;
// Combined emitters: e32 + e64 + SDWA/SDWA9 + a DPP form for asm/disasm.
732 multiclass VOP2be_Real_e32e64_vi <bits<6> op> :
733 Base_VOP2be_Real_e32e64_vi<op>, VOP2_SDWA_Real<op>, VOP2_SDWA9_Real<op> {
734 // For now left dpp only for asm/dasm
735 // TODO: add corresponding pseudo
736 def _dpp : VOP2_DPP<op, !cast<VOP2_Pseudo>(NAME#"_e32")>;
739 multiclass VOP2_Real_e32e64_vi <bits<6> op> :
740 Base_VOP2_Real_e32e64_vi<op>, VOP2_SDWA_Real<op>, VOP2_SDWA9_Real<op> {
741 // For now left dpp only for asm/dasm
742 // TODO: add corresponding pseudo
743 def _dpp : VOP2_DPP<op, !cast<VOP2_Pseudo>(NAME#"_e32")>;
// Real VI encodings; the hex argument is the VI VOP2 (or VOP3) opcode.
746 defm V_CNDMASK_B32 : Base_VOP2_Real_e32e64_vi <0x0>;
747 defm V_ADD_F32 : VOP2_Real_e32e64_vi <0x1>;
748 defm V_SUB_F32 : VOP2_Real_e32e64_vi <0x2>;
749 defm V_SUBREV_F32 : VOP2_Real_e32e64_vi <0x3>;
750 defm V_MUL_LEGACY_F32 : VOP2_Real_e32e64_vi <0x4>;
751 defm V_MUL_F32 : VOP2_Real_e32e64_vi <0x5>;
752 defm V_MUL_I32_I24 : VOP2_Real_e32e64_vi <0x6>;
753 defm V_MUL_HI_I32_I24 : VOP2_Real_e32e64_vi <0x7>;
754 defm V_MUL_U32_U24 : VOP2_Real_e32e64_vi <0x8>;
755 defm V_MUL_HI_U32_U24 : VOP2_Real_e32e64_vi <0x9>;
756 defm V_MIN_F32 : VOP2_Real_e32e64_vi <0xa>;
757 defm V_MAX_F32 : VOP2_Real_e32e64_vi <0xb>;
758 defm V_MIN_I32 : VOP2_Real_e32e64_vi <0xc>;
759 defm V_MAX_I32 : VOP2_Real_e32e64_vi <0xd>;
760 defm V_MIN_U32 : VOP2_Real_e32e64_vi <0xe>;
761 defm V_MAX_U32 : VOP2_Real_e32e64_vi <0xf>;
762 defm V_LSHRREV_B32 : VOP2_Real_e32e64_vi <0x10>;
763 defm V_ASHRREV_I32 : VOP2_Real_e32e64_vi <0x11>;
764 defm V_LSHLREV_B32 : VOP2_Real_e32e64_vi <0x12>;
765 defm V_AND_B32 : VOP2_Real_e32e64_vi <0x13>;
766 defm V_OR_B32 : VOP2_Real_e32e64_vi <0x14>;
767 defm V_XOR_B32 : VOP2_Real_e32e64_vi <0x15>;
768 defm V_MAC_F32 : VOP2_Real_e32e64_vi <0x16>;
769 defm V_MADMK_F32 : VOP2_Real_MADK_vi <0x17>;
770 defm V_MADAK_F32 : VOP2_Real_MADK_vi <0x18>;
// Carry ops use the VOP3b-encoded _e64 form.
771 defm V_ADD_I32 : VOP2be_Real_e32e64_vi <0x19>;
772 defm V_SUB_I32 : VOP2be_Real_e32e64_vi <0x1a>;
773 defm V_SUBREV_I32 : VOP2be_Real_e32e64_vi <0x1b>;
774 defm V_ADDC_U32 : VOP2be_Real_e32e64_vi <0x1c>;
775 defm V_SUBB_U32 : VOP2be_Real_e32e64_vi <0x1d>;
776 defm V_SUBBREV_U32 : VOP2be_Real_e32e64_vi <0x1e>;
// readlane/writelane are VOP3-encoded on VI.
778 defm V_READLANE_B32 : VOP32_Real_vi <0x289>;
779 defm V_WRITELANE_B32 : VOP32_Real_vi <0x28a>;
// These ops only have the VOP3 form on VI.
781 defm V_BFM_B32 : VOP2_Real_e64only_vi <0x293>;
782 defm V_BCNT_U32_B32 : VOP2_Real_e64only_vi <0x28b>;
783 defm V_MBCNT_LO_U32_B32 : VOP2_Real_e64only_vi <0x28c>;
784 defm V_MBCNT_HI_U32_B32 : VOP2_Real_e64only_vi <0x28d>;
785 defm V_LDEXP_F32 : VOP2_Real_e64only_vi <0x288>;
786 defm V_CVT_PKACCUM_U8_F32 : VOP2_Real_e64only_vi <0x1f0>;
787 defm V_CVT_PKNORM_I16_F32 : VOP2_Real_e64only_vi <0x294>;
788 defm V_CVT_PKNORM_U16_F32 : VOP2_Real_e64only_vi <0x295>;
789 defm V_CVT_PKRTZ_F16_F32 : VOP2_Real_e64only_vi <0x296>;
790 defm V_CVT_PK_U16_U32 : VOP2_Real_e64only_vi <0x297>;
791 defm V_CVT_PK_I16_I32 : VOP2_Real_e64only_vi <0x298>;
// 16-bit instructions (VI only).
793 defm V_ADD_F16 : VOP2_Real_e32e64_vi <0x1f>;
794 defm V_SUB_F16 : VOP2_Real_e32e64_vi <0x20>;
795 defm V_SUBREV_F16 : VOP2_Real_e32e64_vi <0x21>;
796 defm V_MUL_F16 : VOP2_Real_e32e64_vi <0x22>;
797 defm V_MAC_F16 : VOP2_Real_e32e64_vi <0x23>;
798 defm V_MADMK_F16 : VOP2_Real_MADK_vi <0x24>;
799 defm V_MADAK_F16 : VOP2_Real_MADK_vi <0x25>;
800 defm V_ADD_U16 : VOP2_Real_e32e64_vi <0x26>;
801 defm V_SUB_U16 : VOP2_Real_e32e64_vi <0x27>;
802 defm V_SUBREV_U16 : VOP2_Real_e32e64_vi <0x28>;
803 defm V_MUL_LO_U16 : VOP2_Real_e32e64_vi <0x29>;
804 defm V_LSHLREV_B16 : VOP2_Real_e32e64_vi <0x2a>;
805 defm V_LSHRREV_B16 : VOP2_Real_e32e64_vi <0x2b>;
806 defm V_ASHRREV_I16 : VOP2_Real_e32e64_vi <0x2c>;
807 defm V_MAX_F16 : VOP2_Real_e32e64_vi <0x2d>;
808 defm V_MIN_F16 : VOP2_Real_e32e64_vi <0x2e>;
809 defm V_MAX_U16 : VOP2_Real_e32e64_vi <0x2f>;
810 defm V_MAX_I16 : VOP2_Real_e32e64_vi <0x30>;
811 defm V_MIN_U16 : VOP2_Real_e32e64_vi <0x31>;
812 defm V_MIN_I16 : VOP2_Real_e32e64_vi <0x32>;
813 defm V_LDEXP_F16 : VOP2_Real_e32e64_vi <0x33>;
815 let SubtargetPredicate = isVI in {
817 // Aliases to simplify matching of floating-point instructions that
818 // are VOP2 on SI and VOP3 on VI.
// Expands the short VOP2-style spelling into the VOP3 real instruction,
// filling in zero source modifiers (and omod when the profile has it).
819 class SI2_VI3Alias <string name, VOP3_Real inst> : InstAlias <
820 name#" $dst, $src0, $src1",
821 !if(inst.Pfl.HasOMod,
822 (inst VGPR_32:$dst, 0, VCSrc_f32:$src0, 0, VCSrc_f32:$src1, 0, 0),
823 (inst VGPR_32:$dst, 0, VCSrc_f32:$src0, 0, VCSrc_f32:$src1, 0))
824 >, PredicateControl {
825 let UseInstAsmMatchConverter = 0;
826 let AsmVariantName = AMDGPUAsmVariants.VOP3;
829 def : SI2_VI3Alias <"v_ldexp_f32", V_LDEXP_F32_e64_vi>;
830 def : SI2_VI3Alias <"v_cvt_pkaccum_u8_f32", V_CVT_PKACCUM_U8_F32_e64_vi>;
831 def : SI2_VI3Alias <"v_cvt_pknorm_i16_f32", V_CVT_PKNORM_I16_F32_e64_vi>;
832 def : SI2_VI3Alias <"v_cvt_pknorm_u16_f32", V_CVT_PKNORM_U16_F32_e64_vi>;
833 def : SI2_VI3Alias <"v_cvt_pkrtz_f16_f32", V_CVT_PKRTZ_F16_F32_e64_vi>;
835 } // End SubtargetPredicate = isVI