//===-- VOP3PInstructions.td - Vector Instruction Definitions ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// VOP3P Classes
//===----------------------------------------------------------------------===//

class VOP3PInst<string OpName, VOPProfile P, SDPatternOperator node = null_frag> :
  VOP3P_Pseudo<OpName, P,
    !if(P.HasModifiers, getVOP3PModPat<P, node>.ret, getVOP3Pat<P, node>.ret)
>;

// Non-packed instructions that use the VOP3P encoding.
// VOP3 neg/abs and VOP3P opsel/opsel_hi modifiers are allowed.
class VOP3_VOP3PInst<string OpName, VOPProfile P, bit UseTiedOutput = 0,
                     SDPatternOperator node = null_frag> :
  VOP3P_Pseudo<OpName, P> {
  // These operands are only sort of f16 operands. Depending on
  // op_sel_hi, these may be interpreted as f32. The inline immediate
  // values are really f16 converted to f32, so we treat these as f16
  // operands.
  let InOperandList =
    !con(
      !con(
        (ins FP16InputMods:$src0_modifiers, VCSrc_f16:$src0,
             FP16InputMods:$src1_modifiers, VCSrc_f16:$src1,
             FP16InputMods:$src2_modifiers, VCSrc_f16:$src2,
             clampmod:$clamp),
        !if(UseTiedOutput, (ins VGPR_32:$vdst_in), (ins))),
      (ins op_sel:$op_sel, op_sel_hi:$op_sel_hi));

  let Constraints = !if(UseTiedOutput, "$vdst = $vdst_in", "");
  let DisableEncoding = !if(UseTiedOutput, "$vdst_in", "");
  let AsmOperands =
    " $vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$op_sel$op_sel_hi$clamp";
}

let isCommutable = 1 in {
def V_PK_MAD_I16 : VOP3PInst<"v_pk_mad_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;
def V_PK_MAD_U16 : VOP3PInst<"v_pk_mad_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16_V2I16>>;

let FPDPRounding = 1 in {
def V_PK_FMA_F16 : VOP3PInst<"v_pk_fma_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16_V2F16>, fma>;
def V_PK_ADD_F16 : VOP3PInst<"v_pk_add_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, fadd>;
def V_PK_MUL_F16 : VOP3PInst<"v_pk_mul_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, fmul>;
} // End FPDPRounding = 1

def V_PK_MAX_F16 : VOP3PInst<"v_pk_max_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, fmaxnum_like>;
def V_PK_MIN_F16 : VOP3PInst<"v_pk_min_f16", VOP3_Profile<VOP_V2F16_V2F16_V2F16>, fminnum_like>;

def V_PK_ADD_U16 : VOP3PInst<"v_pk_add_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, add>;
def V_PK_ADD_I16 : VOP3PInst<"v_pk_add_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>>;
def V_PK_MUL_LO_U16 : VOP3PInst<"v_pk_mul_lo_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, mul>;

def V_PK_MIN_I16 : VOP3PInst<"v_pk_min_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, smin>;
def V_PK_MIN_U16 : VOP3PInst<"v_pk_min_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, umin>;
def V_PK_MAX_I16 : VOP3PInst<"v_pk_max_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, smax>;
def V_PK_MAX_U16 : VOP3PInst<"v_pk_max_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, umax>;
} // End isCommutable = 1

def V_PK_SUB_U16 : VOP3PInst<"v_pk_sub_u16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>>;
def V_PK_SUB_I16 : VOP3PInst<"v_pk_sub_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, sub>;

def V_PK_LSHLREV_B16 : VOP3PInst<"v_pk_lshlrev_b16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, lshl_rev>;
def V_PK_ASHRREV_I16 : VOP3PInst<"v_pk_ashrrev_i16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, ashr_rev>;
def V_PK_LSHRREV_B16 : VOP3PInst<"v_pk_lshrrev_b16", VOP3_Profile<VOP_V2I16_V2I16_V2I16>, lshr_rev>;
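
// Illustrative example (a sketch, not from the original file): each VOP3P
// source is a 32-bit register holding two 16-bit elements; op_sel selects
// which half of each source feeds the low result lane, and op_sel_hi does
// the same for the high result lane. For example:
//
//   v_pk_add_f16 v0, v1, v2 op_sel:[1,0] op_sel_hi:[1,0]
//
// computes v1.hi + v2.lo into both halves of v0.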

// Undo sub x, c -> add x, -c canonicalization since c is more likely
// an inline immediate than -c.
// The constant will be emitted as a mov, and folded later.
// TODO: We could directly encode the immediate now.
def : GCNPat<
  (add (v2i16 (VOP3PMods0 v2i16:$src0, i32:$src0_modifiers, i1:$clamp)), NegSubInlineConstV216:$src1),
  (V_PK_SUB_U16 $src0_modifiers, $src0, SRCMODS.OP_SEL_1, NegSubInlineConstV216:$src1, $clamp)
>;

multiclass MadFmaMixPats<SDPatternOperator fma_like,
                         Instruction mix_inst,
                         Instruction mixlo_inst,
                         Instruction mixhi_inst> {
  def : GCNPat <
    (f16 (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                            (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                            (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))),
    (mixlo_inst $src0_modifiers, $src0,
                $src1_modifiers, $src1,
                $src2_modifiers, $src2,
                DSTCLAMP.NONE,
                (i32 (IMPLICIT_DEF)))
  >;

  // FIXME: Special case handling for maxhi (especially for clamp)
  // because dealing with the write to high half of the register is
  // difficult.
  def : GCNPat <
    (build_vector f16:$elt0, (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                                                (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                                                (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       $src2_modifiers, $src2,
                       DSTCLAMP.NONE,
                       $elt0))
  >;

  def : GCNPat <
    (build_vector
      f16:$elt0,
      (AMDGPUclamp (fpround (fma_like (f32 (VOP3PMadMixMods f16:$src0, i32:$src0_modifiers)),
                                      (f32 (VOP3PMadMixMods f16:$src1, i32:$src1_modifiers)),
                                      (f32 (VOP3PMadMixMods f16:$src2, i32:$src2_modifiers)))))),
    (v2f16 (mixhi_inst $src0_modifiers, $src0,
                       $src1_modifiers, $src1,
                       $src2_modifiers, $src2,
                       DSTCLAMP.ENABLE,
                       $elt0))
  >;

  def : GCNPat <
    (AMDGPUclamp (build_vector
      (fpround (fma_like (f32 (VOP3PMadMixMods f16:$lo_src0, i32:$lo_src0_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$lo_src1, i32:$lo_src1_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$lo_src2, i32:$lo_src2_modifiers)))),
      (fpround (fma_like (f32 (VOP3PMadMixMods f16:$hi_src0, i32:$hi_src0_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$hi_src1, i32:$hi_src1_modifiers)),
                         (f32 (VOP3PMadMixMods f16:$hi_src2, i32:$hi_src2_modifiers)))))),
    (v2f16 (mixhi_inst $hi_src0_modifiers, $hi_src0,
                       $hi_src1_modifiers, $hi_src1,
                       $hi_src2_modifiers, $hi_src2,
                       DSTCLAMP.ENABLE,
                       (mixlo_inst $lo_src0_modifiers, $lo_src0,
                                   $lo_src1_modifiers, $lo_src1,
                                   $lo_src2_modifiers, $lo_src2,
                                   DSTCLAMP.ENABLE,
                                   (i32 (IMPLICIT_DEF)))))
  >;
}

let SubtargetPredicate = HasMadMixInsts in {
// These are VOP3a-like opcodes which accept no omod.
// Size of src arguments (16/32) is controlled by op_sel.
// For 16-bit src arguments their location (hi/lo) is controlled by op_sel_hi.
let isCommutable = 1 in {
def V_MAD_MIX_F32 : VOP3_VOP3PInst<"v_mad_mix_f32", VOP3_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;

let FPDPRounding = 1 in {
// Clamp modifier is applied after conversion to f16.
def V_MAD_MIXLO_F16 : VOP3_VOP3PInst<"v_mad_mixlo_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, 1>;

let ClampLo = 0, ClampHi = 1 in {
def V_MAD_MIXHI_F16 : VOP3_VOP3PInst<"v_mad_mixhi_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, 1>;
}
} // End FPDPRounding = 1
} // End isCommutable = 1

defm : MadFmaMixPats<fmad, V_MAD_MIX_F32, V_MAD_MIXLO_F16, V_MAD_MIXHI_F16>;
} // End SubtargetPredicate = HasMadMixInsts

// Essentially the same as the mad_mix versions
let SubtargetPredicate = HasFmaMixInsts in {
let isCommutable = 1 in {
def V_FMA_MIX_F32 : VOP3_VOP3PInst<"v_fma_mix_f32", VOP3_Profile<VOP_F32_F16_F16_F16, VOP3_OPSEL>>;

let FPDPRounding = 1 in {
// Clamp modifier is applied after conversion to f16.
def V_FMA_MIXLO_F16 : VOP3_VOP3PInst<"v_fma_mixlo_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, 1>;

let ClampLo = 0, ClampHi = 1 in {
def V_FMA_MIXHI_F16 : VOP3_VOP3PInst<"v_fma_mixhi_f16", VOP3_Profile<VOP_F16_F16_F16_F16, VOP3_OPSEL>, 1>;
}
} // End FPDPRounding = 1
} // End isCommutable = 1

defm : MadFmaMixPats<fma, V_FMA_MIX_F32, V_FMA_MIXLO_F16, V_FMA_MIXHI_F16>;
} // End SubtargetPredicate = HasFmaMixInsts
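
// Illustrative example (a sketch, assuming the pseudo definitions above):
// the mixlo/mixhi pair packs two f32 mad/fma results into one v2f16
// register, e.g.
//
//   v_mad_mixlo_f16 v0, v1, v2, v3   // writes the low half of v0
//   v_mad_mixhi_f16 v0, v1, v2, v4   // writes the high half; the low half
//                                    // survives via the tied $vdst_in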

// Defines patterns that extract the signed 4-bit value starting at bit Idx[0].
foreach Idx = [[0,28],[4,24],[8,20],[12,16],[16,12],[20,8],[24,4]] in
  def ExtractSigned4bit_#Idx[0] : PatFrag<(ops node:$src),
                                          (sra (shl node:$src, (i32 Idx[1])), (i32 28))>;

// Defines a code pattern that extracts an unsigned (U = 1) or signed (U = 0)
// 4/8-bit value starting at FromBitIndex.
class Extract <int FromBitIndex, int BitMask, bit U>: PatFrag<
  (ops node:$src),
  !if (!or (!and (!eq (BitMask, 255), !eq (FromBitIndex, 24)), !eq (FromBitIndex, 28)), // last element
       !if (U, (srl node:$src, (i32 FromBitIndex)), (sra node:$src, (i32 FromBitIndex))),
       !if (!eq (FromBitIndex, 0), // first element
            !if (U, (and node:$src, (i32 BitMask)),
                 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src),
                                         (sext_inreg node:$src, i8))),
            !if (U, (and (srl node:$src, (i32 FromBitIndex)), (i32 BitMask)),
                 !if (!eq (BitMask, 15), (!cast<PatFrag>("ExtractSigned4bit_"#FromBitIndex) node:$src),
                     (sext_inreg (srl node:$src, (i32 FromBitIndex)), i8)))))>;

foreach Type = ["I", "U"] in
  foreach Index = 0-3 in {
    // Defines patterns that extract each Index'ed 8-bit from an unsigned
    // 32-bit scalar value.
    def #Type#Index#"_8bit" : Extract<!shl(Index, 3), 255, !if (!eq (Type, "U"), 1, 0)>;

    // Defines multiplication patterns where the multiplication is happening on each
    // Index'ed 8-bit of a 32-bit scalar value.
    def Mul#Type#_Elt#Index : PatFrag<
      (ops node:$src0, node:$src1),
      (!cast<HasOneUseBinOp>(!if (!eq (Type, "I"), AMDGPUmul_i24_oneuse, AMDGPUmul_u24_oneuse))
                            (!cast<Extract>(#Type#Index#"_8bit") node:$src0),
                            (!cast<Extract>(#Type#Index#"_8bit") node:$src1))>;
  }

// Different variants of dot8 patterns cause a huge increase in the compile time.
// Define non-associative/commutative add/mul to prevent permutation in the dot8
// pattern.
def NonACAdd        : SDNode<"ISD::ADD"       , SDTIntBinOp>;
def NonACAdd_oneuse : HasOneUseBinOp<NonACAdd>;

def NonACAMDGPUmul_u24        : SDNode<"AMDGPUISD::MUL_U24"   , SDTIntBinOp>;
def NonACAMDGPUmul_u24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_u24>;

def NonACAMDGPUmul_i24        : SDNode<"AMDGPUISD::MUL_I24"   , SDTIntBinOp>;
def NonACAMDGPUmul_i24_oneuse : HasOneUseBinOp<NonACAMDGPUmul_i24>;
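
// For reference, some mechanical expansions of the Extract class above:
//   U1_8bit matches (and (srl $src, (i32 8)), (i32 255))
//   I0_8bit matches (sext_inreg $src, i8)
//   I3_8bit matches (sra $src, (i32 24))   // the "last element" case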
def Mul#Type#Index#"_4bit" : PatFrag< (ops node:$src0, node:$src1), (!cast(!if (!eq (Type, "I"), NonACAMDGPUmul_i24_oneuse, NonACAMDGPUmul_u24_oneuse)) (!cast(#Type#Index#"_4bit") node:$src0), (!cast(#Type#Index#"_4bit") node:$src1))>; } class UDot2Pat : GCNPat < (add (add_oneuse (AMDGPUmul_u24_oneuse (srl i32:$src0, (i32 16)), (srl i32:$src1, (i32 16))), i32:$src2), (AMDGPUmul_u24_oneuse (and i32:$src0, (i32 65535)), (and i32:$src1, (i32 65535))) ), (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> { let SubtargetPredicate = !cast(Inst).SubtargetPredicate; } class SDot2Pat : GCNPat < (add (add_oneuse (AMDGPUmul_i24_oneuse (sra i32:$src0, (i32 16)), (sra i32:$src1, (i32 16))), i32:$src2), (AMDGPUmul_i24_oneuse (sext_inreg i32:$src0, i16), (sext_inreg i32:$src1, i16))), (Inst (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))> { let SubtargetPredicate = !cast(Inst).SubtargetPredicate; } let SubtargetPredicate = HasDot2Insts in { def V_DOT2_F32_F16 : VOP3PInst<"v_dot2_f32_f16", VOP3_Profile>; def V_DOT2_I32_I16 : VOP3PInst<"v_dot2_i32_i16", VOP3_Profile>; def V_DOT2_U32_U16 : VOP3PInst<"v_dot2_u32_u16", VOP3_Profile>; def V_DOT4_U32_U8 : VOP3PInst<"v_dot4_u32_u8", VOP3_Profile>; def V_DOT8_U32_U4 : VOP3PInst<"v_dot8_u32_u4", VOP3_Profile>; } // End SubtargetPredicate = HasDot2Insts let SubtargetPredicate = HasDot1Insts in { def V_DOT4_I32_I8 : VOP3PInst<"v_dot4_i32_i8", VOP3_Profile>; def V_DOT8_I32_I4 : VOP3PInst<"v_dot8_i32_i4", VOP3_Profile>; } // End SubtargetPredicate = HasDot1Insts multiclass DotPats { let SubtargetPredicate = dot_inst.SubtargetPredicate in def : GCNPat < (dot_op (dot_inst.Pfl.Src0VT (VOP3PMods0 dot_inst.Pfl.Src0VT:$src0, i32:$src0_modifiers)), (dot_inst.Pfl.Src1VT (VOP3PMods dot_inst.Pfl.Src1VT:$src1, i32:$src1_modifiers)), (dot_inst.Pfl.Src2VT (VOP3PMods dot_inst.Pfl.Src2VT:$src2, i32:$src2_modifiers)), i1:$clamp), (dot_inst $src0_modifiers, $src0, $src1_modifiers, $src1, $src2_modifiers, $src2, (as_i1imm $clamp))>; } defm : DotPats; defm : DotPats; defm : DotPats; defm : DotPats; defm : DotPats; defm : DotPats; defm : DotPats; def : UDot2Pat; def : SDot2Pat; foreach Type = ["U", "I"] in let SubtargetPredicate = !cast("V_DOT4_"#Type#"32_"#Type#8).SubtargetPredicate in def : GCNPat < !cast(!foldl((i32 i32:$src2), [0, 1, 2, 3], lhs, y, (add_oneuse lhs, (!cast("Mul"#Type#"_Elt"#y) i32:$src0, i32:$src1)))), (!cast("V_DOT4_"#Type#"32_"#Type#8) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>; foreach Type = ["U", "I"] in let SubtargetPredicate = !cast("V_DOT8_"#Type#"32_"#Type#4).SubtargetPredicate in def : GCNPat < !cast(!foldl((add_oneuse i32:$src2, (!cast("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)), [1, 2, 3, 4, 5, 6, 7], lhs, y, (NonACAdd_oneuse lhs, (!cast("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))), (!cast("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>; // Different variants of dot8 code-gen dag patterns are not generated through table-gen due to a huge increase // in the compile time. Directly handle the pattern generated by the FE here. 
foreach Type = ["U", "I"] in let SubtargetPredicate = !cast("V_DOT8_"#Type#"32_"#Type#4).SubtargetPredicate in def : GCNPat < !cast(!foldl((add_oneuse i32:$src2, (!cast("Mul"#Type#"0_4bit") i32:$src0, i32:$src1)), [7, 1, 2, 3, 4, 5, 6], lhs, y, (NonACAdd_oneuse lhs, (!cast("Mul"#Type#y#"_4bit") i32:$src0, i32:$src1)))), (!cast("V_DOT8_"#Type#"32_"#Type#4) (i32 8), $src0, (i32 8), $src1, (i32 8), $src2, (i1 0))>; def ADst_32 : VOPDstOperand; def ADst_128 : VOPDstOperand; def ADst_512 : VOPDstOperand; def ADst_1024 : VOPDstOperand; def VOPProfileAccRead : VOP3_Profile { let Src0RC64 = ARegSrc_32; } def VOPProfileAccWrite : VOP3_Profile { let DstRC = ADst_32; let Src0RC64 = VISrc_b32; } class VOPProfileMAI : VOP3_Profile { let DstRC = _DstRC; let Src0RC64 = SrcABRC; let Src1RC64 = SrcABRC; let Src2RC64 = _SrcRC; let HasOpSel = 0; let HasClamp = 0; let HasModifiers = 0; let Asm64 = " $vdst, $src0, $src1, $src2$cbsz$abid$blgp"; let Ins64 = (ins Src0RC64:$src0, Src1RC64:$src1, Src2RC64:$src2, cbsz:$cbsz, abid:$abid, blgp:$blgp); } def VOPProfileMAI_F32_F32_X4 : VOPProfileMAI; def VOPProfileMAI_F32_F32_X16 : VOPProfileMAI; def VOPProfileMAI_F32_F32_X32 : VOPProfileMAI; def VOPProfileMAI_I32_I32_X4 : VOPProfileMAI; def VOPProfileMAI_I32_I32_X16 : VOPProfileMAI; def VOPProfileMAI_I32_I32_X32 : VOPProfileMAI; def VOPProfileMAI_F32_V2I16_X4 : VOPProfileMAI; def VOPProfileMAI_F32_V2I16_X16 : VOPProfileMAI; def VOPProfileMAI_F32_V2I16_X32 : VOPProfileMAI; def VOPProfileMAI_F32_V4F16_X4 : VOPProfileMAI; def VOPProfileMAI_F32_V4F16_X16 : VOPProfileMAI; def VOPProfileMAI_F32_V4F16_X32 : VOPProfileMAI; let Predicates = [HasMAIInsts] in { def V_ACCVGPR_READ_B32 : VOP3Inst<"v_accvgpr_read_b32", VOPProfileAccRead>; def V_ACCVGPR_WRITE_B32 : VOP3Inst<"v_accvgpr_write_b32", VOPProfileAccWrite> { let isMoveImm = 1; } let isConvergent = 1 in { def V_MFMA_F32_4X4X1F32 : VOP3Inst<"v_mfma_f32_4x4x1f32", VOPProfileMAI_F32_F32_X4, int_amdgcn_mfma_f32_4x4x1f32>; def V_MFMA_F32_4X4X4F16 : VOP3Inst<"v_mfma_f32_4x4x4f16", VOPProfileMAI_F32_V4F16_X4, int_amdgcn_mfma_f32_4x4x4f16>; def V_MFMA_I32_4X4X4I8 : VOP3Inst<"v_mfma_i32_4x4x4i8", VOPProfileMAI_I32_I32_X4, int_amdgcn_mfma_i32_4x4x4i8>; def V_MFMA_F32_4X4X2BF16 : VOP3Inst<"v_mfma_f32_4x4x2bf16", VOPProfileMAI_F32_V2I16_X4, int_amdgcn_mfma_f32_4x4x2bf16>; def V_MFMA_F32_16X16X1F32 : VOP3Inst<"v_mfma_f32_16x16x1f32", VOPProfileMAI_F32_F32_X16, int_amdgcn_mfma_f32_16x16x1f32>; def V_MFMA_F32_16X16X4F32 : VOP3Inst<"v_mfma_f32_16x16x4f32", VOPProfileMAI_F32_F32_X4, int_amdgcn_mfma_f32_16x16x4f32>; def V_MFMA_F32_16X16X4F16 : VOP3Inst<"v_mfma_f32_16x16x4f16", VOPProfileMAI_F32_V4F16_X16, int_amdgcn_mfma_f32_16x16x4f16>; def V_MFMA_F32_16X16X16F16 : VOP3Inst<"v_mfma_f32_16x16x16f16", VOPProfileMAI_F32_V4F16_X4, int_amdgcn_mfma_f32_16x16x16f16>; def V_MFMA_I32_16X16X4I8 : VOP3Inst<"v_mfma_i32_16x16x4i8", VOPProfileMAI_I32_I32_X16, int_amdgcn_mfma_i32_16x16x4i8>; def V_MFMA_I32_16X16X16I8 : VOP3Inst<"v_mfma_i32_16x16x16i8", VOPProfileMAI_I32_I32_X4, int_amdgcn_mfma_i32_16x16x16i8>; def V_MFMA_F32_16X16X2BF16 : VOP3Inst<"v_mfma_f32_16x16x2bf16", VOPProfileMAI_F32_V2I16_X16, int_amdgcn_mfma_f32_16x16x2bf16>; def V_MFMA_F32_16X16X8BF16 : VOP3Inst<"v_mfma_f32_16x16x8bf16", VOPProfileMAI_F32_V2I16_X4, int_amdgcn_mfma_f32_16x16x8bf16>; def V_MFMA_F32_32X32X1F32 : VOP3Inst<"v_mfma_f32_32x32x1f32", VOPProfileMAI_F32_F32_X32, int_amdgcn_mfma_f32_32x32x1f32>; def V_MFMA_F32_32X32X2F32 : VOP3Inst<"v_mfma_f32_32x32x2f32", VOPProfileMAI_F32_F32_X16, int_amdgcn_mfma_f32_32x32x2f32>; def 

let Predicates = [HasMAIInsts] in {

def V_ACCVGPR_READ_B32  : VOP3Inst<"v_accvgpr_read_b32",  VOPProfileAccRead>;
def V_ACCVGPR_WRITE_B32 : VOP3Inst<"v_accvgpr_write_b32", VOPProfileAccWrite> {
  let isMoveImm = 1;
}

let isConvergent = 1 in {
def V_MFMA_F32_4X4X1F32    : VOP3Inst<"v_mfma_f32_4x4x1f32",    VOPProfileMAI_F32_F32_X4,    int_amdgcn_mfma_f32_4x4x1f32>;
def V_MFMA_F32_4X4X4F16    : VOP3Inst<"v_mfma_f32_4x4x4f16",    VOPProfileMAI_F32_V4F16_X4,  int_amdgcn_mfma_f32_4x4x4f16>;
def V_MFMA_I32_4X4X4I8     : VOP3Inst<"v_mfma_i32_4x4x4i8",     VOPProfileMAI_I32_I32_X4,    int_amdgcn_mfma_i32_4x4x4i8>;
def V_MFMA_F32_4X4X2BF16   : VOP3Inst<"v_mfma_f32_4x4x2bf16",   VOPProfileMAI_F32_V2I16_X4,  int_amdgcn_mfma_f32_4x4x2bf16>;
def V_MFMA_F32_16X16X1F32  : VOP3Inst<"v_mfma_f32_16x16x1f32",  VOPProfileMAI_F32_F32_X16,   int_amdgcn_mfma_f32_16x16x1f32>;
def V_MFMA_F32_16X16X4F32  : VOP3Inst<"v_mfma_f32_16x16x4f32",  VOPProfileMAI_F32_F32_X4,    int_amdgcn_mfma_f32_16x16x4f32>;
def V_MFMA_F32_16X16X4F16  : VOP3Inst<"v_mfma_f32_16x16x4f16",  VOPProfileMAI_F32_V4F16_X16, int_amdgcn_mfma_f32_16x16x4f16>;
def V_MFMA_F32_16X16X16F16 : VOP3Inst<"v_mfma_f32_16x16x16f16", VOPProfileMAI_F32_V4F16_X4,  int_amdgcn_mfma_f32_16x16x16f16>;
def V_MFMA_I32_16X16X4I8   : VOP3Inst<"v_mfma_i32_16x16x4i8",   VOPProfileMAI_I32_I32_X16,   int_amdgcn_mfma_i32_16x16x4i8>;
def V_MFMA_I32_16X16X16I8  : VOP3Inst<"v_mfma_i32_16x16x16i8",  VOPProfileMAI_I32_I32_X4,    int_amdgcn_mfma_i32_16x16x16i8>;
def V_MFMA_F32_16X16X2BF16 : VOP3Inst<"v_mfma_f32_16x16x2bf16", VOPProfileMAI_F32_V2I16_X16, int_amdgcn_mfma_f32_16x16x2bf16>;
def V_MFMA_F32_16X16X8BF16 : VOP3Inst<"v_mfma_f32_16x16x8bf16", VOPProfileMAI_F32_V2I16_X4,  int_amdgcn_mfma_f32_16x16x8bf16>;
def V_MFMA_F32_32X32X1F32  : VOP3Inst<"v_mfma_f32_32x32x1f32",  VOPProfileMAI_F32_F32_X32,   int_amdgcn_mfma_f32_32x32x1f32>;
def V_MFMA_F32_32X32X2F32  : VOP3Inst<"v_mfma_f32_32x32x2f32",  VOPProfileMAI_F32_F32_X16,   int_amdgcn_mfma_f32_32x32x2f32>;
def V_MFMA_F32_32X32X4F16  : VOP3Inst<"v_mfma_f32_32x32x4f16",  VOPProfileMAI_F32_V4F16_X32, int_amdgcn_mfma_f32_32x32x4f16>;
def V_MFMA_F32_32X32X8F16  : VOP3Inst<"v_mfma_f32_32x32x8f16",  VOPProfileMAI_F32_V4F16_X16, int_amdgcn_mfma_f32_32x32x8f16>;
def V_MFMA_I32_32X32X4I8   : VOP3Inst<"v_mfma_i32_32x32x4i8",   VOPProfileMAI_I32_I32_X32,   int_amdgcn_mfma_i32_32x32x4i8>;
def V_MFMA_I32_32X32X8I8   : VOP3Inst<"v_mfma_i32_32x32x8i8",   VOPProfileMAI_I32_I32_X16,   int_amdgcn_mfma_i32_32x32x8i8>;
def V_MFMA_F32_32X32X2BF16 : VOP3Inst<"v_mfma_f32_32x32x2bf16", VOPProfileMAI_F32_V2I16_X32, int_amdgcn_mfma_f32_32x32x2bf16>;
def V_MFMA_F32_32X32X4BF16 : VOP3Inst<"v_mfma_f32_32x32x4bf16", VOPProfileMAI_F32_V2I16_X16, int_amdgcn_mfma_f32_32x32x4bf16>;
} // End isConvergent = 1

} // End Predicates = [HasMAIInsts]

def : MnemonicAlias<"v_accvgpr_read",  "v_accvgpr_read_b32">;
def : MnemonicAlias<"v_accvgpr_write", "v_accvgpr_write_b32">;

multiclass VOP3P_Real_vi<bits<10> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Pe <op, !cast<VOP3_Pseudo>(NAME).Pfl> {
    let AssemblerPredicates = [HasVOP3PInsts];
    let DecoderNamespace = "GFX8";
  }
}

multiclass VOP3P_Real_MAI<bits<10> op> {
  def _vi : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.VI>,
            VOP3Pe_MAI <op, !cast<VOP3_Pseudo>(NAME).Pfl> {
    let AssemblerPredicates = [HasMAIInsts];
    let DecoderNamespace = "GFX8";
  }
}

defm V_PK_MAD_I16 : VOP3P_Real_vi <0x380>;
defm V_PK_MUL_LO_U16 : VOP3P_Real_vi <0x381>;
defm V_PK_ADD_I16 : VOP3P_Real_vi <0x382>;
defm V_PK_SUB_I16 : VOP3P_Real_vi <0x383>;
defm V_PK_LSHLREV_B16 : VOP3P_Real_vi <0x384>;
defm V_PK_LSHRREV_B16 : VOP3P_Real_vi <0x385>;
defm V_PK_ASHRREV_I16 : VOP3P_Real_vi <0x386>;
defm V_PK_MAX_I16 : VOP3P_Real_vi <0x387>;
defm V_PK_MIN_I16 : VOP3P_Real_vi <0x388>;
defm V_PK_MAD_U16 : VOP3P_Real_vi <0x389>;
defm V_PK_ADD_U16 : VOP3P_Real_vi <0x38a>;
defm V_PK_SUB_U16 : VOP3P_Real_vi <0x38b>;
defm V_PK_MAX_U16 : VOP3P_Real_vi <0x38c>;
defm V_PK_MIN_U16 : VOP3P_Real_vi <0x38d>;
defm V_PK_FMA_F16 : VOP3P_Real_vi <0x38e>;
defm V_PK_ADD_F16 : VOP3P_Real_vi <0x38f>;
defm V_PK_MUL_F16 : VOP3P_Real_vi <0x390>;
defm V_PK_MIN_F16 : VOP3P_Real_vi <0x391>;
defm V_PK_MAX_F16 : VOP3P_Real_vi <0x392>;

let SubtargetPredicate = HasMadMixInsts in {
defm V_MAD_MIX_F32 : VOP3P_Real_vi <0x3a0>;
defm V_MAD_MIXLO_F16 : VOP3P_Real_vi <0x3a1>;
defm V_MAD_MIXHI_F16 : VOP3P_Real_vi <0x3a2>;
} // End SubtargetPredicate = HasMadMixInsts
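
// Note (derived from the opcode assignments in this file): opcodes
// 0x3a0-0x3a2 decode as v_mad_mix_* on subtargets with MadMixInsts but as
// v_fma_mix_* on subtargets with FmaMixInsts, hence the separate "GFX9_DL"
// decoder namespace in the block below.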

let SubtargetPredicate = HasFmaMixInsts in {
let DecoderNamespace = "GFX9_DL" in {
// The mad_mix instructions were renamed and their behaviors changed,
// but the opcode stayed the same so we need to put these in a
// different DecoderNamespace to avoid the ambiguity.
defm V_FMA_MIX_F32 : VOP3P_Real_vi <0x3a0>;
defm V_FMA_MIXLO_F16 : VOP3P_Real_vi <0x3a1>;
defm V_FMA_MIXHI_F16 : VOP3P_Real_vi <0x3a2>;
}
} // End SubtargetPredicate = HasFmaMixInsts

let SubtargetPredicate = HasDot2Insts in {

defm V_DOT2_F32_F16 : VOP3P_Real_vi <0x3a3>;
defm V_DOT2_I32_I16 : VOP3P_Real_vi <0x3a6>;
defm V_DOT2_U32_U16 : VOP3P_Real_vi <0x3a7>;
defm V_DOT4_U32_U8  : VOP3P_Real_vi <0x3a9>;
defm V_DOT8_U32_U4  : VOP3P_Real_vi <0x3ab>;

} // End SubtargetPredicate = HasDot2Insts

let SubtargetPredicate = HasDot1Insts in {

defm V_DOT4_I32_I8  : VOP3P_Real_vi <0x3a8>;
defm V_DOT8_I32_I4  : VOP3P_Real_vi <0x3aa>;

} // End SubtargetPredicate = HasDot1Insts

let SubtargetPredicate = HasMAIInsts in {

defm V_ACCVGPR_READ_B32  : VOP3P_Real_MAI <0x3d8>;
defm V_ACCVGPR_WRITE_B32 : VOP3P_Real_MAI <0x3d9>;
defm V_MFMA_F32_32X32X1F32  : VOP3P_Real_MAI <0x3c0>;
defm V_MFMA_F32_16X16X1F32  : VOP3P_Real_MAI <0x3c1>;
defm V_MFMA_F32_4X4X1F32    : VOP3P_Real_MAI <0x3c2>;
defm V_MFMA_F32_32X32X2F32  : VOP3P_Real_MAI <0x3c4>;
defm V_MFMA_F32_16X16X4F32  : VOP3P_Real_MAI <0x3c5>;
defm V_MFMA_F32_32X32X4F16  : VOP3P_Real_MAI <0x3c8>;
defm V_MFMA_F32_16X16X4F16  : VOP3P_Real_MAI <0x3c9>;
defm V_MFMA_F32_4X4X4F16    : VOP3P_Real_MAI <0x3ca>;
defm V_MFMA_F32_32X32X8F16  : VOP3P_Real_MAI <0x3cc>;
defm V_MFMA_F32_16X16X16F16 : VOP3P_Real_MAI <0x3cd>;
defm V_MFMA_I32_32X32X4I8   : VOP3P_Real_MAI <0x3d0>;
defm V_MFMA_I32_16X16X4I8   : VOP3P_Real_MAI <0x3d1>;
defm V_MFMA_I32_4X4X4I8     : VOP3P_Real_MAI <0x3d2>;
defm V_MFMA_I32_32X32X8I8   : VOP3P_Real_MAI <0x3d4>;
defm V_MFMA_I32_16X16X16I8  : VOP3P_Real_MAI <0x3d5>;
defm V_MFMA_F32_32X32X2BF16 : VOP3P_Real_MAI <0x3e8>;
defm V_MFMA_F32_16X16X2BF16 : VOP3P_Real_MAI <0x3e9>;
defm V_MFMA_F32_4X4X2BF16   : VOP3P_Real_MAI <0x3eb>;
defm V_MFMA_F32_32X32X4BF16 : VOP3P_Real_MAI <0x3ec>;
defm V_MFMA_F32_16X16X8BF16 : VOP3P_Real_MAI <0x3ed>;

} // End SubtargetPredicate = HasMAIInsts

//===----------------------------------------------------------------------===//
// GFX10.
//===----------------------------------------------------------------------===//

let AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" in {
multiclass VOP3P_Real_gfx10<bits<10> op> {
  def _gfx10 : VOP3P_Real<!cast<VOP3_Pseudo>(NAME), SIEncodingFamily.GFX10>,
               VOP3Pe_gfx10 <op, !cast<VOP3_Pseudo>(NAME).Pfl>;
}
} // End AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10"

defm V_PK_MAD_I16 : VOP3P_Real_gfx10<0x000>;
defm V_PK_MUL_LO_U16 : VOP3P_Real_gfx10<0x001>;
defm V_PK_ADD_I16 : VOP3P_Real_gfx10<0x002>;
defm V_PK_SUB_I16 : VOP3P_Real_gfx10<0x003>;
defm V_PK_LSHLREV_B16 : VOP3P_Real_gfx10<0x004>;
defm V_PK_LSHRREV_B16 : VOP3P_Real_gfx10<0x005>;
defm V_PK_ASHRREV_I16 : VOP3P_Real_gfx10<0x006>;
defm V_PK_MAX_I16 : VOP3P_Real_gfx10<0x007>;
defm V_PK_MIN_I16 : VOP3P_Real_gfx10<0x008>;
defm V_PK_MAD_U16 : VOP3P_Real_gfx10<0x009>;
defm V_PK_ADD_U16 : VOP3P_Real_gfx10<0x00a>;
defm V_PK_SUB_U16 : VOP3P_Real_gfx10<0x00b>;
defm V_PK_MAX_U16 : VOP3P_Real_gfx10<0x00c>;
defm V_PK_MIN_U16 : VOP3P_Real_gfx10<0x00d>;
defm V_PK_FMA_F16 : VOP3P_Real_gfx10<0x00e>;
defm V_PK_ADD_F16 : VOP3P_Real_gfx10<0x00f>;
defm V_PK_MUL_F16 : VOP3P_Real_gfx10<0x010>;
defm V_PK_MIN_F16 : VOP3P_Real_gfx10<0x011>;
defm V_PK_MAX_F16 : VOP3P_Real_gfx10<0x012>;
defm V_FMA_MIX_F32 : VOP3P_Real_gfx10<0x020>;
defm V_FMA_MIXLO_F16 : VOP3P_Real_gfx10<0x021>;
defm V_FMA_MIXHI_F16 : VOP3P_Real_gfx10<0x022>;

let SubtargetPredicate = HasDot2Insts in {

defm V_DOT2_F32_F16 : VOP3P_Real_gfx10 <0x013>;
defm V_DOT2_I32_I16 : VOP3P_Real_gfx10 <0x014>;
defm V_DOT2_U32_U16 : VOP3P_Real_gfx10 <0x015>;
defm V_DOT4_U32_U8  : VOP3P_Real_gfx10 <0x017>;
defm V_DOT8_U32_U4  : VOP3P_Real_gfx10 <0x019>;

} // End SubtargetPredicate = HasDot2Insts

let SubtargetPredicate = HasDot1Insts in {

defm V_DOT4_I32_I8  : VOP3P_Real_gfx10 <0x016>;
defm V_DOT8_I32_I4  : VOP3P_Real_gfx10 <0x018>;

} // End SubtargetPredicate = HasDot1Insts