//===-- AMDGPUGIsel.td - AMDGPU GlobalISel Patterns---------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains patterns that should only be used by GlobalISel. For
// example patterns for V_* instructions that have S_* equivalents.
// SelectionDAG does not support selecting V_* instructions.
//===----------------------------------------------------------------------===//
14 include "AMDGPUCombine.td"
16 def sd_vsrc0 : ComplexPattern<i32, 1, "">;
18 GIComplexOperandMatcher<s32, "selectVSRC0">,
19 GIComplexPatternEquiv<sd_vsrc0>;
21 def sd_vcsrc : ComplexPattern<i32, 1, "">;
23 GIComplexOperandMatcher<s32, "selectVCSRC">,
24 GIComplexPatternEquiv<sd_vcsrc>;
27 GIComplexOperandMatcher<s32, "selectVOP3Mods0">,
28 GIComplexPatternEquiv<VOP3Mods0>;
31 GIComplexOperandMatcher<s32, "selectVOP3Mods">,
32 GIComplexPatternEquiv<VOP3Mods>;
35 GIComplexOperandMatcher<s32, "selectVOP3NoMods">,
36 GIComplexPatternEquiv<VOP3NoMods>;
38 def gi_vop3mods_nnan :
39 GIComplexOperandMatcher<s32, "selectVOP3Mods_nnan">,
40 GIComplexPatternEquiv<VOP3Mods_nnan>;
43 GIComplexOperandMatcher<s32, "selectVOP3OMods">,
44 GIComplexPatternEquiv<VOP3OMods>;
47 GIComplexOperandMatcher<s32, "selectVOP3PMods">,
48 GIComplexPatternEquiv<VOP3PMods>;
50 def gi_vop3opselmods :
51 GIComplexOperandMatcher<s32, "selectVOP3OpSelMods">,
52 GIComplexPatternEquiv<VOP3OpSelMods>;
55 GIComplexOperandMatcher<s64, "selectSmrdImm">,
56 GIComplexPatternEquiv<SMRDImm>;
59 GIComplexOperandMatcher<s64, "selectSmrdImm32">,
60 GIComplexPatternEquiv<SMRDImm32>;
63 GIComplexOperandMatcher<s64, "selectSmrdSgpr">,
64 GIComplexPatternEquiv<SMRDSgpr>;
66 // FIXME: Why are the atomic versions separated?
68 GIComplexOperandMatcher<s64, "selectFlatOffset">,
69 GIComplexPatternEquiv<FLATOffset>;
70 def gi_flat_offset_signed :
71 GIComplexOperandMatcher<s64, "selectFlatOffsetSigned">,
72 GIComplexPatternEquiv<FLATOffsetSigned>;
74 GIComplexOperandMatcher<s64, "selectFlatOffset">,
75 GIComplexPatternEquiv<FLATAtomic>;
76 def gi_flat_signed_atomic :
77 GIComplexOperandMatcher<s64, "selectFlatOffsetSigned">,
78 GIComplexPatternEquiv<FLATSignedAtomic>;
80 def gi_mubuf_scratch_offset :
81 GIComplexOperandMatcher<s32, "selectMUBUFScratchOffset">,
82 GIComplexPatternEquiv<MUBUFScratchOffset>;
83 def gi_mubuf_scratch_offen :
84 GIComplexOperandMatcher<s32, "selectMUBUFScratchOffen">,
85 GIComplexPatternEquiv<MUBUFScratchOffen>;
87 def gi_ds_1addr_1offset :
88 GIComplexOperandMatcher<s32, "selectDS1Addr1Offset">,
89 GIComplexPatternEquiv<DS1Addr1Offset>;
91 def gi_ds_64bit_4byte_aligned :
92 GIComplexOperandMatcher<s64, "selectDS64Bit4ByteAligned">,
93 GIComplexPatternEquiv<DS64Bit4ByteAligned>;
96 GIComplexOperandMatcher<s64, "selectMUBUFAddr64">,
97 GIComplexPatternEquiv<MUBUFAddr64>;
100 GIComplexOperandMatcher<s64, "selectMUBUFOffset">,
101 GIComplexPatternEquiv<MUBUFOffset>;
103 def gi_mubuf_addr64_atomic :
104 GIComplexOperandMatcher<s64, "selectMUBUFAddr64Atomic">,
105 GIComplexPatternEquiv<MUBUFAddr64Atomic>;
107 def gi_mubuf_offset_atomic :
108 GIComplexOperandMatcher<s64, "selectMUBUFOffsetAtomic">,
109 GIComplexPatternEquiv<MUBUFOffsetAtomic>;
111 def gi_smrd_buffer_imm :
112 GIComplexOperandMatcher<s64, "selectSMRDBufferImm">,
113 GIComplexPatternEquiv<SMRDBufferImm>;
115 def gi_smrd_buffer_imm32 :
116 GIComplexOperandMatcher<s64, "selectSMRDBufferImm32">,
117 GIComplexPatternEquiv<SMRDBufferImm32>;
// Separate load nodes are defined to glue m0 initialization in
// SelectionDAG. The GISel selector can just insert m0 initialization
// directly before selecting a glue-less load, so hide this
// distinction.
124 def : GINodeEquiv<G_LOAD, AMDGPUld_glue> {
125 let CheckMMOIsNonAtomic = 1;
128 def : GINodeEquiv<G_STORE, AMDGPUst_glue> {
129 let CheckMMOIsNonAtomic = 1;
132 def : GINodeEquiv<G_LOAD, AMDGPUatomic_ld_glue> {
133 bit CheckMMOIsAtomic = 1;
138 def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap_glue>;
139 def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap_glue>;
140 def : GINodeEquiv<G_ATOMICRMW_ADD, atomic_load_add_glue>;
141 def : GINodeEquiv<G_ATOMICRMW_SUB, atomic_load_sub_glue>;
142 def : GINodeEquiv<G_ATOMICRMW_AND, atomic_load_and_glue>;
143 def : GINodeEquiv<G_ATOMICRMW_OR, atomic_load_or_glue>;
144 def : GINodeEquiv<G_ATOMICRMW_XOR, atomic_load_xor_glue>;
145 def : GINodeEquiv<G_ATOMICRMW_MIN, atomic_load_min_glue>;
146 def : GINodeEquiv<G_ATOMICRMW_MAX, atomic_load_max_glue>;
147 def : GINodeEquiv<G_ATOMICRMW_UMIN, atomic_load_umin_glue>;
148 def : GINodeEquiv<G_ATOMICRMW_UMAX, atomic_load_umax_glue>;
149 def : GINodeEquiv<G_ATOMICRMW_FADD, atomic_load_fadd_glue>;
151 def : GINodeEquiv<G_AMDGPU_FFBH_U32, AMDGPUffbh_u32_impl>;
152 def : GINodeEquiv<G_AMDGPU_FMIN_LEGACY, AMDGPUfmin_legacy>;
153 def : GINodeEquiv<G_AMDGPU_FMAX_LEGACY, AMDGPUfmax_legacy>;
154 def : GINodeEquiv<G_AMDGPU_RCP_IFLAG, AMDGPUrcp_iflag>;
156 def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE0, AMDGPUcvt_f32_ubyte0>;
157 def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE1, AMDGPUcvt_f32_ubyte1>;
158 def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE2, AMDGPUcvt_f32_ubyte2>;
159 def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE3, AMDGPUcvt_f32_ubyte3>;
161 def : GINodeEquiv<G_AMDGPU_ATOMIC_CMPXCHG, AMDGPUatomic_cmp_swap>;
162 def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD, SIbuffer_load>;
163 def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_USHORT, SIbuffer_load_ushort>;
164 def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_UBYTE, SIbuffer_load_ubyte>;
165 def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_SSHORT, SIbuffer_load_short>;
166 def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_SBYTE, SIbuffer_load_byte>;
167 def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_FORMAT, SIbuffer_load_format>;
168 def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_FORMAT_D16, SIbuffer_load_format_d16>;
169 def : GINodeEquiv<G_AMDGPU_TBUFFER_LOAD_FORMAT, SItbuffer_load>;
170 def : GINodeEquiv<G_AMDGPU_TBUFFER_LOAD_FORMAT_D16, SItbuffer_load_d16>;
171 def : GINodeEquiv<G_AMDGPU_BUFFER_STORE, SIbuffer_store>;
172 def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_SHORT, SIbuffer_store_short>;
173 def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_BYTE, SIbuffer_store_byte>;
174 def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_FORMAT, SIbuffer_store_format>;
175 def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_FORMAT_D16, SIbuffer_store_format_d16>;
176 def : GINodeEquiv<G_AMDGPU_TBUFFER_STORE_FORMAT, SItbuffer_store>;
177 def : GINodeEquiv<G_AMDGPU_TBUFFER_STORE_FORMAT_D16, SItbuffer_store_d16>;
179 // FIXME: Check MMO is atomic
180 def : GINodeEquiv<G_AMDGPU_ATOMIC_INC, SIatomic_inc>;
181 def : GINodeEquiv<G_AMDGPU_ATOMIC_DEC, SIatomic_dec>;
182 def : GINodeEquiv<G_AMDGPU_ATOMIC_INC, atomic_inc_glue>;
183 def : GINodeEquiv<G_AMDGPU_ATOMIC_DEC, atomic_dec_glue>;
185 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SWAP, SIbuffer_atomic_swap>;
186 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_ADD, SIbuffer_atomic_add>;
187 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SUB, SIbuffer_atomic_sub>;
188 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SMIN, SIbuffer_atomic_smin>;
189 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_UMIN, SIbuffer_atomic_umin>;
190 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SMAX, SIbuffer_atomic_smax>;
191 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_UMAX, SIbuffer_atomic_umax>;
192 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_AND, SIbuffer_atomic_and>;
193 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_OR, SIbuffer_atomic_or>;
194 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_XOR, SIbuffer_atomic_xor>;
195 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_INC, SIbuffer_atomic_inc>;
196 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_DEC, SIbuffer_atomic_dec>;
197 def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_CMPSWAP, SIbuffer_atomic_cmpswap>;
198 def : GINodeEquiv<G_AMDGPU_S_BUFFER_LOAD, SIsbuffer_load>;
201 SDPatternOperator node,
204 ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <
206 (dst_vt (node (src0_vt SReg_32:$src0), (src1_vt SReg_32:$src1))),
207 (inst src0_vt:$src0, src1_vt:$src1)
211 SDPatternOperator node,
214 ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <
216 (dst_vt (node (src0_vt (sd_vsrc0 src0_vt:$src0)), (src1_vt VGPR_32:$src1))),
217 (inst src0_vt:$src0, src1_vt:$src1)
220 class GISelVop2CommutePat <
221 SDPatternOperator node,
224 ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <
226 (dst_vt (node (src1_vt VGPR_32:$src1), (src0_vt (sd_vsrc0 src0_vt:$src0)))),
227 (inst src0_vt:$src0, src1_vt:$src1)
230 class GISelVop3Pat2 <
231 SDPatternOperator node,
234 ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <
236 (dst_vt (node (src0_vt (sd_vcsrc src0_vt:$src0)), (src1_vt (sd_vcsrc src1_vt:$src1)))),
237 (inst src0_vt:$src0, src1_vt:$src1)
240 class GISelVop3Pat2CommutePat <
241 SDPatternOperator node,
244 ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <
246 (dst_vt (node (src0_vt (sd_vcsrc src0_vt:$src0)), (src1_vt (sd_vcsrc src1_vt:$src1)))),
247 (inst src0_vt:$src1, src1_vt:$src0)
250 class GISelVop3Pat2ModsPat <
251 SDPatternOperator node,
254 ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <
256 (dst_vt (node (src0_vt (VOP3Mods0 src0_vt:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omods)),
257 (src1_vt (VOP3Mods src1_vt:$src1, i32:$src1_modifiers)))),
258 (inst i32:$src0_modifiers, src0_vt:$src0,
259 i32:$src1_modifiers, src1_vt:$src1, $clamp, $omods)
262 multiclass GISelVop2IntrPat <
263 SDPatternOperator node, Instruction inst,
264 ValueType dst_vt, ValueType src_vt = dst_vt> {
266 def : GISelVop2Pat <node, inst, dst_vt, src_vt>;
268 // FIXME: Intrinsics aren't marked as commutable, so we need to add an explicit
269 // pattern to handle commuting. This is another reason why legalizing to a
270 // generic machine instruction may be better that matching the intrinsic
272 def : GISelVop2CommutePat <node, inst, dst_vt, src_vt>;
// Since GlobalISel is more flexible than SelectionDAG, I think we can get
// away with adding patterns for integer types and not legalizing all
// loads and stores to vector types. This should help simplify the load/store
// legalization.
279 foreach Ty = [i64, p0, p1, p4] in {
280 defm : SMRD_Pattern <"S_LOAD_DWORDX2", Ty>;
283 def gi_as_i32timm : GICustomOperandRenderer<"renderTruncTImm32">,
284 GISDNodeXFormEquiv<as_i32timm>;
286 def gi_as_i16timm : GICustomOperandRenderer<"renderTruncTImm16">,
287 GISDNodeXFormEquiv<as_i16timm>;
289 def gi_as_i8timm : GICustomOperandRenderer<"renderTruncTImm8">,
290 GISDNodeXFormEquiv<as_i8timm>;
292 def gi_as_i1timm : GICustomOperandRenderer<"renderTruncTImm1">,
293 GISDNodeXFormEquiv<as_i1timm>;
295 def gi_NegateImm : GICustomOperandRenderer<"renderNegateImm">,
296 GISDNodeXFormEquiv<NegateImm>;
298 def gi_bitcast_fpimm_to_i32 : GICustomOperandRenderer<"renderBitcastImm">,
299 GISDNodeXFormEquiv<bitcast_fpimm_to_i32>;
301 def gi_IMMPopCount : GICustomOperandRenderer<"renderPopcntImm">,
302 GISDNodeXFormEquiv<IMMPopCount>;
304 def gi_extract_glc : GICustomOperandRenderer<"renderExtractGLC">,
305 GISDNodeXFormEquiv<extract_glc>;
307 def gi_extract_slc : GICustomOperandRenderer<"renderExtractSLC">,
308 GISDNodeXFormEquiv<extract_slc>;
310 def gi_extract_dlc : GICustomOperandRenderer<"renderExtractDLC">,
311 GISDNodeXFormEquiv<extract_dlc>;
313 def gi_extract_swz : GICustomOperandRenderer<"renderExtractSWZ">,
314 GISDNodeXFormEquiv<extract_swz>;