//===-- AMDGPUGISel.td - AMDGPU GlobalISel Patterns --------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains patterns that should only be used by GlobalISel. For
// example, patterns for V_* instructions that have S_* equivalents.
// SelectionDAG does not support selecting V_* instructions.
//===----------------------------------------------------------------------===//
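//
// A purely illustrative sketch of such a GlobalISel-only pattern (simplified
// operand lists; this def does not appear in this file):
//
//   def : GCNPat <
//     (i32 (add i32:$src0, i32:$src1)),
//     (V_ADD_U32_e32 $src0, $src1)
//   >;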

include "AMDGPU.td"
include "AMDGPUCombine.td"

def sd_vsrc0 : ComplexPattern<i32, 1, "">;
def gi_vsrc0 :
    GIComplexOperandMatcher<s32, "selectVSRC0">,
    GIComplexPatternEquiv<sd_vsrc0>;

def sd_vcsrc : ComplexPattern<i32, 1, "">;
def gi_vcsrc :
    GIComplexOperandMatcher<s32, "selectVCSRC">,
    GIComplexPatternEquiv<sd_vcsrc>;

def gi_vop3mods0 :
    GIComplexOperandMatcher<s32, "selectVOP3Mods0">,
    GIComplexPatternEquiv<VOP3Mods0>;

def gi_vop3mods :
    GIComplexOperandMatcher<s32, "selectVOP3Mods">,
    GIComplexPatternEquiv<VOP3Mods>;

def gi_vop3_no_mods :
    GIComplexOperandMatcher<s32, "selectVOP3NoMods">,
    GIComplexPatternEquiv<VOP3NoMods>;

def gi_vop3mods_nnan :
    GIComplexOperandMatcher<s32, "selectVOP3Mods_nnan">,
    GIComplexPatternEquiv<VOP3Mods_nnan>;

def gi_vop3omods :
    GIComplexOperandMatcher<s32, "selectVOP3OMods">,
    GIComplexPatternEquiv<VOP3OMods>;

def gi_vop3pmods :
    GIComplexOperandMatcher<s32, "selectVOP3PMods">,
    GIComplexPatternEquiv<VOP3PMods>;

def gi_vop3opselmods :
    GIComplexOperandMatcher<s32, "selectVOP3OpSelMods">,
    GIComplexPatternEquiv<VOP3OpSelMods>;

def gi_smrd_imm :
    GIComplexOperandMatcher<s64, "selectSmrdImm">,
    GIComplexPatternEquiv<SMRDImm>;

def gi_smrd_imm32 :
    GIComplexOperandMatcher<s64, "selectSmrdImm32">,
    GIComplexPatternEquiv<SMRDImm32>;

def gi_smrd_sgpr :
    GIComplexOperandMatcher<s64, "selectSmrdSgpr">,
    GIComplexPatternEquiv<SMRDSgpr>;

// FIXME: Why are the atomic versions separated?
def gi_flat_offset :
    GIComplexOperandMatcher<s64, "selectFlatOffset">,
    GIComplexPatternEquiv<FLATOffset>;
def gi_flat_offset_signed :
    GIComplexOperandMatcher<s64, "selectFlatOffsetSigned">,
    GIComplexPatternEquiv<FLATOffsetSigned>;
def gi_flat_atomic :
    GIComplexOperandMatcher<s64, "selectFlatOffset">,
    GIComplexPatternEquiv<FLATAtomic>;
def gi_flat_signed_atomic :
    GIComplexOperandMatcher<s64, "selectFlatOffsetSigned">,
    GIComplexPatternEquiv<FLATSignedAtomic>;

def gi_mubuf_scratch_offset :
    GIComplexOperandMatcher<s32, "selectMUBUFScratchOffset">,
    GIComplexPatternEquiv<MUBUFScratchOffset>;
def gi_mubuf_scratch_offen :
    GIComplexOperandMatcher<s32, "selectMUBUFScratchOffen">,
    GIComplexPatternEquiv<MUBUFScratchOffen>;

def gi_ds_1addr_1offset :
    GIComplexOperandMatcher<s32, "selectDS1Addr1Offset">,
    GIComplexPatternEquiv<DS1Addr1Offset>;

def gi_ds_64bit_4byte_aligned :
    GIComplexOperandMatcher<s64, "selectDS64Bit4ByteAligned">,
    GIComplexPatternEquiv<DS64Bit4ByteAligned>;

def gi_mubuf_addr64 :
    GIComplexOperandMatcher<s64, "selectMUBUFAddr64">,
    GIComplexPatternEquiv<MUBUFAddr64>;

def gi_mubuf_offset :
    GIComplexOperandMatcher<s64, "selectMUBUFOffset">,
    GIComplexPatternEquiv<MUBUFOffset>;

def gi_mubuf_addr64_atomic :
    GIComplexOperandMatcher<s64, "selectMUBUFAddr64Atomic">,
    GIComplexPatternEquiv<MUBUFAddr64Atomic>;

def gi_mubuf_offset_atomic :
    GIComplexOperandMatcher<s64, "selectMUBUFOffsetAtomic">,
    GIComplexPatternEquiv<MUBUFOffsetAtomic>;

def gi_smrd_buffer_imm :
    GIComplexOperandMatcher<s64, "selectSMRDBufferImm">,
    GIComplexPatternEquiv<SMRDBufferImm>;

def gi_smrd_buffer_imm32 :
    GIComplexOperandMatcher<s64, "selectSMRDBufferImm32">,
    GIComplexPatternEquiv<SMRDBufferImm32>;

// Separate load nodes are defined to glue m0 initialization in
// SelectionDAG. The GISel selector can just insert m0 initialization
// directly before selecting a glue-less load, so hide this distinction.

def : GINodeEquiv<G_LOAD, AMDGPUld_glue> {
  let CheckMMOIsNonAtomic = 1;
}

def : GINodeEquiv<G_STORE, AMDGPUst_glue> {
  let CheckMMOIsNonAtomic = 1;
}

def : GINodeEquiv<G_LOAD, AMDGPUatomic_ld_glue> {
  bit CheckMMOIsAtomic = 1;
}

def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap_glue>;
def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap_glue>;
def : GINodeEquiv<G_ATOMICRMW_ADD, atomic_load_add_glue>;
def : GINodeEquiv<G_ATOMICRMW_SUB, atomic_load_sub_glue>;
def : GINodeEquiv<G_ATOMICRMW_AND, atomic_load_and_glue>;
def : GINodeEquiv<G_ATOMICRMW_OR, atomic_load_or_glue>;
def : GINodeEquiv<G_ATOMICRMW_XOR, atomic_load_xor_glue>;
def : GINodeEquiv<G_ATOMICRMW_MIN, atomic_load_min_glue>;
def : GINodeEquiv<G_ATOMICRMW_MAX, atomic_load_max_glue>;
def : GINodeEquiv<G_ATOMICRMW_UMIN, atomic_load_umin_glue>;
def : GINodeEquiv<G_ATOMICRMW_UMAX, atomic_load_umax_glue>;
def : GINodeEquiv<G_ATOMICRMW_FADD, atomic_load_fadd_glue>;

def : GINodeEquiv<G_AMDGPU_FFBH_U32, AMDGPUffbh_u32_impl>;
def : GINodeEquiv<G_AMDGPU_FMIN_LEGACY, AMDGPUfmin_legacy>;
def : GINodeEquiv<G_AMDGPU_FMAX_LEGACY, AMDGPUfmax_legacy>;
def : GINodeEquiv<G_AMDGPU_RCP_IFLAG, AMDGPUrcp_iflag>;

def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE0, AMDGPUcvt_f32_ubyte0>;
def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE1, AMDGPUcvt_f32_ubyte1>;
def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE2, AMDGPUcvt_f32_ubyte2>;
def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE3, AMDGPUcvt_f32_ubyte3>;

def : GINodeEquiv<G_AMDGPU_ATOMIC_CMPXCHG, AMDGPUatomic_cmp_swap>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD, SIbuffer_load>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_USHORT, SIbuffer_load_ushort>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_UBYTE, SIbuffer_load_ubyte>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_SSHORT, SIbuffer_load_short>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_SBYTE, SIbuffer_load_byte>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_FORMAT, SIbuffer_load_format>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_FORMAT_D16, SIbuffer_load_format_d16>;
def : GINodeEquiv<G_AMDGPU_TBUFFER_LOAD_FORMAT, SItbuffer_load>;
def : GINodeEquiv<G_AMDGPU_TBUFFER_LOAD_FORMAT_D16, SItbuffer_load_d16>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE, SIbuffer_store>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_SHORT, SIbuffer_store_short>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_BYTE, SIbuffer_store_byte>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_FORMAT, SIbuffer_store_format>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_FORMAT_D16, SIbuffer_store_format_d16>;
def : GINodeEquiv<G_AMDGPU_TBUFFER_STORE_FORMAT, SItbuffer_store>;
def : GINodeEquiv<G_AMDGPU_TBUFFER_STORE_FORMAT_D16, SItbuffer_store_d16>;

// FIXME: Check MMO is atomic
def : GINodeEquiv<G_AMDGPU_ATOMIC_INC, SIatomic_inc>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_DEC, SIatomic_dec>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_INC, atomic_inc_glue>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_DEC, atomic_dec_glue>;

def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SWAP, SIbuffer_atomic_swap>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_ADD, SIbuffer_atomic_add>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SUB, SIbuffer_atomic_sub>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SMIN, SIbuffer_atomic_smin>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_UMIN, SIbuffer_atomic_umin>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SMAX, SIbuffer_atomic_smax>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_UMAX, SIbuffer_atomic_umax>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_AND, SIbuffer_atomic_and>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_OR, SIbuffer_atomic_or>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_XOR, SIbuffer_atomic_xor>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_INC, SIbuffer_atomic_inc>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_DEC, SIbuffer_atomic_dec>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_CMPSWAP, SIbuffer_atomic_cmpswap>;
def : GINodeEquiv<G_AMDGPU_S_BUFFER_LOAD, SIsbuffer_load>;

class GISelSop2Pat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <

  (dst_vt (node (src0_vt SReg_32:$src0), (src1_vt SReg_32:$src1))),
  (inst src0_vt:$src0, src1_vt:$src1)
>;

class GISelVop2Pat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <

  (dst_vt (node (src0_vt (sd_vsrc0 src0_vt:$src0)), (src1_vt VGPR_32:$src1))),
  (inst src0_vt:$src0, src1_vt:$src1)
>;

class GISelVop2CommutePat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <

  (dst_vt (node (src1_vt VGPR_32:$src1), (src0_vt (sd_vsrc0 src0_vt:$src0)))),
  (inst src0_vt:$src0, src1_vt:$src1)
>;

class GISelVop3Pat2 <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <

  (dst_vt (node (src0_vt (sd_vcsrc src0_vt:$src0)), (src1_vt (sd_vcsrc src1_vt:$src1)))),
  (inst src0_vt:$src0, src1_vt:$src1)
>;

class GISelVop3Pat2CommutePat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <

  (dst_vt (node (src0_vt (sd_vcsrc src0_vt:$src0)), (src1_vt (sd_vcsrc src1_vt:$src1)))),
  (inst src0_vt:$src1, src1_vt:$src0)
>;

class GISelVop3Pat2ModsPat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <

  (dst_vt (node (src0_vt (VOP3Mods0 src0_vt:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omods)),
                (src1_vt (VOP3Mods src1_vt:$src1, i32:$src1_modifiers)))),
  (inst i32:$src0_modifiers, src0_vt:$src0,
        i32:$src1_modifiers, src1_vt:$src1, $clamp, $omods)
>;

multiclass GISelVop2IntrPat <
  SDPatternOperator node, Instruction inst,
  ValueType dst_vt, ValueType src_vt = dst_vt> {

  def : GISelVop2Pat <node, inst, dst_vt, src_vt>;

  // FIXME: Intrinsics aren't marked as commutable, so we need to add an
  // explicit pattern to handle commuting. This is another reason why
  // legalizing to a generic machine instruction may be better than matching
  // the intrinsic directly.
  def : GISelVop2CommutePat <node, inst, dst_vt, src_vt>;
}
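
// A hypothetical instantiation of the multiclass above (not a def that exists
// in this file; the intrinsic and instruction names are placeholders):
//   defm : GISelVop2IntrPat <int_amdgcn_example, V_EXAMPLE_U32_e32, i32>;
// would emit both the direct GISelVop2Pat and the commuted
// GISelVop2CommutePat for that intrinsic.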

// Since GlobalISel is more flexible than SelectionDAG, I think we can get
// away with adding patterns for integer types and not legalizing all
// loads and stores to vector types. This should help simplify the load/store
// legalization.
foreach Ty = [i64, p0, p1, p4] in {
  defm : SMRD_Pattern <"S_LOAD_DWORDX2", Ty>;
}
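
// For reference, the loop above is shorthand for one instantiation per listed
// type; it expands to roughly:
//   defm : SMRD_Pattern <"S_LOAD_DWORDX2", i64>;
//   defm : SMRD_Pattern <"S_LOAD_DWORDX2", p0>;
//   defm : SMRD_Pattern <"S_LOAD_DWORDX2", p1>;
//   defm : SMRD_Pattern <"S_LOAD_DWORDX2", p4>;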

def gi_as_i32timm : GICustomOperandRenderer<"renderTruncTImm32">,
  GISDNodeXFormEquiv<as_i32timm>;

def gi_as_i16timm : GICustomOperandRenderer<"renderTruncTImm16">,
  GISDNodeXFormEquiv<as_i16timm>;

def gi_as_i8timm : GICustomOperandRenderer<"renderTruncTImm8">,
  GISDNodeXFormEquiv<as_i8timm>;

def gi_as_i1timm : GICustomOperandRenderer<"renderTruncTImm1">,
  GISDNodeXFormEquiv<as_i1timm>;

def gi_NegateImm : GICustomOperandRenderer<"renderNegateImm">,
  GISDNodeXFormEquiv<NegateImm>;

def gi_bitcast_fpimm_to_i32 : GICustomOperandRenderer<"renderBitcastImm">,
  GISDNodeXFormEquiv<bitcast_fpimm_to_i32>;

def gi_IMMPopCount : GICustomOperandRenderer<"renderPopcntImm">,
  GISDNodeXFormEquiv<IMMPopCount>;

def gi_extract_glc : GICustomOperandRenderer<"renderExtractGLC">,
  GISDNodeXFormEquiv<extract_glc>;

def gi_extract_slc : GICustomOperandRenderer<"renderExtractSLC">,
  GISDNodeXFormEquiv<extract_slc>;

def gi_extract_dlc : GICustomOperandRenderer<"renderExtractDLC">,
  GISDNodeXFormEquiv<extract_dlc>;

def gi_extract_swz : GICustomOperandRenderer<"renderExtractSWZ">,
  GISDNodeXFormEquiv<extract_swz>;