1 //===- IntrinsicsAMDGPU.td - Defines AMDGPU intrinsics -----*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines all of the R600-specific intrinsics.
12 //===----------------------------------------------------------------------===//
// Intrinsic reading a preloaded hardware register: returns i32, takes no
// arguments, and has no side effects (IntrNoMem, IntrSpeculatable).
class AMDGPUReadPreloadRegisterIntrinsic
  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;

// Same as above, but additionally exposed to Clang under the given builtin name.
class AMDGPUReadPreloadRegisterIntrinsicNamed<string name>
  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>, GCCBuiltin<name>;
// Used to tag image and resource intrinsics with information used to generate
// mem operands.
class AMDGPURsrcIntrinsic<int rsrcarg, bit isimage = 0> {
  int RsrcArg = rsrcarg; // index of the resource (rsrc) argument
  bit IsImage = isimage; // set for image (MIMG) intrinsics
}
27 let TargetPrefix = "r600" in {
// Instantiate _x/_y/_z variants of a preload-register intrinsic.
multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz {
  def _x : AMDGPUReadPreloadRegisterIntrinsic;
  def _y : AMDGPUReadPreloadRegisterIntrinsic;
  def _z : AMDGPUReadPreloadRegisterIntrinsic;
}

// Same, with Clang builtins named <prefix>_x / _y / _z.
multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz_named<string prefix> {
  def _x : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_x")>;
  def _y : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_y")>;
  def _z : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_z")>;
}
// Preloaded values exposed both as intrinsics and as Clang builtins.
defm int_r600_read_global_size : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                                 <"__builtin_r600_read_global_size">;
defm int_r600_read_ngroups : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                             <"__builtin_r600_read_ngroups">;
defm int_r600_read_tgid : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                          <"__builtin_r600_read_tgid">;

// Preloaded values without a corresponding Clang builtin.
defm int_r600_read_local_size : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_r600_read_tidig : AMDGPUReadPreloadRegisterIntrinsic_xyz;
// Work-group barrier; convergent so it cannot be moved across divergent
// control flow.
def int_r600_group_barrier : GCCBuiltin<"__builtin_r600_group_barrier">,
  Intrinsic<[], [], [IntrConvergent]>;

// AS 7 is PARAM_I_ADDRESS, used for kernel arguments
def int_r600_implicitarg_ptr :
  GCCBuiltin<"__builtin_r600_implicitarg_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 7>], [],
  [IntrNoMem, IntrSpeculatable]>;
// Typed RAT store: takes data, index, and a constant RAT id; returns nothing.
def int_r600_rat_store_typed :
  // 1st parameter: Data
  // 2nd parameter: Index
  // 3rd parameter: Constant RAT ID
  Intrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], []>,
  GCCBuiltin<"__builtin_r600_rat_store_typed">;
// Reciprocal square root, IEEE-compliant variant.
def int_r600_recipsqrt_ieee : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

// Reciprocal square root, clamped variant.
def int_r600_recipsqrt_clamped : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_cube : Intrinsic<
  [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_store_stream_output : Intrinsic<
  [], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []
>;
// Texture operation profile with float coordinates; returns a v4f32 texel.
class TextureIntrinsicFloatInput : Intrinsic<[llvm_v4f32_ty], [
  llvm_v4f32_ty, // Coord
  llvm_i32_ty,   // offset_x
  llvm_i32_ty,   // offset_y,
  llvm_i32_ty,   // offset_z,
  llvm_i32_ty,   // resource_id
  llvm_i32_ty,   // samplerid
  llvm_i32_ty,   // coord_type_x
  llvm_i32_ty,   // coord_type_y
  llvm_i32_ty,   // coord_type_z
  llvm_i32_ty],  // coord_type_w
  [IntrNoMem]
>;
// Texture operation profile with integer coordinates; returns a v4i32 texel.
class TextureIntrinsicInt32Input : Intrinsic<[llvm_v4i32_ty], [
  llvm_v4i32_ty, // Coord
  llvm_i32_ty,   // offset_x
  llvm_i32_ty,   // offset_y,
  llvm_i32_ty,   // offset_z,
  llvm_i32_ty,   // resource_id
  llvm_i32_ty,   // samplerid
  llvm_i32_ty,   // coord_type_x
  llvm_i32_ty,   // coord_type_y
  llvm_i32_ty,   // coord_type_z
  llvm_i32_ty],  // coord_type_w
  [IntrNoMem]
>;
// Swizzled store: value, plus two i32 control operands.
def int_r600_store_swizzle :
  Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], []
>;
// Texture operations taking float coordinates.
def int_r600_tex : TextureIntrinsicFloatInput;
def int_r600_texc : TextureIntrinsicFloatInput;
def int_r600_txl : TextureIntrinsicFloatInput;
def int_r600_txlc : TextureIntrinsicFloatInput;
def int_r600_txb : TextureIntrinsicFloatInput;
def int_r600_txbc : TextureIntrinsicFloatInput;
// Texel fetch / query operations taking integer coordinates.
def int_r600_txf : TextureIntrinsicInt32Input;
def int_r600_txq : TextureIntrinsicInt32Input;
// Derivative operations (float coordinates).
def int_r600_ddx : TextureIntrinsicFloatInput;
def int_r600_ddy : TextureIntrinsicFloatInput;
// 4-component dot product of two v4f32 vectors, scalar f32 result.
def int_r600_dot4 : Intrinsic<[llvm_float_ty],
  [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_kill : Intrinsic<[], [llvm_float_ty], []>;
132 } // End TargetPrefix = "r600"
134 let TargetPrefix = "amdgcn" in {
136 //===----------------------------------------------------------------------===//
137 // ABI Special Intrinsics
138 //===----------------------------------------------------------------------===//
// Work-item / work-group id preloads; workgroup_id also has Clang builtins.
defm int_amdgcn_workitem_id : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_amdgcn_workgroup_id : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                               <"__builtin_amdgcn_workgroup_id">;
// Pointer (addrspace 4) to the current dispatch packet.
def int_amdgcn_dispatch_ptr :
  GCCBuiltin<"__builtin_amdgcn_dispatch_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [IntrNoMem, IntrSpeculatable]>;

// Pointer (addrspace 4) to the queue descriptor.
def int_amdgcn_queue_ptr :
  GCCBuiltin<"__builtin_amdgcn_queue_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [IntrNoMem, IntrSpeculatable]>;

// Pointer (addrspace 4) to the kernarg segment.
def int_amdgcn_kernarg_segment_ptr :
  GCCBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [IntrNoMem, IntrSpeculatable]>;

// Pointer (addrspace 4) to the implicit kernel arguments.
def int_amdgcn_implicitarg_ptr :
  GCCBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [IntrNoMem, IntrSpeculatable]>;

// Statically allocated LDS size (i32).
def int_amdgcn_groupstaticsize :
  GCCBuiltin<"__builtin_amdgcn_groupstaticsize">,
  Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;

// 64-bit dispatch id.
def int_amdgcn_dispatch_id :
  GCCBuiltin<"__builtin_amdgcn_dispatch_id">,
  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;

// Pointer (addrspace 4) to an implicitly provided buffer.
def int_amdgcn_implicit_buffer_ptr :
  GCCBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [IntrNoMem, IntrSpeculatable]>;
// Set EXEC to the 64-bit value given.
// This is always moved to the beginning of the basic block.
def int_amdgcn_init_exec : Intrinsic<[],
  [llvm_i64_ty],      // 64-bit literal constant
  [IntrConvergent]>;

// Set EXEC according to a thread count packed in an SGPR input:
//    thread_count = (input >> bitoffset) & 0x7f;
// This is always moved to the beginning of the basic block.
def int_amdgcn_init_exec_from_input : Intrinsic<[],
  [llvm_i32_ty,       // 32-bit SGPR input
   llvm_i32_ty],      // bit offset of the thread count
  [IntrConvergent]>;
192 //===----------------------------------------------------------------------===//
193 // Instruction Intrinsics
194 //===----------------------------------------------------------------------===//
// The first parameter is s_sendmsg immediate (i16),
// the second one is copied to m0
def int_amdgcn_s_sendmsg : GCCBuiltin<"__builtin_amdgcn_s_sendmsg">,
  Intrinsic <[], [llvm_i32_ty, llvm_i32_ty], []>;
def int_amdgcn_s_sendmsghalt : GCCBuiltin<"__builtin_amdgcn_s_sendmsghalt">,
  Intrinsic <[], [llvm_i32_ty, llvm_i32_ty], []>;

// Work-group barrier; convergent.
def int_amdgcn_s_barrier : GCCBuiltin<"__builtin_amdgcn_s_barrier">,
  Intrinsic<[], [], [IntrConvergent]>;

// Compiler-only barrier within a wave; convergent.
def int_amdgcn_wave_barrier : GCCBuiltin<"__builtin_amdgcn_wave_barrier">,
  Intrinsic<[], [], [IntrConvergent]>;

// Wait on outstanding memory counters; argument is the s_waitcnt immediate.
def int_amdgcn_s_waitcnt : GCCBuiltin<"__builtin_amdgcn_s_waitcnt">,
  Intrinsic<[], [llvm_i32_ty], []>;
def int_amdgcn_div_scale : Intrinsic<
  // 1st parameter: Numerator
  // 2nd parameter: Denominator
  // 3rd parameter: Constant to select select between first and
  //                second. (0 = first, 1 = second).
  [llvm_anyfloat_ty, llvm_i1_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_div_fmas : Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_div_fixup : Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_trig_preop : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sin : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cos : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_log_clamp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_fmul_legacy : GCCBuiltin<"__builtin_amdgcn_fmul_legacy">,
  Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rcp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rcp_legacy : GCCBuiltin<"__builtin_amdgcn_rcp_legacy">,
  Intrinsic<[llvm_float_ty], [llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rsq : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rsq_legacy : GCCBuiltin<"__builtin_amdgcn_rsq_legacy">,
  Intrinsic<
  [llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rsq_clamp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_ldexp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_frexp_mant : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_frexp_exp : Intrinsic<
  [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem, IntrSpeculatable]
>;

// v_fract is buggy on SI/CI. It mishandles infinities, may return 1.0
// and always uses rtz, so is not suitable for implementing the OpenCL
// fract function. It should be ok on VI.
def int_amdgcn_fract : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_cvt_pkrtz : Intrinsic<
  [llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pknorm_i16 : Intrinsic<
  [llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pknorm_u16 : Intrinsic<
  [llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pk_i16 : Intrinsic<
  [llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pk_u16 : Intrinsic<
  [llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_class : Intrinsic<
  [llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_fmed3 : GCCBuiltin<"__builtin_amdgcn_fmed3">,
  Intrinsic<[llvm_anyfloat_ty],
    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubeid : GCCBuiltin<"__builtin_amdgcn_cubeid">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubema : GCCBuiltin<"__builtin_amdgcn_cubema">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubesc : GCCBuiltin<"__builtin_amdgcn_cubesc">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubetc : GCCBuiltin<"__builtin_amdgcn_cubetc">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable]
>;
// v_ffbh_i32, as opposed to v_ffbh_u32. For v_ffbh_u32, llvm.ctlz
// should be used.
def int_amdgcn_sffbh :
  Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;

// v_mad_f32|f16/v_mac_f32|f16, selected regardless of denorm support.
def int_amdgcn_fmad_ftz :
  Intrinsic<[llvm_anyfloat_ty],
            [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
            [IntrNoMem, IntrSpeculatable]
>;
// Fields should mirror atomicrmw
class AMDGPUAtomicIncIntrin : Intrinsic<[llvm_anyint_ty],
  [llvm_anyptr_ty,   // pointer operand
  LLVMMatchType<0>,  // value operand
  llvm_i32_ty,       // ordering
  llvm_i32_ty,       // scope
  llvm_i1_ty],       // isVolatile
  [IntrArgMemOnly, NoCapture<0>], "",
  [SDNPMemOperand]
>;

def int_amdgcn_atomic_inc : AMDGPUAtomicIncIntrin;
def int_amdgcn_atomic_dec : AMDGPUAtomicIncIntrin;
// f32 LDS (addrspace 3) atomic-style operation exposed via a Clang builtin.
class AMDGPULDSF32Intrin<string clang_builtin> :
  GCCBuiltin<clang_builtin>,
  Intrinsic<[llvm_float_ty],
    [LLVMQualPointerType<llvm_float_ty, 3>, // LDS pointer
    llvm_float_ty, // value operand
    llvm_i32_ty,   // ordering
    llvm_i32_ty,   // scope
    llvm_i1_ty],   // isVolatile
    [IntrArgMemOnly, NoCapture<0>]
>;
class AMDGPUDSOrderedIntrinsic : Intrinsic<
  [llvm_i32_ty],
  // M0 = {hi16:address, lo16:waveID}. Allow passing M0 as a pointer, so that
  // the bit packing can be optimized at the IR level.
  [LLVMQualPointerType<llvm_i32_ty, 2>, // IntToPtr(M0)
   llvm_i32_ty, // value to add or swap
   llvm_i32_ty, // ordering
   llvm_i32_ty, // scope
   llvm_i1_ty,  // isVolatile
   llvm_i32_ty, // ordered count index (OA index), also added to the address
   llvm_i1_ty,  // wave release, usually set to 1
   llvm_i1_ty], // wave done, set to 1 for the last ordered instruction
  [NoCapture<0>]
>;

def int_amdgcn_ds_ordered_add : AMDGPUDSOrderedIntrinsic;
def int_amdgcn_ds_ordered_swap : AMDGPUDSOrderedIntrinsic;

def int_amdgcn_ds_fadd : AMDGPULDSF32Intrin<"__builtin_amdgcn_ds_faddf">;
def int_amdgcn_ds_fmin : AMDGPULDSF32Intrin<"__builtin_amdgcn_ds_fminf">;
def int_amdgcn_ds_fmax : AMDGPULDSF32Intrin<"__builtin_amdgcn_ds_fmaxf">;
417 } // TargetPrefix = "amdgcn"
419 // New-style image intrinsics
421 //////////////////////////////////////////////////////////////////////////
422 // Dimension-aware image intrinsics framework
423 //////////////////////////////////////////////////////////////////////////
// Helper class to represent (type, name) combinations of arguments. The
// argument names are explanatory and used as DAG operand names for codegen
// pattern matching.
class AMDGPUArg<LLVMType ty, string name> {
  LLVMType Type = ty;
  string Name = name;
}
// Return [AMDGPUArg<basety, names[0]>, AMDGPUArg<LLVMMatchType<0>, names[1]>, ...]
class makeArgList<list<string> names, LLVMType basety> {
  list<AMDGPUArg> ret =
    !listconcat([AMDGPUArg<basety, names[0]>],
                !foreach(name, !tail(names), AMDGPUArg<LLVMMatchType<0>, name>));
}
// Return arglist, with LLVMMatchType's references shifted by 'shift'.
class arglistmatchshift<list<AMDGPUArg> arglist, int shift> {
  list<AMDGPUArg> ret =
    !foreach(arg, arglist,
             !if(!isa<LLVMMatchType>(arg.Type),
                 AMDGPUArg<LLVMMatchType<!add(!cast<LLVMMatchType>(arg.Type).Number, shift)>,
                           arg.Name>,
                 arg));
}
// Return the concatenation of the given arglists. LLVMMatchType's are adjusted
// accordingly, and shifted by an additional 'shift'.
class arglistconcat<list<list<AMDGPUArg>> arglists, int shift = 0> {
  list<AMDGPUArg> ret =
    !foldl([]<AMDGPUArg>, arglists, lhs, rhs,
           !listconcat(
             lhs,
             arglistmatchshift<rhs,
               !add(shift, !foldl(0, lhs, a, b,
                                  !add(a, b.Type.isAny)))>.ret));
}
// Represent texture/image types / dimensionality.
class AMDGPUDimProps<string name, list<string> coord_names, list<string> slice_names> {
  AMDGPUDimProps Dim = !cast<AMDGPUDimProps>(NAME);
  string Name = name; // e.g. "2darraymsaa"
  bit DA = 0; // DA bit in MIMG encoding

  list<AMDGPUArg> CoordSliceArgs =
    makeArgList<!listconcat(coord_names, slice_names), llvm_anyfloat_ty>.ret;
  list<AMDGPUArg> CoordSliceIntArgs =
    makeArgList<!listconcat(coord_names, slice_names), llvm_anyint_ty>.ret;
  list<AMDGPUArg> GradientArgs =
    makeArgList<!listconcat(!foreach(name, coord_names, "d" # name # "dh"),
                            !foreach(name, coord_names, "d" # name # "dv")),
                llvm_anyfloat_ty>.ret;

  bits<8> NumCoords = !size(CoordSliceArgs);
  bits<8> NumGradients = !size(GradientArgs);
}
def AMDGPUDim1D : AMDGPUDimProps<"1d", ["s"], []>;
def AMDGPUDim2D : AMDGPUDimProps<"2d", ["s", "t"], []>;
def AMDGPUDim3D : AMDGPUDimProps<"3d", ["s", "t", "r"], []>;
// Array and cube dimensions set the DA bit in the MIMG encoding.
let DA = 1 in {
  def AMDGPUDimCube : AMDGPUDimProps<"cube", ["s", "t"], ["face"]>;
  def AMDGPUDim1DArray : AMDGPUDimProps<"1darray", ["s"], ["slice"]>;
  def AMDGPUDim2DArray : AMDGPUDimProps<"2darray", ["s", "t"], ["slice"]>;
}
def AMDGPUDim2DMsaa : AMDGPUDimProps<"2dmsaa", ["s", "t"], ["fragid"]>;
let DA = 1 in {
  def AMDGPUDim2DArrayMsaa : AMDGPUDimProps<"2darraymsaa", ["s", "t"], ["slice", "fragid"]>;
}
// Groupings of the dimension records, for iteration in multiclasses below.
def AMDGPUDims {
  list<AMDGPUDimProps> NoMsaa = [AMDGPUDim1D, AMDGPUDim2D, AMDGPUDim3D,
                                 AMDGPUDimCube, AMDGPUDim1DArray,
                                 AMDGPUDim2DArray];
  list<AMDGPUDimProps> Msaa = [AMDGPUDim2DMsaa, AMDGPUDim2DArrayMsaa];
  list<AMDGPUDimProps> All = !listconcat(NoMsaa, Msaa);
}
// Represent sample variants, i.e. _C, _O, _B, ... and combinations thereof.
class AMDGPUSampleVariant<string ucmod, string lcmod, list<AMDGPUArg> extra_addr> {
  string UpperCaseMod = ucmod;
  string LowerCaseMod = lcmod;

  // {offset} {bias} {z-compare}
  list<AMDGPUArg> ExtraAddrArgs = extra_addr;

  // Whether this variant takes explicit derivative arguments.
  bit Gradients = 0;

  // Name of the {lod} or {clamp} argument that is appended to the coordinates,
  // if any.
  string LodOrClamp = "";
}
// AMDGPUSampleVariants: all variants supported by IMAGE_SAMPLE
// AMDGPUSampleVariantsNoGradients: variants supported by IMAGE_GATHER4
defset list<AMDGPUSampleVariant> AMDGPUSampleVariants = {
  multiclass AMDGPUSampleHelper_Offset<string ucmod, string lcmod,
                                       list<AMDGPUArg> extra_addr> {
    def NAME#lcmod : AMDGPUSampleVariant<ucmod, lcmod, extra_addr>;
    def NAME#lcmod#_o : AMDGPUSampleVariant<
        ucmod#"_O", lcmod#"_o", !listconcat([AMDGPUArg<llvm_i32_ty, "offset">], extra_addr)>;
  }

  multiclass AMDGPUSampleHelper_Compare<string ucmod, string lcmod,
                                        list<AMDGPUArg> extra_addr> {
    defm NAME : AMDGPUSampleHelper_Offset<ucmod, lcmod, extra_addr>;
    defm NAME : AMDGPUSampleHelper_Offset<
        "_C"#ucmod, "_c"#lcmod, !listconcat(extra_addr, [AMDGPUArg<llvm_float_ty, "zcompare">])>;
  }

  multiclass AMDGPUSampleHelper_Clamp<string ucmod, string lcmod,
                                      list<AMDGPUArg> extra_addr> {
    defm NAME : AMDGPUSampleHelper_Compare<ucmod, lcmod, extra_addr>;
    let LodOrClamp = "clamp" in
    defm NAME : AMDGPUSampleHelper_Compare<ucmod#"_CL", lcmod#"_cl", extra_addr>;
  }

  defset list<AMDGPUSampleVariant> AMDGPUSampleVariantsNoGradients = {
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"", "", []>;
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<
        "_B", "_b", [AMDGPUArg<llvm_anyfloat_ty, "bias">]>;
    let LodOrClamp = "lod" in
    defm AMDGPUSample : AMDGPUSampleHelper_Compare<"_L", "_l", []>;
    defm AMDGPUSample : AMDGPUSampleHelper_Compare<"_LZ", "_lz", []>;
  }

  let Gradients = 1 in {
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"_D", "_d", []>;
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"_CD", "_cd", []>;
  }
}
// Helper class to capture the profile of a dimension-aware image intrinsic.
// This information is used to generate the intrinsic's type and to inform
// codegen pattern matching.
class AMDGPUDimProfile<string opmod,
                       AMDGPUDimProps dim> {
  AMDGPUDimProps Dim = dim;
  string OpMod = opmod; // the corresponding instruction is named IMAGE_OpMod

  // These are intended to be overwritten by subclasses
  bit IsSample = 0;
  bit IsAtomic = 0;
  list<LLVMType> RetTypes = [];
  list<AMDGPUArg> DataArgs = [];
  list<AMDGPUArg> ExtraAddrArgs = [];
  bit Gradients = 0;
  string LodClampMip = "";

  int NumRetAndDataAnyTypes =
    !foldl(0, !listconcat(RetTypes, !foreach(arg, DataArgs, arg.Type)), a, b,
           !add(a, b.isAny));

  list<AMDGPUArg> AddrArgs =
    arglistconcat<[ExtraAddrArgs,
                   !if(Gradients, dim.GradientArgs, []),
                   !listconcat(!if(IsSample, dim.CoordSliceArgs, dim.CoordSliceIntArgs),
                               !if(!eq(LodClampMip, ""),
                                   []<AMDGPUArg>,
                                   [AMDGPUArg<LLVMMatchType<0>, LodClampMip>]))],
                  NumRetAndDataAnyTypes>.ret;
  list<LLVMType> AddrTypes = !foreach(arg, AddrArgs, arg.Type);
  list<AMDGPUArg> AddrDefaultArgs =
    !foreach(arg, AddrArgs,
             AMDGPUArg<!if(!or(arg.Type.isAny, !isa<LLVMMatchType>(arg.Type)),
                           !if(IsSample, llvm_float_ty, llvm_i32_ty), arg.Type),
                       arg.Name>);
  list<AMDGPUArg> AddrA16Args =
    !foreach(arg, AddrArgs,
             AMDGPUArg<!if(!or(arg.Type.isAny, !isa<LLVMMatchType>(arg.Type)),
                           !if(IsSample, llvm_half_ty, llvm_i16_ty), arg.Type),
                       arg.Name>);
}
// Copy all overridable fields from a base profile.
class AMDGPUDimProfileCopy<AMDGPUDimProfile base> : AMDGPUDimProfile<base.OpMod, base.Dim> {
  let IsSample = base.IsSample;
  let IsAtomic = base.IsAtomic;
  let RetTypes = base.RetTypes;
  let DataArgs = base.DataArgs;
  let ExtraAddrArgs = base.ExtraAddrArgs;
  let Gradients = base.Gradients;
  let LodClampMip = base.LodClampMip;
}
class AMDGPUDimSampleProfile<string opmod,
                             AMDGPUDimProps dim,
                             AMDGPUSampleVariant sample> : AMDGPUDimProfile<opmod, dim> {
  let IsSample = 1;
  let RetTypes = [llvm_any_ty];
  let ExtraAddrArgs = sample.ExtraAddrArgs;
  let Gradients = sample.Gradients;
  let LodClampMip = sample.LodOrClamp;
}
class AMDGPUDimNoSampleProfile<string opmod,
                               AMDGPUDimProps dim,
                               list<LLVMType> retty,
                               list<AMDGPUArg> dataargs,
                               bit Mip = 0> : AMDGPUDimProfile<opmod, dim> {
  let RetTypes = retty;
  let DataArgs = dataargs;
  let LodClampMip = !if(Mip, "mip", "");
}
class AMDGPUDimAtomicProfile<string opmod,
                             AMDGPUDimProps dim,
                             list<AMDGPUArg> dataargs> : AMDGPUDimProfile<opmod, dim> {
  let RetTypes = [llvm_anyint_ty];
  let DataArgs = dataargs;
  let IsAtomic = 1;
}
class AMDGPUDimGetResInfoProfile<AMDGPUDimProps dim> : AMDGPUDimProfile<"GET_RESINFO", dim> {
  let RetTypes = [llvm_anyfloat_ty];
  let DataArgs = [];
  let AddrArgs = [AMDGPUArg<llvm_anyint_ty, "mip">];
  let LodClampMip = "mip";
}
// All dimension-aware intrinsics are derived from this class.
class AMDGPUImageDimIntrinsic<AMDGPUDimProfile P_,
                              list<IntrinsicProperty> props,
                              list<SDNodeProperty> sdnodeprops> : Intrinsic<
    P_.RetTypes,        // vdata(VGPR) -- for load/atomic-with-return
    !listconcat(
      !foreach(arg, P_.DataArgs, arg.Type),  // vdata(VGPR) -- for store/atomic
      !if(P_.IsAtomic, [], [llvm_i32_ty]),   // dmask(imm)
      P_.AddrTypes,                          // vaddr(VGPR)
      [llvm_v8i32_ty],                       // rsrc(SGPR)
      !if(P_.IsSample, [llvm_v4i32_ty,       // samp(SGPR)
                        llvm_i1_ty], []),    // unorm(imm)
      [llvm_i32_ty,                          // texfailctrl(imm; bit 0 = tfe, bit 1 = lwe)
       llvm_i32_ty]),                        // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
    props, "", sdnodeprops>,
  AMDGPURsrcIntrinsic<!add(!size(P_.DataArgs), !size(P_.AddrTypes),
                           !if(P_.IsAtomic, 0, 1)), 1> {
  AMDGPUDimProfile P = P_;

  AMDGPUImageDimIntrinsic Intr = !cast<AMDGPUImageDimIntrinsic>(NAME);

  let TargetPrefix = "amdgcn";
}
// Marker class for intrinsics with a DMask that determines the returned
// data type.
class AMDGPUImageDMaskIntrinsic;
defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimIntrinsics = {

  //////////////////////////////////////////////////////////////////////////
  // Load and store intrinsics
  //////////////////////////////////////////////////////////////////////////
  multiclass AMDGPUImageDimIntrinsicsNoMsaa<string opmod,
                                            list<LLVMType> retty,
                                            list<AMDGPUArg> dataargs,
                                            list<IntrinsicProperty> props,
                                            list<SDNodeProperty> sdnodeprops,
                                            bit Mip = 0> {
    foreach dim = AMDGPUDims.NoMsaa in {
      def !strconcat(NAME, "_", dim.Name)
        : AMDGPUImageDimIntrinsic<
            AMDGPUDimNoSampleProfile<opmod, dim, retty, dataargs, Mip>,
            props, sdnodeprops>;
    }
  }

  multiclass AMDGPUImageDimIntrinsicsAll<string opmod,
                                         list<LLVMType> retty,
                                         list<AMDGPUArg> dataargs,
                                         list<IntrinsicProperty> props,
                                         list<SDNodeProperty> sdnodeprops,
                                         bit Mip = 0> {
    foreach dim = AMDGPUDims.All in {
      def !strconcat(NAME, "_", dim.Name)
        : AMDGPUImageDimIntrinsic<
            AMDGPUDimNoSampleProfile<opmod, dim, retty, dataargs, Mip>,
            props, sdnodeprops>;
    }
  }

  defm int_amdgcn_image_load
    : AMDGPUImageDimIntrinsicsAll<"LOAD", [llvm_any_ty], [], [IntrReadMem],
                                  [SDNPMemOperand]>,
      AMDGPUImageDMaskIntrinsic;
  defm int_amdgcn_image_load_mip
    : AMDGPUImageDimIntrinsicsNoMsaa<"LOAD_MIP", [llvm_any_ty], [],
                                     [IntrReadMem], [SDNPMemOperand], 1>,
      AMDGPUImageDMaskIntrinsic;

  defm int_amdgcn_image_store : AMDGPUImageDimIntrinsicsAll<
              "STORE", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
              [IntrWriteMem], [SDNPMemOperand]>;
  defm int_amdgcn_image_store_mip : AMDGPUImageDimIntrinsicsNoMsaa<
              "STORE_MIP", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
              [IntrWriteMem], [SDNPMemOperand], 1>;
719 //////////////////////////////////////////////////////////////////////////
720 // sample and getlod intrinsics
721 //////////////////////////////////////////////////////////////////////////
722 multiclass AMDGPUImageDimSampleDims<string opmod,
723 AMDGPUSampleVariant sample,
725 foreach dim = AMDGPUDims.NoMsaa in {
726 def !strconcat(NAME, "_", dim.Name) : AMDGPUImageDimIntrinsic<
727 AMDGPUDimSampleProfile<opmod, dim, sample>,
728 !if(NoMem, [IntrNoMem], [IntrReadMem]),
729 !if(NoMem, [], [SDNPMemOperand])>;
733 foreach sample = AMDGPUSampleVariants in {
734 defm int_amdgcn_image_sample # sample.LowerCaseMod
735 : AMDGPUImageDimSampleDims<"SAMPLE" # sample.UpperCaseMod, sample>,
736 AMDGPUImageDMaskIntrinsic;
739 defm int_amdgcn_image_getlod
740 : AMDGPUImageDimSampleDims<"GET_LOD", AMDGPUSample, 1>,
741 AMDGPUImageDMaskIntrinsic;
743 //////////////////////////////////////////////////////////////////////////
744 // getresinfo intrinsics
745 //////////////////////////////////////////////////////////////////////////
746 foreach dim = AMDGPUDims.All in {
747 def !strconcat("int_amdgcn_image_getresinfo_", dim.Name)
748 : AMDGPUImageDimIntrinsic<AMDGPUDimGetResInfoProfile<dim>, [IntrNoMem], []>,
749 AMDGPUImageDMaskIntrinsic;
752 //////////////////////////////////////////////////////////////////////////
753 // gather4 intrinsics
754 //////////////////////////////////////////////////////////////////////////
755 foreach sample = AMDGPUSampleVariantsNoGradients in {
756 foreach dim = [AMDGPUDim2D, AMDGPUDimCube, AMDGPUDim2DArray] in {
757 def int_amdgcn_image_gather4 # sample.LowerCaseMod # _ # dim.Name:
758 AMDGPUImageDimIntrinsic<
759 AMDGPUDimSampleProfile<"GATHER4" # sample.UpperCaseMod, dim, sample>,
760 [IntrReadMem], [SDNPMemOperand]>;
//////////////////////////////////////////////////////////////////////////
// atomic intrinsics
//////////////////////////////////////////////////////////////////////////
defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimAtomicIntrinsics = {
  multiclass AMDGPUImageDimAtomicX<string opmod, list<AMDGPUArg> dataargs> {
    foreach dim = AMDGPUDims.All in {
      def !strconcat(NAME, "_", dim.Name)
        : AMDGPUImageDimIntrinsic<
            AMDGPUDimAtomicProfile<opmod, dim, dataargs>,
            [], [SDNPMemOperand]>;
    }
  }

  multiclass AMDGPUImageDimAtomic<string opmod> {
    defm "" : AMDGPUImageDimAtomicX<opmod, [AMDGPUArg<LLVMMatchType<0>, "vdata">]>;
  }

  defm int_amdgcn_image_atomic_swap : AMDGPUImageDimAtomic<"ATOMIC_SWAP">;
  defm int_amdgcn_image_atomic_add : AMDGPUImageDimAtomic<"ATOMIC_ADD">;
  defm int_amdgcn_image_atomic_sub : AMDGPUImageDimAtomic<"ATOMIC_SUB">;
  defm int_amdgcn_image_atomic_smin : AMDGPUImageDimAtomic<"ATOMIC_SMIN">;
  defm int_amdgcn_image_atomic_umin : AMDGPUImageDimAtomic<"ATOMIC_UMIN">;
  defm int_amdgcn_image_atomic_smax : AMDGPUImageDimAtomic<"ATOMIC_SMAX">;
  defm int_amdgcn_image_atomic_umax : AMDGPUImageDimAtomic<"ATOMIC_UMAX">;
  defm int_amdgcn_image_atomic_and : AMDGPUImageDimAtomic<"ATOMIC_AND">;
  defm int_amdgcn_image_atomic_or : AMDGPUImageDimAtomic<"ATOMIC_OR">;
  defm int_amdgcn_image_atomic_xor : AMDGPUImageDimAtomic<"ATOMIC_XOR">;

  // TODO: INC/DEC are weird: they seem to have a vdata argument in hardware,
  // even though it clearly shouldn't be needed
  defm int_amdgcn_image_atomic_inc : AMDGPUImageDimAtomic<"ATOMIC_INC">;
  defm int_amdgcn_image_atomic_dec : AMDGPUImageDimAtomic<"ATOMIC_DEC">;

  defm int_amdgcn_image_atomic_cmpswap :
      AMDGPUImageDimAtomicX<"ATOMIC_CMPSWAP", [AMDGPUArg<LLVMMatchType<0>, "src">,
                                               AMDGPUArg<LLVMMatchType<0>, "cmp">]>;
}
//////////////////////////////////////////////////////////////////////////
// Buffer intrinsics
//////////////////////////////////////////////////////////////////////////
807 let TargetPrefix = "amdgcn" in {
809 defset list<AMDGPURsrcIntrinsic> AMDGPUBufferIntrinsics = {
class AMDGPUBufferLoad : Intrinsic <
  [llvm_anyfloat_ty], // vdata(VGPR) -- can currently only select f32, v2f32, v4f32
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty],       // slc(imm)
  [IntrReadMem], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_buffer_load_format : AMDGPUBufferLoad;
def int_amdgcn_buffer_load : AMDGPUBufferLoad;
def int_amdgcn_s_buffer_load : Intrinsic <
  [llvm_any_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // byte offset(SGPR/VGPR/imm)
   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc)
  [IntrNoMem]>,
  AMDGPURsrcIntrinsic<0>;
class AMDGPUBufferStore : Intrinsic <
  [],
  [llvm_anyfloat_ty,  // vdata(VGPR) -- can currently only select f32, v2f32, v4f32
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty],       // slc(imm)
  [IntrWriteMem], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_buffer_store_format : AMDGPUBufferStore;
def int_amdgcn_buffer_store : AMDGPUBufferStore;
// New buffer intrinsics with separate raw and struct variants. The raw
// variant never has an index. The struct variant always has an index, even if
// it is const 0. A struct intrinsic with constant 0 index is different to the
// corresponding raw intrinsic on gfx9+ because the behavior of bound checking
// and swizzling changes depending on whether idxen is set in the instruction.
// These new intrinsics also keep the offset and soffset arguments separate as
// they behave differently in bounds checking and swizzling.
class AMDGPURawBufferLoad : Intrinsic <
  [llvm_any_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
  [IntrReadMem], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad;
def int_amdgcn_raw_buffer_load : AMDGPURawBufferLoad;
class AMDGPUStructBufferLoad : Intrinsic <
  [llvm_any_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
  [IntrReadMem], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad;
def int_amdgcn_struct_buffer_load : AMDGPUStructBufferLoad;
class AMDGPURawBufferStore : Intrinsic <
  [],
  [llvm_any_ty,       // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
  [IntrWriteMem], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_raw_buffer_store_format : AMDGPURawBufferStore;
def int_amdgcn_raw_buffer_store : AMDGPURawBufferStore;
class AMDGPUStructBufferStore : Intrinsic <
  [],
  [llvm_any_ty,       // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
  [IntrWriteMem], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_struct_buffer_store_format : AMDGPUStructBufferStore;
def int_amdgcn_struct_buffer_store : AMDGPUStructBufferStore;
// Raw buffer atomics: result is the overloaded integer type of vdata (the
// value in memory before the operation). LLVMMatchType<0> operands refer to
// this overloaded result type.
class AMDGPURawBufferAtomic : Intrinsic <
  [llvm_anyint_ty],   // overloaded result/vdata type
  [LLVMMatchType<0>,  // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_raw_buffer_atomic_swap : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_add : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_sub : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_smin : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_umin : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_smax : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_umax : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_and : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_or : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_xor : AMDGPURawBufferAtomic;
// Compare-and-swap takes an extra cmp operand; returns the original value.
def int_amdgcn_raw_buffer_atomic_cmpswap : Intrinsic<
  [llvm_anyint_ty],   // overloaded result/src/cmp type
  [LLVMMatchType<0>,  // src(VGPR)
   LLVMMatchType<0>,  // cmp(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;
// Struct buffer atomics: raw atomics plus a per-record vindex operand.
class AMDGPUStructBufferAtomic : Intrinsic <
  [llvm_anyint_ty],   // overloaded result/vdata type
  [LLVMMatchType<0>,  // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_struct_buffer_atomic_swap : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_add : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_sub : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_smin : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_umin : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_smax : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_umax : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_and : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_or : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_xor : AMDGPUStructBufferAtomic;
// Compare-and-swap takes an extra cmp operand; returns the original value.
def int_amdgcn_struct_buffer_atomic_cmpswap : Intrinsic<
  [llvm_anyint_ty],   // overloaded result/src/cmp type
  [LLVMMatchType<0>,  // src(VGPR)
   LLVMMatchType<0>,  // cmp(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;
// Obsolescent tbuffer intrinsics.
// Typed-buffer load with separate dfmt/nfmt/glc/slc operands; superseded by
// the raw/struct tbuffer intrinsics below, which use joint format and
// cachepolicy fields.
def int_amdgcn_tbuffer_load : Intrinsic <
  [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
  [llvm_v4i32_ty,   // rsrc(SGPR)
   llvm_i32_ty,     // vindex(VGPR)
   llvm_i32_ty,     // voffset(VGPR)
   llvm_i32_ty,     // soffset(SGPR)
   llvm_i32_ty,     // offset(imm)
   llvm_i32_ty,     // dfmt(imm)
   llvm_i32_ty,     // nfmt(imm)
   llvm_i1_ty,      // glc(imm)
   llvm_i1_ty],     // slc(imm)
  [IntrReadMem], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
// Obsolescent typed-buffer store: returns nothing; vdata is the value written.
def int_amdgcn_tbuffer_store : Intrinsic <
  [],               // no result
  [llvm_any_ty,     // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
   llvm_v4i32_ty,   // rsrc(SGPR)
   llvm_i32_ty,     // vindex(VGPR)
   llvm_i32_ty,     // voffset(VGPR)
   llvm_i32_ty,     // soffset(SGPR)
   llvm_i32_ty,     // offset(imm)
   llvm_i32_ty,     // dfmt(imm)
   llvm_i32_ty,     // nfmt(imm)
   llvm_i1_ty,      // glc(imm)
   llvm_i1_ty],     // slc(imm)
  [IntrWriteMem], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
// New tbuffer intrinsics, with:
// - raw and struct variants
// - joint format field
// - joint cachepolicy field
// Raw variant: no vindex; the buffer is addressed by offset/soffset only.
def int_amdgcn_raw_tbuffer_load : Intrinsic <
  [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
  [llvm_v4i32_ty,   // rsrc(SGPR)
   llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
   llvm_i32_ty],    // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
  [IntrReadMem], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
// Raw tbuffer store: returns nothing; vdata is the value written.
def int_amdgcn_raw_tbuffer_store : Intrinsic <
  [],               // no result
  [llvm_any_ty,     // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
   llvm_v4i32_ty,   // rsrc(SGPR)
   llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
   llvm_i32_ty],    // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
  [IntrWriteMem], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
// Struct tbuffer load: like the raw variant but with a per-record vindex.
def int_amdgcn_struct_tbuffer_load : Intrinsic <
  [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
  [llvm_v4i32_ty,   // rsrc(SGPR)
   llvm_i32_ty,     // vindex(VGPR)
   llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
   llvm_i32_ty],    // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
  [IntrReadMem], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
// Struct tbuffer store: returns nothing; vdata is the value written.
def int_amdgcn_struct_tbuffer_store : Intrinsic <
  [],               // no result
  [llvm_any_ty,     // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
   llvm_v4i32_ty,   // rsrc(SGPR)
   llvm_i32_ty,     // vindex(VGPR)
   llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
   llvm_i32_ty],    // cachepolicy(imm; bit 0 = glc, bit 1 = slc)
  [IntrWriteMem], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
// Legacy buffer atomics (i32 only): result is the value in memory before the
// operation.
class AMDGPUBufferAtomic : Intrinsic <
  [llvm_i32_ty],    // result
  [llvm_i32_ty,     // vdata(VGPR)
   llvm_v4i32_ty,   // rsrc(SGPR)
   llvm_i32_ty,     // vindex(VGPR)
   llvm_i32_ty,     // offset(SGPR/VGPR/imm)
   llvm_i1_ty],     // slc(imm)
  [], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_buffer_atomic_swap : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_add : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_sub : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_smin : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_umin : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_smax : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_umax : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_and : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_or : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_xor : AMDGPUBufferAtomic;
// Compare-and-swap takes an extra cmp operand; returns the original value.
def int_amdgcn_buffer_atomic_cmpswap : Intrinsic<
  [llvm_i32_ty],    // result
  [llvm_i32_ty,     // src(VGPR)
   llvm_i32_ty,     // cmp(VGPR)
   llvm_v4i32_ty,   // rsrc(SGPR)
   llvm_i32_ty,     // vindex(VGPR)
   llvm_i32_ty,     // offset(SGPR/VGPR/imm)
   llvm_i1_ty],     // slc(imm)
  [], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;
1069 } // defset AMDGPUBufferIntrinsics
// Export (EXP instruction). Uses that do not set the done bit should set
// IntrWriteMem on the intrinsic.
def int_amdgcn_exp : Intrinsic <[], [
  llvm_i32_ty,       // tgt
  llvm_i32_ty,       // en
  llvm_any_ty,       // src0 (f32 or i32)
  LLVMMatchType<0>,  // src1
  LLVMMatchType<0>,  // src2
  LLVMMatchType<0>,  // src3
  llvm_i1_ty,        // done
  llvm_i1_ty         // vm
  ],
  []
>;

// exp with compr bit set: sources are packed 2-element vectors.
def int_amdgcn_exp_compr : Intrinsic <[], [
  llvm_i32_ty,       // tgt
  llvm_i32_ty,       // en
  llvm_anyvector_ty, // src0 (v2f16 or v2i16)
  LLVMMatchType<0>,  // src1
  llvm_i1_ty,        // done
  llvm_i1_ty         // vm
  ],
  []
>;
// Cache-control and counter intrinsics: no operands, no result; each maps to
// the builtin named in its GCCBuiltin.
def int_amdgcn_buffer_wbinvl1_sc :
  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_sc">,
  Intrinsic<[], [], []>;

def int_amdgcn_buffer_wbinvl1 :
  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_dcache_inv :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_inv">,
  Intrinsic<[], [], []>;

// Returns a 64-bit counter value; IntrReadMem keeps it ordered with memory.
def int_amdgcn_s_memtime :
  GCCBuiltin<"__builtin_amdgcn_s_memtime">,
  Intrinsic<[llvm_i64_ty], [], [IntrReadMem]>;
// Sleep/perf-level intrinsics: take one i32 immediate, return nothing. The
// empty record bodies were unterminated; closing braces restored.
def int_amdgcn_s_sleep :
  GCCBuiltin<"__builtin_amdgcn_s_sleep">,
  Intrinsic<[], [llvm_i32_ty], []> {
}

def int_amdgcn_s_incperflevel :
  GCCBuiltin<"__builtin_amdgcn_s_incperflevel">,
  Intrinsic<[], [llvm_i32_ty], []> {
}

def int_amdgcn_s_decperflevel :
  GCCBuiltin<"__builtin_amdgcn_s_decperflevel">,
  Intrinsic<[], [llvm_i32_ty], []> {
}
// Read a hardware register (s_getreg); the i32 operand selects the register.
def int_amdgcn_s_getreg :
  GCCBuiltin<"__builtin_amdgcn_s_getreg">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
  [IntrReadMem, IntrSpeculatable]
>;
// int_amdgcn_s_getpc is provided to allow a specific style of position
// independent code to determine the high part of its address when it is
// known (through convention) that the code and any data of interest does
// not cross a 4Gb address boundary. Use for any other purpose may not
// produce the desired results as optimizations may cause code movement,
// especially as we explicitly use IntrNoMem to allow optimizations.
// Returns the 64-bit program counter.
def int_amdgcn_s_getpc :
  GCCBuiltin<"__builtin_amdgcn_s_getpc">,
  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;
// Parameter-interpolation intrinsics. The final i32 operand of each is m0
// (attribute base address).

// __builtin_amdgcn_interp_mov <param>, <attr_chan>, <attr>, <m0>
// param values: 0 = P10, 1 = P20, 2 = P0
def int_amdgcn_interp_mov :
  GCCBuiltin<"__builtin_amdgcn_interp_mov">,
  Intrinsic<[llvm_float_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable]>;

// __builtin_amdgcn_interp_p1 <i>, <attr_chan>, <attr>, <m0>
// This intrinsic reads from lds, but the memory values are constant,
// so it behaves like IntrNoMem.
def int_amdgcn_interp_p1 :
  GCCBuiltin<"__builtin_amdgcn_interp_p1">,
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable]>;

// __builtin_amdgcn_interp_p2 <p1>, <j>, <attr_chan>, <attr>, <m0>
def int_amdgcn_interp_p2 :
  GCCBuiltin<"__builtin_amdgcn_interp_p2">,
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable]>;
1167 // See int_amdgcn_v_interp_p1 for why this is IntrNoMem.
// Pixel shaders only: whether the current pixel is live (i.e. not a helper
// invocation for derivative computation). Takes no operands, returns i1.
def int_amdgcn_ps_live : Intrinsic <
  [llvm_i1_ty],
  [],
  [IntrNoMem]>;
// Operands: (mask, accumulator); result is i32. Lo/hi operate on the low and
// high 32 lanes respectively — NOTE(review): per instruction naming; confirm
// against the ISA manual.
def int_amdgcn_mbcnt_lo :
  GCCBuiltin<"__builtin_amdgcn_mbcnt_lo">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

def int_amdgcn_mbcnt_hi :
  GCCBuiltin<"__builtin_amdgcn_mbcnt_hi">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

// llvm.amdgcn.ds.swizzle src offset
// IntrConvergent: exchanges data across lanes, so it must not be moved into
// or out of divergent control flow.
def int_amdgcn_ds_swizzle :
  GCCBuiltin<"__builtin_amdgcn_ds_swizzle">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
// Unsigned/signed bitfield extract: (src, offset, width) -> extracted field.
def int_amdgcn_ubfe : Intrinsic<[llvm_anyint_ty],
  [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sbfe : Intrinsic<[llvm_anyint_ty],
  [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;
// VALU helper intrinsics (lerp, sum-of-absolute-differences family, packed
// conversions). All are pure (IntrNoMem, IntrSpeculatable); each def had lost
// its closing '>;', restored here.
def int_amdgcn_lerp :
  GCCBuiltin<"__builtin_amdgcn_lerp">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sad_u8 :
  GCCBuiltin<"__builtin_amdgcn_sad_u8">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_msad_u8 :
  GCCBuiltin<"__builtin_amdgcn_msad_u8">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sad_hi_u8 :
  GCCBuiltin<"__builtin_amdgcn_sad_hi_u8">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sad_u16 :
  GCCBuiltin<"__builtin_amdgcn_sad_u16">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_qsad_pk_u16_u8 :
  GCCBuiltin<"__builtin_amdgcn_qsad_pk_u16_u8">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mqsad_pk_u16_u8 :
  GCCBuiltin<"__builtin_amdgcn_mqsad_pk_u16_u8">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mqsad_u32_u8 :
  GCCBuiltin<"__builtin_amdgcn_mqsad_u32_u8">,
  Intrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i32_ty, llvm_v4i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pk_u8_f32 :
  GCCBuiltin<"__builtin_amdgcn_cvt_pk_u8_f32">,
  Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;
// Wave-wide compares: result i64 is a per-lane bitmask. The trailing i32
// operand selects the comparison — presumably an ICmp/FCmp predicate code;
// verify against the AMDGPU usage docs.
def int_amdgcn_icmp :
  Intrinsic<[llvm_i64_ty], [llvm_anyint_ty, LLVMMatchType<0>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent]>;

def int_amdgcn_fcmp :
  Intrinsic<[llvm_i64_ty], [llvm_anyfloat_ty, LLVMMatchType<0>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent]>;

// Broadcast the value from the first active lane to all lanes.
def int_amdgcn_readfirstlane :
  GCCBuiltin<"__builtin_amdgcn_readfirstlane">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

// The lane argument must be uniform across the currently active threads of the
// current wave. Otherwise, the result is undefined.
def int_amdgcn_readlane :
  GCCBuiltin<"__builtin_amdgcn_readlane">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
// The value to write and lane select arguments must be uniform across the
// currently active threads of the current wave. Otherwise, the result is
// undefined.
def int_amdgcn_writelane :
  GCCBuiltin<"__builtin_amdgcn_writelane">,
  Intrinsic<[llvm_i32_ty], [
    llvm_i32_ty,    // uniform value to write: returned by the selected lane
    llvm_i32_ty,    // uniform lane select
    llvm_i32_ty     // returned by all lanes other than the selected one
  ],
  [IntrNoMem, IntrConvergent]
>;
// Funnel-style alignment ops on an i32 pair with a third operand selecting
// the bit/byte shift; each def had lost its closing '>;', restored here.
def int_amdgcn_alignbit : Intrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_alignbyte : Intrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;
// Copies the source value to the destination value, with the guarantee that
// the source value is computed as if the entire program were executed in WQM.
def int_amdgcn_wqm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
// Return true if at least one thread within the pixel quad passes true into
// the function.
def int_amdgcn_wqm_vote : Intrinsic<[llvm_i1_ty],
  [llvm_i1_ty], [IntrNoMem, IntrConvergent]
>;
// llvm.amdgcn.kill(i1 %cond)
// If false, set EXEC=0 for the current thread until the end of program.
def int_amdgcn_kill : Intrinsic<[], [llvm_i1_ty], []>;
// Copies the active channels of the source value to the destination value,
// with the guarantee that the source value is computed as if the entire
// program were executed in Whole Wavefront Mode, i.e. with all channels
// enabled, with a few exceptions: - Phi nodes which require WWM return an
// undefined value.
def int_amdgcn_wwm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
// Given a value, copies it while setting all the inactive lanes to a given
// value. Note that OpenGL helper lanes are considered active, so if the
// program ever uses WQM, then the instruction and the first source will be
// computed in WQM.
def int_amdgcn_set_inactive :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, // value to be copied
             LLVMMatchType<0>], // value for the inactive lanes to take
            [IntrNoMem, IntrConvergent]>;
//===----------------------------------------------------------------------===//
// CI+ Intrinsics
//===----------------------------------------------------------------------===//
// Volatile variants of the cache invalidation intrinsics above; no operands,
// no result.
def int_amdgcn_s_dcache_inv_vol :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_inv_vol">,
  Intrinsic<[], [], []>;

def int_amdgcn_buffer_wbinvl1_vol :
  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_vol">,
  Intrinsic<[], [], []>;
//===----------------------------------------------------------------------===//
// VI Intrinsics
//===----------------------------------------------------------------------===//
// llvm.amdgcn.mov.dpp.i32 <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
// IntrConvergent: DPP moves data between lanes.
def int_amdgcn_mov_dpp :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
             llvm_i1_ty], [IntrNoMem, IntrConvergent]>;

// llvm.amdgcn.update.dpp.i32 <old> <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
// Should be equivalent to:
// v_mov_b32 <dest> <old>
// v_mov_b32 <dest> <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
def int_amdgcn_update_dpp :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty,
             llvm_i32_ty, llvm_i1_ty], [IntrNoMem, IntrConvergent]>;
// Scalar data-cache writeback (plus volatile variant); no operands, no result.
def int_amdgcn_s_dcache_wb :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_wb">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_dcache_wb_vol :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_wb_vol">,
  Intrinsic<[], [], []>;

// Real-time counter, 64-bit; IntrReadMem keeps it ordered with memory.
def int_amdgcn_s_memrealtime :
  GCCBuiltin<"__builtin_amdgcn_s_memrealtime">,
  Intrinsic<[llvm_i64_ty], [], [IntrReadMem]>;

// llvm.amdgcn.ds.permute <index> <src>
// IntrConvergent: moves data between lanes.
def int_amdgcn_ds_permute :
  GCCBuiltin<"__builtin_amdgcn_ds_permute">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

// llvm.amdgcn.ds.bpermute <index> <src>
def int_amdgcn_ds_bpermute :
  GCCBuiltin<"__builtin_amdgcn_ds_bpermute">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
1382 //===----------------------------------------------------------------------===//
1383 // Deep learning intrinsics.
1384 //===----------------------------------------------------------------------===//
// Dot-product intrinsics. Each def had lost its 'Intrinsic<' line, operand
// list brackets, one or more operand lines, and closing '>;'; restored here.

// f32 %r = llvm.amdgcn.fdot2(v2f16 %a, v2f16 %b, f32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_fdot2 :
  GCCBuiltin<"__builtin_amdgcn_fdot2">,
  Intrinsic<
    [llvm_float_ty], // %r
    [
      llvm_v2f16_ty, // %a
      llvm_v2f16_ty, // %b
      llvm_float_ty, // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable]
  >;

// i32 %r = llvm.amdgcn.sdot2(v2i16 %a, v2i16 %b, i32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_sdot2 :
  GCCBuiltin<"__builtin_amdgcn_sdot2">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_v2i16_ty, // %a
      llvm_v2i16_ty, // %b
      llvm_i32_ty,   // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable]
  >;

// u32 %r = llvm.amdgcn.udot2(v2u16 %a, v2u16 %b, u32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_udot2 :
  GCCBuiltin<"__builtin_amdgcn_udot2">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_v2i16_ty, // %a
      llvm_v2i16_ty, // %b
      llvm_i32_ty,   // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable]
  >;

// i32 %r = llvm.amdgcn.sdot4(v4i8 (as i32) %a, v4i8 (as i32) %b, i32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
def int_amdgcn_sdot4 :
  GCCBuiltin<"__builtin_amdgcn_sdot4">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable]
  >;

// u32 %r = llvm.amdgcn.udot4(v4u8 (as u32) %a, v4u8 (as u32) %b, u32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
def int_amdgcn_udot4 :
  GCCBuiltin<"__builtin_amdgcn_udot4">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable]
  >;

// i32 %r = llvm.amdgcn.sdot8(v8i4 (as i32) %a, v8i4 (as i32) %b, i32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
//        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
def int_amdgcn_sdot8 :
  GCCBuiltin<"__builtin_amdgcn_sdot8">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable]
  >;

// u32 %r = llvm.amdgcn.udot8(v8u4 (as u32) %a, v8u4 (as u32) %b, u32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
//        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
def int_amdgcn_udot8 :
  GCCBuiltin<"__builtin_amdgcn_udot8">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable]
  >;
1493 //===----------------------------------------------------------------------===//
1494 // Special Intrinsics for backend internal use only. No frontend
1495 // should emit calls to these.
//===----------------------------------------------------------------------===//
// Control-flow pseudo-intrinsics used to carry the saved exec mask (i64)
// through structurized divergent control flow. Each def had lost its closing
// '>;', restored here.
def int_amdgcn_if : Intrinsic<[llvm_i1_ty, llvm_i64_ty],
  [llvm_i1_ty], [IntrConvergent]
>;

def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_i64_ty],
  [llvm_i64_ty], [IntrConvergent]
>;

def int_amdgcn_if_break : Intrinsic<[llvm_i64_ty],
  [llvm_i1_ty, llvm_i64_ty], [IntrNoMem, IntrConvergent]
>;

def int_amdgcn_loop : Intrinsic<[llvm_i1_ty],
  [llvm_i64_ty], [IntrConvergent]
>;
// Closes a control-flow region opened by the intrinsics above; takes the
// saved exec mask (i64).
def int_amdgcn_end_cf : Intrinsic<[], [llvm_i64_ty], [IntrConvergent]>;

// Represent unreachable in a divergent region.
def int_amdgcn_unreachable : Intrinsic<[], [], [IntrConvergent]>;
1518 // Emit 2.5 ulp, no denormal division. Should only be inserted by
1519 // pass based on !fpmath metadata.
1520 def int_amdgcn_fdiv_fast : Intrinsic<
1521 [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
1522 [IntrNoMem, IntrSpeculatable]