1 //===-- FLATInstructions.td - FLAT Instruction Definitions ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
// Complex pattern that selects a flat-atomic address: yields 2 operands
// (the i64 vaddr and the slc bit), matched by SelectFlat in the
// instruction selector.
10 def FLATAtomic : ComplexPattern<i64, 2, "SelectFlat">;
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
// Base class for all FLAT pseudo instructions.  The pseudo carries the
// operand lists, asm-operand string, and selection patterns; the per-target
// "real" encodings below reference it through SIMCInstr/PseudoInstr.
16 class FLAT_Pseudo<string opName, dag outs, dag ins,
17 string asmOps, list<dag> pattern=[]> :
18 InstSI<outs, ins, "", pattern>,
19 SIMCInstr<opName, SIEncodingFamily.NONE> {
22 let isCodeGenOnly = 1;
24 let SubtargetPredicate = isCIVI;
27 // Internally, FLAT instructions are executed as both an LDS and a
28 // Buffer instruction; so, they increment both VM_CNT and LGKM_CNT
29 // and are not considered done until both have been decremented.
33 let Uses = [EXEC, FLAT_SCR]; // M0
35 let UseNamedOperandTable = 1;
36 let hasSideEffects = 0;
37 let SchedRW = [WriteVMEM];
39 string Mnemonic = opName;
40 string AsmOperands = asmOps;
// Base class for the real (encodable) forms of a FLAT pseudo.  Operand
// lists and the asm string are pulled from the pseudo; the shared FLAT
// encoding fields are laid down here (major opcode 0x37 in bits 31-26,
// with vaddr/vdata/vdst in byte-sized fields).
48 class FLAT_Real <bits<7> op, FLAT_Pseudo ps> :
49 InstSI <ps.OutOperandList, ps.InOperandList, ps.Mnemonic # ps.AsmOperands, []>,
53 let isCodeGenOnly = 0;
55 // copy relevant pseudo op flags
56 let SubtargetPredicate = ps.SubtargetPredicate;
57 let AsmMatchConverter = ps.AsmMatchConverter;
66 // We don't use tfe right now, and it was removed in gfx9.
70 let Inst{16} = !if(ps.has_glc, glc, ps.glcValue);
73 let Inst{31-26} = 0x37; // Encoding.
74 let Inst{39-32} = vaddr;
75 let Inst{47-40} = !if(ps.has_data, vdata, ?);
78 let Inst{63-56} = !if(ps.has_vdst, vdst, ?);
// FLAT load: result written to $vdst, 64-bit flat address in $vaddr,
// with glc/slc cache-policy modifiers.
81 class FLAT_Load_Pseudo <string opName, RegisterClass regClass> : FLAT_Pseudo<
83 (outs regClass:$vdst),
84 (ins VReg_64:$vaddr, GLC:$glc, slc:$slc),
85 " $vdst, $vaddr$glc$slc"> {
// FLAT store: no results; data in $vdata, 64-bit flat address in $vaddr,
// with glc/slc cache-policy modifiers.
90 class FLAT_Store_Pseudo <string opName, RegisterClass vdataClass> : FLAT_Pseudo<
93 (ins VReg_64:$vaddr, vdataClass:$vdata, GLC:$glc, slc:$slc),
94 " $vaddr, $vdata$glc$slc"> {
// Defines a pair of atomic pseudos: a no-return form (NAME) and a
// returning form (NAME_RTN) whose result is written to $vdst and whose
// asm string carries an explicit "glc".  Only the _RTN form has a
// selection pattern.  data_vt/data_rc let the data operand differ from
// the result type (e.g. cmpswap takes a compare+swap value pair).
100 multiclass FLAT_Atomic_Pseudo<
102 RegisterClass vdst_rc,
104 SDPatternOperator atomic = null_frag,
105 ValueType data_vt = vt,
106 RegisterClass data_rc = vdst_rc> {
108 def "" : FLAT_Pseudo <opName,
110 (ins VReg_64:$vaddr, data_rc:$vdata, slc:$slc),
111 " $vaddr, $vdata$slc",
113 AtomicNoRet <NAME, 0> {
119 let PseudoInstr = NAME;
122 def _RTN : FLAT_Pseudo <opName,
123 (outs vdst_rc:$vdst),
124 (ins VReg_64:$vaddr, data_rc:$vdata, slc:$slc),
125 " $vdst, $vaddr, $vdata glc$slc",
127 (atomic (FLATAtomic i64:$vaddr, i1:$slc), data_vt:$vdata))]>,
128 AtomicNoRet <NAME, 1> {
131 let hasPostISelHook = 1;
134 let PseudoInstr = NAME # "_RTN";
// PatFrag that matches a two-operand atomic node only when its address
// is in the flat address space.
138 class flat_binary_atomic_op<SDNode atomic_op> : PatFrag<
139 (ops node:$ptr, node:$value),
140 (atomic_op node:$ptr, node:$value),
141 [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUASI.FLAT_ADDRESS;}]
// Flat-address-space restricted forms of the generic binary atomic
// operators, used by the FLAT atomic selection patterns below.
144 def atomic_cmp_swap_flat : flat_binary_atomic_op<AMDGPUatomic_cmp_swap>;
145 def atomic_swap_flat : flat_binary_atomic_op<atomic_swap>;
146 def atomic_add_flat : flat_binary_atomic_op<atomic_load_add>;
147 def atomic_and_flat : flat_binary_atomic_op<atomic_load_and>;
148 def atomic_max_flat : flat_binary_atomic_op<atomic_load_max>;
149 def atomic_min_flat : flat_binary_atomic_op<atomic_load_min>;
150 def atomic_or_flat : flat_binary_atomic_op<atomic_load_or>;
151 def atomic_sub_flat : flat_binary_atomic_op<atomic_load_sub>;
152 def atomic_umax_flat : flat_binary_atomic_op<atomic_load_umax>;
153 def atomic_umin_flat : flat_binary_atomic_op<atomic_load_umin>;
154 def atomic_xor_flat : flat_binary_atomic_op<atomic_load_xor>;
155 def atomic_inc_flat : flat_binary_atomic_op<SIatomic_inc>;
156 def atomic_dec_flat : flat_binary_atomic_op<SIatomic_dec>;
160 //===----------------------------------------------------------------------===//
162 //===----------------------------------------------------------------------===//
// FLAT load pseudos, from byte-sized loads up to dwordx4.
164 def FLAT_LOAD_UBYTE : FLAT_Load_Pseudo <"flat_load_ubyte", VGPR_32>;
165 def FLAT_LOAD_SBYTE : FLAT_Load_Pseudo <"flat_load_sbyte", VGPR_32>;
166 def FLAT_LOAD_USHORT : FLAT_Load_Pseudo <"flat_load_ushort", VGPR_32>;
167 def FLAT_LOAD_SSHORT : FLAT_Load_Pseudo <"flat_load_sshort", VGPR_32>;
168 def FLAT_LOAD_DWORD : FLAT_Load_Pseudo <"flat_load_dword", VGPR_32>;
169 def FLAT_LOAD_DWORDX2 : FLAT_Load_Pseudo <"flat_load_dwordx2", VReg_64>;
170 def FLAT_LOAD_DWORDX4 : FLAT_Load_Pseudo <"flat_load_dwordx4", VReg_128>;
171 def FLAT_LOAD_DWORDX3 : FLAT_Load_Pseudo <"flat_load_dwordx3", VReg_96>;
// FLAT store pseudos.
173 def FLAT_STORE_BYTE : FLAT_Store_Pseudo <"flat_store_byte", VGPR_32>;
174 def FLAT_STORE_SHORT : FLAT_Store_Pseudo <"flat_store_short", VGPR_32>;
175 def FLAT_STORE_DWORD : FLAT_Store_Pseudo <"flat_store_dword", VGPR_32>;
176 def FLAT_STORE_DWORDX2 : FLAT_Store_Pseudo <"flat_store_dwordx2", VReg_64>;
177 def FLAT_STORE_DWORDX4 : FLAT_Store_Pseudo <"flat_store_dwordx4", VReg_128>;
178 def FLAT_STORE_DWORDX3 : FLAT_Store_Pseudo <"flat_store_dwordx3", VReg_96>;
// FLAT atomic pseudos.  Each defm produces a no-return and a _RTN
// variant (see FLAT_Atomic_Pseudo).  32-bit forms first; cmpswap uses a
// paired data operand (v2i32/v2i64) for the compare+swap values.
180 defm FLAT_ATOMIC_CMPSWAP : FLAT_Atomic_Pseudo <"flat_atomic_cmpswap",
181 VGPR_32, i32, atomic_cmp_swap_flat,
184 defm FLAT_ATOMIC_CMPSWAP_X2 : FLAT_Atomic_Pseudo <"flat_atomic_cmpswap_x2",
185 VReg_64, i64, atomic_cmp_swap_flat,
188 defm FLAT_ATOMIC_SWAP : FLAT_Atomic_Pseudo <"flat_atomic_swap",
189 VGPR_32, i32, atomic_swap_flat>;
191 defm FLAT_ATOMIC_SWAP_X2 : FLAT_Atomic_Pseudo <"flat_atomic_swap_x2",
192 VReg_64, i64, atomic_swap_flat>;
194 defm FLAT_ATOMIC_ADD : FLAT_Atomic_Pseudo <"flat_atomic_add",
195 VGPR_32, i32, atomic_add_flat>;
197 defm FLAT_ATOMIC_SUB : FLAT_Atomic_Pseudo <"flat_atomic_sub",
198 VGPR_32, i32, atomic_sub_flat>;
200 defm FLAT_ATOMIC_SMIN : FLAT_Atomic_Pseudo <"flat_atomic_smin",
201 VGPR_32, i32, atomic_min_flat>;
203 defm FLAT_ATOMIC_UMIN : FLAT_Atomic_Pseudo <"flat_atomic_umin",
204 VGPR_32, i32, atomic_umin_flat>;
206 defm FLAT_ATOMIC_SMAX : FLAT_Atomic_Pseudo <"flat_atomic_smax",
207 VGPR_32, i32, atomic_max_flat>;
209 defm FLAT_ATOMIC_UMAX : FLAT_Atomic_Pseudo <"flat_atomic_umax",
210 VGPR_32, i32, atomic_umax_flat>;
212 defm FLAT_ATOMIC_AND : FLAT_Atomic_Pseudo <"flat_atomic_and",
213 VGPR_32, i32, atomic_and_flat>;
215 defm FLAT_ATOMIC_OR : FLAT_Atomic_Pseudo <"flat_atomic_or",
216 VGPR_32, i32, atomic_or_flat>;
218 defm FLAT_ATOMIC_XOR : FLAT_Atomic_Pseudo <"flat_atomic_xor",
219 VGPR_32, i32, atomic_xor_flat>;
221 defm FLAT_ATOMIC_INC : FLAT_Atomic_Pseudo <"flat_atomic_inc",
222 VGPR_32, i32, atomic_inc_flat>;
224 defm FLAT_ATOMIC_DEC : FLAT_Atomic_Pseudo <"flat_atomic_dec",
225 VGPR_32, i32, atomic_dec_flat>;
// 64-bit (_X2) forms of the same atomics.
227 defm FLAT_ATOMIC_ADD_X2 : FLAT_Atomic_Pseudo <"flat_atomic_add_x2",
228 VReg_64, i64, atomic_add_flat>;
230 defm FLAT_ATOMIC_SUB_X2 : FLAT_Atomic_Pseudo <"flat_atomic_sub_x2",
231 VReg_64, i64, atomic_sub_flat>;
233 defm FLAT_ATOMIC_SMIN_X2 : FLAT_Atomic_Pseudo <"flat_atomic_smin_x2",
234 VReg_64, i64, atomic_min_flat>;
236 defm FLAT_ATOMIC_UMIN_X2 : FLAT_Atomic_Pseudo <"flat_atomic_umin_x2",
237 VReg_64, i64, atomic_umin_flat>;
239 defm FLAT_ATOMIC_SMAX_X2 : FLAT_Atomic_Pseudo <"flat_atomic_smax_x2",
240 VReg_64, i64, atomic_max_flat>;
242 defm FLAT_ATOMIC_UMAX_X2 : FLAT_Atomic_Pseudo <"flat_atomic_umax_x2",
243 VReg_64, i64, atomic_umax_flat>;
245 defm FLAT_ATOMIC_AND_X2 : FLAT_Atomic_Pseudo <"flat_atomic_and_x2",
246 VReg_64, i64, atomic_and_flat>;
248 defm FLAT_ATOMIC_OR_X2 : FLAT_Atomic_Pseudo <"flat_atomic_or_x2",
249 VReg_64, i64, atomic_or_flat>;
251 defm FLAT_ATOMIC_XOR_X2 : FLAT_Atomic_Pseudo <"flat_atomic_xor_x2",
252 VReg_64, i64, atomic_xor_flat>;
254 defm FLAT_ATOMIC_INC_X2 : FLAT_Atomic_Pseudo <"flat_atomic_inc_x2",
255 VReg_64, i64, atomic_inc_flat>;
257 defm FLAT_ATOMIC_DEC_X2 : FLAT_Atomic_Pseudo <"flat_atomic_dec_x2",
258 VReg_64, i64, atomic_dec_flat>;
// Floating-point FLAT atomics (fcmpswap/fmin/fmax and their _X2 forms)
// are only available on CI; no selection patterns are attached
// (null_frag).
260 let SubtargetPredicate = isCI in { // CI Only flat instructions : FIXME Only?
262 defm FLAT_ATOMIC_FCMPSWAP : FLAT_Atomic_Pseudo <"flat_atomic_fcmpswap",
263 VGPR_32, f32, null_frag, v2f32, VReg_64>;
265 defm FLAT_ATOMIC_FCMPSWAP_X2 : FLAT_Atomic_Pseudo <"flat_atomic_fcmpswap_x2",
266 VReg_64, f64, null_frag, v2f64, VReg_128>;
268 defm FLAT_ATOMIC_FMIN : FLAT_Atomic_Pseudo <"flat_atomic_fmin",
271 defm FLAT_ATOMIC_FMAX : FLAT_Atomic_Pseudo <"flat_atomic_fmax",
274 defm FLAT_ATOMIC_FMIN_X2 : FLAT_Atomic_Pseudo <"flat_atomic_fmin_x2",
277 defm FLAT_ATOMIC_FMAX_X2 : FLAT_Atomic_Pseudo <"flat_atomic_fmax_x2",
280 } // End SubtargetPredicate = isCI
282 //===----------------------------------------------------------------------===//
284 //===----------------------------------------------------------------------===//
// PatFrag matching a load whose address space is selectable to FLAT:
// flat, global, or constant.
286 class flat_ld <SDPatternOperator ld> : PatFrag<(ops node:$ptr),
288 auto const AS = cast<MemSDNode>(N)->getAddressSpace();
289 return AS == AMDGPUASI.FLAT_ADDRESS ||
290 AS == AMDGPUASI.GLOBAL_ADDRESS ||
291 AS == AMDGPUASI.CONSTANT_ADDRESS;
// PatFrag matching a store whose address space is selectable to FLAT:
// flat or global (constant space is not writable).
294 class flat_st <SDPatternOperator st> : PatFrag<(ops node:$val, node:$ptr),
295 (st node:$val, node:$ptr), [{
296 auto const AS = cast<MemSDNode>(N)->getAddressSpace();
297 return AS == AMDGPUASI.FLAT_ADDRESS ||
298 AS == AMDGPUASI.GLOBAL_ADDRESS;
// Concrete load/store fragments restricted to FLAT-selectable address
// spaces, used by the patterns below.
301 def atomic_flat_load : flat_ld <atomic_load>;
302 def flat_load : flat_ld <load>;
303 def flat_az_extloadi8 : flat_ld <az_extloadi8>;
304 def flat_sextloadi8 : flat_ld <sextloadi8>;
305 def flat_az_extloadi16 : flat_ld <az_extloadi16>;
306 def flat_sextloadi16 : flat_ld <sextloadi16>;
308 def atomic_flat_store : flat_st <atomic_store>;
309 def flat_store : flat_st <store>;
310 def flat_truncstorei8 : flat_st <truncstorei8>;
311 def flat_truncstorei16 : flat_st <truncstorei16>;
313 // Patterns for global loads with no offset.
314 class FlatLoadPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : Pat <
315 (vt (node i64:$addr)),
// Atomic-load variant of the load pattern above.
319 class FlatLoadAtomicPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : Pat <
320 (vt (node i64:$addr)),
// Non-atomic store: DAG order is (store value, addr).
324 class FlatStorePat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : Pat <
325 (node vt:$data, i64:$addr),
326 (inst $addr, $data, 0, 0)
329 class FlatStoreAtomicPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt> : Pat <
330 // atomic store follows atomic binop convention so the address comes
332 (node i64:$addr, vt:$data),
333 (inst $addr, $data, 1, 0)
// Returning atomic RMW pattern; data_vt allows a wider data operand
// (e.g. cmpswap's value pair).
336 class FlatAtomicPat <FLAT_Pseudo inst, SDPatternOperator node, ValueType vt,
337 ValueType data_vt = vt> : Pat <
338 (vt (node i64:$addr, data_vt:$data)),
339 (inst $addr, $data, 0)
// Selection patterns shared by CI and VI.
342 let Predicates = [isCIVI] in {
344 def : FlatLoadPat <FLAT_LOAD_UBYTE, flat_az_extloadi8, i32>;
345 def : FlatLoadPat <FLAT_LOAD_SBYTE, flat_sextloadi8, i32>;
346 def : FlatLoadPat <FLAT_LOAD_UBYTE, flat_az_extloadi8, i16>;
347 def : FlatLoadPat <FLAT_LOAD_SBYTE, flat_sextloadi8, i16>;
348 def : FlatLoadPat <FLAT_LOAD_USHORT, flat_az_extloadi16, i32>;
349 def : FlatLoadPat <FLAT_LOAD_SSHORT, flat_sextloadi16, i32>;
350 def : FlatLoadPat <FLAT_LOAD_DWORD, flat_load, i32>;
351 def : FlatLoadPat <FLAT_LOAD_DWORDX2, flat_load, v2i32>;
352 def : FlatLoadPat <FLAT_LOAD_DWORDX4, flat_load, v4i32>;
354 def : FlatLoadAtomicPat <FLAT_LOAD_DWORD, atomic_flat_load, i32>;
355 def : FlatLoadAtomicPat <FLAT_LOAD_DWORDX2, atomic_flat_load, i64>;
357 def : FlatStorePat <FLAT_STORE_BYTE, flat_truncstorei8, i32>;
358 def : FlatStorePat <FLAT_STORE_SHORT, flat_truncstorei16, i32>;
359 def : FlatStorePat <FLAT_STORE_DWORD, flat_store, i32>;
360 def : FlatStorePat <FLAT_STORE_DWORDX2, flat_store, v2i32>;
361 def : FlatStorePat <FLAT_STORE_DWORDX4, flat_store, v4i32>;
363 def : FlatStoreAtomicPat <FLAT_STORE_DWORD, atomic_flat_store, i32>;
364 def : FlatStoreAtomicPat <FLAT_STORE_DWORDX2, atomic_flat_store, i64>;
// Returning atomics select the _RTN pseudos; note these use the
// *_global fragments rather than the *_flat ones above.
366 def : FlatAtomicPat <FLAT_ATOMIC_ADD_RTN, atomic_add_global, i32>;
367 def : FlatAtomicPat <FLAT_ATOMIC_SUB_RTN, atomic_sub_global, i32>;
368 def : FlatAtomicPat <FLAT_ATOMIC_INC_RTN, atomic_inc_global, i32>;
369 def : FlatAtomicPat <FLAT_ATOMIC_DEC_RTN, atomic_dec_global, i32>;
370 def : FlatAtomicPat <FLAT_ATOMIC_AND_RTN, atomic_and_global, i32>;
371 def : FlatAtomicPat <FLAT_ATOMIC_SMAX_RTN, atomic_max_global, i32>;
372 def : FlatAtomicPat <FLAT_ATOMIC_UMAX_RTN, atomic_umax_global, i32>;
373 def : FlatAtomicPat <FLAT_ATOMIC_SMIN_RTN, atomic_min_global, i32>;
374 def : FlatAtomicPat <FLAT_ATOMIC_UMIN_RTN, atomic_umin_global, i32>;
375 def : FlatAtomicPat <FLAT_ATOMIC_OR_RTN, atomic_or_global, i32>;
376 def : FlatAtomicPat <FLAT_ATOMIC_SWAP_RTN, atomic_swap_global, i32>;
377 def : FlatAtomicPat <FLAT_ATOMIC_CMPSWAP_RTN, AMDGPUatomic_cmp_swap_global, i32, v2i32>;
378 def : FlatAtomicPat <FLAT_ATOMIC_XOR_RTN, atomic_xor_global, i32>;
380 def : FlatAtomicPat <FLAT_ATOMIC_ADD_X2_RTN, atomic_add_global, i64>;
381 def : FlatAtomicPat <FLAT_ATOMIC_SUB_X2_RTN, atomic_sub_global, i64>;
382 def : FlatAtomicPat <FLAT_ATOMIC_INC_X2_RTN, atomic_inc_global, i64>;
383 def : FlatAtomicPat <FLAT_ATOMIC_DEC_X2_RTN, atomic_dec_global, i64>;
384 def : FlatAtomicPat <FLAT_ATOMIC_AND_X2_RTN, atomic_and_global, i64>;
385 def : FlatAtomicPat <FLAT_ATOMIC_SMAX_X2_RTN, atomic_max_global, i64>;
386 def : FlatAtomicPat <FLAT_ATOMIC_UMAX_X2_RTN, atomic_umax_global, i64>;
387 def : FlatAtomicPat <FLAT_ATOMIC_SMIN_X2_RTN, atomic_min_global, i64>;
388 def : FlatAtomicPat <FLAT_ATOMIC_UMIN_X2_RTN, atomic_umin_global, i64>;
389 def : FlatAtomicPat <FLAT_ATOMIC_OR_X2_RTN, atomic_or_global, i64>;
390 def : FlatAtomicPat <FLAT_ATOMIC_SWAP_X2_RTN, atomic_swap_global, i64>;
391 def : FlatAtomicPat <FLAT_ATOMIC_CMPSWAP_X2_RTN, AMDGPUatomic_cmp_swap_global, i64, v2i64>;
392 def : FlatAtomicPat <FLAT_ATOMIC_XOR_X2_RTN, atomic_xor_global, i64>;
394 } // End Predicates = [isCIVI]
// i16 store patterns are VI-only.
396 let Predicates = [isVI] in {
397 def : FlatStorePat <FLAT_STORE_BYTE, flat_truncstorei8, i16>;
398 def : FlatStorePat <FLAT_STORE_SHORT, flat_store, i16>;
402 //===----------------------------------------------------------------------===//
404 //===----------------------------------------------------------------------===//
406 //===----------------------------------------------------------------------===//
408 //===----------------------------------------------------------------------===//
// Real (encodable) CI form of a FLAT pseudo; registered under the
// SIEncodingFamily.SI MC table and the "CI" decoder namespace.
410 class FLAT_Real_ci <bits<7> op, FLAT_Pseudo ps> :
412 SIMCInstr <ps.PseudoInstr, SIEncodingFamily.SI> {
413 let AssemblerPredicate = isCIOnly;
414 let DecoderNamespace="CI";
// CI opcodes for the FLAT load/store pseudos.
417 def FLAT_LOAD_UBYTE_ci : FLAT_Real_ci <0x8, FLAT_LOAD_UBYTE>;
418 def FLAT_LOAD_SBYTE_ci : FLAT_Real_ci <0x9, FLAT_LOAD_SBYTE>;
419 def FLAT_LOAD_USHORT_ci : FLAT_Real_ci <0xa, FLAT_LOAD_USHORT>;
420 def FLAT_LOAD_SSHORT_ci : FLAT_Real_ci <0xb, FLAT_LOAD_SSHORT>;
421 def FLAT_LOAD_DWORD_ci : FLAT_Real_ci <0xc, FLAT_LOAD_DWORD>;
422 def FLAT_LOAD_DWORDX2_ci : FLAT_Real_ci <0xd, FLAT_LOAD_DWORDX2>;
423 def FLAT_LOAD_DWORDX4_ci : FLAT_Real_ci <0xe, FLAT_LOAD_DWORDX4>;
424 def FLAT_LOAD_DWORDX3_ci : FLAT_Real_ci <0xf, FLAT_LOAD_DWORDX3>;
426 def FLAT_STORE_BYTE_ci : FLAT_Real_ci <0x18, FLAT_STORE_BYTE>;
427 def FLAT_STORE_SHORT_ci : FLAT_Real_ci <0x1a, FLAT_STORE_SHORT>;
428 def FLAT_STORE_DWORD_ci : FLAT_Real_ci <0x1c, FLAT_STORE_DWORD>;
429 def FLAT_STORE_DWORDX2_ci : FLAT_Real_ci <0x1d, FLAT_STORE_DWORDX2>;
430 def FLAT_STORE_DWORDX4_ci : FLAT_Real_ci <0x1e, FLAT_STORE_DWORDX4>;
431 def FLAT_STORE_DWORDX3_ci : FLAT_Real_ci <0x1f, FLAT_STORE_DWORDX3>;
// Instantiates CI real encodings for both the no-return and _RTN atomic
// pseudos at the same opcode.
433 multiclass FLAT_Real_Atomics_ci <bits<7> op, FLAT_Pseudo ps> {
434 def _ci : FLAT_Real_ci<op, !cast<FLAT_Pseudo>(ps.PseudoInstr)>;
435 def _RTN_ci : FLAT_Real_ci<op, !cast<FLAT_Pseudo>(ps.PseudoInstr # "_RTN")>;
// CI opcodes for the FLAT atomics (no-return and _RTN pairs).
438 defm FLAT_ATOMIC_SWAP : FLAT_Real_Atomics_ci <0x30, FLAT_ATOMIC_SWAP>;
439 defm FLAT_ATOMIC_CMPSWAP : FLAT_Real_Atomics_ci <0x31, FLAT_ATOMIC_CMPSWAP>;
440 defm FLAT_ATOMIC_ADD : FLAT_Real_Atomics_ci <0x32, FLAT_ATOMIC_ADD>;
441 defm FLAT_ATOMIC_SUB : FLAT_Real_Atomics_ci <0x33, FLAT_ATOMIC_SUB>;
442 defm FLAT_ATOMIC_SMIN : FLAT_Real_Atomics_ci <0x35, FLAT_ATOMIC_SMIN>;
443 defm FLAT_ATOMIC_UMIN : FLAT_Real_Atomics_ci <0x36, FLAT_ATOMIC_UMIN>;
444 defm FLAT_ATOMIC_SMAX : FLAT_Real_Atomics_ci <0x37, FLAT_ATOMIC_SMAX>;
445 defm FLAT_ATOMIC_UMAX : FLAT_Real_Atomics_ci <0x38, FLAT_ATOMIC_UMAX>;
446 defm FLAT_ATOMIC_AND : FLAT_Real_Atomics_ci <0x39, FLAT_ATOMIC_AND>;
447 defm FLAT_ATOMIC_OR : FLAT_Real_Atomics_ci <0x3a, FLAT_ATOMIC_OR>;
448 defm FLAT_ATOMIC_XOR : FLAT_Real_Atomics_ci <0x3b, FLAT_ATOMIC_XOR>;
449 defm FLAT_ATOMIC_INC : FLAT_Real_Atomics_ci <0x3c, FLAT_ATOMIC_INC>;
450 defm FLAT_ATOMIC_DEC : FLAT_Real_Atomics_ci <0x3d, FLAT_ATOMIC_DEC>;
451 defm FLAT_ATOMIC_SWAP_X2 : FLAT_Real_Atomics_ci <0x50, FLAT_ATOMIC_SWAP_X2>;
452 defm FLAT_ATOMIC_CMPSWAP_X2 : FLAT_Real_Atomics_ci <0x51, FLAT_ATOMIC_CMPSWAP_X2>;
453 defm FLAT_ATOMIC_ADD_X2 : FLAT_Real_Atomics_ci <0x52, FLAT_ATOMIC_ADD_X2>;
454 defm FLAT_ATOMIC_SUB_X2 : FLAT_Real_Atomics_ci <0x53, FLAT_ATOMIC_SUB_X2>;
455 defm FLAT_ATOMIC_SMIN_X2 : FLAT_Real_Atomics_ci <0x55, FLAT_ATOMIC_SMIN_X2>;
456 defm FLAT_ATOMIC_UMIN_X2 : FLAT_Real_Atomics_ci <0x56, FLAT_ATOMIC_UMIN_X2>;
457 defm FLAT_ATOMIC_SMAX_X2 : FLAT_Real_Atomics_ci <0x57, FLAT_ATOMIC_SMAX_X2>;
458 defm FLAT_ATOMIC_UMAX_X2 : FLAT_Real_Atomics_ci <0x58, FLAT_ATOMIC_UMAX_X2>;
459 defm FLAT_ATOMIC_AND_X2 : FLAT_Real_Atomics_ci <0x59, FLAT_ATOMIC_AND_X2>;
460 defm FLAT_ATOMIC_OR_X2 : FLAT_Real_Atomics_ci <0x5a, FLAT_ATOMIC_OR_X2>;
461 defm FLAT_ATOMIC_XOR_X2 : FLAT_Real_Atomics_ci <0x5b, FLAT_ATOMIC_XOR_X2>;
462 defm FLAT_ATOMIC_INC_X2 : FLAT_Real_Atomics_ci <0x5c, FLAT_ATOMIC_INC_X2>;
463 defm FLAT_ATOMIC_DEC_X2 : FLAT_Real_Atomics_ci <0x5d, FLAT_ATOMIC_DEC_X2>;
465 // CI Only flat instructions
466 defm FLAT_ATOMIC_FCMPSWAP : FLAT_Real_Atomics_ci <0x3e, FLAT_ATOMIC_FCMPSWAP>;
467 defm FLAT_ATOMIC_FMIN : FLAT_Real_Atomics_ci <0x3f, FLAT_ATOMIC_FMIN>;
468 defm FLAT_ATOMIC_FMAX : FLAT_Real_Atomics_ci <0x40, FLAT_ATOMIC_FMAX>;
469 defm FLAT_ATOMIC_FCMPSWAP_X2 : FLAT_Real_Atomics_ci <0x5e, FLAT_ATOMIC_FCMPSWAP_X2>;
470 defm FLAT_ATOMIC_FMIN_X2 : FLAT_Real_Atomics_ci <0x5f, FLAT_ATOMIC_FMIN_X2>;
471 defm FLAT_ATOMIC_FMAX_X2 : FLAT_Real_Atomics_ci <0x60, FLAT_ATOMIC_FMAX_X2>;
474 //===----------------------------------------------------------------------===//
476 //===----------------------------------------------------------------------===//
// Real (encodable) VI form of a FLAT pseudo; registered under the
// SIEncodingFamily.VI MC table and the "VI" decoder namespace.
478 class FLAT_Real_vi <bits<7> op, FLAT_Pseudo ps> :
480 SIMCInstr <ps.PseudoInstr, SIEncodingFamily.VI> {
481 let AssemblerPredicate = isVI;
482 let DecoderNamespace="VI";
// VI opcodes for the FLAT load/store pseudos.  Note dwordx4/dwordx3
// use 0x17/0x16 (and 0x1f/0x1e for stores), swapped relative to the
// textual def order.
485 def FLAT_LOAD_UBYTE_vi : FLAT_Real_vi <0x10, FLAT_LOAD_UBYTE>;
486 def FLAT_LOAD_SBYTE_vi : FLAT_Real_vi <0x11, FLAT_LOAD_SBYTE>;
487 def FLAT_LOAD_USHORT_vi : FLAT_Real_vi <0x12, FLAT_LOAD_USHORT>;
488 def FLAT_LOAD_SSHORT_vi : FLAT_Real_vi <0x13, FLAT_LOAD_SSHORT>;
489 def FLAT_LOAD_DWORD_vi : FLAT_Real_vi <0x14, FLAT_LOAD_DWORD>;
490 def FLAT_LOAD_DWORDX2_vi : FLAT_Real_vi <0x15, FLAT_LOAD_DWORDX2>;
491 def FLAT_LOAD_DWORDX4_vi : FLAT_Real_vi <0x17, FLAT_LOAD_DWORDX4>;
492 def FLAT_LOAD_DWORDX3_vi : FLAT_Real_vi <0x16, FLAT_LOAD_DWORDX3>;
494 def FLAT_STORE_BYTE_vi : FLAT_Real_vi <0x18, FLAT_STORE_BYTE>;
495 def FLAT_STORE_SHORT_vi : FLAT_Real_vi <0x1a, FLAT_STORE_SHORT>;
496 def FLAT_STORE_DWORD_vi : FLAT_Real_vi <0x1c, FLAT_STORE_DWORD>;
497 def FLAT_STORE_DWORDX2_vi : FLAT_Real_vi <0x1d, FLAT_STORE_DWORDX2>;
498 def FLAT_STORE_DWORDX4_vi : FLAT_Real_vi <0x1f, FLAT_STORE_DWORDX4>;
499 def FLAT_STORE_DWORDX3_vi : FLAT_Real_vi <0x1e, FLAT_STORE_DWORDX3>;
// Instantiates VI real encodings for both the no-return and _RTN atomic
// pseudos at the same opcode.
501 multiclass FLAT_Real_Atomics_vi <bits<7> op, FLAT_Pseudo ps> {
502 def _vi : FLAT_Real_vi<op, !cast<FLAT_Pseudo>(ps.PseudoInstr)>;
503 def _RTN_vi : FLAT_Real_vi<op, !cast<FLAT_Pseudo>(ps.PseudoInstr # "_RTN")>;
// VI opcodes for the FLAT atomics (no-return and _RTN pairs).
506 defm FLAT_ATOMIC_SWAP : FLAT_Real_Atomics_vi <0x40, FLAT_ATOMIC_SWAP>;
507 defm FLAT_ATOMIC_CMPSWAP : FLAT_Real_Atomics_vi <0x41, FLAT_ATOMIC_CMPSWAP>;
508 defm FLAT_ATOMIC_ADD : FLAT_Real_Atomics_vi <0x42, FLAT_ATOMIC_ADD>;
509 defm FLAT_ATOMIC_SUB : FLAT_Real_Atomics_vi <0x43, FLAT_ATOMIC_SUB>;
510 defm FLAT_ATOMIC_SMIN : FLAT_Real_Atomics_vi <0x44, FLAT_ATOMIC_SMIN>;
511 defm FLAT_ATOMIC_UMIN : FLAT_Real_Atomics_vi <0x45, FLAT_ATOMIC_UMIN>;
512 defm FLAT_ATOMIC_SMAX : FLAT_Real_Atomics_vi <0x46, FLAT_ATOMIC_SMAX>;
513 defm FLAT_ATOMIC_UMAX : FLAT_Real_Atomics_vi <0x47, FLAT_ATOMIC_UMAX>;
514 defm FLAT_ATOMIC_AND : FLAT_Real_Atomics_vi <0x48, FLAT_ATOMIC_AND>;
515 defm FLAT_ATOMIC_OR : FLAT_Real_Atomics_vi <0x49, FLAT_ATOMIC_OR>;
516 defm FLAT_ATOMIC_XOR : FLAT_Real_Atomics_vi <0x4a, FLAT_ATOMIC_XOR>;
517 defm FLAT_ATOMIC_INC : FLAT_Real_Atomics_vi <0x4b, FLAT_ATOMIC_INC>;
518 defm FLAT_ATOMIC_DEC : FLAT_Real_Atomics_vi <0x4c, FLAT_ATOMIC_DEC>;
519 defm FLAT_ATOMIC_SWAP_X2 : FLAT_Real_Atomics_vi <0x60, FLAT_ATOMIC_SWAP_X2>;
520 defm FLAT_ATOMIC_CMPSWAP_X2 : FLAT_Real_Atomics_vi <0x61, FLAT_ATOMIC_CMPSWAP_X2>;
521 defm FLAT_ATOMIC_ADD_X2 : FLAT_Real_Atomics_vi <0x62, FLAT_ATOMIC_ADD_X2>;
522 defm FLAT_ATOMIC_SUB_X2 : FLAT_Real_Atomics_vi <0x63, FLAT_ATOMIC_SUB_X2>;
523 defm FLAT_ATOMIC_SMIN_X2 : FLAT_Real_Atomics_vi <0x64, FLAT_ATOMIC_SMIN_X2>;
524 defm FLAT_ATOMIC_UMIN_X2 : FLAT_Real_Atomics_vi <0x65, FLAT_ATOMIC_UMIN_X2>;
525 defm FLAT_ATOMIC_SMAX_X2 : FLAT_Real_Atomics_vi <0x66, FLAT_ATOMIC_SMAX_X2>;
526 defm FLAT_ATOMIC_UMAX_X2 : FLAT_Real_Atomics_vi <0x67, FLAT_ATOMIC_UMAX_X2>;
527 defm FLAT_ATOMIC_AND_X2 : FLAT_Real_Atomics_vi <0x68, FLAT_ATOMIC_AND_X2>;
528 defm FLAT_ATOMIC_OR_X2 : FLAT_Real_Atomics_vi <0x69, FLAT_ATOMIC_OR_X2>;
529 defm FLAT_ATOMIC_XOR_X2 : FLAT_Real_Atomics_vi <0x6a, FLAT_ATOMIC_XOR_X2>;
530 defm FLAT_ATOMIC_INC_X2 : FLAT_Real_Atomics_vi <0x6b, FLAT_ATOMIC_INC_X2>;
531 defm FLAT_ATOMIC_DEC_X2 : FLAT_Real_Atomics_vi <0x6c, FLAT_ATOMIC_DEC_X2>;