//===-- AMDGPUInstructions.td - Common instruction defs ---*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains instruction defs that are common to all hw codegen
// targets.
//
//===----------------------------------------------------------------------===//

class AMDGPUInst <dag outs, dag ins, string asm = "",
  list<dag> pattern = []> : Instruction {
  field bit isRegisterLoad = 0;
  field bit isRegisterStore = 0;

  let Namespace = "AMDGPU";
  let OutOperandList = outs;
  let InOperandList = ins;
  let AsmString = asm;
  let Pattern = pattern;
  let Itinerary = NullALU;

  // SoftFail is a field the disassembler can use to provide a way for
  // instructions to not match without killing the whole decode process. It is
  // mainly used for ARM, but Tablegen expects this field to exist or it fails
  // to build the decode table.
  field bits<64> SoftFail = 0;

  let DecoderNamespace = Namespace;

  let TSFlags{63} = isRegisterLoad;
  let TSFlags{62} = isRegisterStore;
}

class AMDGPUShaderInst <dag outs, dag ins, string asm = "",
  list<dag> pattern = []> : AMDGPUInst<outs, ins, asm, pattern> {

  field bits<32> Inst = 0xffffffff;
}

//===---------------------------------------------------------------------===//
// ILFormat: format for codegen-only pseudo instructions
//===---------------------------------------------------------------------===//

class ILFormat<dag outs, dag ins, string asmstr, list<dag> pattern>
: Instruction {

  let Namespace = "AMDGPU";
  dag OutOperandList = outs;
  dag InOperandList = ins;
  let Pattern = pattern;
  let AsmString = !strconcat(asmstr, "\n");
  let Itinerary = NullALU;
  bit hasZeroOpFlag = 0;
  let hasSideEffects = 0;
  let isCodeGenOnly = 1;
}

def TruePredicate : Predicate<"true">;

// Exists to help track down where SubtargetPredicate isn't set rather
// than letting tablegen crash with an unhelpful error.
def InvalidPred : Predicate<"predicate not set on instruction or pattern">;

class PredicateControl {
  Predicate SubtargetPredicate = InvalidPred;
  list<Predicate> AssemblerPredicates = [];
  Predicate AssemblerPredicate = TruePredicate;
  list<Predicate> OtherPredicates = [];
  list<Predicate> Predicates = !listconcat([SubtargetPredicate,
                                            AssemblerPredicate],
                                           AssemblerPredicates,
                                           OtherPredicates);
}

class AMDGPUPat<dag pattern, dag result> : Pat<pattern, result>,
      PredicateControl;
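
// Illustrative only: a pattern inherits PredicateControl through AMDGPUPat,
// so its subtarget predicate can be set with a `let`. The predicate and
// instruction names below are placeholders; real uses live in the
// per-generation .td files.
//
//   let SubtargetPredicate = SomeSubtargetPredicate in
//   def : AMDGPUPat<(fadd f32:$a, f32:$b), (SOME_ADD_F32 $a, $b)>;
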
def FP16Denormals : Predicate<"Subtarget->hasFP16Denormals()">;
def FP32Denormals : Predicate<"Subtarget->hasFP32Denormals()">;
def FP64Denormals : Predicate<"Subtarget->hasFP64Denormals()">;
def NoFP16Denormals : Predicate<"!Subtarget->hasFP16Denormals()">;
def NoFP32Denormals : Predicate<"!Subtarget->hasFP32Denormals()">;
def NoFP64Denormals : Predicate<"!Subtarget->hasFP64Denormals()">;
def UnsafeFPMath : Predicate<"TM.Options.UnsafeFPMath">;
def FMA : Predicate<"Subtarget->hasFMA()">;

def InstFlag : OperandWithDefaultOps <i32, (ops (i32 0))>;

def u16ImmTarget : AsmOperandClass {
  let Name = "U16Imm";
  let RenderMethod = "addImmOperands";
}

def s16ImmTarget : AsmOperandClass {
  let Name = "S16Imm";
  let RenderMethod = "addImmOperands";
}

let OperandType = "OPERAND_IMMEDIATE" in {

def u32imm : Operand<i32> {
  let PrintMethod = "printU32ImmOperand";
}

def u16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
  let ParserMatchClass = u16ImmTarget;
}

def s16imm : Operand<i16> {
  let PrintMethod = "printU16ImmOperand";
  let ParserMatchClass = s16ImmTarget;
}

def u8imm : Operand<i8> {
  let PrintMethod = "printU8ImmOperand";
}

} // End OperandType = "OPERAND_IMMEDIATE"

//===--------------------------------------------------------------------===//
// Custom Operands
//===--------------------------------------------------------------------===//
def brtarget   : Operand<OtherVT>;

//===----------------------------------------------------------------------===//
// Misc. PatFrags
//===----------------------------------------------------------------------===//

class HasOneUseUnaryOp<SDPatternOperator op> : PatFrag<
  (ops node:$src0),
  (op $src0),
  [{ return N->hasOneUse(); }]
>;

class HasOneUseBinOp<SDPatternOperator op> : PatFrag<
  (ops node:$src0, node:$src1),
  (op $src0, $src1),
  [{ return N->hasOneUse(); }]
>;

class HasOneUseTernaryOp<SDPatternOperator op> : PatFrag<
  (ops node:$src0, node:$src1, node:$src2),
  (op $src0, $src1, $src2),
  [{ return N->hasOneUse(); }]
>;

let Properties = [SDNPCommutative, SDNPAssociative] in {
def smax_oneuse : HasOneUseBinOp<smax>;
def smin_oneuse : HasOneUseBinOp<smin>;
def umax_oneuse : HasOneUseBinOp<umax>;
def umin_oneuse : HasOneUseBinOp<umin>;

def fminnum_oneuse : HasOneUseBinOp<fminnum>;
def fmaxnum_oneuse : HasOneUseBinOp<fmaxnum>;

def fminnum_ieee_oneuse : HasOneUseBinOp<fminnum_ieee>;
def fmaxnum_ieee_oneuse : HasOneUseBinOp<fmaxnum_ieee>;

def and_oneuse : HasOneUseBinOp<and>;
def or_oneuse : HasOneUseBinOp<or>;
def xor_oneuse : HasOneUseBinOp<xor>;
} // Properties = [SDNPCommutative, SDNPAssociative]

def not_oneuse : HasOneUseUnaryOp<not>;

def add_oneuse : HasOneUseBinOp<add>;
def sub_oneuse : HasOneUseBinOp<sub>;

def srl_oneuse : HasOneUseBinOp<srl>;
def shl_oneuse : HasOneUseBinOp<shl>;

def select_oneuse : HasOneUseTernaryOp<select>;

def AMDGPUmul_u24_oneuse : HasOneUseBinOp<AMDGPUmul_u24>;
def AMDGPUmul_i24_oneuse : HasOneUseBinOp<AMDGPUmul_i24>;
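
// Illustrative only: a *_oneuse fragment matches the same node as the plain
// operator, but only when the interior node has a single use, so folding it
// into a larger instruction cannot duplicate work. Placeholder instruction
// name below:
//
//   def : AMDGPUPat<(add (AMDGPUmul_u24_oneuse i32:$a, i32:$b), i32:$c),
//                   (SOME_MAD_U24_INST $a, $b, $c)>;
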
def srl_16 : PatFrag<
  (ops node:$src0), (srl_oneuse node:$src0, (i32 16))
>;

def hi_i16_elt : PatFrag<
  (ops node:$src0), (i16 (trunc (i32 (srl_16 node:$src0))))
>;

def hi_f16_elt : PatLeaf<
  (vt), [{
  if (N->getOpcode() != ISD::BITCAST)
    return false;
  SDValue Tmp = N->getOperand(0);
  if (Tmp.getOpcode() != ISD::SRL)
    return false;
  if (const auto *RHS = dyn_cast<ConstantSDNode>(Tmp.getOperand(1)))
    return RHS->getZExtValue() == 16;
  return false;
}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for floating-point comparisons
//===----------------------------------------------------------------------===//

def COND_OEQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOEQ || N->get() == ISD::SETEQ;}]
>;

def COND_ONE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETONE || N->get() == ISD::SETNE;}]
>;

def COND_OGT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGT || N->get() == ISD::SETGT;}]
>;

def COND_OGE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOGE || N->get() == ISD::SETGE;}]
>;

def COND_OLT : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLT || N->get() == ISD::SETLT;}]
>;

def COND_OLE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETOLE || N->get() == ISD::SETLE;}]
>;

def COND_O : PatLeaf <(cond), [{return N->get() == ISD::SETO;}]>;
def COND_UO : PatLeaf <(cond), [{return N->get() == ISD::SETUO;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for unsigned / unordered comparisons
//===----------------------------------------------------------------------===//

def COND_UEQ : PatLeaf <(cond), [{return N->get() == ISD::SETUEQ;}]>;
def COND_UNE : PatLeaf <(cond), [{return N->get() == ISD::SETUNE;}]>;
def COND_UGT : PatLeaf <(cond), [{return N->get() == ISD::SETUGT;}]>;
def COND_UGE : PatLeaf <(cond), [{return N->get() == ISD::SETUGE;}]>;
def COND_ULT : PatLeaf <(cond), [{return N->get() == ISD::SETULT;}]>;
def COND_ULE : PatLeaf <(cond), [{return N->get() == ISD::SETULE;}]>;

// XXX - For some reason the R600 version prefers the unordered form of
// "not equal", so accept both SETUNE and SETNE here.
def COND_UNE_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETUNE || N->get() == ISD::SETNE;}]
>;

//===----------------------------------------------------------------------===//
// PatLeafs for signed comparisons
//===----------------------------------------------------------------------===//

def COND_SGT : PatLeaf <(cond), [{return N->get() == ISD::SETGT;}]>;
def COND_SGE : PatLeaf <(cond), [{return N->get() == ISD::SETGE;}]>;
def COND_SLT : PatLeaf <(cond), [{return N->get() == ISD::SETLT;}]>;
def COND_SLE : PatLeaf <(cond), [{return N->get() == ISD::SETLE;}]>;

//===----------------------------------------------------------------------===//
// PatLeafs for integer equality
//===----------------------------------------------------------------------===//

def COND_EQ : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETEQ || N->get() == ISD::SETUEQ;}]
>;

def COND_NE : PatLeaf <
  (cond),
  [{return N->get() == ISD::SETNE || N->get() == ISD::SETUNE;}]
>;

// Matches no condition code at all; used as a placeholder operand.
def COND_NULL : PatLeaf <
  (cond),
  [{(void)N; return false;}]
>;
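
// Illustrative only: condition-code leaves appear as the last operand of
// setcc/selectcc-style patterns (placeholder instruction name):
//
//   def : AMDGPUPat<(i32 (setcc f32:$a, f32:$b, COND_OEQ)),
//                   (SOME_SETEQ_INST $a, $b)>;
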
//===----------------------------------------------------------------------===//
// PatLeafs for Texture Constants
//===----------------------------------------------------------------------===//

def TEX_ARRAY : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return TType == 9 || TType == 10 || TType == 16;
  }]
>;

def TEX_RECT : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return TType == 5;
  }]
>;

def TEX_SHADOW : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return (TType >= 6 && TType <= 8) || TType == 13;
  }]
>;

def TEX_SHADOW_ARRAY : PatLeaf<
  (imm),
  [{uint32_t TType = (uint32_t)N->getZExtValue();
    return TType == 11 || TType == 12 || TType == 17;
  }]
>;

//===----------------------------------------------------------------------===//
// Load/Store Pattern Fragments
//===----------------------------------------------------------------------===//

class Aligned8Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAlignment() % 8 == 0;
}]>;

class Aligned16Bytes <dag ops, dag frag> : PatFrag <ops, frag, [{
  return cast<MemSDNode>(N)->getAlignment() >= 16;
}]>;

class LoadFrag <SDPatternOperator op> : PatFrag<(ops node:$ptr), (op node:$ptr)>;

class StoreFrag<SDPatternOperator op> : PatFrag <
  (ops node:$value, node:$ptr), (op node:$value, node:$ptr)
>;

// Matches a store of the high 16 bits of a 32-bit value.
class StoreHi16<SDPatternOperator op> : PatFrag <
  (ops node:$value, node:$ptr), (op (srl node:$value, (i32 16)), node:$ptr)
>;

class PrivateAddress : CodePatPred<[{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::PRIVATE_ADDRESS;
}]>;

class ConstantAddress : CodePatPred<[{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
}]>;

class LocalAddress : CodePatPred<[{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

class GlobalAddress : CodePatPred<[{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

class GlobalLoadAddress : CodePatPred<[{
  auto AS = cast<MemSDNode>(N)->getAddressSpace();
  return AS == AMDGPUAS::GLOBAL_ADDRESS || AS == AMDGPUAS::CONSTANT_ADDRESS;
}]>;

class FlatLoadAddress : CodePatPred<[{
  const auto AS = cast<MemSDNode>(N)->getAddressSpace();
  return AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::GLOBAL_ADDRESS ||
         AS == AMDGPUAS::CONSTANT_ADDRESS;
}]>;

class FlatStoreAddress : CodePatPred<[{
  const auto AS = cast<MemSDNode>(N)->getAddressSpace();
  return AS == AMDGPUAS::FLAT_ADDRESS ||
         AS == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

class AZExtLoadBase <SDPatternOperator ld_node>: PatFrag<(ops node:$ptr),
                                                         (ld_node node:$ptr), [{
  LoadSDNode *L = cast<LoadSDNode>(N);
  return L->getExtensionType() == ISD::ZEXTLOAD ||
         L->getExtensionType() == ISD::EXTLOAD;
}]>;

def az_extload : AZExtLoadBase <unindexedload>;

def az_extloadi8 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
}]>;

def az_extloadi16 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
}]>;

def az_extloadi32 : PatFrag<(ops node:$ptr), (az_extload node:$ptr), [{
  return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
}]>;

class PrivateLoad <SDPatternOperator op> : LoadFrag <op>, PrivateAddress;
class PrivateStore <SDPatternOperator op> : StoreFrag <op>, PrivateAddress;

class LocalLoad <SDPatternOperator op> : LoadFrag <op>, LocalAddress;
class LocalStore <SDPatternOperator op> : StoreFrag <op>, LocalAddress;

class GlobalLoad <SDPatternOperator op> : LoadFrag<op>, GlobalLoadAddress;
class GlobalStore <SDPatternOperator op> : StoreFrag<op>, GlobalAddress;

class FlatLoad <SDPatternOperator op> : LoadFrag <op>, FlatLoadAddress;
class FlatStore <SDPatternOperator op> : StoreFrag <op>, FlatStoreAddress;

class ConstantLoad <SDPatternOperator op> : LoadFrag <op>, ConstantAddress;

def load_private : PrivateLoad <load>;
def az_extloadi8_private : PrivateLoad <az_extloadi8>;
def sextloadi8_private : PrivateLoad <sextloadi8>;
def az_extloadi16_private : PrivateLoad <az_extloadi16>;
def sextloadi16_private : PrivateLoad <sextloadi16>;

def store_private : PrivateStore <store>;
def truncstorei8_private : PrivateStore <truncstorei8>;
def truncstorei16_private : PrivateStore <truncstorei16>;
def store_hi16_private : StoreHi16 <truncstorei16>, PrivateAddress;
def truncstorei8_hi16_private : StoreHi16<truncstorei8>, PrivateAddress;

def load_global : GlobalLoad <load>;
def sextloadi8_global : GlobalLoad <sextloadi8>;
def az_extloadi8_global : GlobalLoad <az_extloadi8>;
def sextloadi16_global : GlobalLoad <sextloadi16>;
def az_extloadi16_global : GlobalLoad <az_extloadi16>;
def atomic_load_global : GlobalLoad <atomic_load>;

def store_global : GlobalStore <store>;
def truncstorei8_global : GlobalStore <truncstorei8>;
def truncstorei16_global : GlobalStore <truncstorei16>;
def store_atomic_global : GlobalStore <atomic_store>;
def truncstorei8_hi16_global : StoreHi16 <truncstorei8>, GlobalAddress;
def truncstorei16_hi16_global : StoreHi16 <truncstorei16>, GlobalAddress;

def load_local : LocalLoad <load>;
def az_extloadi8_local : LocalLoad <az_extloadi8>;
def sextloadi8_local : LocalLoad <sextloadi8>;
def az_extloadi16_local : LocalLoad <az_extloadi16>;
def sextloadi16_local : LocalLoad <sextloadi16>;
def atomic_load_32_local : LocalLoad <atomic_load_32>;
def atomic_load_64_local : LocalLoad <atomic_load_64>;

def store_local : LocalStore <store>;
def truncstorei8_local : LocalStore <truncstorei8>;
def truncstorei16_local : LocalStore <truncstorei16>;
def store_local_hi16 : StoreHi16 <truncstorei16>, LocalAddress;
def truncstorei8_local_hi16 : StoreHi16 <truncstorei8>, LocalAddress;
def atomic_store_local : LocalStore <atomic_store>;

def load_align8_local : Aligned8Bytes <
  (ops node:$ptr), (load_local node:$ptr)
>;

def load_align16_local : Aligned16Bytes <
  (ops node:$ptr), (load_local node:$ptr)
>;

def store_align8_local : Aligned8Bytes <
  (ops node:$val, node:$ptr), (store_local node:$val, node:$ptr)
>;

def store_align16_local : Aligned16Bytes <
  (ops node:$val, node:$ptr), (store_local node:$val, node:$ptr)
>;

def load_flat : FlatLoad <load>;
def az_extloadi8_flat : FlatLoad <az_extloadi8>;
def sextloadi8_flat : FlatLoad <sextloadi8>;
def az_extloadi16_flat : FlatLoad <az_extloadi16>;
def sextloadi16_flat : FlatLoad <sextloadi16>;
def atomic_load_flat : FlatLoad <atomic_load>;

def store_flat : FlatStore <store>;
def truncstorei8_flat : FlatStore <truncstorei8>;
def truncstorei16_flat : FlatStore <truncstorei16>;
def atomic_store_flat : FlatStore <atomic_store>;
def truncstorei8_hi16_flat : StoreHi16 <truncstorei8>, FlatStoreAddress;
def truncstorei16_hi16_flat : StoreHi16 <truncstorei16>, FlatStoreAddress;

def constant_load : ConstantLoad <load>;
def sextloadi8_constant : ConstantLoad <sextloadi8>;
def az_extloadi8_constant : ConstantLoad <az_extloadi8>;
def sextloadi16_constant : ConstantLoad <sextloadi16>;
def az_extloadi16_constant : ConstantLoad <az_extloadi16>;
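
// Illustrative only: these fragments let a selection pattern key on the
// operation and its address space at once. The instruction name below is a
// placeholder for a real DS-load definition elsewhere in the backend:
//
//   def : AMDGPUPat<(i32 (load_local i32:$ptr)), (SOME_DS_READ_B32 $ptr)>;
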
class local_binary_atomic_op<SDNode atomic_op> :
  PatFrag<(ops node:$ptr, node:$value),
    (atomic_op node:$ptr, node:$value), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_swap_local : local_binary_atomic_op<atomic_swap>;
def atomic_load_add_local : local_binary_atomic_op<atomic_load_add>;
def atomic_load_sub_local : local_binary_atomic_op<atomic_load_sub>;
def atomic_load_and_local : local_binary_atomic_op<atomic_load_and>;
def atomic_load_or_local : local_binary_atomic_op<atomic_load_or>;
def atomic_load_xor_local : local_binary_atomic_op<atomic_load_xor>;
def atomic_load_nand_local : local_binary_atomic_op<atomic_load_nand>;
def atomic_load_min_local : local_binary_atomic_op<atomic_load_min>;
def atomic_load_max_local : local_binary_atomic_op<atomic_load_max>;
def atomic_load_umin_local : local_binary_atomic_op<atomic_load_umin>;
def atomic_load_umax_local : local_binary_atomic_op<atomic_load_umax>;

def mskor_global : PatFrag<(ops node:$val, node:$ptr),
                            (AMDGPUstore_mskor node:$val, node:$ptr), [{
  return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}]>;

class AtomicCmpSwapLocal <SDNode cmp_swap_node> : PatFrag<
  (ops node:$ptr, node:$cmp, node:$swap),
  (cmp_swap_node node:$ptr, node:$cmp, node:$swap), [{
  AtomicSDNode *AN = cast<AtomicSDNode>(N);
  return AN->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}]>;

def atomic_cmp_swap_local : AtomicCmpSwapLocal <atomic_cmp_swap>;

// Defines three fragments per operation: the bare name matches any global
// atomic, _noret matches only when the result is unused, and _ret only when
// it is used.
multiclass global_binary_atomic_op<SDNode atomic_op> {
  def "" : PatFrag<
        (ops node:$ptr, node:$value),
        (atomic_op node:$ptr, node:$value),
        [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;}]>;

  def _noret : PatFrag<
        (ops node:$ptr, node:$value),
        (atomic_op node:$ptr, node:$value),
        [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (SDValue(N, 0).use_empty());}]>;

  def _ret : PatFrag<
        (ops node:$ptr, node:$value),
        (atomic_op node:$ptr, node:$value),
        [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (!SDValue(N, 0).use_empty());}]>;
}

defm atomic_swap_global : global_binary_atomic_op<atomic_swap>;
defm atomic_add_global : global_binary_atomic_op<atomic_load_add>;
defm atomic_and_global : global_binary_atomic_op<atomic_load_and>;
defm atomic_max_global : global_binary_atomic_op<atomic_load_max>;
defm atomic_min_global : global_binary_atomic_op<atomic_load_min>;
defm atomic_or_global : global_binary_atomic_op<atomic_load_or>;
defm atomic_sub_global : global_binary_atomic_op<atomic_load_sub>;
defm atomic_umax_global : global_binary_atomic_op<atomic_load_umax>;
defm atomic_umin_global : global_binary_atomic_op<atomic_load_umin>;
defm atomic_xor_global : global_binary_atomic_op<atomic_load_xor>;
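
// Illustrative only: the _ret/_noret split lets a target select a returning
// or non-returning encoding (placeholder instruction names):
//
//   def : AMDGPUPat<(atomic_add_global_ret i64:$ptr, i32:$val),
//                   (SOME_ATOMIC_ADD_RTN $ptr, $val)>;
//   def : AMDGPUPat<(atomic_add_global_noret i64:$ptr, i32:$val),
//                   (SOME_ATOMIC_ADD $ptr, $val)>;
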
def AMDGPUatomic_cmp_swap_global : PatFrag<
  (ops node:$ptr, node:$value),
  (AMDGPUatomic_cmp_swap node:$ptr, node:$value)>, GlobalAddress;

def atomic_cmp_swap_global : PatFrag<
  (ops node:$ptr, node:$cmp, node:$value),
  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value)>, GlobalAddress;

def atomic_cmp_swap_global_noret : PatFrag<
  (ops node:$ptr, node:$cmp, node:$value),
  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (SDValue(N, 0).use_empty());}]>;

def atomic_cmp_swap_global_ret : PatFrag<
  (ops node:$ptr, node:$cmp, node:$value),
  (atomic_cmp_swap node:$ptr, node:$cmp, node:$value),
  [{return cast<MemSDNode>(N)->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS && (!SDValue(N, 0).use_empty());}]>;

//===----------------------------------------------------------------------===//
// Misc Pattern Fragments
//===----------------------------------------------------------------------===//

class Constants {
int TWO_PI = 0x40c90fdb;
int PI = 0x40490fdb;
int TWO_PI_INV = 0x3e22f983;
int FP_UINT_MAX_PLUS_1 = 0x4f800000;    // 1 << 32 in floating point encoding
int FP16_ONE = 0x3C00;
int FP16_NEG_ONE = 0xBC00;
int V2FP16_ONE = 0x3C003C00;
int FP32_ONE = 0x3f800000;
int FP32_NEG_ONE = 0xbf800000;
int FP64_ONE = 0x3ff0000000000000;
int FP64_NEG_ONE = 0xbff0000000000000;
}
def CONST : Constants;
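
// Illustrative only: a pattern materializes one of these bit patterns through
// an immediate move before using it, roughly like the following result dag
// (placeholder instruction names):
//
//   (MUL_INST (MOV_IMM_I32 CONST.TWO_PI_INV), $src)
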
def FP_ZERO : PatLeaf <
  (fpimm),
  [{return N->getValueAPF().isZero();}]
>;

def FP_ONE : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(1.0);}]
>;

def FP_HALF : PatLeaf <
  (fpimm),
  [{return N->isExactlyValue(0.5);}]
>;

/* Generic helper patterns for intrinsics */
/* -------------------------------------- */

class POW_Common <AMDGPUInst log_ieee, AMDGPUInst exp_ieee, AMDGPUInst mul>
  : AMDGPUPat <
  (fpow f32:$src0, f32:$src1),
  (exp_ieee (mul f32:$src1, (log_ieee f32:$src0)))
>;

/* Other helper patterns */
/* --------------------- */

/* Extract element pattern */
class Extract_Element <ValueType sub_type, ValueType vec_type, int sub_idx,
                       SubRegIndex sub_reg>
  : AMDGPUPat<
  (sub_type (extractelt vec_type:$src, sub_idx)),
  (EXTRACT_SUBREG $src, sub_reg)
> {
  let SubtargetPredicate = TruePredicate;
}

/* Insert element pattern */
class Insert_Element <ValueType elem_type, ValueType vec_type,
                      int sub_idx, SubRegIndex sub_reg>
  : AMDGPUPat <
  (insertelt vec_type:$vec, elem_type:$elem, sub_idx),
  (INSERT_SUBREG $vec, $elem, sub_reg)
> {
  let SubtargetPredicate = TruePredicate;
}
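
// Illustrative instantiation: targets define one record per element/index
// pair, with subregister indices from the target's register info:
//
//   def : Extract_Element<f32, v2f32, 0, sub0>;
//   def : Insert_Element<f32, v2f32, 1, sub1>;
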
// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
// bitconvert pattern
class BitConvert <ValueType dt, ValueType st, RegisterClass rc> : AMDGPUPat <
  (dt (bitconvert (st rc:$src0))),
  (dt rc:$src0)
>;

// XXX: Convert to new syntax and use COPY_TO_REG, once the DFAPacketizer
// can handle COPY instructions.
class DwordAddrPat<ValueType vt, RegisterClass rc> : AMDGPUPat <
  (vt (AMDGPUdwordaddr (vt rc:$addr))),
  (vt rc:$addr)
>;

multiclass BFIPatterns <Instruction BFI_INT,
                        Instruction LoadImm32,
                        RegisterClass RC64> {
  // Definition from ISA doc:
  // (y & x) | (z & ~x)
  def : AMDGPUPat <
    (or (and i32:$y, i32:$x), (and i32:$z, (not i32:$x))),
    (BFI_INT $x, $y, $z)
  >;

  // 64-bit version
  def : AMDGPUPat <
    (or (and i64:$y, i64:$x), (and i64:$z, (not i64:$x))),
    (REG_SEQUENCE RC64,
      (BFI_INT (i32 (EXTRACT_SUBREG $x, sub0)),
               (i32 (EXTRACT_SUBREG $y, sub0)),
               (i32 (EXTRACT_SUBREG $z, sub0))), sub0,
      (BFI_INT (i32 (EXTRACT_SUBREG $x, sub1)),
               (i32 (EXTRACT_SUBREG $y, sub1)),
               (i32 (EXTRACT_SUBREG $z, sub1))), sub1)
  >;

  // SHA-256 Ch function
  // z ^ (x & (y ^ z))
  def : AMDGPUPat <
    (xor i32:$z, (and i32:$x, (xor i32:$y, i32:$z))),
    (BFI_INT $x, $y, $z)
  >;

  // 64-bit version
  def : AMDGPUPat <
    (xor i64:$z, (and i64:$x, (xor i64:$y, i64:$z))),
    (REG_SEQUENCE RC64,
      (BFI_INT (i32 (EXTRACT_SUBREG $x, sub0)),
               (i32 (EXTRACT_SUBREG $y, sub0)),
               (i32 (EXTRACT_SUBREG $z, sub0))), sub0,
      (BFI_INT (i32 (EXTRACT_SUBREG $x, sub1)),
               (i32 (EXTRACT_SUBREG $y, sub1)),
               (i32 (EXTRACT_SUBREG $z, sub1))), sub1)
  >;

  // fcopysign(x, y) keeps the magnitude bits of x and takes the sign bit of y.
  def : AMDGPUPat <
    (fcopysign f32:$src0, f32:$src1),
    (BFI_INT (LoadImm32 (i32 0x7fffffff)), $src0, $src1)
  >;

  def : AMDGPUPat <
    (f32 (fcopysign f32:$src0, f64:$src1)),
    (BFI_INT (LoadImm32 (i32 0x7fffffff)), $src0,
             (i32 (EXTRACT_SUBREG $src1, sub1)))
  >;

  def : AMDGPUPat <
    (f64 (fcopysign f64:$src0, f64:$src1)),
    (REG_SEQUENCE RC64,
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
      (BFI_INT (LoadImm32 (i32 0x7fffffff)),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               (i32 (EXTRACT_SUBREG $src1, sub1))), sub1)
  >;

  def : AMDGPUPat <
    (f64 (fcopysign f64:$src0, f32:$src1)),
    (REG_SEQUENCE RC64,
      (i32 (EXTRACT_SUBREG $src0, sub0)), sub0,
      (BFI_INT (LoadImm32 (i32 0x7fffffff)),
               (i32 (EXTRACT_SUBREG $src0, sub1)),
               $src1), sub1)
  >;
}
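
// Illustrative instantiation, as done for SI elsewhere in the backend
// (the instruction and register-class names are defined there, not here):
//
//   defm : BFIPatterns <V_BFI_B32, S_MOV_B32, SReg_64>;
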
// SHA-256 Ma patterns

// ((x & z) | (y & (x | z))) -> BFI_INT (XOR x, y), z, y
multiclass SHA256MaPattern <Instruction BFI_INT, Instruction XOR,
                            RegisterClass RC64> {
  def : AMDGPUPat <
    (or (and i32:$x, i32:$z), (and i32:$y, (or i32:$x, i32:$z))),
    (BFI_INT (XOR i32:$x, i32:$y), i32:$z, i32:$y)
  >;

  // 64-bit version
  def : AMDGPUPat <
    (or (and i64:$x, i64:$z), (and i64:$y, (or i64:$x, i64:$z))),
    (REG_SEQUENCE RC64,
      (BFI_INT (XOR (i32 (EXTRACT_SUBREG $x, sub0)),
                    (i32 (EXTRACT_SUBREG $y, sub0))),
               (i32 (EXTRACT_SUBREG $z, sub0)),
               (i32 (EXTRACT_SUBREG $y, sub0))), sub0,
      (BFI_INT (XOR (i32 (EXTRACT_SUBREG $x, sub1)),
                    (i32 (EXTRACT_SUBREG $y, sub1))),
               (i32 (EXTRACT_SUBREG $z, sub1)),
               (i32 (EXTRACT_SUBREG $y, sub1))), sub1)
  >;
}

// Bitfield extract patterns

def IMMZeroBasedBitfieldMask : PatLeaf <(imm), [{
  return isMask_32(N->getZExtValue());
}]>;

def IMMPopCount : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(countPopulation(N->getZExtValue()), SDLoc(N),
                                   MVT::i32);
}]>;

multiclass BFEPattern <Instruction UBFE, Instruction SBFE, Instruction MOV> {
  def : AMDGPUPat <
    (i32 (and (i32 (srl i32:$src, i32:$rshift)), IMMZeroBasedBitfieldMask:$mask)),
    (UBFE $src, $rshift, (MOV (i32 (IMMPopCount $mask))))
  >;

  // x & ((1 << y) - 1)
  def : AMDGPUPat <
    (and i32:$src, (add_oneuse (shl_oneuse 1, i32:$width), -1)),
    (UBFE $src, (MOV (i32 0)), $width)
  >;

  // x & ~(-1 << y)
  def : AMDGPUPat <
    (and i32:$src, (xor_oneuse (shl_oneuse -1, i32:$width), -1)),
    (UBFE $src, (MOV (i32 0)), $width)
  >;

  // x & (-1 >> (bitwidth - y))
  def : AMDGPUPat <
    (and i32:$src, (srl_oneuse -1, (sub 32, i32:$width))),
    (UBFE $src, (MOV (i32 0)), $width)
  >;

  // x << (bitwidth - y) >> (bitwidth - y)
  def : AMDGPUPat <
    (srl (shl_oneuse i32:$src, (sub 32, i32:$width)), (sub 32, i32:$width)),
    (UBFE $src, (MOV (i32 0)), $width)
  >;

  def : AMDGPUPat <
    (sra (shl_oneuse i32:$src, (sub 32, i32:$width)), (sub 32, i32:$width)),
    (SBFE $src, (MOV (i32 0)), $width)
  >;
}
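
// Illustrative instantiation (SI instruction names, defined elsewhere):
//
//   defm : BFEPattern <V_BFE_U32, V_BFE_I32, S_MOV_B32>;
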
class ROTRPattern <Instruction BIT_ALIGN> : AMDGPUPat <
  (rotr i32:$src0, i32:$src1),
  (BIT_ALIGN $src0, $src0, $src1)
>;

multiclass IntMed3Pat<Instruction med3Inst,
                      SDPatternOperator min,
                      SDPatternOperator max,
                      SDPatternOperator min_oneuse,
                      SDPatternOperator max_oneuse,
                      ValueType vt = i32> {

  // This matches 16 permutations of
  // min(max(a, b), max(min(a, b), c))
  def : AMDGPUPat <
    (min (max_oneuse vt:$src0, vt:$src1),
         (max_oneuse (min_oneuse vt:$src0, vt:$src1), vt:$src2)),
    (med3Inst vt:$src0, vt:$src1, vt:$src2)
  >;

  // This matches 16 permutations of
  // max(min(x, y), min(max(x, y), z))
  def : AMDGPUPat <
    (max (min_oneuse vt:$src0, vt:$src1),
         (min_oneuse (max_oneuse vt:$src0, vt:$src1), vt:$src2)),
    (med3Inst $src0, $src1, $src2)
  >;
}
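
// Illustrative instantiation (SI med3 instructions, defined elsewhere):
//
//   defm : IntMed3Pat<V_MED3_I32, smin, smax, smin_oneuse, smax_oneuse>;
//   defm : IntMed3Pat<V_MED3_U32, umin, umax, umin_oneuse, umax_oneuse>;
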
// Special conversion patterns

def cvt_rpi_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor (fadd $src, FP_HALF))),
  [{ (void) N; return TM.Options.NoNaNsFPMath; }]
>;

def cvt_flr_i32_f32 : PatFrag <
  (ops node:$src),
  (fp_to_sint (ffloor $src)),
  [{ (void)N; return TM.Options.NoNaNsFPMath; }]
>;

let AddedComplexity = 2 in {
class IMad24Pat<Instruction Inst, bit HasClamp = 0> : AMDGPUPat <
  (add (AMDGPUmul_i24 i32:$src0, i32:$src1), i32:$src2),
  !if(HasClamp, (Inst $src0, $src1, $src2, (i1 0)),
                (Inst $src0, $src1, $src2))
>;

class UMad24Pat<Instruction Inst, bit HasClamp = 0> : AMDGPUPat <
  (add (AMDGPUmul_u24 i32:$src0, i32:$src1), i32:$src2),
  !if(HasClamp, (Inst $src0, $src1, $src2, (i1 0)),
                (Inst $src0, $src1, $src2))
>;
} // AddedComplexity.

class RcpPat<Instruction RcpInst, ValueType vt> : AMDGPUPat <
  (fdiv FP_ONE, vt:$src),
  (RcpInst $src)
>;

class RsqPat<Instruction RsqInst, ValueType vt> : AMDGPUPat <
  (AMDGPUrcp (fsqrt vt:$src)),
  (RsqInst $src)
>;
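
// Illustrative instantiation (the hardware reciprocal opcodes are defined by
// the individual generations):
//
//   def : RcpPat<V_RCP_F32_e32, f32>;
//   def : RsqPat<V_RSQ_F32_e32, f32>;
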
// Instructions which select to the same v_min_f*
def fminnum_like : PatFrags<(ops node:$src0, node:$src1),
  [(fminnum_ieee node:$src0, node:$src1),
   (fminnum node:$src0, node:$src1)]
>;

// Instructions which select to the same v_max_f*
def fmaxnum_like : PatFrags<(ops node:$src0, node:$src1),
  [(fmaxnum_ieee node:$src0, node:$src1),
   (fmaxnum node:$src0, node:$src1)]
>;

def fminnum_like_oneuse : PatFrags<(ops node:$src0, node:$src1),
  [(fminnum_ieee_oneuse node:$src0, node:$src1),
   (fminnum_oneuse node:$src0, node:$src1)]
>;

def fmaxnum_like_oneuse : PatFrags<(ops node:$src0, node:$src1),
  [(fmaxnum_ieee_oneuse node:$src0, node:$src1),
   (fmaxnum_oneuse node:$src0, node:$src1)]
>;
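
// Illustrative only: a single pattern then covers both the IEEE and non-IEEE
// forms of the node (placeholder instruction name):
//
//   def : AMDGPUPat<(fminnum_like f32:$a, f32:$b), (SOME_MIN_F32 $a, $b)>;
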