1 //===-- SystemZOperators.td - SystemZ-specific operators ------*- tblgen-*-===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 //===----------------------------------------------------------------------===//
11 //===----------------------------------------------------------------------===//
// Type profiles (SDT_*): each SDTypeProfile<NumResults, NumOperands,
// [constraints]> declares the result/operand typing rules for a SystemZ
// SelectionDAG node used by the SDNode definitions further down.
// NOTE(review): several entries below end on a trailing comma with no closing
// "]>;" in this view -- their continuation lines appear to have been lost in
// extraction. Restore them from the original file; do not reconstruct by hand.
12 def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i64>,
14 def SDT_CallSeqEnd : SDCallSeqEnd<[SDTCisVT<0, i64>,
16 def SDT_ZCall : SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>;
17 def SDT_ZCmp : SDTypeProfile<1, 2,
20 def SDT_ZICmp : SDTypeProfile<1, 3,
24 def SDT_ZBRCCMask : SDTypeProfile<0, 4,
29 def SDT_ZSelectCCMask : SDTypeProfile<1, 5,
35 def SDT_ZWrapPtr : SDTypeProfile<1, 1,
38 def SDT_ZWrapOffset : SDTypeProfile<1, 2,
42 def SDT_ZAdjDynAlloc : SDTypeProfile<1, 0, [SDTCisVT<0, i64>]>;
// Profiles producing/consuming "untyped" values model 128-bit GR128 register
// pairs (e.g. the double-width results of SMUL_LOHI / SDIVREM below).
43 def SDT_ZGR128Binary : SDTypeProfile<1, 2,
44 [SDTCisVT<0, untyped>,
47 def SDT_ZBinaryWithFlags : SDTypeProfile<2, 2,
52 def SDT_ZBinaryWithCarry : SDTypeProfile<2, 3,
58 def SDT_ZAtomicLoadBinaryW : SDTypeProfile<1, 5,
65 def SDT_ZAtomicCmpSwapW : SDTypeProfile<2, 6,
74 def SDT_ZAtomicCmpSwap : SDTypeProfile<2, 3,
80 def SDT_ZAtomicLoad128 : SDTypeProfile<1, 1,
81 [SDTCisVT<0, untyped>,
83 def SDT_ZAtomicStore128 : SDTypeProfile<0, 2,
84 [SDTCisVT<0, untyped>,
86 def SDT_ZAtomicCmpSwap128 : SDTypeProfile<2, 3,
87 [SDTCisVT<0, untyped>,
91 SDTCisVT<4, untyped>]>;
// Profiles for the MVC/CLC-style memory-to-memory and string operations.
92 def SDT_ZMemMemLength : SDTypeProfile<0, 3,
96 def SDT_ZMemMemLengthCC : SDTypeProfile<1, 3,
101 def SDT_ZMemMemLoop : SDTypeProfile<0, 4,
106 def SDT_ZMemMemLoopCC : SDTypeProfile<1, 4,
112 def SDT_ZString : SDTypeProfile<1, 3,
117 def SDT_ZStringCC : SDTypeProfile<2, 3,
123 def SDT_ZIPM : SDTypeProfile<1, 1,
126 def SDT_ZPrefetch : SDTypeProfile<0, 2,
129 def SDT_ZTBegin : SDTypeProfile<1, 2,
133 def SDT_ZTEnd : SDTypeProfile<1, 0,
// Vector-operation profiles; the "CC" variants have an extra i32 result for
// the condition-code value produced by the instruction.
135 def SDT_ZInsertVectorElt : SDTypeProfile<1, 3,
139 def SDT_ZExtractVectorElt : SDTypeProfile<1, 2,
142 def SDT_ZReplicate : SDTypeProfile<1, 1,
144 def SDT_ZVecUnaryConv : SDTypeProfile<1, 1,
147 def SDT_ZVecUnary : SDTypeProfile<1, 1,
149 SDTCisSameAs<0, 1>]>;
150 def SDT_ZVecUnaryCC : SDTypeProfile<2, 1,
153 SDTCisSameAs<0, 2>]>;
154 def SDT_ZVecBinary : SDTypeProfile<1, 2,
157 SDTCisSameAs<0, 2>]>;
158 def SDT_ZVecBinaryCC : SDTypeProfile<2, 2,
162 SDTCisSameAs<0, 2>]>;
163 def SDT_ZVecBinaryInt : SDTypeProfile<1, 2,
167 def SDT_ZVecBinaryConv : SDTypeProfile<1, 2,
170 SDTCisSameAs<1, 2>]>;
171 def SDT_ZVecBinaryConvCC : SDTypeProfile<2, 2,
175 SDTCisSameAs<2, 3>]>;
176 def SDT_ZVecBinaryConvIntCC : SDTypeProfile<2, 2,
181 def SDT_ZRotateMask : SDTypeProfile<1, 2,
185 def SDT_ZJoinDwords : SDTypeProfile<1, 2,
189 def SDT_ZVecTernary : SDTypeProfile<1, 3,
193 SDTCisSameAs<0, 3>]>;
194 def SDT_ZVecTernaryConvCC : SDTypeProfile<2, 3,
199 SDTCisSameAs<0, 4>]>;
200 def SDT_ZVecTernaryInt : SDTypeProfile<1, 3,
205 def SDT_ZVecTernaryIntCC : SDTypeProfile<2, 3,
211 def SDT_ZVecQuaternaryInt : SDTypeProfile<1, 4,
217 def SDT_ZVecQuaternaryIntCC : SDTypeProfile<2, 4,
224 def SDT_ZTest : SDTypeProfile<1, 2,
228 //===----------------------------------------------------------------------===//
230 //===----------------------------------------------------------------------===//
232 // These are target-independent nodes, but have target-specific formats.
// Target-node definitions. The first few wrap target-independent ISD opcodes
// with SystemZ-specific type profiles; the z_* defs bind SystemZISD opcodes
// (declared in SystemZISelLowering.h) so instruction patterns can match them.
// NOTE(review): some defs below end on a trailing comma -- their closing
// lines appear to be missing from this extraction.
233 def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
234 [SDNPHasChain, SDNPSideEffect, SDNPOutGlue]>;
235 def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
236 [SDNPHasChain, SDNPSideEffect, SDNPOptInGlue,
238 def global_offset_table : SDNode<"ISD::GLOBAL_OFFSET_TABLE", SDTPtrLeaf>;
240 // Nodes for SystemZISD::*. See SystemZISelLowering.h for more details.
241 def z_retflag : SDNode<"SystemZISD::RET_FLAG", SDTNone,
242 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
243 def z_call : SDNode<"SystemZISD::CALL", SDT_ZCall,
244 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
246 def z_sibcall : SDNode<"SystemZISD::SIBCALL", SDT_ZCall,
247 [SDNPHasChain, SDNPOutGlue, SDNPOptInGlue,
249 def z_tls_gdcall : SDNode<"SystemZISD::TLS_GDCALL", SDT_ZCall,
250 [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
252 def z_tls_ldcall : SDNode<"SystemZISD::TLS_LDCALL", SDT_ZCall,
253 [SDNPHasChain, SDNPInGlue, SDNPOutGlue,
255 def z_pcrel_wrapper : SDNode<"SystemZISD::PCREL_WRAPPER", SDT_ZWrapPtr, []>;
256 def z_pcrel_offset : SDNode<"SystemZISD::PCREL_OFFSET",
257 SDT_ZWrapOffset, []>;
258 def z_iabs : SDNode<"SystemZISD::IABS", SDTIntUnaryOp, []>;
// Comparison / test nodes; the *_1 variants lower down take an explicit CC
// operand that the PatFrag wrappers (z_br_ccmask etc.) supply.
259 def z_icmp : SDNode<"SystemZISD::ICMP", SDT_ZICmp>;
260 def z_fcmp : SDNode<"SystemZISD::FCMP", SDT_ZCmp>;
261 def z_strict_fcmp : SDNode<"SystemZISD::STRICT_FCMP", SDT_ZCmp,
263 def z_strict_fcmps : SDNode<"SystemZISD::STRICT_FCMPS", SDT_ZCmp,
265 def z_tm : SDNode<"SystemZISD::TM", SDT_ZICmp>;
266 def z_br_ccmask_1 : SDNode<"SystemZISD::BR_CCMASK", SDT_ZBRCCMask,
268 def z_select_ccmask_1 : SDNode<"SystemZISD::SELECT_CCMASK",
270 def z_ipm_1 : SDNode<"SystemZISD::IPM", SDT_ZIPM>;
271 def z_adjdynalloc : SDNode<"SystemZISD::ADJDYNALLOC", SDT_ZAdjDynAlloc>;
272 def z_popcnt : SDNode<"SystemZISD::POPCNT", SDTIntUnaryOp>;
// 128-bit (GR128 register pair) multiply/divide results.
273 def z_smul_lohi : SDNode<"SystemZISD::SMUL_LOHI", SDT_ZGR128Binary>;
274 def z_umul_lohi : SDNode<"SystemZISD::UMUL_LOHI", SDT_ZGR128Binary>;
275 def z_sdivrem : SDNode<"SystemZISD::SDIVREM", SDT_ZGR128Binary>;
276 def z_udivrem : SDNode<"SystemZISD::UDIVREM", SDT_ZGR128Binary>;
// Overflow- and carry-producing arithmetic (second result is the flag/CC).
277 def z_saddo : SDNode<"SystemZISD::SADDO", SDT_ZBinaryWithFlags>;
278 def z_ssubo : SDNode<"SystemZISD::SSUBO", SDT_ZBinaryWithFlags>;
279 def z_uaddo : SDNode<"SystemZISD::UADDO", SDT_ZBinaryWithFlags>;
280 def z_usubo : SDNode<"SystemZISD::USUBO", SDT_ZBinaryWithFlags>;
281 def z_addcarry_1 : SDNode<"SystemZISD::ADDCARRY", SDT_ZBinaryWithCarry>;
282 def z_subcarry_1 : SDNode<"SystemZISD::SUBCARRY", SDT_ZBinaryWithCarry>;
284 def z_membarrier : SDNode<"SystemZISD::MEMBARRIER", SDTNone,
285 [SDNPHasChain, SDNPSideEffect]>;
// Byte-swapping and element-swapping memory accesses (LRV/STRV/VLER/VSTER).
287 def z_loadbswap : SDNode<"SystemZISD::LRV", SDTLoad,
288 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
289 def z_storebswap : SDNode<"SystemZISD::STRV", SDTStore,
290 [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
291 def z_loadeswap : SDNode<"SystemZISD::VLER", SDTLoad,
292 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
293 def z_storeeswap : SDNode<"SystemZISD::VSTER", SDTStore,
294 [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
296 def z_tdc : SDNode<"SystemZISD::TDC", SDT_ZTest>;
// Vector nodes: element insert/extract, replicate/permute/merge/pack/unpack,
// shifts by scalar, integer and floating-point vector compares (plus their
// strict, chained variants), and the string-search (VFAE/VFEE/VSTRC...) nodes
// whose _CC forms also produce a condition code.
298 // Defined because the index is an i32 rather than a pointer.
299 def z_vector_insert : SDNode<"ISD::INSERT_VECTOR_ELT",
300 SDT_ZInsertVectorElt>;
301 def z_vector_extract : SDNode<"ISD::EXTRACT_VECTOR_ELT",
302 SDT_ZExtractVectorElt>;
303 def z_byte_mask : SDNode<"SystemZISD::BYTE_MASK", SDT_ZReplicate>;
304 def z_rotate_mask : SDNode<"SystemZISD::ROTATE_MASK", SDT_ZRotateMask>;
305 def z_replicate : SDNode<"SystemZISD::REPLICATE", SDT_ZReplicate>;
306 def z_join_dwords : SDNode<"SystemZISD::JOIN_DWORDS", SDT_ZJoinDwords>;
307 def z_splat : SDNode<"SystemZISD::SPLAT", SDT_ZVecBinaryInt>;
308 def z_merge_high : SDNode<"SystemZISD::MERGE_HIGH", SDT_ZVecBinary>;
309 def z_merge_low : SDNode<"SystemZISD::MERGE_LOW", SDT_ZVecBinary>;
310 def z_shl_double : SDNode<"SystemZISD::SHL_DOUBLE", SDT_ZVecTernaryInt>;
311 def z_permute_dwords : SDNode<"SystemZISD::PERMUTE_DWORDS",
313 def z_permute : SDNode<"SystemZISD::PERMUTE", SDT_ZVecTernary>;
314 def z_pack : SDNode<"SystemZISD::PACK", SDT_ZVecBinaryConv>;
315 def z_packs_cc : SDNode<"SystemZISD::PACKS_CC", SDT_ZVecBinaryConvCC>;
316 def z_packls_cc : SDNode<"SystemZISD::PACKLS_CC", SDT_ZVecBinaryConvCC>;
317 def z_unpack_high : SDNode<"SystemZISD::UNPACK_HIGH", SDT_ZVecUnaryConv>;
318 def z_unpackl_high : SDNode<"SystemZISD::UNPACKL_HIGH", SDT_ZVecUnaryConv>;
319 def z_unpack_low : SDNode<"SystemZISD::UNPACK_LOW", SDT_ZVecUnaryConv>;
320 def z_unpackl_low : SDNode<"SystemZISD::UNPACKL_LOW", SDT_ZVecUnaryConv>;
// NOTE(review): the three *_BY_SCALAR defs below end mid-definition; their
// continuation lines are missing from this extraction.
321 def z_vshl_by_scalar : SDNode<"SystemZISD::VSHL_BY_SCALAR",
323 def z_vsrl_by_scalar : SDNode<"SystemZISD::VSRL_BY_SCALAR",
325 def z_vsra_by_scalar : SDNode<"SystemZISD::VSRA_BY_SCALAR",
327 def z_vsum : SDNode<"SystemZISD::VSUM", SDT_ZVecBinaryConv>;
// Vector integer compares: equal / signed-higher / logically-higher; the
// trailing-"s" forms also set the condition code.
328 def z_vicmpe : SDNode<"SystemZISD::VICMPE", SDT_ZVecBinary>;
329 def z_vicmph : SDNode<"SystemZISD::VICMPH", SDT_ZVecBinary>;
330 def z_vicmphl : SDNode<"SystemZISD::VICMPHL", SDT_ZVecBinary>;
331 def z_vicmpes : SDNode<"SystemZISD::VICMPES", SDT_ZVecBinaryCC>;
332 def z_vicmphs : SDNode<"SystemZISD::VICMPHS", SDT_ZVecBinaryCC>;
333 def z_vicmphls : SDNode<"SystemZISD::VICMPHLS", SDT_ZVecBinaryCC>;
// Vector FP compares; strict variants carry a chain (SDNPHasChain) so they
// are ordered with respect to other FP-exception-sensitive operations.
334 def z_vfcmpe : SDNode<"SystemZISD::VFCMPE", SDT_ZVecBinaryConv>;
335 def z_strict_vfcmpe : SDNode<"SystemZISD::STRICT_VFCMPE",
336 SDT_ZVecBinaryConv, [SDNPHasChain]>;
337 def z_strict_vfcmpes : SDNode<"SystemZISD::STRICT_VFCMPES",
338 SDT_ZVecBinaryConv, [SDNPHasChain]>;
339 def z_vfcmph : SDNode<"SystemZISD::VFCMPH", SDT_ZVecBinaryConv>;
340 def z_strict_vfcmph : SDNode<"SystemZISD::STRICT_VFCMPH",
341 SDT_ZVecBinaryConv, [SDNPHasChain]>;
342 def z_strict_vfcmphs : SDNode<"SystemZISD::STRICT_VFCMPHS",
343 SDT_ZVecBinaryConv, [SDNPHasChain]>;
344 def z_vfcmphe : SDNode<"SystemZISD::VFCMPHE", SDT_ZVecBinaryConv>;
345 def z_strict_vfcmphe : SDNode<"SystemZISD::STRICT_VFCMPHE",
346 SDT_ZVecBinaryConv, [SDNPHasChain]>;
347 def z_strict_vfcmphes : SDNode<"SystemZISD::STRICT_VFCMPHES",
348 SDT_ZVecBinaryConv, [SDNPHasChain]>;
349 def z_vfcmpes : SDNode<"SystemZISD::VFCMPES", SDT_ZVecBinaryConvCC>;
350 def z_vfcmphs : SDNode<"SystemZISD::VFCMPHS", SDT_ZVecBinaryConvCC>;
351 def z_vfcmphes : SDNode<"SystemZISD::VFCMPHES", SDT_ZVecBinaryConvCC>;
352 def z_vextend : SDNode<"SystemZISD::VEXTEND", SDT_ZVecUnaryConv>;
353 def z_strict_vextend : SDNode<"SystemZISD::STRICT_VEXTEND",
354 SDT_ZVecUnaryConv, [SDNPHasChain]>;
355 def z_vround : SDNode<"SystemZISD::VROUND", SDT_ZVecUnaryConv>;
356 def z_strict_vround : SDNode<"SystemZISD::STRICT_VROUND",
357 SDT_ZVecUnaryConv, [SDNPHasChain]>;
358 def z_vtm : SDNode<"SystemZISD::VTM", SDT_ZCmp>;
// Vector string-search nodes; all produce a condition code (_CC suffix).
359 def z_vfae_cc : SDNode<"SystemZISD::VFAE_CC", SDT_ZVecTernaryIntCC>;
360 def z_vfaez_cc : SDNode<"SystemZISD::VFAEZ_CC", SDT_ZVecTernaryIntCC>;
361 def z_vfee_cc : SDNode<"SystemZISD::VFEE_CC", SDT_ZVecBinaryCC>;
362 def z_vfeez_cc : SDNode<"SystemZISD::VFEEZ_CC", SDT_ZVecBinaryCC>;
363 def z_vfene_cc : SDNode<"SystemZISD::VFENE_CC", SDT_ZVecBinaryCC>;
364 def z_vfenez_cc : SDNode<"SystemZISD::VFENEZ_CC", SDT_ZVecBinaryCC>;
365 def z_vistr_cc : SDNode<"SystemZISD::VISTR_CC", SDT_ZVecUnaryCC>;
366 def z_vstrc_cc : SDNode<"SystemZISD::VSTRC_CC",
367 SDT_ZVecQuaternaryIntCC>;
368 def z_vstrcz_cc : SDNode<"SystemZISD::VSTRCZ_CC",
369 SDT_ZVecQuaternaryIntCC>;
370 def z_vstrs_cc : SDNode<"SystemZISD::VSTRS_CC",
371 SDT_ZVecTernaryConvCC>;
372 def z_vstrsz_cc : SDNode<"SystemZISD::VSTRSZ_CC",
373 SDT_ZVecTernaryConvCC>;
374 def z_vftci : SDNode<"SystemZISD::VFTCI", SDT_ZVecBinaryConvIntCC>;
// AtomicWOp: helper class building a SystemZISD atomic node whose opcode
// string is "SystemZISD::" ## name. All instances read and write memory and
// carry a chain and a memory operand.
376 class AtomicWOp<string name, SDTypeProfile profile = SDT_ZAtomicLoadBinaryW>
377 : SDNode<"SystemZISD::"##name, profile,
378 [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
// Word-sized (aligned-word-based) atomic read-modify-write operations.
380 def z_atomic_swapw : AtomicWOp<"ATOMIC_SWAPW">;
381 def z_atomic_loadw_add : AtomicWOp<"ATOMIC_LOADW_ADD">;
382 def z_atomic_loadw_sub : AtomicWOp<"ATOMIC_LOADW_SUB">;
383 def z_atomic_loadw_and : AtomicWOp<"ATOMIC_LOADW_AND">;
384 def z_atomic_loadw_or : AtomicWOp<"ATOMIC_LOADW_OR">;
385 def z_atomic_loadw_xor : AtomicWOp<"ATOMIC_LOADW_XOR">;
386 def z_atomic_loadw_nand : AtomicWOp<"ATOMIC_LOADW_NAND">;
387 def z_atomic_loadw_min : AtomicWOp<"ATOMIC_LOADW_MIN">;
388 def z_atomic_loadw_max : AtomicWOp<"ATOMIC_LOADW_MAX">;
389 def z_atomic_loadw_umin : AtomicWOp<"ATOMIC_LOADW_UMIN">;
390 def z_atomic_loadw_umax : AtomicWOp<"ATOMIC_LOADW_UMAX">;
// Compare-and-swap and 128-bit atomic access nodes.
// NOTE(review): the cmp-swap defs below end mid-definition; continuation
// lines are missing from this extraction.
392 def z_atomic_cmp_swap : SDNode<"SystemZISD::ATOMIC_CMP_SWAP",
394 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
396 def z_atomic_cmp_swapw : SDNode<"SystemZISD::ATOMIC_CMP_SWAPW",
398 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
401 def z_atomic_load_128 : SDNode<"SystemZISD::ATOMIC_LOAD_128",
403 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
404 def z_atomic_store_128 : SDNode<"SystemZISD::ATOMIC_STORE_128",
406 [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
407 def z_atomic_cmp_swap_128 : SDNode<"SystemZISD::ATOMIC_CMP_SWAP_128",
408 SDT_ZAtomicCmpSwap128,
409 [SDNPHasChain, SDNPMayStore, SDNPMayLoad,
// Memory-to-memory block operations (MVC/NC/OC/XC and their looped forms for
// lengths beyond a single instruction), block compare (CLC), string
// operations, prefetch, and transactional-execution begin/end nodes.
412 def z_mvc : SDNode<"SystemZISD::MVC", SDT_ZMemMemLength,
413 [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
414 def z_mvc_loop : SDNode<"SystemZISD::MVC_LOOP", SDT_ZMemMemLoop,
415 [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
416 def z_nc : SDNode<"SystemZISD::NC", SDT_ZMemMemLength,
417 [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
418 def z_nc_loop : SDNode<"SystemZISD::NC_LOOP", SDT_ZMemMemLoop,
419 [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
420 def z_oc : SDNode<"SystemZISD::OC", SDT_ZMemMemLength,
421 [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
422 def z_oc_loop : SDNode<"SystemZISD::OC_LOOP", SDT_ZMemMemLoop,
423 [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
424 def z_xc : SDNode<"SystemZISD::XC", SDT_ZMemMemLength,
425 [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
426 def z_xc_loop : SDNode<"SystemZISD::XC_LOOP", SDT_ZMemMemLoop,
427 [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
// CLC only reads memory, hence no SDNPMayStore.
428 def z_clc : SDNode<"SystemZISD::CLC", SDT_ZMemMemLengthCC,
429 [SDNPHasChain, SDNPMayLoad]>;
430 def z_clc_loop : SDNode<"SystemZISD::CLC_LOOP", SDT_ZMemMemLoopCC,
431 [SDNPHasChain, SDNPMayLoad]>;
432 def z_strcmp : SDNode<"SystemZISD::STRCMP", SDT_ZStringCC,
433 [SDNPHasChain, SDNPMayLoad]>;
434 def z_stpcpy : SDNode<"SystemZISD::STPCPY", SDT_ZString,
435 [SDNPHasChain, SDNPMayStore, SDNPMayLoad]>;
436 def z_search_string : SDNode<"SystemZISD::SEARCH_STRING", SDT_ZStringCC,
437 [SDNPHasChain, SDNPMayLoad]>;
438 def z_prefetch : SDNode<"SystemZISD::PREFETCH", SDT_ZPrefetch,
439 [SDNPHasChain, SDNPMayLoad, SDNPMayStore,
// Transactional execution: TBEGIN / TBEGIN_NOFLOAT / TEND.
442 def z_tbegin : SDNode<"SystemZISD::TBEGIN", SDT_ZTBegin,
443 [SDNPHasChain, SDNPMayStore, SDNPSideEffect]>;
444 def z_tbegin_nofloat : SDNode<"SystemZISD::TBEGIN_NOFLOAT", SDT_ZTBegin,
445 [SDNPHasChain, SDNPMayStore, SDNPSideEffect]>;
446 def z_tend : SDNode<"SystemZISD::TEND", SDT_ZTEnd,
447 [SDNPHasChain, SDNPSideEffect]>;
// Re-typed wrappers for the generic vector shift opcodes.
449 def z_vshl : SDNode<"ISD::SHL", SDT_ZVecBinary>;
450 def z_vsra : SDNode<"ISD::SRA", SDT_ZVecBinary>;
451 def z_vsrl : SDNode<"ISD::SRL", SDT_ZVecBinary>;
453 //===----------------------------------------------------------------------===//
455 //===----------------------------------------------------------------------===//
// Pattern fragments. The bswap load/store fragments narrow z_loadbswap /
// z_storebswap by the accessed memory type; later fragments wrap the *_1
// nodes with an implicit CC operand, and classify extending loads by
// extension kind and memory type.
// NOTE(review): the predicate-code closers ("}]>;") of the PatFrags below are
// missing from this extraction -- restore from the original file.
457 def z_loadbswap16 : PatFrag<(ops node:$addr), (z_loadbswap node:$addr), [{
458 return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
460 def z_loadbswap32 : PatFrag<(ops node:$addr), (z_loadbswap node:$addr), [{
461 return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
463 def z_loadbswap64 : PatFrag<(ops node:$addr), (z_loadbswap node:$addr), [{
464 return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
467 def z_storebswap16 : PatFrag<(ops node:$src, node:$addr),
468 (z_storebswap node:$src, node:$addr), [{
469 return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i16;
471 def z_storebswap32 : PatFrag<(ops node:$src, node:$addr),
472 (z_storebswap node:$src, node:$addr), [{
473 return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i32;
475 def z_storebswap64 : PatFrag<(ops node:$src, node:$addr),
476 (z_storebswap node:$src, node:$addr), [{
477 return cast<MemIntrinsicSDNode>(N)->getMemoryVT() == MVT::i64;
480 // Fragments including CC as an implicit source.
// NOTE(review): the "def"/"class" header lines introducing the next two
// fragments appear to be missing (the lines below start at ": PatFrag<").
482 : PatFrag<(ops node:$valid, node:$mask, node:$bb),
483 (z_br_ccmask_1 node:$valid, node:$mask, node:$bb, CC)>;
485 : PatFrag<(ops node:$true, node:$false, node:$valid, node:$mask),
486 (z_select_ccmask_1 node:$true, node:$false,
487 node:$valid, node:$mask, CC)>;
488 def z_ipm : PatFrag<(ops), (z_ipm_1 CC)>;
489 def z_addcarry : PatFrag<(ops node:$lhs, node:$rhs),
490 (z_addcarry_1 node:$lhs, node:$rhs, CC)>;
491 def z_subcarry : PatFrag<(ops node:$lhs, node:$rhs),
492 (z_subcarry_1 node:$lhs, node:$rhs, CC)>;
494 // Signed and unsigned comparisons.
495 def z_scmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, timm), [{
496 unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
497 return Type != SystemZICMP::UnsignedOnly;
499 def z_ucmp : PatFrag<(ops node:$a, node:$b), (z_icmp node:$a, node:$b, timm), [{
500 unsigned Type = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
501 return Type != SystemZICMP::SignedOnly;
504 // Register- and memory-based TEST UNDER MASK.
505 def z_tm_reg : PatFrag<(ops node:$a, node:$b), (z_tm node:$a, node:$b, timm)>;
506 def z_tm_mem : PatFrag<(ops node:$a, node:$b), (z_tm node:$a, node:$b, 0)>;
508 // Register sign-extend operations. Sub-32-bit values are represented as i32s.
509 def sext8 : PatFrag<(ops node:$src), (sext_inreg node:$src, i8)>;
510 def sext16 : PatFrag<(ops node:$src), (sext_inreg node:$src, i16)>;
511 def sext32 : PatFrag<(ops node:$src), (sext (i32 node:$src))>;
513 // Match extensions of an i32 to an i64, followed by an in-register sign
514 // extension from a sub-i32 value.
515 def sext8dbl : PatFrag<(ops node:$src), (sext8 (anyext node:$src))>;
516 def sext16dbl : PatFrag<(ops node:$src), (sext16 (anyext node:$src))>;
518 // Register zero-extend operations. Sub-32-bit values are represented as i32s.
519 def zext8 : PatFrag<(ops node:$src), (and node:$src, 0xff)>;
520 def zext16 : PatFrag<(ops node:$src), (and node:$src, 0xffff)>;
521 def zext32 : PatFrag<(ops node:$src), (zext (i32 node:$src))>;
523 // Extending loads in which the extension type can be signed.
524 def asextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
525 unsigned Type = cast<LoadSDNode>(N)->getExtensionType();
526 return Type == ISD::EXTLOAD || Type == ISD::SEXTLOAD;
528 def asextloadi8 : PatFrag<(ops node:$ptr), (asextload node:$ptr), [{
529 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
531 def asextloadi16 : PatFrag<(ops node:$ptr), (asextload node:$ptr), [{
532 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
534 def asextloadi32 : PatFrag<(ops node:$ptr), (asextload node:$ptr), [{
535 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
538 // Extending loads in which the extension type can be unsigned.
539 def azextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
540 unsigned Type = cast<LoadSDNode>(N)->getExtensionType();
541 return Type == ISD::EXTLOAD || Type == ISD::ZEXTLOAD;
543 def azextloadi8 : PatFrag<(ops node:$ptr), (azextload node:$ptr), [{
544 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
546 def azextloadi16 : PatFrag<(ops node:$ptr), (azextload node:$ptr), [{
547 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
549 def azextloadi32 : PatFrag<(ops node:$ptr), (azextload node:$ptr), [{
550 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
553 // Extending loads in which the extension type doesn't matter.
554 def anyextload : PatFrag<(ops node:$ptr), (unindexedload node:$ptr), [{
555 return cast<LoadSDNode>(N)->getExtensionType() != ISD::NON_EXTLOAD;
557 def anyextloadi8 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
558 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
560 def anyextloadi16 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
561 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
563 def anyextloadi32 : PatFrag<(ops node:$ptr), (anyextload node:$ptr), [{
564 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i32;
// Aligned and non-volatile load/store fragment classes, the MVC store-of-load
// fragment, block (memory-to-memory) binary operations, and OR-based
// byte/halfword/word insertion fragments.
// NOTE(review): several predicate-code closers ("}]>;") and some continuation
// lines are missing from this extraction.
568 class AlignedLoad<SDPatternOperator load>
569 : PatFrag<(ops node:$addr), (load node:$addr), [{
570 auto *Load = cast<LoadSDNode>(N);
571 return Load->getAlignment() >= Load->getMemoryVT().getStoreSize();
573 def aligned_load : AlignedLoad<load>;
574 def aligned_asextloadi16 : AlignedLoad<asextloadi16>;
575 def aligned_asextloadi32 : AlignedLoad<asextloadi32>;
576 def aligned_azextloadi16 : AlignedLoad<azextloadi16>;
577 def aligned_azextloadi32 : AlignedLoad<azextloadi32>;
580 class AlignedStore<SDPatternOperator store>
581 : PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
582 auto *Store = cast<StoreSDNode>(N);
583 return Store->getAlignment() >= Store->getMemoryVT().getStoreSize();
585 def aligned_store : AlignedStore<store>;
586 def aligned_truncstorei16 : AlignedStore<truncstorei16>;
587 def aligned_truncstorei32 : AlignedStore<truncstorei32>;
589 // Non-volatile loads. Used for instructions that might access the storage
590 // location multiple times.
591 class NonvolatileLoad<SDPatternOperator load>
592 : PatFrag<(ops node:$addr), (load node:$addr), [{
593 auto *Load = cast<LoadSDNode>(N);
594 return !Load->isVolatile();
596 def nonvolatile_anyextloadi8 : NonvolatileLoad<anyextloadi8>;
597 def nonvolatile_anyextloadi16 : NonvolatileLoad<anyextloadi16>;
598 def nonvolatile_anyextloadi32 : NonvolatileLoad<anyextloadi32>;
600 // Non-volatile stores.
601 class NonvolatileStore<SDPatternOperator store>
602 : PatFrag<(ops node:$src, node:$addr), (store node:$src, node:$addr), [{
603 auto *Store = cast<StoreSDNode>(N);
604 return !Store->isVolatile();
606 def nonvolatile_truncstorei8 : NonvolatileStore<truncstorei8>;
607 def nonvolatile_truncstorei16 : NonvolatileStore<truncstorei16>;
608 def nonvolatile_truncstorei32 : NonvolatileStore<truncstorei32>;
610 // A store of a load that can be implemented using MVC.
611 def mvc_store : PatFrag<(ops node:$value, node:$addr),
612 (unindexedstore node:$value, node:$addr),
613 [{ return storeLoadCanUseMVC(N); }]>;
615 // Binary read-modify-write operations on memory in which the other
616 // operand is also memory and for which block operations like NC can
617 // be used. There are two patterns for each operator, depending on
618 // which operand contains the "other" load.
619 multiclass block_op<SDPatternOperator operator> {
620 def "1" : PatFrag<(ops node:$value, node:$addr),
621 (unindexedstore (operator node:$value,
622 (unindexedload node:$addr)),
624 [{ return storeLoadCanUseBlockBinary(N, 0); }]>;
625 def "2" : PatFrag<(ops node:$value, node:$addr),
626 (unindexedstore (operator (unindexedload node:$addr),
629 [{ return storeLoadCanUseBlockBinary(N, 1); }]>;
631 defm block_and : block_op<and>;
632 defm block_or : block_op<or>;
633 defm block_xor : block_op<xor>;
// Insertion fragments: (or (and x, mask), y) where the AND mask clears the
// field being inserted. Suffixes name the target field (ll/lh/hl/hh = 16-bit
// quarters, lf/hf = 32-bit halves of an i64).
636 def inserti8 : PatFrag<(ops node:$src1, node:$src2),
637 (or (and node:$src1, -256), node:$src2)>;
638 def insertll : PatFrag<(ops node:$src1, node:$src2),
639 (or (and node:$src1, 0xffffffffffff0000), node:$src2)>;
640 def insertlh : PatFrag<(ops node:$src1, node:$src2),
641 (or (and node:$src1, 0xffffffff0000ffff), node:$src2)>;
642 def inserthl : PatFrag<(ops node:$src1, node:$src2),
643 (or (and node:$src1, 0xffff0000ffffffff), node:$src2)>;
644 def inserthh : PatFrag<(ops node:$src1, node:$src2),
645 (or (and node:$src1, 0x0000ffffffffffff), node:$src2)>;
646 def insertlf : PatFrag<(ops node:$src1, node:$src2),
647 (or (and node:$src1, 0xffffffff00000000), node:$src2)>;
648 def inserthf : PatFrag<(ops node:$src1, node:$src2),
649 (or (and node:$src1, 0x00000000ffffffff), node:$src2)>;
651 // ORs that can be treated as insertions.
652 def or_as_inserti8 : PatFrag<(ops node:$src1, node:$src2),
653 (or node:$src1, node:$src2), [{
654 unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
655 return CurDAG->MaskedValueIsZero(N->getOperand(0),
656 APInt::getLowBitsSet(BitWidth, 8));
659 // ORs that can be treated as reversed insertions.
660 def or_as_revinserti8 : PatFrag<(ops node:$src1, node:$src2),
661 (or node:$src1, node:$src2), [{
662 unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
663 return CurDAG->MaskedValueIsZero(N->getOperand(1),
664 APInt::getLowBitsSet(BitWidth, 8));
667 // Negative integer absolute.
668 def z_inegabs : PatFrag<(ops node:$src), (ineg (z_iabs node:$src))>;
670 // Integer absolute, matching the canonical form generated by DAGCombiner.
671 def z_iabs32 : PatFrag<(ops node:$src),
672 (xor (add node:$src, (sra node:$src, (i32 31))),
673 (sra node:$src, (i32 31)))>;
674 def z_iabs64 : PatFrag<(ops node:$src),
675 (xor (add node:$src, (sra node:$src, (i32 63))),
676 (sra node:$src, (i32 63)))>;
677 def z_inegabs32 : PatFrag<(ops node:$src), (ineg (z_iabs32 node:$src))>;
678 def z_inegabs64 : PatFrag<(ops node:$src), (ineg (z_iabs64 node:$src))>;
// Arithmetic fragments: multiply-and-add, add/sub with-or-without overflow CC,
// combined logical operations, FP fused-multiply variants, generic load/store
// operator classes (loadu/storeu/storei), and shift-count helpers.
680 // Integer multiply-and-add
681 def z_muladd : PatFrag<(ops node:$src1, node:$src2, node:$src3),
682 (add (mul node:$src1, node:$src2), node:$src3)>;
684 // Alternatives to match operations with or without an overflow CC result.
685 def z_sadd : PatFrags<(ops node:$src1, node:$src2),
686 [(z_saddo node:$src1, node:$src2),
687 (add node:$src1, node:$src2)]>;
688 def z_uadd : PatFrags<(ops node:$src1, node:$src2),
689 [(z_uaddo node:$src1, node:$src2),
690 (add node:$src1, node:$src2)]>;
691 def z_ssub : PatFrags<(ops node:$src1, node:$src2),
692 [(z_ssubo node:$src1, node:$src2),
693 (sub node:$src1, node:$src2)]>;
694 def z_usub : PatFrags<(ops node:$src1, node:$src2),
695 [(z_usubo node:$src1, node:$src2),
696 (sub node:$src1, node:$src2)]>;
698 // Combined logical operations.
699 def andc : PatFrag<(ops node:$src1, node:$src2),
700 (and node:$src1, (not node:$src2))>;
701 def orc : PatFrag<(ops node:$src1, node:$src2),
702 (or node:$src1, (not node:$src2))>;
703 def nand : PatFrag<(ops node:$src1, node:$src2),
704 (not (and node:$src1, node:$src2))>;
705 def nor : PatFrag<(ops node:$src1, node:$src2),
706 (not (or node:$src1, node:$src2))>;
707 def nxor : PatFrag<(ops node:$src1, node:$src2),
708 (not (xor node:$src1, node:$src2))>;
710 // Fused multiply-subtract, using the natural operand order.
711 def any_fms : PatFrag<(ops node:$src1, node:$src2, node:$src3),
712 (any_fma node:$src1, node:$src2, (fneg node:$src3))>;
714 // Fused multiply-add and multiply-subtract, but with the order of the
715 // operands matching SystemZ's MA and MS instructions.
716 def z_any_fma : PatFrag<(ops node:$src1, node:$src2, node:$src3),
717 (any_fma node:$src2, node:$src3, node:$src1)>;
718 def z_any_fms : PatFrag<(ops node:$src1, node:$src2, node:$src3),
719 (any_fma node:$src2, node:$src3, (fneg node:$src1))>;
721 // Negative fused multiply-add and multiply-subtract.
722 def any_fnma : PatFrag<(ops node:$src1, node:$src2, node:$src3),
723 (fneg (any_fma node:$src1, node:$src2, node:$src3))>;
724 def any_fnms : PatFrag<(ops node:$src1, node:$src2, node:$src3),
725 (fneg (any_fms node:$src1, node:$src2, node:$src3))>;
727 // Floating-point negative absolute.
728 def fnabs : PatFrag<(ops node:$ptr), (fneg (fabs node:$ptr))>;
730 // Strict floating-point fragments.
// Each PatFrags pair matches either the strict (chained) or the non-strict
// form of the same comparison/conversion, so one pattern covers both.
731 def z_any_fcmp : PatFrags<(ops node:$lhs, node:$rhs),
732 [(z_strict_fcmp node:$lhs, node:$rhs),
733 (z_fcmp node:$lhs, node:$rhs)]>;
734 def z_any_vfcmpe : PatFrags<(ops node:$lhs, node:$rhs),
735 [(z_strict_vfcmpe node:$lhs, node:$rhs),
736 (z_vfcmpe node:$lhs, node:$rhs)]>;
737 def z_any_vfcmph : PatFrags<(ops node:$lhs, node:$rhs),
738 [(z_strict_vfcmph node:$lhs, node:$rhs),
739 (z_vfcmph node:$lhs, node:$rhs)]>;
740 def z_any_vfcmphe : PatFrags<(ops node:$lhs, node:$rhs),
741 [(z_strict_vfcmphe node:$lhs, node:$rhs),
742 (z_vfcmphe node:$lhs, node:$rhs)]>;
743 def z_any_vextend : PatFrags<(ops node:$src),
744 [(z_strict_vextend node:$src),
745 (z_vextend node:$src)]>;
746 def z_any_vround : PatFrags<(ops node:$src),
747 [(z_strict_vround node:$src),
748 (z_vround node:$src)]>;
750 // Create a unary operator that loads from memory and then performs
751 // the given operation on it.
752 class loadu<SDPatternOperator operator, SDPatternOperator load = load>
753 : PatFrag<(ops node:$addr), (operator (load node:$addr))>;
755 // Create a store operator that performs the given unary operation
756 // on the value before storing it.
757 class storeu<SDPatternOperator operator, SDPatternOperator store = store>
758 : PatFrag<(ops node:$value, node:$addr),
759 (store (operator node:$value), node:$addr)>;
761 // Create a store operator that performs the given inherent operation
762 // and stores the resulting value.
763 class storei<SDPatternOperator operator, SDPatternOperator store = store>
764 : PatFrag<(ops node:$addr),
765 (store (operator), node:$addr)>;
767 // Create a shift operator that optionally ignores an AND of the
768 // shift count with an immediate if the bottom 6 bits are all set.
769 def imm32bottom6set : PatLeaf<(i32 imm), [{
770 return (N->getZExtValue() & 0x3f) == 0x3f;
772 class shiftop<SDPatternOperator operator>
773 : PatFrags<(ops node:$val, node:$count),
774 [(operator node:$val, node:$count),
775 (operator node:$val, (and node:$count, imm32bottom6set))]>;
777 def imm32mod64 : PatLeaf<(i32 imm), [{
778 return (N->getZExtValue() % 64 == 0);
// Vector memory-access fragments: replicated-element loads, single-element
// insert loads (VLE*), zero-extending element loads (VLLEZ*), element stores
// (VSTE*), plus vector negate/not/abs and i64-element sign-extension helpers.
// NOTE(review): the z_vllezf32/z_vllezf64/z_vllezlf32 fragments and the
// byte-swapped z_vllebrzi64 are visibly missing interior lines in this
// extraction -- restore from the original file.
781 // Load a scalar and replicate it in all elements of a vector.
782 class z_replicate_load<ValueType scalartype, SDPatternOperator load>
783 : PatFrag<(ops node:$addr),
784 (z_replicate (scalartype (load node:$addr)))>;
785 def z_replicate_loadi8 : z_replicate_load<i32, anyextloadi8>;
786 def z_replicate_loadi16 : z_replicate_load<i32, anyextloadi16>;
787 def z_replicate_loadi32 : z_replicate_load<i32, load>;
788 def z_replicate_loadi64 : z_replicate_load<i64, load>;
789 def z_replicate_loadf32 : z_replicate_load<f32, load>;
790 def z_replicate_loadf64 : z_replicate_load<f64, load>;
791 // Byte-swapped replicated vector element loads.
792 def z_replicate_loadbswapi16 : z_replicate_load<i32, z_loadbswap16>;
793 def z_replicate_loadbswapi32 : z_replicate_load<i32, z_loadbswap32>;
794 def z_replicate_loadbswapi64 : z_replicate_load<i64, z_loadbswap64>;
796 // Load a scalar and insert it into a single element of a vector.
797 class z_vle<ValueType scalartype, SDPatternOperator load>
798 : PatFrag<(ops node:$vec, node:$addr, node:$index),
799 (z_vector_insert node:$vec, (scalartype (load node:$addr)),
801 def z_vlei8 : z_vle<i32, anyextloadi8>;
802 def z_vlei16 : z_vle<i32, anyextloadi16>;
803 def z_vlei32 : z_vle<i32, load>;
804 def z_vlei64 : z_vle<i64, load>;
805 def z_vlef32 : z_vle<f32, load>;
806 def z_vlef64 : z_vle<f64, load>;
807 // Byte-swapped vector element loads.
808 def z_vlebri16 : z_vle<i32, z_loadbswap16>;
809 def z_vlebri32 : z_vle<i32, z_loadbswap32>;
810 def z_vlebri64 : z_vle<i64, z_loadbswap64>;
812 // Load a scalar and insert it into the low element of the high i64 of a
814 class z_vllez<ValueType scalartype, SDPatternOperator load, int index>
815 : PatFrag<(ops node:$addr),
816 (z_vector_insert immAllZerosV,
817 (scalartype (load node:$addr)), (i32 index))>;
818 def z_vllezi8 : z_vllez<i32, anyextloadi8, 7>;
819 def z_vllezi16 : z_vllez<i32, anyextloadi16, 3>;
820 def z_vllezi32 : z_vllez<i32, load, 1>;
821 def z_vllezi64 : PatFrags<(ops node:$addr),
822 [(z_vector_insert immAllZerosV,
823 (i64 (load node:$addr)), (i32 0)),
824 (z_join_dwords (i64 (load node:$addr)), (i64 0))]>;
825 // We use high merges to form a v4f32 from four f32s. Propagating zero
826 // into all elements but index 1 gives this expression.
827 def z_vllezf32 : PatFrag<(ops node:$addr),
833 (v4f32 (scalar_to_vector
834 (f32 (load node:$addr)))))))),
836 (bitconvert (v4f32 immAllZerosV))))>;
837 def z_vllezf64 : PatFrag<(ops node:$addr),
839 (v2f64 (scalar_to_vector (f64 (load node:$addr)))),
842 // Similarly for the high element of a zeroed vector.
843 def z_vllezli32 : z_vllez<i32, load, 0>;
844 def z_vllezlf32 : PatFrag<(ops node:$addr),
849 (v4f32 (scalar_to_vector
850 (f32 (load node:$addr)))),
851 (v4f32 immAllZerosV)))),
853 (bitconvert (v4f32 immAllZerosV))))>;
855 // Byte-swapped variants.
856 def z_vllebrzi16 : z_vllez<i32, z_loadbswap16, 3>;
857 def z_vllebrzi32 : z_vllez<i32, z_loadbswap32, 1>;
858 def z_vllebrzli32 : z_vllez<i32, z_loadbswap32, 0>;
859 def z_vllebrzi64 : PatFrags<(ops node:$addr),
860 [(z_vector_insert immAllZerosV,
861 (i64 (z_loadbswap64 node:$addr)),
863 (z_join_dwords (i64 (z_loadbswap64 node:$addr)),
867 // Store one element of a vector.
868 class z_vste<ValueType scalartype, SDPatternOperator store>
869 : PatFrag<(ops node:$vec, node:$addr, node:$index),
870 (store (scalartype (z_vector_extract node:$vec, node:$index)),
872 def z_vstei8 : z_vste<i32, truncstorei8>;
873 def z_vstei16 : z_vste<i32, truncstorei16>;
874 def z_vstei32 : z_vste<i32, store>;
875 def z_vstei64 : z_vste<i64, store>;
876 def z_vstef32 : z_vste<f32, store>;
877 def z_vstef64 : z_vste<f64, store>;
878 // Byte-swapped vector element stores.
879 def z_vstebri16 : z_vste<i32, z_storebswap16>;
880 def z_vstebri32 : z_vste<i32, z_storebswap32>;
881 def z_vstebri64 : z_vste<i64, z_storebswap64>;
883 // Arithmetic negation on vectors.
884 def z_vneg : PatFrag<(ops node:$x), (sub immAllZerosV, node:$x)>;
886 // Bitwise negation on vectors.
887 def z_vnot : PatFrag<(ops node:$x), (xor node:$x, immAllOnesV)>;
889 // Signed "integer greater than zero" on vectors.
890 def z_vicmph_zero : PatFrag<(ops node:$x), (z_vicmph node:$x, immAllZerosV)>;
892 // Signed "integer less than zero" on vectors.
893 def z_vicmpl_zero : PatFrag<(ops node:$x), (z_vicmph immAllZerosV, node:$x)>;
895 // Integer absolute on vectors.
// For element width w, shift is w-1: abs(x) = (x + (x >> (w-1))) ^ (x >> (w-1)).
896 class z_viabs<int shift>
897 : PatFrag<(ops node:$src),
898 (xor (add node:$src, (z_vsra_by_scalar node:$src, (i32 shift))),
899 (z_vsra_by_scalar node:$src, (i32 shift)))>;
900 def z_viabs8 : z_viabs<7>;
901 def z_viabs16 : z_viabs<15>;
902 def z_viabs32 : z_viabs<31>;
903 def z_viabs64 : z_viabs<63>;
905 // Sign-extend the i64 elements of a vector.
906 class z_vse<int shift>
907 : PatFrag<(ops node:$src),
908 (z_vsra_by_scalar (z_vshl_by_scalar node:$src, shift), shift)>;
909 def z_vsei8 : z_vse<56>;
910 def z_vsei16 : z_vse<48>;
911 def z_vsei32 : z_vse<32>;
913 // ...and again with the extensions being done on individual i64 scalars.
914 class z_vse_by_parts<SDPatternOperator operator, int index1, int index2>
915 : PatFrag<(ops node:$src),
917 (operator (z_vector_extract node:$src, index1)),
918 (operator (z_vector_extract node:$src, index2)))>;
919 def z_vsei8_by_parts : z_vse_by_parts<sext8dbl, 7, 15>;
920 def z_vsei16_by_parts : z_vse_by_parts<sext16dbl, 3, 7>;
921 def z_vsei32_by_parts : z_vse_by_parts<sext32, 1, 3>;