//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// ARM Instruction Predicate Definitions.
//
def HasV8_1a         : Predicate<"Subtarget->hasV8_1aOps()">,
                                 AssemblerPredicate<"HasV8_1aOps", "armv8.1a">;
def HasV8_2a         : Predicate<"Subtarget->hasV8_2aOps()">,
                                 AssemblerPredicate<"HasV8_2aOps", "armv8.2a">;
def HasV8_3a         : Predicate<"Subtarget->hasV8_3aOps()">,
                                 AssemblerPredicate<"HasV8_3aOps", "armv8.3a">;
def HasV8_4a         : Predicate<"Subtarget->hasV8_4aOps()">,
                                 AssemblerPredicate<"HasV8_4aOps", "armv8.4a">;
def HasV8_5a         : Predicate<"Subtarget->hasV8_5aOps()">,
                                 AssemblerPredicate<"HasV8_5aOps", "armv8.5a">;
def HasVH            : Predicate<"Subtarget->hasVH()">,
                       AssemblerPredicate<"FeatureVH", "vh">;

def HasLOR           : Predicate<"Subtarget->hasLOR()">,
                       AssemblerPredicate<"FeatureLOR", "lor">;

def HasPA            : Predicate<"Subtarget->hasPA()">,
                       AssemblerPredicate<"FeaturePA", "pa">;

def HasJS            : Predicate<"Subtarget->hasJS()">,
                       AssemblerPredicate<"FeatureJS", "jsconv">;

def HasCCIDX         : Predicate<"Subtarget->hasCCIDX()">,
                       AssemblerPredicate<"FeatureCCIDX", "ccidx">;

def HasComplxNum     : Predicate<"Subtarget->hasComplxNum()">,
                       AssemblerPredicate<"FeatureComplxNum", "complxnum">;

def HasNV            : Predicate<"Subtarget->hasNV()">,
                       AssemblerPredicate<"FeatureNV", "nv">;

def HasRASv8_4       : Predicate<"Subtarget->hasRASv8_4()">,
                       AssemblerPredicate<"FeatureRASv8_4", "rasv8_4">;

def HasMPAM          : Predicate<"Subtarget->hasMPAM()">,
                       AssemblerPredicate<"FeatureMPAM", "mpam">;

def HasDIT           : Predicate<"Subtarget->hasDIT()">,
                       AssemblerPredicate<"FeatureDIT", "dit">;

def HasTRACEV8_4     : Predicate<"Subtarget->hasTRACEV8_4()">,
                       AssemblerPredicate<"FeatureTRACEV8_4", "tracev8.4">;

def HasAM            : Predicate<"Subtarget->hasAM()">,
                       AssemblerPredicate<"FeatureAM", "am">;

def HasSEL2          : Predicate<"Subtarget->hasSEL2()">,
                       AssemblerPredicate<"FeatureSEL2", "sel2">;

def HasPMU           : Predicate<"Subtarget->hasPMU()">,
                       AssemblerPredicate<"FeaturePMU", "pmu">;

def HasTLB_RMI       : Predicate<"Subtarget->hasTLB_RMI()">,
                       AssemblerPredicate<"FeatureTLB_RMI", "tlb-rmi">;

def HasFMI           : Predicate<"Subtarget->hasFMI()">,
                       AssemblerPredicate<"FeatureFMI", "fmi">;

def HasRCPC_IMMO     : Predicate<"Subtarget->hasRCPCImm()">,
                       AssemblerPredicate<"FeatureRCPC_IMMO", "rcpc-immo">;

def HasFPARMv8       : Predicate<"Subtarget->hasFPARMv8()">,
                       AssemblerPredicate<"FeatureFPARMv8", "fp-armv8">;
def HasNEON          : Predicate<"Subtarget->hasNEON()">,
                       AssemblerPredicate<"FeatureNEON", "neon">;
def HasCrypto        : Predicate<"Subtarget->hasCrypto()">,
                       AssemblerPredicate<"FeatureCrypto", "crypto">;
def HasSM4           : Predicate<"Subtarget->hasSM4()">,
                       AssemblerPredicate<"FeatureSM4", "sm4">;
def HasSHA3          : Predicate<"Subtarget->hasSHA3()">,
                       AssemblerPredicate<"FeatureSHA3", "sha3">;
def HasSHA2          : Predicate<"Subtarget->hasSHA2()">,
                       AssemblerPredicate<"FeatureSHA2", "sha2">;
def HasAES           : Predicate<"Subtarget->hasAES()">,
                       AssemblerPredicate<"FeatureAES", "aes">;
def HasDotProd       : Predicate<"Subtarget->hasDotProd()">,
                       AssemblerPredicate<"FeatureDotProd", "dotprod">;
def HasCRC           : Predicate<"Subtarget->hasCRC()">,
                       AssemblerPredicate<"FeatureCRC", "crc">;
def HasLSE           : Predicate<"Subtarget->hasLSE()">,
                       AssemblerPredicate<"FeatureLSE", "lse">;
def HasRAS           : Predicate<"Subtarget->hasRAS()">,
                       AssemblerPredicate<"FeatureRAS", "ras">;
def HasRDM           : Predicate<"Subtarget->hasRDM()">,
                       AssemblerPredicate<"FeatureRDM", "rdm">;
def HasPerfMon       : Predicate<"Subtarget->hasPerfMon()">;
def HasFullFP16      : Predicate<"Subtarget->hasFullFP16()">,
                       AssemblerPredicate<"FeatureFullFP16", "fullfp16">;
def HasFP16FML       : Predicate<"Subtarget->hasFP16FML()">,
                       AssemblerPredicate<"FeatureFP16FML", "fp16fml">;
def HasSPE           : Predicate<"Subtarget->hasSPE()">,
                       AssemblerPredicate<"FeatureSPE", "spe">;
def HasFuseAES       : Predicate<"Subtarget->hasFuseAES()">,
                       AssemblerPredicate<"FeatureFuseAES",
                                          "fuse-aes">;
def HasSVE           : Predicate<"Subtarget->hasSVE()">,
                       AssemblerPredicate<"FeatureSVE", "sve">;
def HasSVE2          : Predicate<"Subtarget->hasSVE2()">,
                       AssemblerPredicate<"FeatureSVE2", "sve2">;
def HasSVE2AES       : Predicate<"Subtarget->hasSVE2AES()">,
                       AssemblerPredicate<"FeatureSVE2AES", "sve2-aes">;
def HasSVE2SM4       : Predicate<"Subtarget->hasSVE2SM4()">,
                       AssemblerPredicate<"FeatureSVE2SM4", "sve2-sm4">;
def HasSVE2SHA3      : Predicate<"Subtarget->hasSVE2SHA3()">,
                       AssemblerPredicate<"FeatureSVE2SHA3", "sve2-sha3">;
def HasSVE2BitPerm   : Predicate<"Subtarget->hasSVE2BitPerm()">,
                       AssemblerPredicate<"FeatureSVE2BitPerm", "sve2-bitperm">;
def HasRCPC          : Predicate<"Subtarget->hasRCPC()">,
                       AssemblerPredicate<"FeatureRCPC", "rcpc">;
def HasAltNZCV       : Predicate<"Subtarget->hasAlternativeNZCV()">,
                       AssemblerPredicate<"FeatureAltFPCmp", "altnzcv">;
def HasFRInt3264     : Predicate<"Subtarget->hasFRInt3264()">,
                       AssemblerPredicate<"FeatureFRInt3264", "frint3264">;
def HasSB            : Predicate<"Subtarget->hasSB()">,
                       AssemblerPredicate<"FeatureSB", "sb">;
def HasPredRes       : Predicate<"Subtarget->hasPredRes()">,
                       AssemblerPredicate<"FeaturePredRes", "predres">;
def HasCCDP          : Predicate<"Subtarget->hasCCDP()">,
                       AssemblerPredicate<"FeatureCacheDeepPersist", "ccdp">;
def HasBTI           : Predicate<"Subtarget->hasBTI()">,
                       AssemblerPredicate<"FeatureBranchTargetId", "bti">;
def HasMTE           : Predicate<"Subtarget->hasMTE()">,
                       AssemblerPredicate<"FeatureMTE", "mte">;
def HasTME           : Predicate<"Subtarget->hasTME()">,
                       AssemblerPredicate<"FeatureTME", "tme">;
def HasETE           : Predicate<"Subtarget->hasETE()">,
                       AssemblerPredicate<"FeatureETE", "ete">;
def HasTRBE          : Predicate<"Subtarget->hasTRBE()">,
                       AssemblerPredicate<"FeatureTRBE", "trbe">;
def IsLE             : Predicate<"Subtarget->isLittleEndian()">;
def IsBE             : Predicate<"!Subtarget->isLittleEndian()">;
def IsWindows        : Predicate<"Subtarget->isTargetWindows()">;
def UseAlternateSExtLoadCVTF32
    : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;

def UseNegativeImmediates
    : Predicate<"false">, AssemblerPredicate<"!FeatureNoNegativeImmediates",
                                             "NegativeImmediates">;

def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;

//===----------------------------------------------------------------------===//
// AArch64-specific DAG Nodes.
//

// SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                               [SDTCisSameAs<0, 2>,
                                                SDTCisSameAs<0, 3>,
                                                SDTCisInt<0>, SDTCisVT<1, i32>]>;

// SDTBinaryArithWithFlagsIn - RES1, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
                                              [SDTCisSameAs<0, 1>,
                                               SDTCisSameAs<0, 2>,
                                               SDTCisInt<0>,
                                               SDTCisVT<3, i32>]>;

// SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
                                                 [SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisInt<0>,
                                                  SDTCisVT<1, i32>,
                                                  SDTCisVT<4, i32>]>;

def SDT_AArch64Brcond  : SDTypeProfile<0, 3,
                                     [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
                                      SDTCisVT<2, OtherVT>]>;
def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
                                          SDTCisVT<2, OtherVT>]>;

def SDT_AArch64CSel  : SDTypeProfile<1, 4,
                                   [SDTCisSameAs<0, 1>,
                                    SDTCisSameAs<0, 2>,
                                    SDTCisInt<3>,
                                    SDTCisVT<4, i32>]>;
def SDT_AArch64CCMP : SDTypeProfile<1, 5,
                                    [SDTCisVT<0, i32>,
                                     SDTCisInt<1>,
                                     SDTCisSameAs<1, 2>,
                                     SDTCisInt<3>,
                                     SDTCisInt<4>,
                                     SDTCisVT<5, i32>]>;
def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
                                     [SDTCisVT<0, i32>,
                                      SDTCisFP<1>,
                                      SDTCisSameAs<1, 2>,
                                      SDTCisInt<3>,
                                      SDTCisInt<4>,
                                      SDTCisVT<5, i32>]>;
def SDT_AArch64FCmp   : SDTypeProfile<0, 2,
                                   [SDTCisFP<0>,
                                    SDTCisSameAs<0, 1>]>;
def SDT_AArch64Dup   : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
def SDT_AArch64DupLane   : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
def SDT_AArch64Insr  : SDTypeProfile<1, 2, [SDTCisVec<0>]>;
def SDT_AArch64Zip   : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                            SDTCisSameAs<0, 1>,
                                            SDTCisSameAs<0, 2>]>;
def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisInt<2>, SDTCisInt<3>]>;
def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                            SDTCisSameAs<0,2>, SDTCisInt<3>]>;
def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;

def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
def SDT_AArch64fcmp  : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>,
                                             SDTCisSameAs<0,3>]>;
def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;

def SDT_AArch64ITOF  : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;

def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                   SDTCisPtrTy<1>]>;

def SDT_AArch64ldp : SDTypeProfile<2, 1, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
def SDT_AArch64stp : SDTypeProfile<0, 3, [SDTCisVT<0, i64>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;

// Generates the general dynamic sequences, i.e.
//  adrp  x0, :tlsdesc:var
//  ldr   x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1
//
// (the TPIDR_EL0 offset is put directly in X0, hence no "result" here)
// number of operands (the variable)
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0,1,
                                          [SDTCisPtrTy<0>]>;

def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
                                        [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
                                         SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                         SDTCisSameAs<1, 4>]>;

def SDT_AArch64TBL : SDTypeProfile<1, 2, [
  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
]>;

// non-extending masked load fragment.
def nonext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         !cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
// sign extending masked load fragments.
def asext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def),[{
  return (cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD ||
          cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD) &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def asext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def asext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def asext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (asext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;
// zero extending masked load fragments.
def zext_masked_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed();
}]>;
def zext_masked_load_i8 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def zext_masked_load_i16 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def zext_masked_load_i32 :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (zext_masked_load node:$ptr, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_load :
  PatFrag<(ops node:$ptr, node:$pred, node:$def),
          (masked_ld node:$ptr, undef, node:$pred, node:$def), [{
  return cast<MaskedLoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD &&
         cast<MaskedLoadSDNode>(N)->isUnindexed() &&
         cast<MaskedLoadSDNode>(N)->isNonTemporal();
}]>;
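
// For illustration only, a sketch in the style of the SVE patterns that
// consume these fragments (the instruction and operand names here are
// assumptions, not definitions from this file):
//   def : Pat<(nxv16i8 (nonext_masked_load GPR64:$base, nxv16i1:$pg, undef)),
//             (LD1B_IMM $pg, $base, 0)>;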

// non-truncating masked store fragment.
def nontrunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         !cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;
// truncating masked store fragments.
def trunc_masked_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed();
}]>;
def trunc_masked_store_i8 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i8;
}]>;
def trunc_masked_store_i16 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i16;
}]>;
def trunc_masked_store_i32 :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (trunc_masked_store node:$val, node:$ptr, node:$pred), [{
  return cast<MaskedStoreSDNode>(N)->getMemoryVT().getScalarType() == MVT::i32;
}]>;

def non_temporal_store :
  PatFrag<(ops node:$val, node:$ptr, node:$pred),
          (masked_st node:$val, node:$ptr, undef, node:$pred), [{
  return !cast<MaskedStoreSDNode>(N)->isTruncatingStore() &&
         cast<MaskedStoreSDNode>(N)->isUnindexed() &&
         cast<MaskedStoreSDNode>(N)->isNonTemporal();
}]>;

def AArch64adrp          : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
def AArch64adr           : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
def AArch64addlow        : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
def AArch64LOADgot       : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                  SDCallSeqStart<[ SDTCisVT<0, i32>,
                                                   SDTCisVT<1, i32> ]>,
                                  [SDNPHasChain, SDNPOutGlue]>;
def AArch64callseq_end   : SDNode<"ISD::CALLSEQ_END",
                                  SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                                 SDTCisVT<1, i32> ]>,
                                  [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
def AArch64call          : SDNode<"AArch64ISD::CALL",
                                SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                 SDNPVariadic]>;
def AArch64brcond        : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                                [SDNPHasChain]>;
def AArch64cbz           : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64cbnz          : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                                [SDNPHasChain]>;
def AArch64tbz           : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;
def AArch64tbnz          : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                                [SDNPHasChain]>;

def AArch64csel          : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
def AArch64csinv         : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
def AArch64csneg         : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
def AArch64csinc         : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
def AArch64retflag       : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
                                [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def AArch64adc       : SDNode<"AArch64ISD::ADC",  SDTBinaryArithWithFlagsIn>;
def AArch64sbc       : SDNode<"AArch64ISD::SBC",  SDTBinaryArithWithFlagsIn>;
def AArch64add_flag  : SDNode<"AArch64ISD::ADDS",  SDTBinaryArithWithFlagsOut,
                              [SDNPCommutative]>;
def AArch64sub_flag  : SDNode<"AArch64ISD::SUBS",  SDTBinaryArithWithFlagsOut>;
def AArch64and_flag  : SDNode<"AArch64ISD::ANDS",  SDTBinaryArithWithFlagsOut,
                              [SDNPCommutative]>;
def AArch64adc_flag  : SDNode<"AArch64ISD::ADCS",  SDTBinaryArithWithFlagsInOut>;
def AArch64sbc_flag  : SDNode<"AArch64ISD::SBCS",  SDTBinaryArithWithFlagsInOut>;

def AArch64ccmp      : SDNode<"AArch64ISD::CCMP",  SDT_AArch64CCMP>;
def AArch64ccmn      : SDNode<"AArch64ISD::CCMN",  SDT_AArch64CCMP>;
def AArch64fccmp     : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;

def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;

def AArch64fcmp          : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
def AArch64strict_fcmp   : SDNode<"AArch64ISD::STRICT_FCMP", SDT_AArch64FCmp,
                                  [SDNPHasChain]>;
def AArch64strict_fcmpe  : SDNode<"AArch64ISD::STRICT_FCMPE", SDT_AArch64FCmp,
                                  [SDNPHasChain]>;
def AArch64any_fcmp      : PatFrags<(ops node:$lhs, node:$rhs),
                                    [(AArch64strict_fcmp node:$lhs, node:$rhs),
                                     (AArch64fcmp node:$lhs, node:$rhs)]>;

def AArch64dup       : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
def AArch64duplane8  : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;

def AArch64insr      : SDNode<"AArch64ISD::INSR", SDT_AArch64Insr>;

def AArch64zip1      : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
def AArch64zip2      : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
def AArch64uzp1      : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
def AArch64uzp2      : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
def AArch64trn1      : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
def AArch64trn2      : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;

def AArch64movi_edit  : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
def AArch64movi_msl   : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
def AArch64mvni_msl   : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
def AArch64movi       : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
def AArch64fmov       : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;

def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
def AArch64ext   : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;

def AArch64vashr   : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
def AArch64vlshr   : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
def AArch64vshl    : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
def AArch64sqshli  : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
def AArch64uqshli  : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
def AArch64srshri  : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
def AArch64urshri  : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;

def AArch64not: SDNode<"AArch64ISD::NOT", SDT_AArch64unvec>;
def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
def AArch64bsl: SDNode<"AArch64ISD::BSL", SDT_AArch64trivec>;

def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;

def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;

def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
                           (AArch64not (AArch64cmeqz (and node:$LHS, node:$RHS)))>;
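
// There is no dedicated CMTST node; "test bits" is matched through the
// identity cmtst(x, y) == not(cmeq(x & y, 0)), which the fragment above
// encodes.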

def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;

def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;

def AArch64neg : SDNode<"AArch64ISD::NEG", SDT_AArch64unvec>;

def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
                  [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

def AArch64Prefetch : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
                             [SDNPHasChain, SDNPSideEffect]>;

def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;

def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
                                    SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;

def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
                                 SDT_AArch64WrapperLarge>;

def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;

def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                           SDTCisSameAs<1, 2>]>;
def AArch64smull    : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
def AArch64umull    : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;

def AArch64frecpe   : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
def AArch64frecps   : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
def AArch64frsqrte  : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
def AArch64frsqrts  : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;

def AArch64saddv    : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
def AArch64uaddv    : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
def AArch64sminv    : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
def AArch64uminv    : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv    : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv    : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;

def SDT_AArch64SETTAG : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
def AArch64stg   : SDNode<"AArch64ISD::STG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stzg  : SDNode<"AArch64ISD::STZG", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64st2g  : SDNode<"AArch64ISD::ST2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;
def AArch64stz2g : SDNode<"AArch64ISD::STZ2G", SDT_AArch64SETTAG, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def SDT_AArch64unpk : SDTypeProfile<1, 1, [
    SDTCisInt<0>, SDTCisInt<1>, SDTCisOpSmallerThanOp<1, 0>
]>;
def AArch64sunpkhi : SDNode<"AArch64ISD::SUNPKHI", SDT_AArch64unpk>;
def AArch64sunpklo : SDNode<"AArch64ISD::SUNPKLO", SDT_AArch64unpk>;
def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>;
def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>;

def AArch64ldp : SDNode<"AArch64ISD::LDP", SDT_AArch64ldp, [SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def AArch64stp : SDNode<"AArch64ISD::STP", SDT_AArch64stp, [SDNPHasChain, SDNPMayStore, SDNPMemOperand]>;

def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>;

//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
// We could compute these on a per-module basis but doing so requires accessing
// the Function object through the <Target>Subtarget and objections were raised
// to that (see post-commit review comments for r301750).
let RecomputePerFunction = 1 in {
  def ForCodeSize   : Predicate<"shouldOptForSize(MF)">;
  def NotForCodeSize   : Predicate<"!shouldOptForSize(MF)">;
  // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
  def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || shouldOptForSize(MF)">;

  def UseBTI : Predicate<[{ MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>;
  def NotUseBTI : Predicate<[{ !MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>;

  // Toggles patterns which aren't beneficial in GlobalISel when we aren't
  // optimizing. This allows us to selectively use patterns without impacting
  // SelectionDAG's behaviour.
  // FIXME: One day there will probably be a nicer way to check for this, but
  // today is not that day.
  def OptimizedGISelOrOtherSelector : Predicate<"!MF->getFunction().hasOptNone() || MF->getProperties().hasProperty(MachineFunctionProperties::Property::FailedISel) || !MF->getProperties().hasProperty(MachineFunctionProperties::Property::Legalized)">;
}
582 include "AArch64InstrFormats.td"
583 include "SVEInstrFormats.td"
585 //===----------------------------------------------------------------------===//
587 //===----------------------------------------------------------------------===//
588 // Miscellaneous instructions.
589 //===----------------------------------------------------------------------===//

let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
// We set Sched to empty list because we expect these instructions to simply get
// removed in most cases.
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
                              Sched<[]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
                            Sched<[]>;
} // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1

let isReMaterializable = 1, isCodeGenOnly = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions.  When that changes, they can be
// removed, along with the AArch64Wrapper node.

let AddedComplexity = 10 in
def LOADgot : Pseudo<(outs GPR64:$dst), (ins i64imm:$addr),
                     [(set GPR64:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;

// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                              tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrJT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                              tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                              tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                              tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                              tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                              texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;
// Normally AArch64addlow either gets folded into a following ldr/str,
// or together with an adrp into MOVaddr above. For cases with TLS, it
// might appear without either of them, so allow lowering it into a plain
// GPR64 as well.
def ADDlowTLS
    : Pseudo<(outs GPR64:$dst), (ins GPR64:$src, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow GPR64:$src,
                                              tglobaltlsaddr:$low))]>,
      Sched<[WriteAdr]>;

} // isReMaterializable, isCodeGenOnly

def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
          (LOADgot tglobaltlsaddr:$addr)>;

def : Pat<(AArch64LOADgot texternalsym:$addr),
          (LOADgot texternalsym:$addr)>;

def : Pat<(AArch64LOADgot tconstpool:$addr),
          (LOADgot tconstpool:$addr)>;

// 32-bit jump table destination is actually only 2 instructions since we can
// use the table itself as a PC-relative base. But optimization occurs after
// branch relaxation so be pessimistic.
let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch" in {
def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                     Sched<[]>;
}
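
// When expanded, each pseudo becomes a short fixed sequence; for the 32-bit
// case it is roughly (register choices illustrative only):
//   adr   $scratch, .LJTI0_0
//   ldrsw $dst, [$scratch, $entry, lsl #2]
//   add   $dst, $dst, $scratch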

// Space-consuming pseudo to aid testing of placement and reachability
// algorithms. Immediate operand is the number of bytes this "instruction"
// occupies; register operands can be used to enforce dependency and constrain
// the scheduler.
let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
            Sched<[]>;

let hasSideEffects = 1, isCodeGenOnly = 1 in {
  def SpeculationSafeValueX
      : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
  def SpeculationSafeValueW
      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// System instructions.
//===----------------------------------------------------------------------===//

def HINT : HintI<"hint">;
def : InstAlias<"nop",  (HINT 0b000)>;
def : InstAlias<"yield",(HINT 0b001)>;
def : InstAlias<"wfe",  (HINT 0b010)>;
def : InstAlias<"wfi",  (HINT 0b011)>;
def : InstAlias<"sev",  (HINT 0b100)>;
def : InstAlias<"sevl", (HINT 0b101)>;
def : InstAlias<"esb",  (HINT 0b10000)>, Requires<[HasRAS]>;
def : InstAlias<"csdb", (HINT 20)>;
def : InstAlias<"bti",  (HINT 32)>, Requires<[HasBTI]>;
def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;

// v8.2a Statistical Profiling extension
def : InstAlias<"psb $op", (HINT psbhint_op:$op)>, Requires<[HasSPE]>;

// As far as LLVM is concerned this writes to the system's exclusive monitors.
let mayLoad = 1, mayStore = 1 in
def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;

// NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
// model patterns with sufficiently fine granularity.
let mayLoad = ?, mayStore = ? in {
def DMB   : CRmSystemI<barrier_op, 0b101, "dmb",
                       [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;

def DSB   : CRmSystemI<barrier_op, 0b100, "dsb",
                       [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;

def ISB   : CRmSystemI<barrier_op, 0b110, "isb",
                       [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;

def TSB   : CRmSystemI<barrier_op, 0b010, "tsb", []> {
  let CRm        = 2;
  let Inst{12}   = 0;
  let Predicates = [HasTRACEV8_4];
}
}

// ARMv8.2-A Dot Product
let Predicates = [HasDotProd] in {
defm SDOT : SIMDThreeSameVectorDot<0, "sdot", int_aarch64_neon_sdot>;
defm UDOT : SIMDThreeSameVectorDot<1, "udot", int_aarch64_neon_udot>;
defm SDOTlane : SIMDThreeSameVectorDotIndex<0, "sdot", int_aarch64_neon_sdot>;
defm UDOTlane : SIMDThreeSameVectorDotIndex<1, "udot", int_aarch64_neon_udot>;
}

// ARMv8.2-A FP16 Fused Multiply-Add Long
let Predicates = [HasNEON, HasFP16FML] in {
defm FMLAL      : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSL      : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2     : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2     : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
defm FMLALlane  : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
defm FMLSLlane  : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
}

// Armv8.2-A Crypto extensions
let Predicates = [HasSHA3] in {
def SHA512H   : CryptoRRRTied<0b0, 0b00, "sha512h">;
def SHA512H2  : CryptoRRRTied<0b0, 0b01, "sha512h2">;
def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
def RAX1      : CryptoRRR_2D<0b0, 0b11, "rax1">;
def EOR3      : CryptoRRRR_16B<0b00, "eor3">;
def BCAX      : CryptoRRRR_16B<0b01, "bcax">;
def XAR       : CryptoRRRi6<"xar">;
} // HasSHA3

let Predicates = [HasSM4] in {
def SM3TT1A   : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
def SM3TT1B   : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
def SM3TT2A   : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
def SM3TT2B   : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
def SM3SS1    : CryptoRRRR_4S<0b10, "sm3ss1">;
def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
def SM4E      : CryptoRRTied_4S<0b0, 0b01, "sm4e">;
} // HasSM4

let Predicates = [HasRCPC] in {
  // v8.3 Release Consistent Processor Consistent support, optional in v8.2.
  def LDAPRB  : RCPCLoad<0b00, "ldaprb", GPR32>;
  def LDAPRH  : RCPCLoad<0b01, "ldaprh", GPR32>;
  def LDAPRW  : RCPCLoad<0b10, "ldapr", GPR32>;
  def LDAPRX  : RCPCLoad<0b11, "ldapr", GPR64>;
}

// v8.3a complex add and multiply-accumulate. No predicate here, that is done
// inside the multiclass as the FP16 versions need different predicates.
defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
                                               "fcmla", null_frag>;
defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
                                           "fcadd", null_frag>;
defm FCMLA : SIMDIndexedTiedComplexHSD<1, 0, 1, complexrotateop, "fcmla",
                                       null_frag>;

let Predicates = [HasComplxNum, HasNEON, HasFullFP16] in {
  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot90 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 0))>;
  def : Pat<(v4f16 (int_aarch64_neon_vcadd_rot270 (v4f16 V64:$Rn), (v4f16 V64:$Rm))),
            (FCADDv4f16 (v4f16 V64:$Rn), (v4f16 V64:$Rm), (i32 1))>;
  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot90 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 0))>;
  def : Pat<(v8f16 (int_aarch64_neon_vcadd_rot270 (v8f16 V128:$Rn), (v8f16 V128:$Rm))),
            (FCADDv8f16 (v8f16 V128:$Rn), (v8f16 V128:$Rm), (i32 1))>;
}
let Predicates = [HasComplxNum, HasNEON] in {
  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot90 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 0))>;
  def : Pat<(v2f32 (int_aarch64_neon_vcadd_rot270 (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
            (FCADDv2f32 (v2f32 V64:$Rn), (v2f32 V64:$Rm), (i32 1))>;
  foreach Ty = [v4f32, v2f64] in {
    def : Pat<(Ty (int_aarch64_neon_vcadd_rot90 (Ty V128:$Rn), (Ty V128:$Rm))),
              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 0))>;
    def : Pat<(Ty (int_aarch64_neon_vcadd_rot270 (Ty V128:$Rn), (Ty V128:$Rm))),
              (!cast<Instruction>("FCADD"#Ty) (Ty V128:$Rn), (Ty V128:$Rm), (i32 1))>;
  }
}

// v8.3a Pointer Authentication
// These instructions inhabit part of the hint space and so can be used for
// armv8 targets. Keeping the old HINT mnemonic when compiling without PA is
// important for compatibility with other assemblers (e.g. GAS) when building
// software compatible with both CPUs that do or don't implement PA.
let Uses = [LR], Defs = [LR] in {
  def PACIAZ   : SystemNoOperands<0b000, "hint #24">;
  def PACIBZ   : SystemNoOperands<0b010, "hint #26">;
  let isAuthenticated = 1 in {
    def AUTIAZ   : SystemNoOperands<0b100, "hint #28">;
    def AUTIBZ   : SystemNoOperands<0b110, "hint #30">;
  }
}
let Uses = [LR, SP], Defs = [LR] in {
  def PACIASP  : SystemNoOperands<0b001, "hint #25">;
  def PACIBSP  : SystemNoOperands<0b011, "hint #27">;
  let isAuthenticated = 1 in {
    def AUTIASP  : SystemNoOperands<0b101, "hint #29">;
    def AUTIBSP  : SystemNoOperands<0b111, "hint #31">;
  }
}
let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
  def PACIA1716  : SystemNoOperands<0b000, "hint #8">;
  def PACIB1716  : SystemNoOperands<0b010, "hint #10">;
  let isAuthenticated = 1 in {
    def AUTIA1716  : SystemNoOperands<0b100, "hint #12">;
    def AUTIB1716  : SystemNoOperands<0b110, "hint #14">;
  }
}

let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
  def XPACLRI   : SystemNoOperands<0b111, "hint #7">;
}

// These pointer authentication instructions require armv8.3a
let Predicates = [HasPA] in {

  // When compiling with PA, there is a better mnemonic for these instructions.
  def : InstAlias<"paciaz", (PACIAZ), 1>;
  def : InstAlias<"pacibz", (PACIBZ), 1>;
  def : InstAlias<"autiaz", (AUTIAZ), 1>;
  def : InstAlias<"autibz", (AUTIBZ), 1>;
  def : InstAlias<"paciasp", (PACIASP), 1>;
  def : InstAlias<"pacibsp", (PACIBSP), 1>;
  def : InstAlias<"autiasp", (AUTIASP), 1>;
  def : InstAlias<"autibsp", (AUTIBSP), 1>;
  def : InstAlias<"pacia1716", (PACIA1716), 1>;
  def : InstAlias<"pacib1716", (PACIB1716), 1>;
  def : InstAlias<"autia1716", (AUTIA1716), 1>;
  def : InstAlias<"autib1716", (AUTIB1716), 1>;
  def : InstAlias<"xpaclri", (XPACLRI), 1>;

  multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm> {
    def IA   : SignAuthOneData<prefix, 0b00, !strconcat(asm, "ia")>;
    def IB   : SignAuthOneData<prefix, 0b01, !strconcat(asm, "ib")>;
    def DA   : SignAuthOneData<prefix, 0b10, !strconcat(asm, "da")>;
    def DB   : SignAuthOneData<prefix, 0b11, !strconcat(asm, "db")>;
    def IZA  : SignAuthZero<prefix_z, 0b00, !strconcat(asm, "iza")>;
    def DZA  : SignAuthZero<prefix_z, 0b10, !strconcat(asm, "dza")>;
    def IZB  : SignAuthZero<prefix_z, 0b01, !strconcat(asm, "izb")>;
    def DZB  : SignAuthZero<prefix_z, 0b11, !strconcat(asm, "dzb")>;
  }

  defm PAC : SignAuth<0b000, 0b010, "pac">;
  defm AUT : SignAuth<0b001, 0b011, "aut">;

  def XPACI : SignAuthZero<0b100, 0b00, "xpaci">;
  def XPACD : SignAuthZero<0b100, 0b01, "xpacd">;
  def PACGA : SignAuthTwoOperand<0b1100, "pacga", null_frag>;

  // Combined Instructions
  def BRAA    : AuthBranchTwoOperands<0, 0, "braa">;
  def BRAB    : AuthBranchTwoOperands<0, 1, "brab">;
  def BLRAA   : AuthBranchTwoOperands<1, 0, "blraa">;
  def BLRAB   : AuthBranchTwoOperands<1, 1, "blrab">;

  def BRAAZ   : AuthOneOperand<0b000, 0, "braaz">;
  def BRABZ   : AuthOneOperand<0b000, 1, "brabz">;
  def BLRAAZ  : AuthOneOperand<0b001, 0, "blraaz">;
  def BLRABZ  : AuthOneOperand<0b001, 1, "blrabz">;

  let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
    def RETAA  : AuthReturn<0b010, 0, "retaa">;
    def RETAB  : AuthReturn<0b010, 1, "retab">;
    def ERETAA : AuthReturn<0b100, 0, "eretaa">;
    def ERETAB : AuthReturn<0b100, 1, "eretab">;
  }

  defm LDRAA  : AuthLoad<0, "ldraa", simm10Scaled>;
  defm LDRAB  : AuthLoad<1, "ldrab", simm10Scaled>;

} // HasPA

// v8.3a floating point conversion for javascript
let Predicates = [HasJS, HasFPARMv8] in
def FJCVTZS  : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
                                       "fjcvtzs",
                                       [(set GPR32:$Rd,
                                          (int_aarch64_fjcvtzs FPR64:$Rn))]> {
  let Inst{31} = 0;
} // HasJS, HasFPARMv8

// v8.4 Flag manipulation instructions
let Predicates = [HasFMI] in {
def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
  let Inst{20-5} = 0b0000001000000000;
}
def SETF8  : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
def RMIF   : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
                        "{\t$Rn, $imm, $mask}">;
} // HasFMI

// v8.5 flag manipulation instructions
let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {

def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
  let Inst{18-16} = 0b000;
  let Inst{11-8} = 0b0000;
  let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b001;
}

def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
  let Inst{18-16} = 0b000;
  let Inst{11-8} = 0b0000;
  let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b010;
}
} // HasAltNZCV

// Armv8.5-A speculation barrier
def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
  let Inst{20-5} = 0b0001100110000111;
  let Unpredictable{11-8} = 0b1111;
  let Predicates = [HasSB];
  let hasSideEffects = 1;
}
963 def : InstAlias<"clrex", (CLREX 0xf)>;
964 def : InstAlias<"isb", (ISB 0xf)>;
965 def : InstAlias<"ssbb", (DSB 0)>;
966 def : InstAlias<"pssbb", (DSB 4)>;
970 def MSRpstateImm1 : MSRpstateImm0_1;
971 def MSRpstateImm4 : MSRpstateImm0_15;
973 // The thread pointer (on Linux, at least, where this has been implemented) is
975 def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
976 [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;

let Uses = [ X9 ], Defs = [ X16, X17, LR, NZCV ] in {
def HWASAN_CHECK_MEMACCESS : Pseudo<
  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
  [(int_hwasan_check_memaccess X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
  Sched<[]>;
def HWASAN_CHECK_MEMACCESS_SHORTGRANULES : Pseudo<
  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo),
  [(int_hwasan_check_memaccess_shortgranules X9, GPR64noip:$ptr, (i32 timm:$accessinfo))]>,
  Sched<[]>;
}

// The cycle counter PMC register is PMCCNTR_EL0.
let Predicates = [HasPerfMon] in
def : Pat<(readcyclecounter), (MRS 0xdce8)>;

// FPCR register
def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;
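
// The MRS immediates above are the packed (op0, op1, CRn, CRm, op2) system
// register encodings; 0xdce8 and 0xda20 should decode to PMCCNTR_EL0 and
// FPCR respectively.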

// Generic system instructions
def SYSxt  : SystemXtI<0, "sys">;
def SYSLxt : SystemLXtI<1, "sysl">;

def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
                (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
                       sys_cr_op:$Cm, imm0_7:$op2, XZR)>;

// System instructions for transactional memory extension
let Predicates = [HasTME] in {

def TSTART : TMSystemI<0b0000, "tstart",
                       [(set GPR64:$Rt, (int_aarch64_tstart))]>;

def TCOMMIT : TMSystemINoOperand<0b0000, "tcommit", [(int_aarch64_tcommit)]>;

def TCANCEL : TMSystemException<0b011, "tcancel",
                                [(int_aarch64_tcancel i64_imm0_65535:$imm)]>;

def TTEST : TMSystemI<0b0001, "ttest", [(set GPR64:$Rt, (int_aarch64_ttest))]> {
  let mayLoad = 0;
  let mayStore = 0;
}
} // HasTME

//===----------------------------------------------------------------------===//
// Move immediate instructions.
//===----------------------------------------------------------------------===//

defm MOVK : InsertImmediate<0b11, "movk">;
defm MOVN : MoveImmediate<0b00, "movn">;

let PostEncoderMethod = "fixMOVZ" in
defm MOVZ : MoveImmediate<0b10, "movz">;

// First group of aliases covers an implicit "lsl #0".
def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, i32_imm0_65535:$imm, 0), 0>;
def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, i32_imm0_65535:$imm, 0), 0>;
def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, i32_imm0_65535:$imm, 0)>;
def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, i32_imm0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, i32_imm0_65535:$imm, 0)>;
def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, i32_imm0_65535:$imm, 0)>;

// Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g3:$sym, 48)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g2:$sym, 32)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g3:$sym, 48), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g2:$sym, 32), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g1:$sym, 16), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movw_symbol_g0:$sym, 0), 0>;

def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g1:$sym, 16)>;
def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movw_symbol_g0:$sym, 0)>;

def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g1:$sym, 16), 0>;
def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movw_symbol_g0:$sym, 0), 0>;

// Final group of aliases covers true "mov $Rd, $imm" cases.
multiclass movw_mov_alias<string basename,Instruction INST, RegisterClass GPR,
                          int width, int shift> {
  def _asmoperand : AsmOperandClass {
    let Name = basename # width # "_lsl" # shift # "MovAlias";
    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
                               # shift # ">";
    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
  }

  def _movimm : Operand<i32> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
  }

  def : InstAlias<"mov $Rd, $imm",
                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
}
1082 defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
1083 defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
1085 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
1086 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
1087 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
1088 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
1090 defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
1091 defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
1093 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
1094 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
1095 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
1096 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;

let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
    isAsCheapAsAMove = 1 in {
// FIXME: The following pseudo instructions are only needed because remat
// cannot handle multiple instructions.  When that changes, we can select
// directly to the real instructions and get rid of these pseudos.

def MOVi32imm
    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
             [(set GPR32:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
def MOVi64imm
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
             [(set GPR64:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
} // isReMaterializable, isCodeGenOnly

// If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
// eventual expansion code fewer bits to worry about getting right. Marshalling
// the types is a little tricky though:
def i64imm_32bit : ImmLeaf<i64, [{
  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
}]>;

def s64imm_32bit : ImmLeaf<i64, [{
  int64_t Imm64 = static_cast<int64_t>(Imm);
  return Imm64 >= std::numeric_limits<int32_t>::min() &&
         Imm64 <= std::numeric_limits<int32_t>::max();
}]>;

def trunc_imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
                   GISDNodeXFormEquiv<trunc_imm>;

let Predicates = [OptimizedGISelOrOtherSelector] in {
// The SUBREG_TO_REG isn't eliminated at -O0, which can result in pointless
// copies.
def : Pat<(i64 i64imm_32bit:$src),
          (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
}
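
// For example, 0x00000000deadbeef satisfies i64imm_32bit, so it can be
// materialized as (MOVi32imm 0xdeadbeef) with the result inserted into the
// low 32 bits of a 64-bit register by SUBREG_TO_REG.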

// Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstant(
  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
}]>;

def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
return CurDAG->getTargetConstant(
  N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
}]>;

def : Pat<(f32 fpimm:$in),
  (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
def : Pat<(f64 fpimm:$in),
  (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;

// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
// sequences.
def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
                               tglobaladdr:$g1, tglobaladdr:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
                                  tglobaladdr:$g1, 16),
                          tglobaladdr:$g2, 32),
                  tglobaladdr:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
                               tblockaddress:$g1, tblockaddress:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
                                  tblockaddress:$g1, 16),
                          tblockaddress:$g2, 32),
                  tblockaddress:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
                               tconstpool:$g1, tconstpool:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
                                  tconstpool:$g1, 16),
                          tconstpool:$g2, 32),
                  tconstpool:$g3, 48)>;

def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
                               tjumptable:$g1, tjumptable:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
                                  tjumptable:$g1, 16),
                          tjumptable:$g2, 32),
                  tjumptable:$g3, 48)>;
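
// The selected MOVZ/MOVK chain corresponds to assembly along these lines
// (relocation spellings illustrative):
//   movz x0, #:abs_g0_nc:sym
//   movk x0, #:abs_g1_nc:sym, lsl #16
//   movk x0, #:abs_g2_nc:sym, lsl #32
//   movk x0, #:abs_g3:sym, lsl #48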

//===----------------------------------------------------------------------===//
// Arithmetic instructions.
//===----------------------------------------------------------------------===//

// Add/subtract with carry.
defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;

def : InstAlias<"ngc $dst, $src",  (SBCWr  GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngc $dst, $src",  (SBCXr  GPR64:$dst, XZR, GPR64:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;

// Add/subtract
defm ADD : AddSub<0, "add", "sub", add>;
defm SUB : AddSub<1, "sub", "add">;

def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
def : InstAlias<"mov $dst, $src",
                (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;

defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;

// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
          (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
          (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
          (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
          (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
          (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
          (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
let AddedComplexity = 1 in {
def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
          (SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;
}
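
// The flag-setting form costs nothing when NZCV is unused, and it lets
// e.g. "cmp w0, w1" and a nearby "subs w2, w0, w1" CSE into one instruction.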

// Because of the immediate format for add/sub-imm instructions, the
// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
// These patterns capture that transformation.
let AddedComplexity = 1 in {
def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}

// Because of the immediate format for add/sub-imm instructions, the
// expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
// These patterns capture that transformation.
let AddedComplexity = 1 in {
def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
          (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}
1267 def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1268 def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1269 def : InstAlias<"neg $dst, $src$shift",
1270 (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1271 def : InstAlias<"neg $dst, $src$shift",
1272 (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1274 def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1275 def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1276 def : InstAlias<"negs $dst, $src$shift",
1277 (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1278 def : InstAlias<"negs $dst, $src$shift",
1279 (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;

// Unsigned/Signed divide
defm UDIV : Div<0, "udiv", udiv>;
defm SDIV : Div<1, "sdiv", sdiv>;

def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;

// Variable shift
defm ASRV : Shift<0b10, "asr", sra>;
defm LSLV : Shift<0b00, "lsl", shl>;
defm LSRV : Shift<0b01, "lsr", srl>;
defm RORV : Shift<0b11, "ror", rotr>;

def : ShiftAlias<"asrv", ASRVWr, GPR32>;
def : ShiftAlias<"asrv", ASRVXr, GPR64>;
def : ShiftAlias<"lslv", LSLVWr, GPR32>;
def : ShiftAlias<"lslv", LSLVXr, GPR64>;
def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
def : ShiftAlias<"rorv", RORVWr, GPR32>;
def : ShiftAlias<"rorv", RORVXr, GPR64>;

// Multiply-add
let AddedComplexity = 5 in {
defm MADD : MulAccum<0, "madd", add>;
defm MSUB : MulAccum<1, "msub", sub>;

def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
          (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
          (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;

def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
          (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
          (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
} // AddedComplexity = 5
1326 let AddedComplexity = 5 in {
1327 def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
1328 def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
1329 def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
1330 def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
1332 def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
1333 (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1334 def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
1335 (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1337 def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
1338 (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1339 def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
1340 (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1342 def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
1343 (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1344 def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
1345 (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1346 def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
1347 (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1348 (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1350 def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1351 (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1352 def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1353 (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1354 def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
1355 (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1356 (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1358 def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
1359 (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1360 def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
1361 (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1362 def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
1363 GPR64:$Ra)),
1364 (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1365 (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1367 def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1368 (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1369 def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1370 (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1371 def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
1372 (s64imm_32bit:$C)))),
1373 (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1374 (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1375 } // AddedComplexity = 5
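// Example of the widening patterns above: a 32-bit value sign-extended to
// 64 bits and multiplied by a 32-bit-representable constant C materializes C
// with MOVi32imm, and a single SMADDLrrr (with XZR as the accumulator)
// performs the widening multiply.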
1377 def : MulAccumWAlias<"mul", MADDWrrr>;
1378 def : MulAccumXAlias<"mul", MADDXrrr>;
1379 def : MulAccumWAlias<"mneg", MSUBWrrr>;
1380 def : MulAccumXAlias<"mneg", MSUBXrrr>;
1381 def : WideMulAccumAlias<"smull", SMADDLrrr>;
1382 def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
1383 def : WideMulAccumAlias<"umull", UMADDLrrr>;
1384 def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
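// The accumulator-less aliases above expand, for example, "mul x0, x1, x2"
// to "madd x0, x1, x2, xzr" and "smull x0, w1, w2" to "smaddl x0, w1, w2, xzr".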
1387 def SMULHrr : MulHi<0b010, "smulh", mulhs>;
1388 def UMULHrr : MulHi<0b110, "umulh", mulhu>;
1391 def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
1392 def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
1393 def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
1394 def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;
1396 def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
1397 def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
1398 def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
1399 def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;
1402 defm CAS : CompareAndSwap<0, 0, "">;
1403 defm CASA : CompareAndSwap<1, 0, "a">;
1404 defm CASL : CompareAndSwap<0, 1, "l">;
1405 defm CASAL : CompareAndSwap<1, 1, "al">;
1408 defm CASP : CompareAndSwapPair<0, 0, "">;
1409 defm CASPA : CompareAndSwapPair<1, 0, "a">;
1410 defm CASPL : CompareAndSwapPair<0, 1, "l">;
1411 defm CASPAL : CompareAndSwapPair<1, 1, "al">;
1414 defm SWP : Swap<0, 0, "">;
1415 defm SWPA : Swap<1, 0, "a">;
1416 defm SWPL : Swap<0, 1, "l">;
1417 defm SWPAL : Swap<1, 1, "al">;
1419 // v8.1 atomic LD<OP> (register): performs a load and then an ST<OP> (register).
1420 defm LDADD : LDOPregister<0b000, "add", 0, 0, "">;
1421 defm LDADDA : LDOPregister<0b000, "add", 1, 0, "a">;
1422 defm LDADDL : LDOPregister<0b000, "add", 0, 1, "l">;
1423 defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;
1425 defm LDCLR : LDOPregister<0b001, "clr", 0, 0, "">;
1426 defm LDCLRA : LDOPregister<0b001, "clr", 1, 0, "a">;
1427 defm LDCLRL : LDOPregister<0b001, "clr", 0, 1, "l">;
1428 defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;
1430 defm LDEOR : LDOPregister<0b010, "eor", 0, 0, "">;
1431 defm LDEORA : LDOPregister<0b010, "eor", 1, 0, "a">;
1432 defm LDEORL : LDOPregister<0b010, "eor", 0, 1, "l">;
1433 defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;
1435 defm LDSET : LDOPregister<0b011, "set", 0, 0, "">;
1436 defm LDSETA : LDOPregister<0b011, "set", 1, 0, "a">;
1437 defm LDSETL : LDOPregister<0b011, "set", 0, 1, "l">;
1438 defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;
1440 defm LDSMAX : LDOPregister<0b100, "smax", 0, 0, "">;
1441 defm LDSMAXA : LDOPregister<0b100, "smax", 1, 0, "a">;
1442 defm LDSMAXL : LDOPregister<0b100, "smax", 0, 1, "l">;
1443 defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;
1445 defm LDSMIN : LDOPregister<0b101, "smin", 0, 0, "">;
1446 defm LDSMINA : LDOPregister<0b101, "smin", 1, 0, "a">;
1447 defm LDSMINL : LDOPregister<0b101, "smin", 0, 1, "l">;
1448 defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;
1450 defm LDUMAX : LDOPregister<0b110, "umax", 0, 0, "">;
1451 defm LDUMAXA : LDOPregister<0b110, "umax", 1, 0, "a">;
1452 defm LDUMAXL : LDOPregister<0b110, "umax", 0, 1, "l">;
1453 defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;
1455 defm LDUMIN : LDOPregister<0b111, "umin", 0, 0, "">;
1456 defm LDUMINA : LDOPregister<0b111, "umin", 1, 0, "a">;
1457 defm LDUMINL : LDOPregister<0b111, "umin", 0, 1, "l">;
1458 defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;
1460 // v8.1 atomic ST<OP> (register): aliases of LD<OP> (register) with Rt = WZR/XZR.
1461 defm : STOPregister<"stadd","LDADD">; // STADDx
1462 defm : STOPregister<"stclr","LDCLR">; // STCLRx
1463 defm : STOPregister<"steor","LDEOR">; // STEORx
1464 defm : STOPregister<"stset","LDSET">; // STSETx
1465 defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
1466 defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
1467 defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
1468 defm : STOPregister<"stumin","LDUMIN">;// STUMINx
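// For example, "stadd w0, [x1]" is "ldadd w0, wzr, [x1]": the loaded old
// value is discarded by writing it to the zero register.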
1470 // v8.5 Memory Tagging Extension
1471 let Predicates = [HasMTE] in {
1473 def IRG : BaseTwoOperand<0b0100, GPR64sp, "irg", int_aarch64_irg, GPR64sp, GPR64>,
1474 Sched<[]>{
1475 let isNotDuplicable = 1;
1476 }
1477 def GMI : BaseTwoOperand<0b0101, GPR64, "gmi", int_aarch64_gmi, GPR64sp>, Sched<[]>{
1479 let isNotDuplicable = 1;
1480 }
1481 def ADDG : AddSubG<0, "addg", null_frag>;
1482 def SUBG : AddSubG<1, "subg", null_frag>;
1484 def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>;
1486 def SUBP : SUBP<0, "subp", int_aarch64_subp>, Sched<[]>;
1487 def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]>{
1488 let Defs = [NZCV];
1489 }
1491 def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;
1493 def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">;
1495 def : Pat<(int_aarch64_addg (am_indexedu6s128 GPR64sp:$Rn, uimm6s16:$imm6), imm0_15:$imm4),
1496 (ADDG GPR64sp:$Rn, imm0_63:$imm6, imm0_15:$imm4)>;
1497 def : Pat<(int_aarch64_ldg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
1498 (LDG GPR64:$Rt, GPR64sp:$Rn, simm9s16:$offset)>;
1500 def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;
1502 def LDGM : MemTagVector<1, "ldgm", "\t$Rt, [$Rn]",
1503 (outs GPR64:$Rt), (ins GPR64sp:$Rn)>;
1504 def STGM : MemTagVector<0, "stgm", "\t$Rt, [$Rn]",
1505 (outs), (ins GPR64:$Rt, GPR64sp:$Rn)>;
1506 def STZGM : MemTagVector<0, "stzgm", "\t$Rt, [$Rn]",
1507 (outs), (ins GPR64:$Rt, GPR64sp:$Rn)> {
1508 let Inst{23} = 0;
1509 }
1511 defm STG : MemTagStore<0b00, "stg">;
1512 defm STZG : MemTagStore<0b01, "stzg">;
1513 defm ST2G : MemTagStore<0b10, "st2g">;
1514 defm STZ2G : MemTagStore<0b11, "stz2g">;
1516 def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1517 (STGOffset $Rn, $Rm, $imm)>;
1518 def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1519 (STZGOffset $Rn, $Rm, $imm)>;
1520 def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1521 (ST2GOffset $Rn, $Rm, $imm)>;
1522 def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
1523 (STZ2GOffset $Rn, $Rm, $imm)>;
1525 defm STGP : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;
1526 def STGPpre : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;
1527 def STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;
1529 def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
1530 (STGOffset GPR64:$Rt, GPR64sp:$Rn, simm9s16:$offset)>;
1532 def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2),
1533 (STGPi $Rt, $Rt2, $Rn, $imm)>;
1535 def IRGstack
1536 : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rsp, GPR64:$Rm), []>,
1537 Sched<[]>;
1538 def TAGPstack
1539 : Pseudo<(outs GPR64sp:$Rd), (ins GPR64sp:$Rn, uimm6s16:$imm6, GPR64sp:$Rm, imm0_15:$imm4), []>,
1540 Sched<[]>;
1542 // Explicit SP in the first operand prevents ShrinkWrap optimization
1543 // from leaving this instruction out of the stack frame. When IRGstack
1544 // is transformed into IRG, this operand is replaced with the actual
1545 // register / expression for the tagged base pointer of the current function.
1546 def : Pat<(int_aarch64_irg_sp i64:$Rm), (IRGstack SP, i64:$Rm)>;
1548 // Large STG to be expanded into a loop. $Rm is the size, $Rn is the start address.
1549 // $Rn_wback is one past the end of the range.
1550 let isCodeGenOnly=1, mayStore=1 in {
1551 def STGloop
1552 : Pseudo<(outs GPR64common:$Rm_wback, GPR64sp:$Rn_wback), (ins GPR64common:$Rm, GPR64sp:$Rn),
1553 [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,$Rm = $Rm_wback,@earlyclobber $Rm_wback" >,
1554 Sched<[WriteAdr, WriteST]>;
1556 def STZGloop
1557 : Pseudo<(outs GPR64common:$Rm_wback, GPR64sp:$Rn_wback), (ins GPR64common:$Rm, GPR64sp:$Rn),
1558 [], "$Rn = $Rn_wback,@earlyclobber $Rn_wback,$Rm = $Rm_wback,@earlyclobber $Rm_wback" >,
1559 Sched<[WriteAdr, WriteST]>;
1560 }
1562 } // Predicates = [HasMTE]
1564 //===----------------------------------------------------------------------===//
1565 // Logical instructions.
1566 //===----------------------------------------------------------------------===//
1569 defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
1570 defm AND : LogicalImm<0b00, "and", and, "bic">;
1571 defm EOR : LogicalImm<0b10, "eor", xor, "eon">;
1572 defm ORR : LogicalImm<0b01, "orr", or, "orn">;
1574 // FIXME: these aliases *are* canonical sometimes (when movz can't be
1575 // used). Actually, it seems to be working right now, but putting logical_immXX
1576 // here is a bit dodgy on the AsmParser side too.
1577 def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
1578 logical_imm32:$imm), 0>;
1579 def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
1580 logical_imm64:$imm), 0>;
1584 defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
1585 defm BICS : LogicalRegS<0b11, 1, "bics",
1586 BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
1587 defm AND : LogicalReg<0b00, 0, "and", and>;
1588 defm BIC : LogicalReg<0b00, 1, "bic",
1589 BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
1590 defm EON : LogicalReg<0b10, 1, "eon",
1591 BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
1592 defm EOR : LogicalReg<0b10, 0, "eor", xor>;
1593 defm ORN : LogicalReg<0b01, 1, "orn",
1594 BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
1595 defm ORR : LogicalReg<0b01, 0, "orr", or>;
1597 def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
1598 def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;
1600 def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
1601 def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;
1603 def : InstAlias<"mvn $Wd, $Wm$sh",
1604 (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
1605 def : InstAlias<"mvn $Xd, $Xm$sh",
1606 (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;
1608 def : InstAlias<"tst $src1, $src2",
1609 (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
1610 def : InstAlias<"tst $src1, $src2",
1611 (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;
1613 def : InstAlias<"tst $src1, $src2",
1614 (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
1615 def : InstAlias<"tst $src1, $src2",
1616 (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;
1618 def : InstAlias<"tst $src1, $src2$sh",
1619 (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
1620 def : InstAlias<"tst $src1, $src2$sh",
1621 (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;
1624 def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
1625 def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
1628 //===----------------------------------------------------------------------===//
1629 // One operand data processing instructions.
1630 //===----------------------------------------------------------------------===//
1632 defm CLS : OneOperandData<0b101, "cls">;
1633 defm CLZ : OneOperandData<0b100, "clz", ctlz>;
1634 defm RBIT : OneOperandData<0b000, "rbit", bitreverse>;
1636 def REV16Wr : OneWRegData<0b001, "rev16",
1637 UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
1638 def REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
1640 def : Pat<(cttz GPR32:$Rn),
1641 (CLZWr (RBITWr GPR32:$Rn))>;
1642 def : Pat<(cttz GPR64:$Rn),
1643 (CLZXr (RBITXr GPR64:$Rn))>;
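// e.g. cttz(w0) lowers via the pattern above to "rbit w8, w0; clz w0, w8"
// (the scratch register is chosen arbitrarily here).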
1644 def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
1645 (i32 1))),
1646 (CLSWr GPR32:$Rn)>;
1647 def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
1648 (i64 1))),
1649 (CLSXr GPR64:$Rn)>;
1650 def : Pat<(int_aarch64_cls GPR32:$Rn), (CLSWr GPR32:$Rn)>;
1651 def : Pat<(int_aarch64_cls64 GPR64:$Rm), (EXTRACT_SUBREG (CLSXr GPR64:$Rm), sub_32)>;
1653 // Unlike the other one-operand instructions, the instructions with the "rev"
1654 // mnemonic do *not* just differ in the size bit, but actually use different
1655 // opcode bits for the different sizes.
1656 def REVWr : OneWRegData<0b010, "rev", bswap>;
1657 def REVXr : OneXRegData<0b011, "rev", bswap>;
1658 def REV32Xr : OneXRegData<0b010, "rev32",
1659 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;
1661 def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;
1663 // The bswap commutes with the rotr, so we want a pattern for both possible
1664 // orderings.
1665 def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
1666 def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
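// Worked example for the 32-bit case: if w1 holds bytes [b3,b2,b1,b0], bswap
// yields [b0,b1,b2,b3] and a rotate right by 16 then yields [b2,b3,b0,b1],
// i.e. the bytes of each half-word are swapped, which is exactly rev16.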
1668 //===----------------------------------------------------------------------===//
1669 // Bitfield immediate extraction instruction.
1670 //===----------------------------------------------------------------------===//
1671 let hasSideEffects = 0 in
1672 defm EXTR : ExtractImm<"extr">;
1673 def : InstAlias<"ror $dst, $src, $shift",
1674 (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
1675 def : InstAlias<"ror $dst, $src, $shift",
1676 (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;
1678 def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
1679 (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
1680 def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
1681 (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
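// e.g. "ror w0, w1, #8" becomes "extr w0, w1, w1, #8": extracting 32 bits
// starting at bit 8 of the doubled source w1:w1 is a rotate right by 8.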
1683 //===----------------------------------------------------------------------===//
1684 // Other bitfield immediate instructions.
1685 //===----------------------------------------------------------------------===//
1686 let hasSideEffects = 0 in {
1687 defm BFM : BitfieldImmWith2RegArgs<0b01, "bfm">;
1688 defm SBFM : BitfieldImm<0b00, "sbfm">;
1689 defm UBFM : BitfieldImm<0b10, "ubfm">;
1690 }
1692 def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
1693 uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
1694 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1695 }]>;
1697 def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
1698 uint64_t enc = 31 - N->getZExtValue();
1699 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1700 }]>;
1702 // min(7, 31 - shift_amt)
1703 def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
1704 uint64_t enc = 31 - N->getZExtValue();
1705 enc = enc > 7 ? 7 : enc;
1706 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1707 }]>;
1709 // min(15, 31 - shift_amt)
1710 def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
1711 uint64_t enc = 31 - N->getZExtValue();
1712 enc = enc > 15 ? 15 : enc;
1713 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1714 }]>;
1716 def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
1717 uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
1718 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1719 }]>;
1721 def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
1722 uint64_t enc = 63 - N->getZExtValue();
1723 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1724 }]>;
1726 // min(7, 63 - shift_amt)
1727 def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
1728 uint64_t enc = 63 - N->getZExtValue();
1729 enc = enc > 7 ? 7 : enc;
1730 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1731 }]>;
1733 // min(15, 63 - shift_amt)
1734 def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
1735 uint64_t enc = 63 - N->getZExtValue();
1736 enc = enc > 15 ? 15 : enc;
1737 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1738 }]>;
1740 // min(31, 63 - shift_amt)
1741 def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
1742 uint64_t enc = 63 - N->getZExtValue();
1743 enc = enc > 31 ? 31 : enc;
1744 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1745 }]>;
1747 def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
1748 (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
1749 (i64 (i32shift_b imm0_31:$imm)))>;
1750 def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
1751 (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
1752 (i64 (i64shift_b imm0_63:$imm)))>;
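// Worked example: "shl w0, w1, #3" selects UBFMWri with immr = (32-3) & 31 = 29
// and imms = 31-3 = 28, which is the canonical encoding of "lsl w0, w1, #3".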
1754 let AddedComplexity = 10 in {
1755 def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
1756 (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
1757 def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
1758 (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
1759 }
1761 def : InstAlias<"asr $dst, $src, $shift",
1762 (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
1763 def : InstAlias<"asr $dst, $src, $shift",
1764 (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
1765 def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
1766 def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
1767 def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
1768 def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
1769 def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
1771 def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
1772 (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
1773 def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
1774 (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
1776 def : InstAlias<"lsr $dst, $src, $shift",
1777 (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
1778 def : InstAlias<"lsr $dst, $src, $shift",
1779 (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
1780 def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
1781 def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
1782 def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
1783 def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
1784 def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
1786 //===----------------------------------------------------------------------===//
1787 // Conditional comparison instructions.
1788 //===----------------------------------------------------------------------===//
1789 defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
1790 defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;
1792 //===----------------------------------------------------------------------===//
1793 // Conditional select instructions.
1794 //===----------------------------------------------------------------------===//
1795 defm CSEL : CondSelect<0, 0b00, "csel">;
1797 def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
1798 defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
1799 defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
1800 defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;
1802 def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1803 (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1804 def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1805 (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1806 def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1807 (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1808 def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1809 (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1810 def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1811 (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1812 def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1813 (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1815 def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
1816 (CSINCWr WZR, WZR, (i32 imm:$cc))>;
1817 def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
1818 (CSINCXr XZR, XZR, (i32 imm:$cc))>;
1819 def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
1820 (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
1821 def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
1822 (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
1823 def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
1824 (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1825 def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
1826 (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1827 def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
1828 (CSINVWr WZR, WZR, (i32 imm:$cc))>;
1829 def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
1830 (CSINVXr XZR, XZR, (i32 imm:$cc))>;
1831 def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
1832 (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
1833 def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
1834 (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
1835 def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
1836 (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1837 def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
1838 (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1840 // The condition code used by the aliased instruction is the inverse of the
1841 // one written in the alias. The parser already inverts the condition code
1842 // for these aliases.
1843 def : InstAlias<"cset $dst, $cc",
1844 (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
1845 def : InstAlias<"cset $dst, $cc",
1846 (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
1848 def : InstAlias<"csetm $dst, $cc",
1849 (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
1850 def : InstAlias<"csetm $dst, $cc",
1851 (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
1853 def : InstAlias<"cinc $dst, $src, $cc",
1854 (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1855 def : InstAlias<"cinc $dst, $src, $cc",
1856 (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1858 def : InstAlias<"cinv $dst, $src, $cc",
1859 (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1860 def : InstAlias<"cinv $dst, $src, $cc",
1861 (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1863 def : InstAlias<"cneg $dst, $src, $cc",
1864 (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1865 def : InstAlias<"cneg $dst, $src, $cc",
1866 (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
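// For example, "cset w0, eq" is parsed as "csinc w0, wzr, wzr, ne" and
// "cneg w0, w1, lt" as "csneg w0, w1, w1, ge".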
1868 //===----------------------------------------------------------------------===//
1869 // PC-relative instructions.
1870 //===----------------------------------------------------------------------===//
1871 let isReMaterializable = 1 in {
1872 let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
1873 def ADR : ADRI<0, "adr", adrlabel,
1874 [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
1875 } // hasSideEffects = 0
1877 def ADRP : ADRI<1, "adrp", adrplabel,
1878 [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
1879 } // isReMaterializable = 1
1881 // page address of a constant pool entry, block address
1882 def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
1883 def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
1884 def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
1885 def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
1886 def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
1887 def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
1888 def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;
1890 //===----------------------------------------------------------------------===//
1891 // Unconditional branch (register) instructions.
1892 //===----------------------------------------------------------------------===//
1894 let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
1895 def RET : BranchReg<0b0010, "ret", []>;
1896 def DRPS : SpecialReturn<0b0101, "drps">;
1897 def ERET : SpecialReturn<0b0100, "eret">;
1898 } // isReturn = 1, isTerminator = 1, isBarrier = 1
1900 // Default to the LR register.
1901 def : InstAlias<"ret", (RET LR)>;
1903 let isCall = 1, Defs = [LR], Uses = [SP] in {
1904 def BLR : BranchReg<0b0001, "blr", [(AArch64call GPR64:$Rn)]>;
1905 } // isCall
1907 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
1908 def BR : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
1909 } // isBranch, isTerminator, isBarrier, isIndirectBranch
1911 // Create a separate pseudo-instruction for codegen to use so that we don't
1912 // flag LR as used in every function. It'll be restored before the RET by the
1913 // epilogue if it's legitimately used.
1914 def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
1915 Sched<[WriteBrReg]> {
1916 let isTerminator = 1;
1917 let isBarrier = 1;
1918 let isReturn = 1;
1919 }
1921 // This is a directive-like pseudo-instruction. The purpose is to insert an
1922 // R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
1923 // (which in the usual case is a BLR).
1924 let hasSideEffects = 1 in
1925 def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
1926 let AsmString = ".tlsdesccall $sym";
1927 }
1929 // Pseudo instruction to tell the streamer to emit a 'B' character into the
1930 // augmentation string.
1931 def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}
1933 // FIXME: maybe the scratch register used shouldn't be fixed to X1?
1934 // FIXME: can "hasSideEffects" be dropped?
1935 let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
1936 isCodeGenOnly = 1 in
1937 def TLSDESC_CALLSEQ
1938 : Pseudo<(outs), (ins i64imm:$sym),
1939 [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
1940 Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
1941 def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
1942 (TLSDESC_CALLSEQ texternalsym:$sym)>;
1944 //===----------------------------------------------------------------------===//
1945 // Conditional branch (immediate) instruction.
1946 //===----------------------------------------------------------------------===//
1947 def Bcc : BranchCond;
1949 //===----------------------------------------------------------------------===//
1950 // Compare-and-branch instructions.
1951 //===----------------------------------------------------------------------===//
1952 defm CBZ : CmpBranch<0, "cbz", AArch64cbz>;
1953 defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;
1955 //===----------------------------------------------------------------------===//
1956 // Test-bit-and-branch instructions.
1957 //===----------------------------------------------------------------------===//
1958 defm TBZ : TestBranch<0, "tbz", AArch64tbz>;
1959 defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;
1961 //===----------------------------------------------------------------------===//
1962 // Unconditional branch (immediate) instructions.
1963 //===----------------------------------------------------------------------===//
1964 let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
1965 def B : BranchImm<0, "b", [(br bb:$addr)]>;
1966 } // isBranch, isTerminator, isBarrier
1968 let isCall = 1, Defs = [LR], Uses = [SP] in {
1969 def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
1970 } // isCall
1971 def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;
1973 //===----------------------------------------------------------------------===//
1974 // Exception generation instructions.
1975 //===----------------------------------------------------------------------===//
1977 def BRK : ExceptionGeneration<0b001, 0b00, "brk">;
1979 def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
1980 def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
1981 def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">;
1982 def HLT : ExceptionGeneration<0b010, 0b00, "hlt">;
1983 def HVC : ExceptionGeneration<0b000, 0b10, "hvc">;
1984 def SMC : ExceptionGeneration<0b000, 0b11, "smc">;
1985 def SVC : ExceptionGeneration<0b000, 0b01, "svc">;
1987 // DCPSn defaults to an immediate operand of zero if unspecified.
1988 def : InstAlias<"dcps1", (DCPS1 0)>;
1989 def : InstAlias<"dcps2", (DCPS2 0)>;
1990 def : InstAlias<"dcps3", (DCPS3 0)>;
1992 def UDF : UDFType<0, "udf">;
1994 //===----------------------------------------------------------------------===//
1995 // Load instructions.
1996 //===----------------------------------------------------------------------===//
1998 // Pair (indexed, offset)
1999 defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
2000 defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
2001 defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
2002 defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
2003 defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;
2005 defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;
2007 // Pair (pre-indexed)
2008 def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
2009 def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
2010 def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
2011 def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
2012 def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;
2014 def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;
2016 // Pair (post-indexed)
2017 def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
2018 def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
2019 def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
2020 def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
2021 def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;
2023 def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;
2026 // Pair (no allocate)
2027 defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
2028 defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
2029 defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
2030 defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
2031 defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;
2033 def : Pat<(AArch64ldp (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
2034 (LDPXi GPR64sp:$Rn, simm7s8:$offset)>;
2037 // (register offset)
2041 defm LDRBB : Load8RO<0b00, 0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
2042 defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
2043 defm LDRW : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
2044 defm LDRX : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;
2047 defm LDRB : Load8RO<0b00, 1, 0b01, FPR8Op, "ldr", untyped, load>;
2048 defm LDRH : Load16RO<0b01, 1, 0b01, FPR16Op, "ldr", f16, load>;
2049 defm LDRS : Load32RO<0b10, 1, 0b01, FPR32Op, "ldr", f32, load>;
2050 defm LDRD : Load64RO<0b11, 1, 0b01, FPR64Op, "ldr", f64, load>;
2051 defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;
2053 // Load sign-extended half-word
2054 defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
2055 defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;
2057 // Load sign-extended byte
2058 defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
2059 defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;
2061 // Load sign-extended word
2062 defm LDRSW : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;
2065 defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;
2067 // For regular loads we do not have any alignment requirement.
2068 // Thus, it is safe to directly map the vector loads with interesting
2069 // addressing modes.
2070 // FIXME: We could do the same for bitconvert to floating point vectors.
2071 multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
2072 ValueType ScalTy, ValueType VecTy,
2073 Instruction LOADW, Instruction LOADX,
2074 SubRegIndex sub> {
2075 def : Pat<(VecTy (scalar_to_vector (ScalTy
2076 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
2077 (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
2078 (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
2079 sub)>;
2081 def : Pat<(VecTy (scalar_to_vector (ScalTy
2082 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
2083 (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
2084 (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
2085 sub)>;
2086 }
2088 let AddedComplexity = 10 in {
2089 defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v8i8, LDRBroW, LDRBroX, bsub>;
2090 defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v16i8, LDRBroW, LDRBroX, bsub>;
2092 defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
2093 defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
2095 defm : ScalToVecROLoadPat<ro16, load, i32, v4f16, LDRHroW, LDRHroX, hsub>;
2096 defm : ScalToVecROLoadPat<ro16, load, i32, v8f16, LDRHroW, LDRHroX, hsub>;
2098 defm : ScalToVecROLoadPat<ro32, load, i32, v2i32, LDRSroW, LDRSroX, ssub>;
2099 defm : ScalToVecROLoadPat<ro32, load, i32, v4i32, LDRSroW, LDRSroX, ssub>;
2101 defm : ScalToVecROLoadPat<ro32, load, f32, v2f32, LDRSroW, LDRSroX, ssub>;
2102 defm : ScalToVecROLoadPat<ro32, load, f32, v4f32, LDRSroW, LDRSroX, ssub>;
2104 defm : ScalToVecROLoadPat<ro64, load, i64, v2i64, LDRDroW, LDRDroX, dsub>;
2106 defm : ScalToVecROLoadPat<ro64, load, f64, v2f64, LDRDroW, LDRDroX, dsub>;
2109 def : Pat <(v1i64 (scalar_to_vector (i64
2110 (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
2111 ro_Wextend64:$extend))))),
2112 (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
2114 def : Pat <(v1i64 (scalar_to_vector (i64
2115 (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
2116 ro_Xextend64:$extend))))),
2117 (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
2118 } // AddedComplexity = 10
2120 // Match all 64-bit-wide loads whose type is compatible with FPR64.
2121 multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
2122 Instruction LOADW, Instruction LOADX> {
2124 def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
2125 (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2127 def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
2128 (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2129 }
2131 let AddedComplexity = 10 in {
2132 let Predicates = [IsLE] in {
2133 // We must do vector loads with LD1 in big-endian.
2134 defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
2135 defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
2136 defm : VecROLoadPat<ro64, v8i8, LDRDroW, LDRDroX>;
2137 defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
2138 defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
2139 }
2141 defm : VecROLoadPat<ro64, v1i64, LDRDroW, LDRDroX>;
2142 defm : VecROLoadPat<ro64, v1f64, LDRDroW, LDRDroX>;
2144 // Match all 128-bit-wide loads whose type is compatible with FPR128.
2145 let Predicates = [IsLE] in {
2146 // We must do vector loads with LD1 in big-endian.
2147 defm : VecROLoadPat<ro128, v2i64, LDRQroW, LDRQroX>;
2148 defm : VecROLoadPat<ro128, v2f64, LDRQroW, LDRQroX>;
2149 defm : VecROLoadPat<ro128, v4i32, LDRQroW, LDRQroX>;
2150 defm : VecROLoadPat<ro128, v4f32, LDRQroW, LDRQroX>;
2151 defm : VecROLoadPat<ro128, v8i16, LDRQroW, LDRQroX>;
2152 defm : VecROLoadPat<ro128, v8f16, LDRQroW, LDRQroX>;
2153 defm : VecROLoadPat<ro128, v16i8, LDRQroW, LDRQroX>;
2154 }
2155 } // AddedComplexity = 10
2158 multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
2159 Instruction INSTW, Instruction INSTX> {
2160 def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
2161 (SUBREG_TO_REG (i64 0),
2162 (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
2163 sub_32)>;
2165 def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
2166 (SUBREG_TO_REG (i64 0),
2167 (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
2168 sub_32)>;
2169 }
2171 let AddedComplexity = 10 in {
2172 defm : ExtLoadTo64ROPat<ro8, zextloadi8, LDRBBroW, LDRBBroX>;
2173 defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
2174 defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW, LDRWroX>;
2176 // zextloadi1 -> zextloadi8
2177 defm : ExtLoadTo64ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
2179 // extload -> zextload
2180 defm : ExtLoadTo64ROPat<ro8, extloadi8, LDRBBroW, LDRBBroX>;
2181 defm : ExtLoadTo64ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
2182 defm : ExtLoadTo64ROPat<ro32, extloadi32, LDRWroW, LDRWroX>;
2184 // extloadi1 -> zextloadi8
2185 defm : ExtLoadTo64ROPat<ro8, extloadi1, LDRBBroW, LDRBBroX>;
2186 }
2190 multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
2191 Instruction INSTW, Instruction INSTX> {
2192 def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
2193 (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2195 def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
2196 (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2197 }
2200 let AddedComplexity = 10 in {
2201 // extload -> zextload
2202 defm : ExtLoadTo32ROPat<ro8, extloadi8, LDRBBroW, LDRBBroX>;
2203 defm : ExtLoadTo32ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
2204 defm : ExtLoadTo32ROPat<ro32, extloadi32, LDRWroW, LDRWroX>;
2206 // zextloadi1 -> zextloadi8
2207 defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
2208 }
2211 // (unsigned immediate)
2213 defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
2214 [(set GPR64z:$Rt,
2215 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
2216 defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
2217 [(set GPR32z:$Rt,
2218 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
2219 defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
2220 [(set FPR8Op:$Rt,
2221 (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
2222 defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
2223 [(set (f16 FPR16Op:$Rt),
2224 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
2225 defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
2226 [(set (f32 FPR32Op:$Rt),
2227 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
2228 defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
2229 [(set (f64 FPR64Op:$Rt),
2230 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
2231 defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
2232 [(set (f128 FPR128Op:$Rt),
2233 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;
2235 // For regular loads we do not have any alignment requirement.
2236 // Thus, it is safe to directly map the vector loads with interesting
2237 // addressing modes.
2238 // FIXME: We could do the same for bitconvert to floating point vectors.
2239 def : Pat <(v8i8 (scalar_to_vector (i32
2240 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
2241 (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
2242 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
2243 def : Pat <(v16i8 (scalar_to_vector (i32
2244 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
2245 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
2246 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
2247 def : Pat <(v4i16 (scalar_to_vector (i32
2248 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
2249 (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
2250 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
2251 def : Pat <(v8i16 (scalar_to_vector (i32
2252 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
2253 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
2254 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
2255 def : Pat <(v2i32 (scalar_to_vector (i32
2256 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
2257 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
2258 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
2259 def : Pat <(v4i32 (scalar_to_vector (i32
2260 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
2261 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
2262 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
2263 def : Pat <(v1i64 (scalar_to_vector (i64
2264 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
2265 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2266 def : Pat <(v2i64 (scalar_to_vector (i64
2267 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
2268 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
2269 (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;
2271 // Match all 64-bit-wide loads whose type is compatible with FPR64.
2272 let Predicates = [IsLE] in {
2273 // We must use LD1 to perform vector loads in big-endian.
2274 def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2275 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2276 def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2277 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2278 def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2279 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2280 def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2281 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2282 def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2283 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2284 }
2285 def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2286 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2287 def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
2288 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
2290 // Match all 128-bit-wide loads whose type is compatible with FPR128.
2291 let Predicates = [IsLE] in {
2292 // We must use LD1 to perform vector loads in big-endian.
2293 def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2294 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2295 def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2296 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2297 def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2298 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2299 def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2300 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2301 def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2302 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2303 def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2304 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2305 def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2306 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2307 }
2308 def : Pat<(f128 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2309 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2311 defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
2312 [(set GPR32:$Rt,
2313 (zextloadi16 (am_indexed16 GPR64sp:$Rn,
2314 uimm12s2:$offset)))]>;
2315 defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
2316 [(set GPR32:$Rt,
2317 (zextloadi8 (am_indexed8 GPR64sp:$Rn,
2318 uimm12s1:$offset)))]>;
2320 def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2321 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2322 def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2323 (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
2325 // zextloadi1 -> zextloadi8
2326 def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2327 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2328 def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2329 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2331 // extload -> zextload
2332 def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2333 (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
2334 def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2335 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2336 def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2337 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2338 def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
2339 (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
2340 def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2341 (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
2342 def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2343 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2344 def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2345 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2347 // load sign-extended half-word
2348 defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
2349 [(set GPR32:$Rt,
2350 (sextloadi16 (am_indexed16 GPR64sp:$Rn,
2351 uimm12s2:$offset)))]>;
2352 defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
2353 [(set GPR64:$Rt,
2354 (sextloadi16 (am_indexed16 GPR64sp:$Rn,
2355 uimm12s2:$offset)))]>;
2357 // load sign-extended byte
2358 defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
2359 [(set GPR32:$Rt,
2360 (sextloadi8 (am_indexed8 GPR64sp:$Rn,
2361 uimm12s1:$offset)))]>;
2362 defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
2363 [(set GPR64:$Rt,
2364 (sextloadi8 (am_indexed8 GPR64sp:$Rn,
2365 uimm12s1:$offset)))]>;
2367 // load sign-extended word
2368 defm LDRSW : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
2369 [(set GPR64:$Rt,
2370 (sextloadi32 (am_indexed32 GPR64sp:$Rn,
2371 uimm12s4:$offset)))]>;
2373 // load zero-extended word
2374 def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
2375 (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
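// No explicit zero-extension instruction is needed here: a 32-bit load writes
// a W register, which architecturally zeroes bits [63:32], so SUBREG_TO_REG
// merely re-labels the 32-bit result as a 64-bit value.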
2378 def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
2379 [(AArch64Prefetch imm:$Rt,
2380 (am_indexed64 GPR64sp:$Rn,
2381 uimm12s8:$offset))]>;
2383 def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;
2388 def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
2389 if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
2390 const DataLayout &DL = MF->getDataLayout();
2391 MaybeAlign Align = G->getGlobal()->getPointerAlignment(DL);
2392 return Align && *Align >= 4 && G->getOffset() % 4 == 0;
2393 }
2394 if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
2395 return C->getAlignment() >= 4 && C->getOffset() % 4 == 0;
2396 return false;
2397 }]>;
2399 def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
2400 [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
2401 def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
2402 [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
2403 def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
2404 [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2405 def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
2406 [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2407 def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
2408 [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2410 // load sign-extended word
2411 def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
2412 [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;
2414 let AddedComplexity = 20 in {
2415 def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
2416 (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
2417 }
2420 def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
2421 // [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;
2424 // (unscaled immediate)
2425 defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
2426 [(set GPR64z:$Rt,
2427 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
2428 defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
2429 [(set GPR32z:$Rt,
2430 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
2431 defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
2432 [(set FPR8Op:$Rt,
2433 (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
2434 defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
2435 [(set (f16 FPR16Op:$Rt),
2436 (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
2437 defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
2438 [(set (f32 FPR32Op:$Rt),
2439 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
2440 defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
2441 [(set (f64 FPR64Op:$Rt),
2442 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
2443 defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
2444 [(set (f128 FPR128Op:$Rt),
2445 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;
2447 defm LDURHH
2448 : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
2449 [(set GPR32:$Rt,
2450 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
2451 defm LDURBB
2452 : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
2453 [(set GPR32:$Rt,
2454 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
2456 // Match all 64-bit-wide loads whose type is compatible with FPR64.
2457 let Predicates = [IsLE] in {
2458 def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2459 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2460 def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2461 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2462 def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2463 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2464 def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2465 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2466 def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2467 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2468 }
2469 def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2470 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2471 def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2472 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2474 // Match all 128-bit-wide loads whose type is compatible with FPR128.
2475 let Predicates = [IsLE] in {
2476 def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2477 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2478 def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2479 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2480 def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2481 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2482 def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2483 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2484 def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2485 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2486 def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2487 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2488 def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2489 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2490 }
2493 def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2494 (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
2495 def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2496 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2497 def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2498 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2499 def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
2500 (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2501 def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2502 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2503 def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2504 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2505 def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2506 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2508 def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2509 (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
2510 def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2511 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2512 def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2513 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2514 def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
2515 (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2516 def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2517 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2518 def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2519 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2520 def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2521 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2525 // LDR mnemonics fall back to LDUR for negative or unaligned offsets.
2527 // Define new assembler match classes as we want to only match these when
2528 // they don't otherwise match the scaled addressing mode for LDR/STR. Don't
2529 // associate a DiagnosticType either, as we want the diagnostic for the
2530 // canonical form (the scaled operand) to take precedence.
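// For example, "ldr x0, [x1, #1]" cannot use the scaled LDRXui form (the
// offset must be a non-negative multiple of 8), so it matches the fallback
// operand below and assembles as "ldur x0, [x1, #1]".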
2531 class SImm9OffsetOperand<int Width> : AsmOperandClass {
2532 let Name = "SImm9OffsetFB" # Width;
2533 let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
2534 let RenderMethod = "addImmOperands";
2535 }
2537 def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
2538 def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
2539 def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
2540 def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
2541 def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;
2543 def simm9_offset_fb8 : Operand<i64> {
2544 let ParserMatchClass = SImm9OffsetFB8Operand;
2545 }
2546 def simm9_offset_fb16 : Operand<i64> {
2547 let ParserMatchClass = SImm9OffsetFB16Operand;
2548 }
2549 def simm9_offset_fb32 : Operand<i64> {
2550 let ParserMatchClass = SImm9OffsetFB32Operand;
2551 }
2552 def simm9_offset_fb64 : Operand<i64> {
2553 let ParserMatchClass = SImm9OffsetFB64Operand;
2554 }
2555 def simm9_offset_fb128 : Operand<i64> {
2556 let ParserMatchClass = SImm9OffsetFB128Operand;
2557 }
2559 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2560 (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2561 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2562 (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2563 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2564 (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2565 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2566 (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2567 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2568 (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2569 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2570 (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2571 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2572 (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
2575 def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2576 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2577 def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2578 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2580 // load sign-extended half-word
2581 defm LDURSHW
2582 : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
2583 [(set GPR32:$Rt,
2584 (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
2585 defm LDURSHX
2586 : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
2587 [(set GPR64:$Rt,
2588 (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
2590 // load sign-extended byte
2591 defm LDURSBW
2592 : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
2593 [(set GPR32:$Rt,
2594 (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
2595 defm LDURSBX
2596 : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
2597 [(set GPR64:$Rt,
2598 (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
2600 // load sign-extended word
2601 defm LDURSW
2602 : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
2603 [(set GPR64:$Rt,
2604 (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
2606 // Zero- and sign-extending aliases from generic LDR* mnemonics to LDUR*.
2607 def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
2608 (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2609 def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
2610 (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2611 def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
2612 (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2613 def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
2614 (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2615 def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
2616 (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2617 def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
2618 (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2619 def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
2620 (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
// Pre-fetch.
defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
                  [(AArch64Prefetch imm:$Rt,
                                    (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
//---
// (unscaled immediate, unprivileged)
defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;

defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;

// load sign-extended half-word
defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;

// load sign-extended byte
defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;

// load sign-extended word
defm LDTRSW : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;
//---
// (immediate pre-indexed)
def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op, "ldr">;
def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
//---
// (immediate post-indexed)
def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">;
def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">;
def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op, "ldr">;
def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;

// load sign-extended half-word
def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;

// load sign-extended byte
def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;

// load zero-extended byte and half-word
def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;

// load sign-extended word
def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
//===----------------------------------------------------------------------===//
// Store instructions.
//===----------------------------------------------------------------------===//

// Pair (indexed, offset)
// FIXME: Use dedicated range-checked addressing mode operand here.
defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (pre-indexed)
def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (post-indexed)
def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;

// Pair (no allocate)
defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;

def : Pat<(AArch64stp GPR64z:$Rt, GPR64z:$Rt2, (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
          (STPXi GPR64z:$Rt, GPR64z:$Rt2, GPR64sp:$Rn, simm7s8:$offset)>;
//---
// (Register offset)

// Integer
defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
defm STRW  : Store32RO<0b10, 0, 0b00, GPR32, "str",  i32, store>;
defm STRX  : Store64RO<0b11, 0, 0b00, GPR64, "str",  i64, store>;

// Floating-point
defm STRB : Store8RO< 0b00, 1, 0b00, FPR8Op,   "str", untyped, store>;
defm STRH : Store16RO<0b01, 1, 0b00, FPR16Op,  "str", f16,     store>;
defm STRS : Store32RO<0b10, 1, 0b00, FPR32Op,  "str", f32,     store>;
defm STRD : Store64RO<0b11, 1, 0b00, FPR64Op,  "str", f64,     store>;
defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str", f128,   store>;

let Predicates = [UseSTRQro], AddedComplexity = 10 in {
  def : Pat<(store (f128 FPR128:$Rt),
                   (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
                                   ro_Wextend128:$extend)),
            (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
  def : Pat<(store (f128 FPR128:$Rt),
                   (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
                                   ro_Xextend128:$extend)),
            (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend)>;
}
multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
                                 Instruction STRW, Instruction STRX> {

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop GPR64:$Rt,
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
  // truncstore i64
  defm : TruncStoreFrom64ROPat<ro8,  truncstorei8,  STRBBroW, STRBBroX>;
  defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
  defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW,  STRWroX>;
}
multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
                         Instruction STRW, Instruction STRX> {
  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(store (VecTy FPR:$Rt),
                   (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 10 in {
// Match all 64-bit-wide stores whose type is compatible with FPR64.
let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v8i8,  FPR64, STRDroW, STRDroX>;
  defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
}

defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;

// Match all 128-bit-wide stores whose type is compatible with FPR128.
let Predicates = [IsLE, UseSTRQro] in {
  // We must use ST1 to store vectors in big-endian.
  defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
  defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
}
} // AddedComplexity = 10
// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
                              ValueType VecTy, ValueType STy,
                              SubRegIndex SubRegIdx,
                              Instruction STRW, Instruction STRX> {

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
            (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;

  def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
                     (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                  GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}

let AddedComplexity = 19 in {
  defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro16, store,         v8f16, f16, hsub, STRHroW, STRHroX>;
  defm : VecROStoreLane0Pat<ro32, store,         v4i32, i32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro32, store,         v4f32, f32, ssub, STRSroW, STRSroX>;
  defm : VecROStoreLane0Pat<ro64, store,         v2i64, i64, dsub, STRDroW, STRDroX>;
  defm : VecROStoreLane0Pat<ro64, store,         v2f64, f64, dsub, STRDroW, STRDroX>;
}
//---
// (unsigned immediate)
defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str",
                     [(store GPR64z:$Rt,
                             (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
                     [(store GPR32z:$Rt,
                             (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
                    [(store FPR8Op:$Rt,
                            (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
                    [(store (f16 FPR16Op:$Rt),
                            (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str",
                    [(store (f32 FPR32Op:$Rt),
                            (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str",
                    [(store (f64 FPR64Op:$Rt),
                            (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>;

defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh",
                      [(truncstorei16 GPR32z:$Rt,
                                      (am_indexed16 GPR64sp:$Rn,
                                                    uimm12s2:$offset))]>;
defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1, "strb",
                      [(truncstorei8 GPR32z:$Rt,
                                     (am_indexed8 GPR64sp:$Rn,
                                                  uimm12s1:$offset))]>;
let AddedComplexity = 10 in {

// Match all 64-bit-wide stores whose type is compatible with FPR64.
def : Pat<(store (v1i64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
def : Pat<(store (v1f64 FPR64:$Rt),
                 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
          (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
            (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
}

// Match all 128-bit-wide stores whose type is compatible with FPR128.
def : Pat<(store (f128 FPR128:$Rt),
                 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
          (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
            (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
}

// truncstore i64
def : Pat<(truncstorei32 GPR64:$Rt,
                         (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
          (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt,
                         (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
          (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
          (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;

} // AddedComplexity = 10
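// Illustrative example (not from the original source): the truncating-store
// patterns above mean that
//   void f(long long v, int *p) { *p = (int)v; }
// selects "str w0, [x1]"; EXTRACT_SUBREG sub_32 renames the X register to
// its W half, so no separate truncation instruction is emitted.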
// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecStoreLane0Pat<Operand UIAddrMode, SDPatternOperator storeop,
                            ValueType VTy, ValueType STy,
                            SubRegIndex SubRegIdx, Operand IndexType,
                            Instruction STR> {
  def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)),
                     (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
            (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
                 GPR64sp:$Rn, IndexType:$offset)>;
}

let AddedComplexity = 19 in {
  defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed16, store,         v8f16, f16, hsub, uimm12s2, STRHui>;
  defm : VecStoreLane0Pat<am_indexed32, store,         v4i32, i32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed32, store,         v4f32, f32, ssub, uimm12s4, STRSui>;
  defm : VecStoreLane0Pat<am_indexed64, store,         v2i64, i64, dsub, uimm12s8, STRDui>;
  defm : VecStoreLane0Pat<am_indexed64, store,         v2f64, f64, dsub, uimm12s8, STRDui>;
}
//---
// (unscaled immediate)
defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur",
                           [(store GPR64z:$Rt,
                                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
                           [(store GPR32z:$Rt,
                                   (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
                           [(store FPR8Op:$Rt,
                                   (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
                           [(store (f16 FPR16Op:$Rt),
                                   (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur",
                           [(store (f32 FPR32Op:$Rt),
                                   (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur",
                           [(store (f64 FPR64Op:$Rt),
                                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur",
                           [(store (f128 FPR128Op:$Rt),
                                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh",
                            [(truncstorei16 GPR32z:$Rt,
                                            (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb",
                            [(truncstorei8 GPR32z:$Rt,
                                           (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
// Armv8.4 Weaker Release Consistency enhancements
// LDAPR & STLR with Immediate Offset instructions
let Predicates = [HasRCPC_IMMO] in {
defm STLURB    : BaseStoreUnscaleV84<"stlurb",  0b00, 0b00, GPR32>;
defm STLURH    : BaseStoreUnscaleV84<"stlurh",  0b01, 0b00, GPR32>;
defm STLURW    : BaseStoreUnscaleV84<"stlur",   0b10, 0b00, GPR32>;
defm STLURX    : BaseStoreUnscaleV84<"stlur",   0b11, 0b00, GPR64>;
defm LDAPURB   : BaseLoadUnscaleV84<"ldapurb",  0b00, 0b01, GPR32>;
defm LDAPURSBW : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>;
defm LDAPURSBX : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>;
defm LDAPURH   : BaseLoadUnscaleV84<"ldapurh",  0b01, 0b01, GPR32>;
defm LDAPURSHW : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>;
defm LDAPURSHX : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>;
defm LDAPUR    : BaseLoadUnscaleV84<"ldapur",   0b10, 0b01, GPR32>;
defm LDAPURSW  : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>;
defm LDAPURX   : BaseLoadUnscaleV84<"ldapur",   0b11, 0b01, GPR64>;
}
// Match all 64-bit-wide stores whose type is compatible with FPR64.
def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
          (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let AddedComplexity = 10 in {

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v2f32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i8 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i32 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4f16 FPR64:$Rt),
                   (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
            (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

// Match all 128-bit-wide stores whose type is compatible with FPR128.
def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
          (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;

let Predicates = [IsLE] in {
  // We must use ST1 to store vectors in big-endian.
  def : Pat<(store (v4f32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2f64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v16i8 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8i16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v4i32 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v2i64 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
  def : Pat<(store (v8f16 FPR128:$Rt),
                   (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
            (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
}

} // AddedComplexity = 10
// unscaled i64 truncating stores
def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
          (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
          (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
          (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
// Match stores from lane 0 to the appropriate subreg's store.
multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
                             ValueType VTy, ValueType STy,
                             SubRegIndex SubRegIdx, Instruction STR> {
  defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>;
}

let AddedComplexity = 19 in {
  defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v8f16, f16, hsub, STURHi>;
  defm : VecStoreULane0Pat<store,         v4i32, i32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v4f32, f32, ssub, STURSi>;
  defm : VecStoreULane0Pat<store,         v2i64, i64, dsub, STURDi>;
  defm : VecStoreULane0Pat<store,         v2f64, f64, dsub, STURDi>;
}
// STR mnemonics fall back to STUR for negative or unaligned offsets.
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
def : InstAlias<"str $Rt, [$Rn, $offset]",
                (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;

def : InstAlias<"strb $Rt, [$Rn, $offset]",
                (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
def : InstAlias<"strh $Rt, [$Rn, $offset]",
                (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
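// Illustrative note (not from the original source): as with the loads,
// "str x0, [sp, #-8]" has no valid STRXui encoding because the offset is
// negative, so it assembles to STURXi; the same applies to unaligned
// offsets such as "str w0, [x1, #3]".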
//---
// (unscaled immediate, unprivileged)
defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;

defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;
//---
// (immediate pre-indexed)
def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str",  pre_store, i32>;
def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str",  pre_store, i64>;
def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op,  "str", pre_store, untyped>;
def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str", pre_store, f16>;
def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str", pre_store, f32>;
def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str", pre_store, f64>;
def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>;

def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8,  i32>;
def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>;

// truncstore i64
def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
          (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
                   simm9:$off)>;
def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
          (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
                    simm9:$off)>;
def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
          (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
                    simm9:$off)>;
def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
//---
// (immediate post-indexed)
def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z, "str",  post_store, i32>;
def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z, "str",  post_store, i64>;
def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op,  "str", post_store, untyped>;
def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op, "str", post_store, f16>;
def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op, "str", post_store, f32>;
def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op, "str", post_store, f64>;
def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>;

def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8,  i32>;
def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>;

// truncstore i64
def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
          (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
                    simm9:$off)>;
def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
          (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
                     simm9:$off)>;
def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
          (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
                     simm9:$off)>;
def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
          (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;

def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
          (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
//===----------------------------------------------------------------------===//
// Load/store exclusive instructions.
//===----------------------------------------------------------------------===//

def LDARW  : LoadAcquire   <0b10, 1, 1, 0, 1, GPR32, "ldar">;
def LDARX  : LoadAcquire   <0b11, 1, 1, 0, 1, GPR64, "ldar">;
def LDARB  : LoadAcquire   <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
def LDARH  : LoadAcquire   <0b01, 1, 1, 0, 1, GPR32, "ldarh">;

def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;

def LDXRW  : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
def LDXRX  : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
def LDXRB  : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
def LDXRH  : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;

def STLRW  : StoreRelease  <0b10, 1, 0, 0, 1, GPR32, "stlr">;
def STLRX  : StoreRelease  <0b11, 1, 0, 0, 1, GPR64, "stlr">;
def STLRB  : StoreRelease  <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
def STLRH  : StoreRelease  <0b01, 1, 0, 0, 1, GPR32, "stlrh">;

def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;

def STXRW  : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
def STXRX  : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
def STXRB  : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
def STXRH  : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;

def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;

def LDXPW  : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
def LDXPX  : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;

def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;

def STXPW  : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
def STXPX  : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
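// Illustrative use (not from the original source): these exclusives are the
// building blocks of atomic read-modify-write loops, e.g. a compare-and-swap
// with acquire/release semantics:
//   retry:
//     ldaxr x8, [x0]      // load-acquire exclusive
//     cmp   x8, x1
//     b.ne  done
//     stlxr w9, x2, [x0]  // store-release exclusive; w9 = status
//     cbnz  w9, retry     // retry if the exclusive monitor was lost
//   done: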
let Predicates = [HasLOR] in {
  // v8.1a "Limited Order Region" extension load-acquire instructions
  def LDLARW  : LoadAcquire <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
  def LDLARX  : LoadAcquire <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
  def LDLARB  : LoadAcquire <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
  def LDLARH  : LoadAcquire <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;

  // v8.1a "Limited Order Region" extension store-release instructions
  def STLLRW  : StoreRelease <0b10, 1, 0, 0, 0, GPR32, "stllr">;
  def STLLRX  : StoreRelease <0b11, 1, 0, 0, 0, GPR64, "stllr">;
  def STLLRB  : StoreRelease <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
  def STLLRH  : StoreRelease <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
}
//===----------------------------------------------------------------------===//
// Scaled floating point to integer conversion instructions.
//===----------------------------------------------------------------------===//

defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", any_fp_to_sint>;
defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", any_fp_to_uint>;
multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
  def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
  def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
  def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
  def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
  def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
  def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;

  def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
            (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
            (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
  def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
            (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
            (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
  def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
            (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
  def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
            (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
}

defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
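// Illustrative mapping (an assumption about the usual intrinsic lowering,
// not from the original source): a multiply by a power of two feeding the
// fcvtzs/fcvtzu intrinsic folds into the scaled form, so something like
// vcvts_n_s32_f32(x, 4), i.e. (i32 (fcvtzs (fmul f32:$x, 16.0))), becomes a
// single "fcvtzs w0, s0, #4" rather than an fmul followed by a convert.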
multiclass FPToIntegerPats<SDNode to_int, SDNode round, string INST> {
  def : Pat<(i32 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
  def : Pat<(i64 (to_int (round f32:$Rn))),
            (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
  def : Pat<(i32 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
  def : Pat<(i64 (to_int (round f64:$Rn))),
            (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
}

defm : FPToIntegerPats<fp_to_sint, fceil,  "FCVTPS">;
defm : FPToIntegerPats<fp_to_uint, fceil,  "FCVTPU">;
defm : FPToIntegerPats<fp_to_sint, ffloor, "FCVTMS">;
defm : FPToIntegerPats<fp_to_uint, ffloor, "FCVTMU">;
defm : FPToIntegerPats<fp_to_sint, ftrunc, "FCVTZS">;
defm : FPToIntegerPats<fp_to_uint, ftrunc, "FCVTZU">;
defm : FPToIntegerPats<fp_to_sint, fround, "FCVTAS">;
defm : FPToIntegerPats<fp_to_uint, fround, "FCVTAU">;
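// Illustrative example (not from the original source): with these patterns,
// (i32 (fp_to_sint (ffloor f32:$x))) -- e.g. "(int)floorf(x)" compiled
// without math-errno -- collapses to a single "fcvtms w0, s0" instead of a
// frintm followed by a fcvtzs.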
let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUWHr) f16:$Rn)>;
  def : Pat<(i64 (lround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
  def : Pat<(i64 (llround f16:$Rn)),
            (!cast<Instruction>(FCVTASUXHr) f16:$Rn)>;
}
def : Pat<(i32 (lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUWSr) f32:$Rn)>;
def : Pat<(i32 (lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUWDr) f64:$Rn)>;
def : Pat<(i64 (lround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (lround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
def : Pat<(i64 (llround f32:$Rn)),
          (!cast<Instruction>(FCVTASUXSr) f32:$Rn)>;
def : Pat<(i64 (llround f64:$Rn)),
          (!cast<Instruction>(FCVTASUXDr) f64:$Rn)>;
//===----------------------------------------------------------------------===//
// Scaled integer to floating point conversion instructions.
//===----------------------------------------------------------------------===//

defm SCVTF : IntegerToFP<0, "scvtf", any_sint_to_fp>;
defm UCVTF : IntegerToFP<1, "ucvtf", any_uint_to_fp>;

//===----------------------------------------------------------------------===//
// Unscaled integer to floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FMOV : UnscaledConversion<"fmov">;

// Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
             Sched<[WriteF]>, Requires<[HasFullFP16]>;
def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
             Sched<[WriteF]>;
def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
             Sched<[WriteF]>;
}

// Similarly add aliases
def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
      Requires<[HasFullFP16]>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;
//===----------------------------------------------------------------------===//
// Floating point conversion instruction.
//===----------------------------------------------------------------------===//

defm FCVT : FPConversion<"fcvt">;

//===----------------------------------------------------------------------===//
// Floating point single operand instructions.
//===----------------------------------------------------------------------===//

defm FABS   : SingleOperandFPData<0b0001, "fabs", fabs>;
defm FMOV   : SingleOperandFPData<0b0000, "fmov">;
defm FNEG   : SingleOperandFPData<0b0010, "fneg", fneg>;
defm FRINTA : SingleOperandFPData<0b1100, "frinta", fround>;
defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
defm FRINTN : SingleOperandFPData<0b1000, "frintn", int_aarch64_neon_frintn>;
defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;

def : Pat<(v1f64 (int_aarch64_neon_frintn (v1f64 FPR64:$Rn))),
          (FRINTNDr FPR64:$Rn)>;

defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;

let SchedRW = [WriteFDiv] in {
defm FSQRT  : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
}

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNT<0b00, "frint32z">;
  defm FRINT64Z : FRIntNNT<0b10, "frint64z">;
  defm FRINT32X : FRIntNNT<0b01, "frint32x">;
  defm FRINT64X : FRIntNNT<0b11, "frint64x">;
} // HasFRInt3264
let Predicates = [HasFullFP16] in {
  def : Pat<(i32 (lrint f16:$Rn)),
            (FCVTZSUWHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (lrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
  def : Pat<(i64 (llrint f16:$Rn)),
            (FCVTZSUXHr (!cast<Instruction>(FRINTXHr) f16:$Rn))>;
}
def : Pat<(i32 (lrint f32:$Rn)),
          (FCVTZSUWSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i32 (lrint f64:$Rn)),
          (FCVTZSUWDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (lrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (lrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
def : Pat<(i64 (llrint f32:$Rn)),
          (FCVTZSUXSr (!cast<Instruction>(FRINTXSr) f32:$Rn))>;
def : Pat<(i64 (llrint f64:$Rn)),
          (FCVTZSUXDr (!cast<Instruction>(FRINTXDr) f64:$Rn))>;
//===----------------------------------------------------------------------===//
// Floating point two operand instructions.
//===----------------------------------------------------------------------===//

defm FADD   : TwoOperandFPData<0b0010, "fadd", fadd>;
let SchedRW = [WriteFDiv] in {
defm FDIV   : TwoOperandFPData<0b0001, "fdiv", fdiv>;
}
defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
defm FMAX   : TwoOperandFPData<0b0100, "fmax", fmaximum>;
defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
defm FMIN   : TwoOperandFPData<0b0101, "fmin", fminimum>;
let SchedRW = [WriteFMul] in {
defm FMUL   : TwoOperandFPData<0b0000, "fmul", fmul>;
defm FNMUL  : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
}
defm FSUB   : TwoOperandFPData<0b0011, "fsub", fsub>;

def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;
//===----------------------------------------------------------------------===//
// Floating point three operand instructions.
//===----------------------------------------------------------------------===//

defm FMADD  : ThreeOperandFPData<0, 0, "fmadd", fma>;
defm FMSUB  : ThreeOperandFPData<0, 1, "fmsub",
     TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
     TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
     TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;

// The following def pats catch the case where the LHS of an FMA is negated.
// The TriOpFrag above catches the case where the middle operand is negated.

// N.b. FMSUB etc have the accumulator at the *end* of (outs), unlike
// the NEON variant.
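// For illustration (not from the original source): with FP contraction
// enabled,
//   float f(float a, float b, float c) { return c - a * b; }
// reaches instruction selection as (fma (fneg a), b, c), and the patterns
// below select "fmsub s0, s0, s1, s2" rather than a separate fneg + fmadd.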
// Here we handle first a + (-b)*c, i.e. a - b*c, which maps onto FMSUB:

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, FPR16:$Ra)),
          (FMSUBHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
          (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
          (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

// Now it's time for "(-a) + (-b)*c"

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma (fneg FPR16:$Rn), FPR16:$Rm, (fneg FPR16:$Ra))),
          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;

// And here "(-a) + b*(-c)"

let Predicates = [HasNEON, HasFullFP16] in
def : Pat<(f16 (fma FPR16:$Rn, (fneg FPR16:$Rm), (fneg FPR16:$Ra))),
          (FNMADDHrrr FPR16:$Rn, FPR16:$Rm, FPR16:$Ra)>;

def : Pat<(f32 (fma FPR32:$Rn, (fneg FPR32:$Rm), (fneg FPR32:$Ra))),
          (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;

def : Pat<(f64 (fma FPR64:$Rn, (fneg FPR64:$Rm), (fneg FPR64:$Ra))),
          (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
//===----------------------------------------------------------------------===//
// Floating point comparison instructions.
//===----------------------------------------------------------------------===//

defm FCMPE : FPComparison<1, "fcmpe", AArch64strict_fcmpe>;
defm FCMP  : FPComparison<0, "fcmp", AArch64any_fcmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional comparison instructions.
//===----------------------------------------------------------------------===//

defm FCCMPE : FPCondComparison<1, "fccmpe">;
defm FCCMP  : FPCondComparison<0, "fccmp", AArch64fccmp>;

//===----------------------------------------------------------------------===//
// Floating point conditional select instruction.
//===----------------------------------------------------------------------===//

defm FCSEL : FPCondSelect<"fcsel">;

// CSEL instructions providing f128 types need to be handled by a
// pseudo-instruction since the eventual code will need to introduce basic
// blocks and control flow.
def F128CSEL : Pseudo<(outs FPR128:$Rd),
                      (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
                      [(set (f128 FPR128:$Rd),
                            (AArch64csel FPR128:$Rn, FPR128:$Rm,
                                         (i32 imm:$cond), NZCV))]> {
  let usesCustomInserter = 1;
  let hasNoSchedulingInfo = 1;
}
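// A rough sketch (not from the original source) of what the custom inserter
// expands F128CSEL into, since an f128 value cannot be conditionally
// selected in a single instruction; see AArch64TargetLowering::EmitF128CSEL:
//   entry:    b.<cond> true_bb
//   false_bb:                       ; result is $Rm
//             b end_bb
//   true_bb:                        ; result is $Rn
//   end_bb:   $Rd = phi($Rn, $Rm)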
//===----------------------------------------------------------------------===//
// Instructions used for emitting unwind opcodes on ARM64 Windows.
//===----------------------------------------------------------------------===//
let isPseudo = 1 in {
  def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>;
  def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
  def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
  def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>;
  def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions for Windows EH
//===----------------------------------------------------------------------===//
let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
    isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in {
  def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>;
  let usesCustomInserter = 1 in
    def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>,
                   Sched<[]>;
}

let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in
def CATCHPAD : Pseudo<(outs), (ins), [(catchpad)]>, Sched<[]>;
//===----------------------------------------------------------------------===//
// Floating point immediate move.
//===----------------------------------------------------------------------===//

let isReMaterializable = 1 in {
defm FMOV : FPMoveImmediate<"fmov">;
}

//===----------------------------------------------------------------------===//
// Advanced SIMD two vector instructions.
//===----------------------------------------------------------------------===//
defm UABDL   : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
                                          int_aarch64_neon_uabd>;
// Match UABDL in log2-shuffle patterns.
def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
                           (zext (v8i8 V64:$opB))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (v8i8 V64:$opA)),
                                (zext (v8i8 V64:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
                           (zext (extract_high_v16i8 V128:$opB))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
               (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
                                (zext (extract_high_v16i8 V128:$opB))),
                           (AArch64vashr v8i16:$src, (i32 15))))),
          (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
                           (zext (v4i16 V64:$opB))))),
          (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
                           (zext (extract_high_v8i16 V128:$opB))))),
          (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
                           (zext (v2i32 V64:$opB))))),
          (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
                           (zext (extract_high_v4i32 V128:$opB))))),
          (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
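// Illustrative example (not from the original source): a vectorized
// sum-of-absolute-differences loop over i8 data, e.g.
//   sum += abs((int)a[i] - (int)b[i]);
// widens both inputs before the subtract, producing the abs(sub(zext, zext))
// shape matched above and selecting "uabdl v0.8h, v1.8b, v2.8b".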
defm ABS    : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
defm CLS    : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
defm CLZ    : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
defm CMEQ   : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
defm CNT    : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
defm FABS   : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;

defm FCMEQ  : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
defm FCVTL  : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
          (FCVTLv4i16 V64:$Rn)>;
def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
                                                                (i64 4)))),
          (FCVTLv8i16 V128:$Rn)>;
def : Pat<(v2f64 (fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;

def : Pat<(v4f32 (fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;
defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
defm FCVTN  : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
          (FCVTNv4i16 V128:$Rn)>;
def : Pat<(concat_vectors V64:$Rd,
                          (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
          (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
def : Pat<(v2f32 (fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
def : Pat<(v4f16 (fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
def : Pat<(concat_vectors V64:$Rd, (v2f32 (fpround (v2f64 V128:$Rn)))),
          (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
                                        int_aarch64_neon_fcvtxn>;
defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;

def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;

def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;
defm FNEG   : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", fround>;
defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", int_aarch64_neon_frintn>;
defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;

let Predicates = [HasFRInt3264] in {
  defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z">;
  defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z">;
  defm FRINT32X : FRIntNNTVector<1, 0, "frint32x">;
  defm FRINT64X : FRIntNNTVector<1, 1, "frint64x">;
} // HasFRInt3264
defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
defm FSQRT  : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
defm NEG    : SIMDTwoVectorBHSD<1, 0b01011, "neg",
                                UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm NOT    : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
// Aliases for MVN -> NOT.
def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
                (NOTv8i8 V64:$Vd, V64:$Vn)>;
def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
                (NOTv16i8 V128:$Vd, V128:$Vn)>;
def : Pat<(AArch64neg (v8i8 V64:$Rn)),   (NEGv8i8  V64:$Rn)>;
def : Pat<(AArch64neg (v16i8 V128:$Rn)), (NEGv16i8 V128:$Rn)>;
def : Pat<(AArch64neg (v4i16 V64:$Rn)),  (NEGv4i16 V64:$Rn)>;
def : Pat<(AArch64neg (v8i16 V128:$Rn)), (NEGv8i16 V128:$Rn)>;
def : Pat<(AArch64neg (v2i32 V64:$Rn)),  (NEGv2i32 V64:$Rn)>;
def : Pat<(AArch64neg (v4i32 V128:$Rn)), (NEGv4i32 V128:$Rn)>;
def : Pat<(AArch64neg (v2i64 V128:$Rn)), (NEGv2i64 V128:$Rn)>;

def : Pat<(AArch64not (v8i8 V64:$Rn)),   (NOTv8i8  V64:$Rn)>;
def : Pat<(AArch64not (v16i8 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(AArch64not (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(AArch64not (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(AArch64not (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(AArch64not (v1i64 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(AArch64not (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(AArch64not (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;

def : Pat<(vnot (v4i16 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v2i32 V64:$Rn)),  (NOTv8i8  V64:$Rn)>;
def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
defm RBIT   : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", int_aarch64_neon_rbit>;
defm REV16  : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
defm REV32  : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
defm REV64  : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
       BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
defm SCVTF  : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
defm SHLL   : SIMDVectorLShiftLongBySizeBHS;
defm SQABS  : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
       BinOpFrag<(add node:$LHS, (int_aarch64_neon_uaddlp node:$RHS))> >;
defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp",
                                int_aarch64_neon_uaddlp>;
defm UCVTF  : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
defm UQXTN  : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
defm XTN    : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
3798 def : Pat<(v4f16 (AArch64rev32 V64:$Rn)), (REV32v4i16 V64:$Rn)>;
3799 def : Pat<(v4f16 (AArch64rev64 V64:$Rn)), (REV64v4i16 V64:$Rn)>;
3800 def : Pat<(v8f16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
3801 def : Pat<(v8f16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
3802 def : Pat<(v2f32 (AArch64rev64 V64:$Rn)), (REV64v2i32 V64:$Rn)>;
3803 def : Pat<(v4f32 (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
// Patterns for vector long shift (by element width). These need to match all
// three of zext, sext and anyext so it's easier to pull the patterns out of the
// definition.
multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
  def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
            (SHLLv8i8 V64:$Rn)>;
  def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
            (SHLLv16i8 V128:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
            (SHLLv4i16 V64:$Rn)>;
  def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
            (SHLLv8i16 V128:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
            (SHLLv2i32 V64:$Rn)>;
  def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
            (SHLLv4i32 V128:$Rn)>;
}
defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
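// As a concrete illustration (a sketch; the exact IR the midend produces may
// differ), a widening shift by the element width such as
//   %w = zext <8 x i8> %v to <8 x i16>
//   %s = shl <8 x i16> %w, <i16 8, i16 8, i16 8, i16 8,
//                           i16 8, i16 8, i16 8, i16 8>
// is matched by the v8i16 pattern above and selects a single
// "shll v0.8h, v1.8b, #8", regardless of which extension produced the input.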
//===----------------------------------------------------------------------===//
// Advanced SIMD three vector instructions.
//===----------------------------------------------------------------------===//

defm ADD   : SIMDThreeSameVector<0, 0b10000, "add", add>;
defm ADDP  : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
defm CMEQ  : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE  : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT  : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI  : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS  : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD  : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
let Predicates = [HasNEON] in {
foreach VT = [ v2f32, v4f32, v2f64 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
let Predicates = [HasNEON, HasFullFP16] in {
foreach VT = [ v4f16, v8f16 ] in
def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
}
defm FACGE   : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
defm FACGT   : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
defm FADDP   : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_faddp>;
defm FADD    : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>;
defm FCMEQ   : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE   : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT   : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FDIV    : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>;
defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
defm FMAXNM  : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>;
defm FMAXP   : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
defm FMAX    : SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaximum>;
defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
defm FMINNM  : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>;
defm FMINP   : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
defm FMIN    : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminimum>;
// NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
// instruction expects the addend first, while the fma intrinsic puts it last.
defm FMLA : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
            TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm FMLS : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
            TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
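// E.g. (fma $Rn, $Rm, $Rd) in the DAG becomes "fmla $Rd, $Rn, $Rm": the
// trailing addend of the node ends up as the instruction's tied destination.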
// The following def pats catch the case where the LHS of an FMA is negated.
// The TriOpFrag above catches the case where the middle operand is negated.
def : Pat<(v2f32 (fma (fneg V64:$Rn), V64:$Rm, V64:$Rd)),
          (FMLSv2f32 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(v4f32 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
          (FMLSv4f32 V128:$Rd, V128:$Rn, V128:$Rm)>;

def : Pat<(v2f64 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
          (FMLSv2f64 V128:$Rd, V128:$Rn, V128:$Rm)>;
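// This relies on fma(-a, b, c) == fma(a, -b, c). A minimal IR sketch:
//   %na = fneg <2 x float> %a
//   %r = call <2 x float> @llvm.fma.v2f32(<2 x float> %na, <2 x float> %b,
//                                         <2 x float> %c)
// also selects "fmls v0.2s, v1.2s, v2.2s", just like negating %b instead.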
defm FMULX    : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
defm FMUL     : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
defm FRECPS   : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
defm FSUB     : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;

// MLA and MLS are generated in MachineCombine
defm MLA      : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla", null_frag>;
defm MLS      : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls", null_frag>;

defm MUL      : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
defm PMUL     : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA     : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
      TriOpFrag<(add node:$LHS, (int_aarch64_neon_sabd node:$MHS, node:$RHS))> >;
defm SABD     : SIMDThreeSameVectorBHS<0,0b01110,"sabd", int_aarch64_neon_sabd>;
defm SHADD    : SIMDThreeSameVectorBHS<0,0b00000,"shadd", int_aarch64_neon_shadd>;
defm SHSUB    : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
defm SMAXP    : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
defm SMAX     : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
defm SMINP    : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
defm SMIN     : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
defm SQADD    : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
defm SRHADD   : SIMDThreeSameVectorBHS<0,0b00010,"srhadd",int_aarch64_neon_srhadd>;
defm SRSHL    : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeSameVector<1,0b10000,"sub", sub>;
defm UABA     : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
      TriOpFrag<(add node:$LHS, (int_aarch64_neon_uabd node:$MHS, node:$RHS))> >;
defm UABD     : SIMDThreeSameVectorBHS<1,0b01110,"uabd", int_aarch64_neon_uabd>;
defm UHADD    : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", int_aarch64_neon_uhadd>;
defm UHSUB    : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
defm UMAXP    : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
defm UMAX     : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
defm UMINP    : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
defm UMIN     : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
defm UQADD    : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
defm URHADD   : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", int_aarch64_neon_urhadd>;
defm URSHL    : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
                                                  int_aarch64_neon_sqadd>;
defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
                                                  int_aarch64_neon_sqsub>;
// Extra saturate patterns, other than the intrinsics matches above
defm : SIMDThreeSameVectorExtraPatterns<"SQADD", saddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQADD", uaddsat>;
defm : SIMDThreeSameVectorExtraPatterns<"SQSUB", ssubsat>;
defm : SIMDThreeSameVectorExtraPatterns<"UQSUB", usubsat>;
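// E.g. (a sketch): generic saturating IR such as
//   %r = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %a, <4 x i16> %b)
// legalizes to the saddsat node and selects "sqadd v0.4h, v1.4h, v2.4h"
// through these patterns, without going via the AArch64-specific intrinsic.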
defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
                                  BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
defm BIF : SIMDLogicalThreeVector<1, 0b11, "bif">;
defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl",
    TriOpFrag<(or (and node:$LHS, node:$MHS), (and (vnot node:$LHS), node:$RHS))>>;
defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
                                  BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;

def : Pat<(AArch64bsl (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsl (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsl (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
def : Pat<(AArch64bsl (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
          (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;

def : Pat<(AArch64bsl (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsl (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsl (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : Pat<(AArch64bsl (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
          (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
                (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;

def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
                (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;

def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmls.8b\t$dst, $src1, $src2}",
                (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmls.16b\t$dst, $src1, $src2}",
                (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmls.4h\t$dst, $src1, $src2}",
                (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmls.8h\t$dst, $src1, $src2}",
                (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmls.2s\t$dst, $src1, $src2}",
                (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmls.4s\t$dst, $src1, $src2}",
                (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmls.2d\t$dst, $src1, $src2}",
                (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlo.8b\t$dst, $src1, $src2}",
                (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlo.16b\t$dst, $src1, $src2}",
                (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlo.4h\t$dst, $src1, $src2}",
                (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlo.8h\t$dst, $src1, $src2}",
                (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlo.2s\t$dst, $src1, $src2}",
                (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlo.4s\t$dst, $src1, $src2}",
                (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlo.2d\t$dst, $src1, $src2}",
                (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;

def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmle.8b\t$dst, $src1, $src2}",
                (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmle.16b\t$dst, $src1, $src2}",
                (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmle.4h\t$dst, $src1, $src2}",
                (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmle.8h\t$dst, $src1, $src2}",
                (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmle.2s\t$dst, $src1, $src2}",
                (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmle.4s\t$dst, $src1, $src2}",
                (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmle.2d\t$dst, $src1, $src2}",
                (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
                "|cmlt.8b\t$dst, $src1, $src2}",
                (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
                "|cmlt.16b\t$dst, $src1, $src2}",
                (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|cmlt.4h\t$dst, $src1, $src2}",
                (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|cmlt.8h\t$dst, $src1, $src2}",
                (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|cmlt.2s\t$dst, $src1, $src2}",
                (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|cmlt.4s\t$dst, $src1, $src2}",
                (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|cmlt.2d\t$dst, $src1, $src2}",
                (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmle.4h\t$dst, $src1, $src2}",
                (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmle.8h\t$dst, $src1, $src2}",
                (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmle.2s\t$dst, $src1, $src2}",
                (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmle.4s\t$dst, $src1, $src2}",
                (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmle.2d\t$dst, $src1, $src2}",
                (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
                "|fcmlt.4h\t$dst, $src1, $src2}",
                (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
                "|fcmlt.8h\t$dst, $src1, $src2}",
                (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
                "|fcmlt.2s\t$dst, $src1, $src2}",
                (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
                "|fcmlt.4s\t$dst, $src1, $src2}",
                (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
                "|fcmlt.2d\t$dst, $src1, $src2}",
                (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
                "|facle.4h\t$dst, $src1, $src2}",
                (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
                "|facle.8h\t$dst, $src1, $src2}",
                (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
                "|facle.2s\t$dst, $src1, $src2}",
                (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
                "|facle.4s\t$dst, $src1, $src2}",
                (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
                "|facle.2d\t$dst, $src1, $src2}",
                (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
let Predicates = [HasNEON, HasFullFP16] in {
def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
                "|faclt.4h\t$dst, $src1, $src2}",
                (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
                "|faclt.8h\t$dst, $src1, $src2}",
                (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
}
def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
                "|faclt.2s\t$dst, $src1, $src2}",
                (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
                "|faclt.4s\t$dst, $src1, $src2}",
                (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
                "|faclt.2d\t$dst, $src1, $src2}",
                (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions.
//===----------------------------------------------------------------------===//

defm ADD   : SIMDThreeScalarD<0, 0b10000, "add", add>;
defm CMEQ  : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
defm CMGE  : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
defm CMGT  : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
defm CMHI  : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
defm CMHS  : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
defm CMTST : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
defm FABD  : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
          (FABD64 FPR64:$Rn, FPR64:$Rm)>;
let Predicates = [HasFullFP16] in {
def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
}
def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
defm FACGE    : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
                                     int_aarch64_neon_facge>;
defm FACGT    : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
                                     int_aarch64_neon_facgt>;
defm FCMEQ    : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
defm FCMGE    : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
defm FCMGT    : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
defm FMULX    : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx>;
defm FRECPS   : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps>;
defm FRSQRTS  : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts>;
defm SQADD    : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
defm SQDMULH  : SIMDThreeScalarHS< 0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDThreeScalarHS< 1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
defm SQRSHL   : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
defm SQSHL    : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
defm SQSUB    : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
defm SRSHL    : SIMDThreeScalarD<   0, 0b01010, "srshl", int_aarch64_neon_srshl>;
defm SSHL     : SIMDThreeScalarD<   0, 0b01000, "sshl", int_aarch64_neon_sshl>;
defm SUB      : SIMDThreeScalarD<   1, 0b10000, "sub", sub>;
defm UQADD    : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL   : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
defm UQSHL    : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
defm UQSUB    : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
defm URSHL    : SIMDThreeScalarD<   1, 0b01010, "urshl", int_aarch64_neon_urshl>;
defm USHL     : SIMDThreeScalarD<   1, 0b01000, "ushl", int_aarch64_neon_ushl>;
let Predicates = [HasRDM] in {
  defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
  defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
  def : Pat<(i32 (int_aarch64_neon_sqadd
                   (i32 FPR32:$Rd),
                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
                                                   (i32 FPR32:$Rm))))),
            (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
  def : Pat<(i32 (int_aarch64_neon_sqsub
                   (i32 FPR32:$Rd),
                   (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
                                                   (i32 FPR32:$Rm))))),
            (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
}
def : InstAlias<"cmls $dst, $src1, $src2",
                (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmle $dst, $src1, $src2",
                (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlo $dst, $src1, $src2",
                (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"cmlt $dst, $src1, $src2",
                (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmle $dst, $src1, $src2",
                (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"fcmlt $dst, $src1, $src2",
                (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"facle $dst, $src1, $src2",
                (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
def : InstAlias<"faclt $dst, $src1, $src2",
                (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;

//===----------------------------------------------------------------------===//
// Advanced SIMD three scalar instructions (mixed operands).
//===----------------------------------------------------------------------===//
defm SQDMULL : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
                                      int_aarch64_neon_sqdmulls_scalar>;
defm SQDMLAL : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
defm SQDMLSL : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;

def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                          (i32 FPR32:$Rm))))),
          (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
                   (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                                          (i32 FPR32:$Rm))))),
          (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
//===----------------------------------------------------------------------===//
// Advanced SIMD two scalar instructions.
//===----------------------------------------------------------------------===//

defm ABS    : SIMDTwoScalarD<    0, 0b01011, "abs", abs>;
defm CMEQ   : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
defm CMGE   : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
defm CMGT   : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
defm CMLE   : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
defm CMLT   : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
defm FCMEQ  : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
defm FCMGE  : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
defm FCMGT  : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
defm FCMLE  : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
defm FCMLT  : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
defm FCVTAS : SIMDFPTwoScalar<   0, 0, 0b11100, "fcvtas">;
defm FCVTAU : SIMDFPTwoScalar<   1, 0, 0b11100, "fcvtau">;
defm FCVTMS : SIMDFPTwoScalar<   0, 0, 0b11011, "fcvtms">;
defm FCVTMU : SIMDFPTwoScalar<   1, 0, 0b11011, "fcvtmu">;
defm FCVTNS : SIMDFPTwoScalar<   0, 0, 0b11010, "fcvtns">;
defm FCVTNU : SIMDFPTwoScalar<   1, 0, 0b11010, "fcvtnu">;
defm FCVTPS : SIMDFPTwoScalar<   0, 1, 0b11010, "fcvtps">;
defm FCVTPU : SIMDFPTwoScalar<   1, 1, 0b11010, "fcvtpu">;
def  FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
defm FCVTZS : SIMDFPTwoScalar<   0, 1, 0b11011, "fcvtzs">;
defm FCVTZU : SIMDFPTwoScalar<   1, 1, 0b11011, "fcvtzu">;
defm FRECPE : SIMDFPTwoScalar<   0, 1, 0b11101, "frecpe">;
defm FRECPX : SIMDFPTwoScalar<   0, 1, 0b11111, "frecpx">;
defm FRSQRTE : SIMDFPTwoScalar<  1, 1, 0b11101, "frsqrte">;
defm NEG    : SIMDTwoScalarD<    1, 0b01011, "neg",
                                 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
defm SCVTF  : SIMDFPTwoScalarCVT<0, 0, 0b11101, "scvtf", AArch64sitof>;
defm SQABS  : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
defm SQNEG  : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
defm SQXTN  : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
                                     int_aarch64_neon_suqadd>;
defm UCVTF  : SIMDFPTwoScalarCVT<1, 0, 0b11101, "ucvtf", AArch64uitof>;
defm UQXTN  : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
                                     int_aarch64_neon_usqadd>;
def : Pat<(AArch64neg (v1i64 V64:$Rn)), (NEGv1i64 V64:$Rn)>;

def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
          (FCVTASv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
          (FCVTAUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
          (FCVTMSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
          (FCVTMUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
          (FCVTNSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
          (FCVTNUv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
          (FCVTPSv1i64 FPR64:$Rn)>;
def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
          (FCVTPUv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
          (FRECPEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
          (FRECPEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
          (FRECPEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
          (FRECPEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
          (FRECPEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
          (FRECPEv2f64 FPR128:$Rn)>;
def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;

def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
          (FRECPXv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
          (FRECPXv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
          (FRECPXv1i64 FPR64:$Rn)>;

def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
          (FRSQRTEv1f16 FPR16:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;

def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
          (FRSQRTEv1i32 FPR32:$Rn)>;
def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
          (FRSQRTEv2f32 V64:$Rn)>;
def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
          (FRSQRTEv4f32 FPR128:$Rn)>;
def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
          (FRSQRTEv1i64 FPR64:$Rn)>;
def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
          (FRSQRTEv2f64 FPR128:$Rn)>;

def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
          (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
          (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
          (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
          (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
          (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;
// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// Here are the patterns for 8 and 16-bits to float.
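// E.g. (a sketch; register assignments are illustrative only):
//   %b = load i8, i8* %p
//   %w = zext i8 %b to i32
//   %f = uitofp i32 %w to float
// selects to "ldr b0, [x0]" + "ucvtf s0, s0" via the patterns below, rather
// than a GPR load, a zero-extend and a cross-register-file fmov.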
// 8-bits -> float.
multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
                             SDPatternOperator loadop, Instruction UCVTF,
                             ROAddrMode ro, Instruction LDRW, Instruction LDRX,
                             SubRegIndex sub> {
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                                 sub))>;
  def : Pat<(DstTy (uint_to_fp (SrcTy
                     (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
                                      ro.Wext:$extend))))),
           (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
                                 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                                 sub))>;
}
defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
                         UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bits -> float.
defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
                         UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f32 (uint_to_fp (i32
               (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bits are handled in target specific dag combine:
// performIntToFpCombine.
// 64-bit integer to 32-bit floating point is not possible with UCVTF on
// floating point registers, since source and destination must have the
// same size there.
// Here are the patterns for 8, 16, 32, and 64-bits to double.
// 8-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
                         UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
def : Pat <(f64 (uint_to_fp (i32
               (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
def : Pat <(f64 (uint_to_fp (i32
               (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
// 16-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
                         UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
def : Pat <(f64 (uint_to_fp (i32
               (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
def : Pat <(f64 (uint_to_fp (i32
               (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
// 32-bits -> double.
defm : UIntToFPROLoadPat<f64, i32, load,
                         UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
def : Pat <(f64 (uint_to_fp (i32
               (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
def : Pat <(f64 (uint_to_fp (i32
               (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
           (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                          (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
// 64-bits -> double are handled in target specific dag combine:
// performIntToFpCombine.
//===----------------------------------------------------------------------===//
// Advanced SIMD three different-sized vector instructions.
//===----------------------------------------------------------------------===//

defm ADDHN  : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
defm SUBHN  : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
defm PMULL  : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
defm SABAL  : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
                                             int_aarch64_neon_sabd>;
defm SABDL  : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
                                         int_aarch64_neon_sabd>;
defm SADDL  : SIMDLongThreeVectorBHS< 0, 0b0000, "saddl",
              BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
defm SADDW  : SIMDWideThreeVectorBHS< 0, 0b0001, "saddw",
              BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
defm SMLAL  : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL  : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL  : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
                                               int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
                                               int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
                                     int_aarch64_neon_sqdmull>;
defm SSUBL  : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
              BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
defm SSUBW  : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
              BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
defm UABAL  : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
                                             int_aarch64_neon_uabd>;
defm UADDL  : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
              BinOpFrag<(add (zext node:$LHS), (zext node:$RHS))>>;
defm UADDW  : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
              BinOpFrag<(add node:$LHS, (zext node:$RHS))>>;
defm UMLAL  : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL  : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL  : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
defm USUBL  : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
              BinOpFrag<(sub (zext node:$LHS), (zext node:$RHS))>>;
defm USUBW  : SIMDWideThreeVectorBHS< 1, 0b0011, "usubw",
              BinOpFrag<(sub node:$LHS, (zext node:$RHS))>>;
// Additional patterns for SMULL and UMULL
multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
                                   Instruction INST8B,
                                   Instruction INST4H,
                                   Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V64:$Rn, V64:$Rm)>;
}
defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
                               SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
                               UMULLv4i16_v4i32, UMULLv2i32_v2i64>;
// Patterns for smull2/umull2.
multiclass Neon_mul_high_patterns<SDPatternOperator opnode,
                                  Instruction INST8B,
                                  Instruction INST4H,
                                  Instruction INST2S> {
  def : Pat<(v8i16 (opnode (extract_high_v16i8 V128:$Rn),
                           (extract_high_v16i8 V128:$Rm))),
            (INST8B V128:$Rn, V128:$Rm)>;
  def : Pat<(v4i32 (opnode (extract_high_v8i16 V128:$Rn),
                           (extract_high_v8i16 V128:$Rm))),
            (INST4H V128:$Rn, V128:$Rm)>;
  def : Pat<(v2i64 (opnode (extract_high_v4i32 V128:$Rn),
                           (extract_high_v4i32 V128:$Rm))),
            (INST2S V128:$Rn, V128:$Rm)>;
}
defm : Neon_mul_high_patterns<AArch64smull, SMULLv16i8_v8i16,
                              SMULLv8i16_v4i32, SMULLv4i32_v2i64>;
defm : Neon_mul_high_patterns<AArch64umull, UMULLv16i8_v8i16,
                              UMULLv8i16_v4i32, UMULLv4i32_v2i64>;
// Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
                                      Instruction INST8B,
                                      Instruction INST4H,
                                      Instruction INST2S> {
  def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
            (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
            (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
  def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
            (INST2S V128:$Rd, V64:$Rn, V64:$Rm)>;
}
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
  SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
defm : Neon_mulacc_widen_patterns<
  TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
  UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
// Patterns for 64-bit pmull
def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
          (PMULLv1i64 V64:$Rn, V64:$Rm)>;
def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
                                    (extractelt (v2i64 V128:$Rm), (i64 1))),
          (PMULLv2i64 V128:$Rn, V128:$Rm)>;
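// E.g. (a sketch): applying pmull64 to lane 1 of both operands, as in
//   %a = extractelement <2 x i64> %x, i64 1
//   %b = extractelement <2 x i64> %y, i64 1
//   %r = call <16 x i8> @llvm.aarch64.neon.pmull64(i64 %a, i64 %b)
// selects the high-half form "pmull2 v0.1q, v1.2d, v2.2d" directly, with no
// lane moves needed to feed the multiply.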
// CodeGen patterns for addhn and subhn instructions, which can actually be
// written in LLVM IR without too much difficulty.
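// E.g. addhn on the bottom half can be expressed as (a sketch):
//   %sum = add <8 x i16> %a, %b
//   %hi  = lshr <8 x i16> %sum, <i16 8, i16 8, i16 8, i16 8,
//                                i16 8, i16 8, i16 8, i16 8>
//   %res = trunc <8 x i16> %hi to <8 x i8>
// which the first ADDHN pattern below selects to "addhn v0.8b, v1.8h, v2.8h".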
// ADDHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
          (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                             (i32 16))))),
          (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                             (i32 32))))),
          (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 8))))),
          (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 16))))),
          (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
                                                      (i32 32))))),
          (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
// SUBHN
def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
          (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                             (i32 16))))),
          (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                             (i32 32))))),
          (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v8i8 V64:$Rd),
                          (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 8))))),
          (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v4i16 V64:$Rd),
                          (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 16))))),
          (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
def : Pat<(concat_vectors (v2i32 V64:$Rd),
                          (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
                                                      (i32 32))))),
          (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
                            V128:$Rn, V128:$Rm)>;
//----------------------------------------------------------------------------
// AdvSIMD bitwise extract from vector instruction.
//----------------------------------------------------------------------------

defm EXT : SIMDBitwiseExtract<"ext">;

def AdjustExtImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
  def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
            (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
  def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
            (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
  // We use EXT to handle extract_subvector to copy the upper 64-bits of a
  // 128-bit vector.
  def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
  // A 64-bit EXT of two halves of the same 128-bit register can be done as a
  // single 128-bit EXT.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
                              (extract_subvector V128:$Rn, (i64 N)),
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
  // A 64-bit EXT of the high half of a 128-bit register can be done using a
  // 128-bit EXT of the whole register with an adjustment to the immediate. The
  // top half of the other operand will be unset, but that doesn't matter as it
  // will not be used.
  def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
                              V64:$Rm,
                              (i32 imm:$imm))),
            (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
                                      (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                                      (AdjustExtImm imm:$imm)), dsub)>;
}
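// E.g. (a sketch) with N = 8 in the byte instantiation below: extracting the
// high 64 bits of a q-register becomes "ext v0.16b, v1.16b, v1.16b, #8", and
// a 64-bit ext whose first operand is that high half is rewritten as a
// 128-bit ext with immediate #(imm + 8), which is what AdjustExtImm computes.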
defm : ExtPat<v8i8, v16i8, 8>;
defm : ExtPat<v4i16, v8i16, 4>;
defm : ExtPat<v4f16, v8f16, 4>;
defm : ExtPat<v2i32, v4i32, 2>;
defm : ExtPat<v2f32, v4f32, 2>;
defm : ExtPat<v1i64, v2i64, 1>;
defm : ExtPat<v1f64, v2f64, 1>;

//----------------------------------------------------------------------------
// AdvSIMD zip vector
//----------------------------------------------------------------------------

defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;

//----------------------------------------------------------------------------
// AdvSIMD TBL/TBX instructions
//----------------------------------------------------------------------------

defm TBL : SIMDTableLookup<    0, "tbl">;
defm TBX : SIMDTableLookupTied<1, "tbx">;

def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBLv16i8One V128:$Ri, V128:$Rn)>;

def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
                                       (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
          (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
                                        (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
          (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;
//----------------------------------------------------------------------------
// AdvSIMD scalar CPY instruction
//----------------------------------------------------------------------------

defm CPY : SIMDScalarCPY<"cpy">;

//----------------------------------------------------------------------------
// AdvSIMD scalar pairwise instructions
//----------------------------------------------------------------------------

defm ADDP    : SIMDPairwiseScalarD<0, 0b11011, "addp">;
defm FADDP   : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
defm FMAXP   : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
defm FMINP   : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;
def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
          (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
          (FADDPv2i32p V64:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
          (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
          (FADDPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
          (FMAXNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
          (FMAXNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
          (FMAXPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
          (FMAXPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
          (FMINNMPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
          (FMINNMPv2i64p V128:$Rn)>;
def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
          (FMINPv2i32p V64:$Rn)>;
def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
          (FMINPv2i64p V128:$Rn)>;
//----------------------------------------------------------------------------
// AdvSIMD INS/DUP instructions
//----------------------------------------------------------------------------

def DUPv8i8gpr  : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;

def DUPv2i64lane : SIMDDup64FromElement;
def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
def DUPv8i8lane  : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;
// DUP from a 64-bit register to a 64-bit register is just a copy
def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
          (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))),
          (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>;

def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
          (v2f32 (DUPv2i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
          (v4f32 (DUPv4i32lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
            (i64 0)))>;
def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
          (v2f64 (DUPv2i64lane
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
            (i64 0)))>;
def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
          (v4f16 (DUPv4i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
          (v8f16 (DUPv8i16lane
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
            (i64 0)))>;
def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
          (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;

def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
          (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
          (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;

// If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
// instruction even if the types don't match: we just have to remap the lane
// carefully. N.b. this trick only applies to truncations.
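// E.g. (a sketch, little-endian): broadcasting the truncated low byte of
// h-lane 3 of a v8i16 into a v8i8 can use "dup v0.8b, v1.b[6]", with
// VecIndex_x2 rescaling the lane index from halfwords to bytes (3 * 2 = 6).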
def VecIndex_x2 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x4 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
def VecIndex_x8 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
}]>;
multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
                            ValueType Src128VT, ValueType ScalVT,
                            Instruction DUP, SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
                                                       imm:$idx)))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
                                                       imm:$idx)))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}
defm : DUPWithTruncPats<v8i8,  v4i16, v8i16, i32, DUPv8i8lane,  VecIndex_x2>;
defm : DUPWithTruncPats<v8i8,  v2i32, v4i32, i32, DUPv8i8lane,  VecIndex_x4>;
defm : DUPWithTruncPats<v4i16, v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;

defm : DUPWithTruncPats<v16i8, v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
defm : DUPWithTruncPats<v16i8, v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
defm : DUPWithTruncPats<v8i16, v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
                               SDNodeXForm IdxXFORM> {
  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
                                                       imm:$idx))))),
            (DUP V128:$Rn, (IdxXFORM imm:$idx))>;

  def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
                                                       imm:$idx))))),
            (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
}
defm : DUPWithTrunci64Pats<v8i8,  DUPv8i8lane,  VecIndex_x8>;
defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane, VecIndex_x2>;

defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;

// SMOV and UMOV definitions, with some extra patterns for convenience
defm SMOV : SMov;
defm UMOV : UMov;
def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
          (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
          (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;

def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
            VectorIndexB:$idx)))), i8),
          (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
            VectorIndexH:$idx)))), i16),
          (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;

// Extracting i8 or i16 elements will have the zero-extend transformed to
// an 'and' mask by type legalization since neither i8 nor i16 are legal types
// for AArch64. Match these patterns here since UMOV already zeroes out the high
// bits of the destination register.
def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
               (i32 0xff)),
          (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
               (i32 0xffff)),
          (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
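
// Illustrative example (editorial note): (and (vector_extract v0.16b, 3),
// 0xff) lowers to a single "umov w0, v0.b[3]" -- UMOV writes the
// zero-extended element, so the AND mask is folded away rather than emitted.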

def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
          (SUBREG_TO_REG (i32 0),
                         (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;

def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
          (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;

def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
          (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
                                (i32 FPR32:$Rn), ssub))>;
def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
          (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
                                (i32 FPR32:$Rn), ssub))>;

def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
          (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
                                (i64 FPR64:$Rn), dsub))>;

def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
          (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;

def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;

def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi16lane
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
              (i64 0)),
            dsub)>;

def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
            (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
          (INSvi16lane
            V128:$Rn, VectorIndexH:$imm,
            (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
            (i64 0))>;

def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (EXTRACT_SUBREG
            (INSvi32lane
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
              VectorIndexS:$imm,
              (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
              (i64 0)),
            dsub)>;

def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
            (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
          (INSvi32lane
            V128:$Rn, VectorIndexS:$imm,
            (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
            (i64 0))>;

def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
            (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
          (INSvi64lane
            V128:$Rn, VectorIndexD:$imm,
            (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
            (i64 0))>;

// Copy an element at a constant index in one vector into a constant indexed
// element of another.
// FIXME refactor to a shared class/def parameterized on vector type, vector
// index type and INS extension
def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
                   (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
                   VectorIndexB:$idx2)),
          (v16i8 (INSvi8lane
                   V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
          )>;
def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
                   (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
                   VectorIndexH:$idx2)),
          (v8i16 (INSvi16lane
                   V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
          )>;
def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
                   (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
                   VectorIndexS:$idx2)),
          (v4i32 (INSvi32lane
                   V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
          )>;
def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
                   (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
                   VectorIndexD:$idx2)),
          (v2i64 (INSvi64lane
                   V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
          )>;

multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
                                ValueType VTScal, Instruction INS> {
  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        (i64 imm:$Immd))),
            (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;

  def : Pat<(VT128 (vector_insert V128:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        (i64 imm:$Immd))),
            (INS V128:$src, imm:$Immd,
                 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
                        (i64 imm:$Immd))),
            (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
                                 imm:$Immd, V128:$Rn, imm:$Immn),
                            dsub)>;

  def : Pat<(VT64 (vector_insert V64:$src,
                        (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
                        (i64 imm:$Immd))),
            (EXTRACT_SUBREG
                (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
                     (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
                dsub)>;
}

defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
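
// Illustrative example (editorial note): inserting lane 0 of one v2f32 into
// lane 1 of another still uses the 128-bit "mov v0.s[1], v1.s[0]"; the
// multiclass above only wraps the 64-bit operands in SUBREG_TO_REG /
// EXTRACT_SUBREG, which are free at the register-copy level.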

// Floating point vector extractions are codegen'd as either a sequence of
// subregister extractions, or a MOV (aka CPY here, alias for DUP) if
// the lane number is anything other than zero.
def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
          (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
          (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
          (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;

def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
          (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
          (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
          (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
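
// Illustrative example (editorial note): extracting lane 0 of v0.4s into s0
// is just a subregister read and emits no instruction, while extracting
// lane 2 becomes "mov s0, v0.s[2]" (the CPY/DUP element form).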

// All concat_vectors operations are canonicalised to act on i64 vectors for
// AArch64. In the general case we need an instruction, which had just as well be
// INS.
class ConcatPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
        (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
                     (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;

def : ConcatPat<v2i64, v1i64>;
def : ConcatPat<v2f64, v1f64>;
def : ConcatPat<v4i32, v2i32>;
def : ConcatPat<v4f32, v2f32>;
def : ConcatPat<v8i16, v4i16>;
def : ConcatPat<v8f16, v4f16>;
def : ConcatPat<v16i8, v8i8>;

// If the high lanes are undef, though, we can just ignore them:
class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
  : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
        (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;

def : ConcatUndefPat<v2i64, v1i64>;
def : ConcatUndefPat<v2f64, v1f64>;
def : ConcatUndefPat<v4i32, v2i32>;
def : ConcatUndefPat<v4f32, v2f32>;
def : ConcatUndefPat<v8i16, v4i16>;
def : ConcatUndefPat<v16i8, v8i8>;

//----------------------------------------------------------------------------
// AdvSIMD across lanes instructions
//----------------------------------------------------------------------------

defm ADDV    : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
defm SMAXV   : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
defm SMINV   : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
defm UMAXV   : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
defm UMINV   : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
defm SADDLV  : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
defm UADDLV  : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
defm FMAXV   : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
defm FMINV   : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;

// Patterns for across-vector intrinsics that have a node equivalent returning
// a vector (with only the low lane defined) instead of a scalar.
// In effect, opNode is the same as (scalar_to_vector (IntNode)).
multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
                                    SDPatternOperator opNode> {
// If a lane instruction caught the vector_extract around opNode, we can
// directly match the latter to the instruction.
def : Pat<(v8i8 (opNode V64:$Rn)),
          (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
def : Pat<(v16i8 (opNode V128:$Rn)),
          (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
def : Pat<(v4i16 (opNode V64:$Rn)),
          (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
def : Pat<(v8i16 (opNode V128:$Rn)),
          (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
def : Pat<(v4i32 (opNode V128:$Rn)),
          (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
           (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;

// If none did, fall back to the explicit patterns, consuming the vector_extract.
def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
            (i32 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
            bsub), ssub)>;
def : Pat<(i32 (vector_extract (insert_subvector undef,
            (v4i16 (opNode V64:$Rn)), (i32 0)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
            hsub), ssub)>;
def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
          (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
            (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
            ssub), ssub)>;
}

multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
                                          SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a sign extension after this intrinsic, consume it as SMOV
// already performed it.
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
          (i32 (SMOVvi8to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
            (i64 0)))>;
def : Pat<(i32 (sext_inreg (i32 (vector_extract
            (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
          (i32 (SMOVvi16to32
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
            (i64 0)))>;
}

multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
                                            SDPatternOperator opNode>
    : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
// If there is a masking operation keeping only what has been actually
// generated, consume it.
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), maski8_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
            ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
            maski8_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
            ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
            (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), maski16_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
            ssub))>;
def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
            maski16_or_more)),
          (i32 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
            ssub))>;
}

defm : SIMDAcrossLanesSignedIntrinsic<"ADDV",  AArch64saddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
          (ADDPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
          (SMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
          (SMINPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
          (UMAXPv2i32 V64:$Rn, V64:$Rn)>;

defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
          (UMINPv2i32 V64:$Rn, V64:$Rn)>;
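
// Illustrative example (editorial note): a two-lane vector has no
// across-lanes form, so e.g. vaddv_u32 becomes "addp v0.2s, v1.2s, v1.2s"
// followed by a lane-0 read, pairing the vector with itself.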

multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
              ssub))>;
  def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
              ssub))>;

  def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
              ssub))>;
  def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
              ssub))>;

  def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
            (i64 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
              dsub))>;
}

multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
                                                Intrinsic intOp> {
  def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
              ssub))>;
  def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
              ssub))>;

  def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
              ssub))>;
  def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
            (i32 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
              ssub))>;

  def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
            (i64 (EXTRACT_SUBREG
              (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
                (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
              dsub))>;
}

defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;

// The vaddlv_s32 intrinsic gets mapped to SADDLP.
def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (SADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;
// The vaddlv_u32 intrinsic gets mapped to UADDLP.
def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
          (i64 (EXTRACT_SUBREG
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
              (UADDLPv2i32_v1i64 V64:$Rn), dsub),
            dsub))>;

//------------------------------------------------------------------------------
// AdvSIMD modified immediate instructions
//------------------------------------------------------------------------------

// AdvSIMD BIC
defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
// AdvSIMD ORR
defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;

def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd,  imm0_255:$imm, 0)>;
def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;

// AdvSIMD FMOV
def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
                                                "fmov", ".2d",
                       [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64, fpimm8,
                                                "fmov", ".2s",
                       [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
                                                "fmov", ".4s",
                       [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
let Predicates = [HasNEON, HasFullFP16] in {
def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64, fpimm8,
                                                "fmov", ".4h",
                       [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
                                                "fmov", ".8h",
                       [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
} // Predicates = [HasNEON, HasFullFP16]

// AdvSIMD MOVI

// EDIT byte mask: scalar
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVID : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
                                         [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 here.
def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
          (MOVID imm0_255:$shift)>;

// EDIT byte mask: 2d

// The movi_edit node has the immediate value already encoded, so we use
// a plain imm0_255 in the pattern
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def MOVIv2d_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
                                              simdimmtype10,
                                              "movi", ".2d",
                   [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;

def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;

def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;

// Set 64-bit vectors to all 0/1 by extracting from a 128-bit register as the
// extract is free and this gives better MachineCSE results.
def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
def : Pat<(v8i8  immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;

def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
def : Pat<(v8i8  immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
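
// Illustrative example (editorial note): in the type-10 encoding each
// immediate bit expands to one byte, so #255 expands to eight 0xff bytes,
// i.e. "movi v0.2d, #0xffffffffffffffff" materializes all-ones.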

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MOVI : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;

def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
// EDIT per word: 2s & 4s with MSL shifter
def MOVIv2s_msl : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MOVIv4s_msl : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;

// Per byte: 8b & 16b
def MOVIv8b_ns  : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64, imm0_255,
                                               "movi", ".8b",
                       [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;

def MOVIv16b_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
                                               "movi", ".16b",
                       [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
}

// AdvSIMD MVNI

// EDIT per word & halfword: 2s, 4h, 4s, & 8h
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
defm MVNI : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;

def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd,  imm0_255:$imm, 0), 0>;
def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;

def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
          (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;

// EDIT per word: 2s & 4s with MSL shifter
let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def MVNIv2s_msl : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
                      [(set (v2i32 V64:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
def MVNIv4s_msl : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
                      [(set (v4i32 V128:$Rd),
                            (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
}

//----------------------------------------------------------------------------
// AdvSIMD indexed element
//----------------------------------------------------------------------------

let hasSideEffects = 0 in {
  defm FMLA : SIMDFPIndexedTied<0, 0b0001, "fmla">;
  defm FMLS : SIMDFPIndexedTied<0, 0b0101, "fmls">;
}

// NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
// instruction expects the addend first, while the intrinsic expects it last.

// On the other hand, there are quite a few valid combinatorial options due to
// the commutativity of multiplication and the fact that (-x) * y = x * (-y).
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
defm : SIMDFPIndexedTiedPatterns<"FMLA",
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;

defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
defm : SIMDFPIndexedTiedPatterns<"FMLS",
           TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
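
// Illustrative example (editorial note): all four FMLS frags describe the
// same computation, e.g. (fma (fneg x), y, acc) selects to
// "fmls v0.4s, v1.4s, v2.s[idx]", i.e. acc - x * y.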

multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
  // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                             VectorIndexS:$idx))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (v2f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i32 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
                               (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
  // and DUP scalar.
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
                                             VectorIndexS:$idx))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (v4f32 (AArch64duplane32
                                      (v4f32 (insert_subvector undef,
                                                 (v2f32 (fneg V64:$Rm)),
                                                 (i32 0))),
                                      VectorIndexS:$idx)))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
                               VectorIndexS:$idx)>;
  def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
                           (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
            (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;

  // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
  // (DUPLANE from 64-bit would be trivial).
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
                                             VectorIndexD:$idx))),
            (FMLSv2i64_indexed
                V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
                           (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
            (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
                               (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;

  // 2 variants for 32-bit scalar version: extract from .2s or from .4s
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
  def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
                         (vector_extract (v4f32 (insert_subvector undef,
                                                    (v2f32 (fneg V64:$Rm)),
                                                    (i32 0))),
                                         VectorIndexS:$idx))),
            (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
                (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;

  // 1 variant for 64-bit scalar version: extract from .1d or from .2d
  def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
                         (vector_extract (v2f64 (fneg V128:$Rm)),
                                         VectorIndexS:$idx))),
            (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
                V128:$Rm, VectorIndexS:$idx)>;
}

defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
defm : FMLSIndexedAfterNegPatterns<
           TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;

defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
defm FMUL  : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;

def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv2i32_indexed V64:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
          (FMULv4i32_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
            (i64 0))>;
def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
          (FMULv2i64_indexed V128:$Rn,
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
            (i64 0))>;

defm SQDMULH  : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;

// Generated by MachineCombine
defm MLA   : SIMDVectorIndexedHSTied<1, 0b0000, "mla", null_frag>;
defm MLS   : SIMDVectorIndexedHSTied<1, 0b0100, "mls", null_frag>;

defm MUL   : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
    int_aarch64_neon_smull>;
defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
                                           int_aarch64_neon_sqadd>;
defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
                                           int_aarch64_neon_sqsub>;
defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
                                          int_aarch64_neon_sqadd>;
defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
                                          int_aarch64_neon_sqsub>;
defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
defm UMLAL   : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
    TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMLSL   : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
    TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
defm UMULL   : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
    int_aarch64_neon_umull>;

// A scalar sqdmull with the second operand being a vector lane can be
// handled directly with the indexed instruction encoding.
def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
                                            (vector_extract (v4i32 V128:$Vm),
                                                            VectorIndexS:$idx)),
          (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
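
// Illustrative example (editorial note): for vqdmulls_lane_s32 this selects
// the single instruction "sqdmull d0, s1, v2.s[3]" instead of extracting
// the lane into a scalar register first.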

//----------------------------------------------------------------------------
// AdvSIMD scalar shift instructions
//----------------------------------------------------------------------------
defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
defm SCVTF  : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
defm UCVTF  : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
// Codegen patterns for the above. We don't put these directly on the
// instructions because TableGen's type inference can't handle the truth.
// Having the same base pattern for fp <--> int totally freaks it out.
def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
          (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
          (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
          (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
                                              vecshiftR64:$imm)),
          (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
          (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
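
// Illustrative example (editorial note): vcvts_n_s32_f32(x, 8) becomes
// "fcvtzs s0, s0, #8", a float to fixed-point conversion with 8 fractional
// bits, without a round trip through the general-purpose registers.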

// Patterns for FP16 intrinsics - requires reg copy to/from as i16s not supported.
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (SCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
            (and FPR32:$Rn, (i32 65535)),
            vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
          (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
            hsub))>;
def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
          (i64 (INSERT_SUBREG
            (i64 (IMPLICIT_DEF)),
            (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facge (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGE16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;
def : Pat<(i32 (int_aarch64_neon_facgt (f16 FPR16:$Rn), (f16 FPR16:$Rm))),
          (i32 (INSERT_SUBREG
            (i32 (IMPLICIT_DEF)),
            (FACGT16 FPR16:$Rn, FPR16:$Rm),
            hsub))>;

defm SHL      : SIMDScalarLShiftD<   0, 0b01010, "shl", AArch64vshl>;
defm SLI      : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
defm SQRSHRN  : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
                                     int_aarch64_neon_sqrshrn>;
defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
                                     int_aarch64_neon_sqrshrun>;
defm SQSHLU   : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL    : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN   : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
                                     int_aarch64_neon_sqshrn>;
defm SQSHRUN  : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
                                     int_aarch64_neon_sqshrun>;
defm SRI      : SIMDScalarRShiftDTied<1, 0b01000, "sri">;
defm SRSHR    : SIMDScalarRShiftD<   0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA    : SIMDScalarRShiftDTied<0, 0b00110, "srsra",
    TriOpFrag<(add node:$LHS,
                   (AArch64srshri node:$MHS, node:$RHS))>>;
defm SSHR     : SIMDScalarRShiftD<   0, 0b00000, "sshr", AArch64vashr>;
defm SSRA     : SIMDScalarRShiftDTied<0, 0b00010, "ssra",
    TriOpFrag<(add node:$LHS,
                   (AArch64vashr node:$MHS, node:$RHS))>>;
defm UQRSHRN  : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
                                     int_aarch64_neon_uqrshrn>;
defm UQSHL    : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN   : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
                                     int_aarch64_neon_uqshrn>;
defm URSHR    : SIMDScalarRShiftD<   1, 0b00100, "urshr", AArch64urshri>;
defm URSRA    : SIMDScalarRShiftDTied<1, 0b00110, "ursra",
    TriOpFrag<(add node:$LHS,
                   (AArch64urshri node:$MHS, node:$RHS))>>;
defm USHR     : SIMDScalarRShiftD<   1, 0b00000, "ushr", AArch64vlshr>;
defm USRA     : SIMDScalarRShiftDTied<1, 0b00010, "usra",
    TriOpFrag<(add node:$LHS,
                   (AArch64vlshr node:$MHS, node:$RHS))>>;

//----------------------------------------------------------------------------
// AdvSIMD vector shift instructions
//----------------------------------------------------------------------------
defm FCVTZS : SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
defm FCVTZU : SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
defm SCVTF  : SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
                                   int_aarch64_neon_vcvtfxs2fp>;
defm RSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
                                        int_aarch64_neon_rshrn>;
defm SHL    : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
defm SHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
    BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
defm SLI    : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", int_aarch64_neon_vsli>;
def : Pat<(v1i64 (int_aarch64_neon_vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                        (i32 vecshiftL64:$imm))),
          (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
defm SQRSHRN  : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
                                          int_aarch64_neon_sqrshrn>;
defm SQRSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
                                          int_aarch64_neon_sqrshrun>;
defm SQSHLU   : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
defm SQSHL    : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
defm SQSHRN   : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
                                          int_aarch64_neon_sqshrn>;
defm SQSHRUN  : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
                                          int_aarch64_neon_sqshrun>;
defm SRI      : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", int_aarch64_neon_vsri>;
def : Pat<(v1i64 (int_aarch64_neon_vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
                                        (i32 vecshiftR64:$imm))),
          (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
defm SRSHR    : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
defm SRSRA    : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
                  TriOpFrag<(add node:$LHS,
                                 (AArch64srshri node:$MHS, node:$RHS))> >;
defm SSHLL    : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
                  BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;

defm SSHR     : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
defm SSRA     : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
                  TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
defm UCVTF    : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
                                     int_aarch64_neon_vcvtfxu2fp>;
defm UQRSHRN  : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
                                          int_aarch64_neon_uqrshrn>;
defm UQSHL    : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
defm UQSHRN   : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
                                          int_aarch64_neon_uqshrn>;
defm URSHR    : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
defm URSRA    : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
                  TriOpFrag<(add node:$LHS,
                                 (AArch64urshri node:$MHS, node:$RHS))> >;
defm USHLL    : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
                  BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
defm USHR     : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
defm USRA     : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
                  TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;

// SHRN patterns for when a logical right shift was used instead of arithmetic
// (the immediate guarantees no sign bits actually end up in the result so it
// doesn't matter).
def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
          (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
          (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
          (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;

def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
                                 (trunc (AArch64vlshr (v8i16 V128:$Rn),
                                                      vecshiftR16Narrow:$imm)))),
          (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR16Narrow:$imm)>;
def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
                                 (trunc (AArch64vlshr (v4i32 V128:$Rn),
                                                      vecshiftR32Narrow:$imm)))),
          (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR32Narrow:$imm)>;
def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
                                 (trunc (AArch64vlshr (v2i64 V128:$Rn),
                                                      vecshiftR64Narrow:$imm)))),
          (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
                           V128:$Rn, vecshiftR64Narrow:$imm)>;
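
// Illustrative example (editorial note): the concat form maps to the "2"
// variant, e.g. "shrn2 v0.4s, v1.2d, #16" narrows into the high half while
// the low half (V64:$Rd) is left in place.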

// Vector sign and zero extensions are implemented with SSHLL and USHLL.
// Anyexts are implemented as zexts.
def : Pat<(v8i16 (sext   (v8i8 V64:$Rn))),  (SSHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))),  (USHLLv8i8_shift  V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
// Also match an extend from the upper half of a 128 bit source register.
def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8))))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (zext   (v8i8 (extract_subvector V128:$Rn, (i64 8))))),
          (USHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v8i16 (sext   (v8i8 (extract_subvector V128:$Rn, (i64 8))))),
          (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4))))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (zext   (v4i16 (extract_subvector V128:$Rn, (i64 4))))),
          (USHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v4i32 (sext   (v4i16 (extract_subvector V128:$Rn, (i64 4))))),
          (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2))))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (zext   (v2i32 (extract_subvector V128:$Rn, (i64 2))))),
          (USHLLv4i32_shift V128:$Rn, (i32 0))>;
def : Pat<(v2i64 (sext   (v2i32 (extract_subvector V128:$Rn, (i64 2))))),
          (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
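
// Illustrative example (editorial note): a zext of the top half of a v16i8
// selects the second-half form "ushll2 v0.8h, v1.16b, #0", so no separate
// lane extraction is needed.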

// Vector shift sxtl aliases
def : InstAlias<"sxtl.8h $dst, $src1",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.8h, $src1.8b",
                (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.4s $dst, $src1",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.4s, $src1.4h",
                (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl.2d $dst, $src1",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"sxtl $dst.2d, $src1.2s",
                (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift sxtl2 aliases
def : InstAlias<"sxtl2.8h $dst, $src1",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
                (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.4s $dst, $src1",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
                (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2.2d $dst, $src1",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
                (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;

// Vector shift uxtl aliases
def : InstAlias<"uxtl.8h $dst, $src1",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.8h, $src1.8b",
                (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.4s $dst, $src1",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.4s, $src1.4h",
                (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl.2d $dst, $src1",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
def : InstAlias<"uxtl $dst.2d, $src1.2s",
                (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;

// Vector shift uxtl2 aliases
def : InstAlias<"uxtl2.8h $dst, $src1",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
                (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.4s $dst, $src1",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
                (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2.2d $dst, $src1",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
                (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
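
// Illustrative note (editorial): these aliases let "uxtl v0.8h, v1.8b"
// assemble as the canonical "ushll v0.8h, v1.8b, #0", and likewise
// sxtl2/uxtl2 for the second-half forms.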

// If an integer is about to be converted to a floating point value,
// just load it on the floating point unit.
// These patterns are more complex because floating point loads do not
// support sign extension.
// The sign extension has to be explicitly added and is only supported for
// one step: byte-to-half, half-to-word, word-to-doubleword.
// SCVTF GPR -> FPR is 9 cycles.
// SCVTF FPR -> FPR is 4 cycles.
// (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
// Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
// and still be faster.
// However, this is not good for code size.
// 8-bits -> float. 2 sizes step-up.
class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                              (f64
                                (EXTRACT_SUBREG
                                  (SSHLLv8i8_shift
                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                        INST,
                                        bsub),
                                    0),
                                  dsub)),
                              0),
                            ssub)))>,
    Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
                          (LDRBroW GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
                          (LDRBroX GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
                          (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
                          (LDURBi GPR64sp:$Rn, simm9:$offset)>;
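
// Illustrative expansion of the class above (editorial note):
//   ldr b0, [x0]              // load the byte straight onto the FP unit
//   sshll v0.8h, v0.8b, #0    // sign extend byte -> half
//   sshll v0.4s, v0.4h, #0    // sign extend half -> word
//   scvtf s0, s0              // convert FPR -> FPR, avoiding the GPR path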

// 16-bits -> float. 1 size step-up.
class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
  : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
        (SCVTFv1i32 (f32 (EXTRACT_SUBREG
                            (SSHLLv4i16_shift
                                (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                  INST,
                                  hsub),
                                0),
                            ssub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;

// 32-bits to 32-bits are handled in target specific dag combine:
// performIntToFpCombine.
// 64-bits integer to 32-bits floating point, not possible with
// SCVTF on floating point registers (both source and destination
// must have the same size).

// Here are the patterns for 8, 16, 32, and 64-bits to double.
// 8-bits -> double. 3 size step-up: give up.
// 16-bits -> double. 2 size step.
class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
         (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                            (SSHLLv2i32_shift
                               (f64
                                (EXTRACT_SUBREG
                                  (SSHLLv4i16_shift
                                    (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                      INST,
                                      hsub),
                                    0),
                                  dsub)),
                               0),
                             dsub)))>,
     Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;

def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
                           (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
                           (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
                           (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
                           (LDURHi GPR64sp:$Rn, simm9:$offset)>;
// 32-bits -> double. 1 size step-up.
class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
  : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
         (SCVTFv1i64 (f64 (EXTRACT_SUBREG
                            (SSHLLv2i32_shift
                               (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                  INST,
                                  ssub),
                               0),
                             dsub)))>, Requires<[NotForCodeSize]>;

def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
                           (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
                           (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
                           (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
                           (LDURSi GPR64sp:$Rn, simm9:$offset)>;

// 64-bits -> double are handled in target specific dag combine:
// performIntToFpCombine.

//----------------------------------------------------------------------------
// AdvSIMD Load-Store Structure
//----------------------------------------------------------------------------
defm LD1 : SIMDLd1Multiple<"ld1">;
defm LD2 : SIMDLd2Multiple<"ld2">;
defm LD3 : SIMDLd3Multiple<"ld3">;
defm LD4 : SIMDLd4Multiple<"ld4">;

defm ST1 : SIMDSt1Multiple<"st1">;
defm ST2 : SIMDSt2Multiple<"st2">;
defm ST3 : SIMDSt3Multiple<"st3">;
defm ST4 : SIMDSt4Multiple<"st4">;

class Ld1Pat<ValueType ty, Instruction INST>
  : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;

def : Ld1Pat<v16i8, LD1Onev16b>;
def : Ld1Pat<v8i16, LD1Onev8h>;
def : Ld1Pat<v4i32, LD1Onev4s>;
def : Ld1Pat<v2i64, LD1Onev2d>;
def : Ld1Pat<v8i8,  LD1Onev8b>;
def : Ld1Pat<v4i16, LD1Onev4h>;
def : Ld1Pat<v2i32, LD1Onev2s>;
def : Ld1Pat<v1i64, LD1Onev1d>;

class St1Pat<ValueType ty, Instruction INST>
  : Pat<(store ty:$Vt, GPR64sp:$Rn),
        (INST ty:$Vt, GPR64sp:$Rn)>;

def : St1Pat<v16i8, ST1Onev16b>;
def : St1Pat<v8i16, ST1Onev8h>;
def : St1Pat<v4i32, ST1Onev4s>;
def : St1Pat<v2i64, ST1Onev2d>;
def : St1Pat<v8i8,  ST1Onev8b>;
def : St1Pat<v4i16, ST1Onev4h>;
def : St1Pat<v2i32, ST1Onev2s>;
def : St1Pat<v1i64, ST1Onev1d>;

//---
// Single-element
//---

defm LD1R : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
defm LD2R : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
defm LD3R : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
defm LD4R : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
let mayLoad = 1, hasSideEffects = 0 in {
defm LD1 : SIMDLdSingleBTied<0, 0b000,       "ld1", VecListOneb,   GPR64pi1>;
defm LD1 : SIMDLdSingleHTied<0, 0b010, 0,    "ld1", VecListOneh,   GPR64pi2>;
defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes,   GPR64pi4>;
defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned,   GPR64pi8>;
defm LD2 : SIMDLdSingleBTied<1, 0b000,       "ld2", VecListTwob,   GPR64pi2>;
defm LD2 : SIMDLdSingleHTied<1, 0b010, 0,    "ld2", VecListTwoh,   GPR64pi4>;
defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos,   GPR64pi8>;
defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod,   GPR64pi16>;
defm LD3 : SIMDLdSingleBTied<0, 0b001,       "ld3", VecListThreeb, GPR64pi3>;
defm LD3 : SIMDLdSingleHTied<0, 0b011, 0,    "ld3", VecListThreeh, GPR64pi6>;
defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
defm LD4 : SIMDLdSingleBTied<1, 0b001,       "ld4", VecListFourb,  GPR64pi4>;
defm LD4 : SIMDLdSingleHTied<1, 0b011, 0,    "ld4", VecListFourh,  GPR64pi8>;
defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours,  GPR64pi16>;
defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd,  GPR64pi32>;
}

def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv8b GPR64sp:$Rn)>;
def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
          (LD1Rv16b GPR64sp:$Rn)>;
def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
// Grab the floating point version too
def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv2s GPR64sp:$Rn)>;
def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
          (LD1Rv4s GPR64sp:$Rn)>;
def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv2d GPR64sp:$Rn)>;
def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
          (LD1Rv1d GPR64sp:$Rn)>;
def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv4h GPR64sp:$Rn)>;
def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
          (LD1Rv8h GPR64sp:$Rn)>;
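
// Illustrative example (editorial note): a vld1q_dup_f32-style dag,
// (v4f32 (AArch64dup (f32 (load x0)))), becomes the single instruction
// "ld1r { v0.4s }, [x0]" instead of a scalar load plus a dup.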
class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne128:$Rd),
           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;

def : Ld1Lane128Pat<extloadi8,  VectorIndexB, v16i8, i32, LD1i8>;
def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4i32, i32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexS, v4f32, f32, LD1i32>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2i64, i64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexD, v2f64, f64, LD1i64>;
def : Ld1Lane128Pat<load,       VectorIndexH, v8f16, f16, LD1i16>;
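
// For illustration: inserting a loaded scalar into a known lane, e.g.
//   %s  = load i32, ptr %p
//   %v2 = insertelement <4 x i32> %v, i32 %s, i64 2
// is matched by Ld1Lane128Pat and selects "ld1 { v0.s }[2], [x0]".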
class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction LD1>
  : Pat<(vector_insert (VTy VecListOne64:$Rd),
           (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
        (EXTRACT_SUBREG
            (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
                 VecIndex:$idx, GPR64sp:$Rn),
            dsub)>;

def : Ld1Lane64Pat<extloadi8,  VectorIndexB, v8i8,  i32, LD1i8>;
def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2i32, i32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexS, v2f32, f32, LD1i32>;
def : Ld1Lane64Pat<load,       VectorIndexH, v4f16, f16, LD1i16>;
defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
defm LD4 : SIMDLdSt4SingleAliases<"ld4">;

// Stores
defm ST1 : SIMDStSingleB<0, 0b000,       "st1", VecListOneb, GPR64pi1>;
defm ST1 : SIMDStSingleH<0, 0b010, 0,    "st1", VecListOneh, GPR64pi2>;
defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
let AddedComplexity = 19 in
class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                    ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane128Pat<truncstorei8,  VectorIndexB, v16i8, i32, ST1i8>;
def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
def : St1Lane128Pat<store,         VectorIndexS, v4i32, i32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexS, v4f32, f32, ST1i32>;
def : St1Lane128Pat<store,         VectorIndexD, v2i64, i64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexD, v2f64, f64, ST1i64>;
def : St1Lane128Pat<store,         VectorIndexH, v8f16, f16, ST1i16>;
let AddedComplexity = 19 in
class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                   ValueType VTy, ValueType STy, Instruction ST1>
  : Pat<(scalar_store
             (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
             GPR64sp:$Rn),
        (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
             VecIndex:$idx, GPR64sp:$Rn)>;

def : St1Lane64Pat<truncstorei8,  VectorIndexB, v8i8,  i32, ST1i8>;
def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
def : St1Lane64Pat<store,         VectorIndexS, v2i32, i32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexS, v2f32, f32, ST1i32>;
def : St1Lane64Pat<store,         VectorIndexH, v4f16, f16, ST1i16>;
multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
                            ValueType VTy, ValueType STy, Instruction ST1,
                            int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
            (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
                 VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
            (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
                 VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
                        2>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
                             ValueType VTy, ValueType STy, Instruction ST1,
                             int offset> {
  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, offset),
            (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;

  def : Pat<(scalar_store
              (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
              GPR64sp:$Rn, GPR64:$Rm),
            (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
}

defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
                         1>;
defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
                         2>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
let mayStore = 1, hasSideEffects = 0 in {
defm ST2 : SIMDStSingleB<1, 0b000,       "st2", VecListTwob,   GPR64pi2>;
defm ST2 : SIMDStSingleH<1, 0b010, 0,    "st2", VecListTwoh,   GPR64pi4>;
defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos,   GPR64pi8>;
defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod,   GPR64pi16>;
defm ST3 : SIMDStSingleB<0, 0b001,       "st3", VecListThreeb, GPR64pi3>;
defm ST3 : SIMDStSingleH<0, 0b011, 0,    "st3", VecListThreeh, GPR64pi6>;
defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
defm ST4 : SIMDStSingleB<1, 0b001,       "st4", VecListFourb,  GPR64pi4>;
defm ST4 : SIMDStSingleH<1, 0b011, 0,    "st4", VecListFourh,  GPR64pi8>;
defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours,  GPR64pi16>;
defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd,  GPR64pi32>;
}

defm ST1 : SIMDLdSt1SingleAliases<"st1">;
defm ST2 : SIMDLdSt2SingleAliases<"st2">;
defm ST3 : SIMDLdSt3SingleAliases<"st3">;
defm ST4 : SIMDLdSt4SingleAliases<"st4">;
//----------------------------------------------------------------------------
// Crypto extensions
//----------------------------------------------------------------------------

let Predicates = [HasAES] in {
def AESErr   : AESTiedInst<0b0100, "aese",   int_aarch64_crypto_aese>;
def AESDrr   : AESTiedInst<0b0101, "aesd",   int_aarch64_crypto_aesd>;
def AESMCrr  : AESInst<    0b0110, "aesmc",  int_aarch64_crypto_aesmc>;
def AESIMCrr : AESInst<    0b0111, "aesimc", int_aarch64_crypto_aesimc>;
}
// Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
// for AES fusion on some CPUs.
let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
def AESMCrrTied  : Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                   Sched<[WriteV]>;
def AESIMCrrTied : Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
                   Sched<[WriteV]>;
}

// Only use constrained versions of AES(I)MC instructions if they are paired
// with AESE/AESD.
def : Pat<(v16i8 (int_aarch64_crypto_aesmc
            (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
                                             (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;

def : Pat<(v16i8 (int_aarch64_crypto_aesimc
            (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
                                            (v16i8 V128:$src2))))),
          (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
                                              (v16i8 V128:$src2)))))>,
          Requires<[HasFuseAES]>;
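
// For illustration: because AESMCrrTied ties $Rn to $Rd, a fused pair is
// guaranteed to be emitted register-adjacent, e.g.
//   aese  v0.16b, v1.16b
//   aesmc v0.16b, v0.16b
// which AES-fusing cores can combine into a single macro-op.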
let Predicates = [HasSHA2] in {
def SHA1Crrr     : SHATiedInstQSV<0b000, "sha1c",   int_aarch64_crypto_sha1c>;
def SHA1Prrr     : SHATiedInstQSV<0b001, "sha1p",   int_aarch64_crypto_sha1p>;
def SHA1Mrrr     : SHATiedInstQSV<0b010, "sha1m",   int_aarch64_crypto_sha1m>;
def SHA1SU0rrr   : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
def SHA256Hrrr   : SHATiedInstQQV<0b100, "sha256h",  int_aarch64_crypto_sha256h>;
def SHA256H2rrr  : SHATiedInstQQV<0b101, "sha256h2", int_aarch64_crypto_sha256h2>;
def SHA256SU1rrr : SHATiedInstVVV<0b110, "sha256su1", int_aarch64_crypto_sha256su1>;

def SHA1Hrr     : SHAInstSS<    0b0000, "sha1h",     int_aarch64_crypto_sha1h>;
def SHA1SU1rr   : SHATiedInstVV<0b0001, "sha1su1",   int_aarch64_crypto_sha1su1>;
def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0", int_aarch64_crypto_sha256su0>;
}
//----------------------------------------------------------------------------
// Compiler-pseudos
//----------------------------------------------------------------------------
// FIXME: Like for X86, these should go in their own separate .td file.

def def32 : PatLeaf<(i32 GPR32:$src), [{
  return isDef32(*N);
}]>;
// In the case of a 32-bit def that is known to implicitly zero-extend,
// we can use a SUBREG_TO_REG.
def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;
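
// For illustration: if $src was defined by a 32-bit arithmetic instruction
// (which architecturally zeroes bits [63:32] of the X register), the zext
// above costs no instruction at all; only subregister bookkeeping changes.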
// For an anyext, we don't care what the high bits are, so we can perform an
// INSERT_SUBREG into an IMPLICIT_DEF.
def : Pat<(i64 (anyext GPR32:$src)),
          (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;

// When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
// then assert the extension has happened.
def : Pat<(i64 (zext GPR32:$src)),
          (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;

// To sign extend, we use a signed bitfield move instruction (SBFM) on the
// containing super-reg.
def : Pat<(i64 (sext GPR32:$src)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i8)),  (SBFMXri GPR64:$src, 0, 7)>;
def : Pat<(i64 (sext_inreg GPR64:$src, i1)),  (SBFMXri GPR64:$src, 0, 0)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i8)),  (SBFMWri GPR32:$src, 0, 7)>;
def : Pat<(i32 (sext_inreg GPR32:$src, i1)),  (SBFMWri GPR32:$src, 0, 0)>;
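
// For illustration: (i32 (sext_inreg GPR32:$src, i8)) selects
//   sbfm w0, w0, #0, #7
// which disassembles as the alias "sxtb w0, w0".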
def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                   (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_sext_i8 imm0_63:$imm)))>;

def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
          (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
                   (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_sext_i16 imm0_63:$imm)))>;

def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 (i64shift_a imm0_63:$imm)),
                   (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
// sra patterns have an AddedComplexity of 10, so make sure we have a higher
// AddedComplexity for the following patterns since we want to match sext + sra
// patterns before we attempt to match a single sra node.
let AddedComplexity = 20 in {
// We support all sext + sra combinations which preserve at least one bit of the
// original value which is to be sign extended. E.g. we support shifts up to
// bitwidth-1 bits.
def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;

def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
          (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;

def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
          (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
                   (i64 imm0_31:$imm), 31)>;
} // AddedComplexity = 20
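
// For illustration: with the patterns above, (sra (sext_inreg %x, i8), 3)
// collapses to the single instruction
//   sbfm w0, w0, #3, #7    (alias: sbfx w0, w0, #3, #5)
// instead of a sxtb followed by an asr.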
// To truncate, we can simply extract from a subregister.
def : Pat<(i32 (trunc GPR64sp:$src)),
          (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;

// __builtin_trap() uses the BRK instruction on AArch64.
def : Pat<(trap), (BRK 1)>;
def : Pat<(debugtrap), (BRK 0xF000)>, Requires<[IsWindows]>;
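
// For illustration: __builtin_trap() therefore emits "brk #0x1", and on
// Windows targets __builtin_debugtrap() emits "brk #0xf000".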
// Multiply high patterns which multiply the lower subvector using smull/umull
// and the upper subvector with smull2/umull2. Then shuffle the high parts of
// both results together.
def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;

def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v16i8
           (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
                            (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v8i16
           (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
          (UZP2v4i32
           (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
                             (EXTRACT_SUBREG V128:$Rm, dsub)),
           (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
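
// For illustration, v8i16 mulhs selects (registers arbitrary):
//   smull  v2.4s, v0.4h, v1.4h
//   smull2 v3.4s, v0.8h, v1.8h
//   uzp2   v0.8h, v2.8h, v3.8h
// UZP2 keeps the odd-numbered 16-bit lanes, i.e. the high half of each
// 32-bit product.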
// Conversions within AdvSIMD types in the same register size are free.
// But because we need a consistent lane ordering, in big endian many
// conversions require one or more REV instructions.
//
// Consider a simple memory load followed by a bitconvert then a store.
//   v0 = load v2i32
//   v1 = BITCAST v2i32 v0 to v4i16
//        store v4i16 v1
//
// In big endian mode every memory access has an implicit byte swap. LDR and
// STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
// is, they treat the vector as a sequence of elements to be byte-swapped.
// The two pairs of instructions are fundamentally incompatible. We've decided
// to use LD1/ST1 only to simplify compiler implementation.
//
// LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
// the original code sequence:
//   v0 = load v2i32
//   v1 = REV v2i32                  (implicit)
//   v2 = BITCAST v2i32 v1 to v4i16
//   v3 = REV v4i16 v2               (implicit)
//        store v4i16 v3
//
// But this is now broken - the value stored is different to the value loaded
// due to lane reordering. To fix this, on every BITCAST we must perform two
// other REVs:
//   v0 = load v2i32
//   v1 = REV v2i32                  (implicit)
//   v2 = REV v2i32
//   v3 = BITCAST v2i32 v2 to v4i16
//   v4 = REV v4i16
//   v5 = REV v4i16 v4               (implicit)
//        store v4i16 v5
//
// This means an extra two instructions, but actually in most cases the two REV
// instructions can be combined into one. For example:
//   (REV64_2s (REV64_4h X)) === (REV32_4h X)
//
// There is also no 128-bit REV instruction. This must be synthesized with an
// EXT instruction.
//
// Most bitconverts require some sort of conversion. The only exceptions are:
//   a) Identity conversions -  vNfX <-> vNiX
//   b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
//
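// For illustration: in big-endian mode a v2i32 -> v4i16 bitconvert therefore
// selects a single combined "rev32 v0.4h, v0.4h" via the REV32v4i16 pattern
// below, rather than two separate REV64s.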
// Natural vector casts (64 bit)
def : Pat<(v8i8  (AArch64NvCast (v2i32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8  (AArch64NvCast (v4i16 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8  (AArch64NvCast (v8i8 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;

def : Pat<(v8i8  (AArch64NvCast (f64 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;

def : Pat<(v8i8  (AArch64NvCast (v2f32 FPR64:$src))), (v8i8  FPR64:$src)>;
def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1f64 (AArch64NvCast (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
// Natural vector casts (128 bit)
def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;

def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;

def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8  (bitconvert GPR64:$Xn)),
          (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
          (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
          (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
          (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
          (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;

def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
          (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
          (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
          (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
}
def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;

def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
          (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
          (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
          (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
          (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
          (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
let Predicates = [IsLE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v8i8  FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
          (v1i64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
          (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))),
          (v1i64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
          (v1i64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
          (v1i64 (REV64v2i32 FPR64:$src))>;
}
def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
def : Pat<(v1i64 (bitconvert (f64   FPR64:$src))), (v1i64 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v8i8  FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (f64   FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
          (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
          (v2i32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))),
          (v2i32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))),
          (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
          (v2i32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
          (v2i32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v8i8  FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (f64   FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
          (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
          (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))),
          (v4i16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))),
          (v4i16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
          (v4i16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
          (v4i16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v8i8  FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (f64   FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
          (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))),
          (v4f16 (REV16v8i8 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (f64 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
          (v4f16 (REV32v4i16 FPR64:$src))>;
def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
          (v4f16 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (f64   FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))), (v8i8 FPR64:$src)>;
def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))), (v8i8 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))),
          (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))),
          (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))),
          (v8i8 (REV16v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))),
          (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))),
          (v8i8 (REV32v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))),
          (v8i8 (REV64v8i8 FPR64:$src))>;
def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))),
          (v8i8 (REV16v8i8 FPR64:$src))>;
}
let Predicates = [IsLE] in {
def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v8i8  FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))), (f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))),
          (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))),
          (f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))),
          (f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))),
          (f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))),
          (f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v8i8  FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
          (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
          (v1f64 (REV64v4i16 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))),
          (v1f64 (REV64v8i8 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
          (v1f64 (REV64v2i32 FPR64:$src))>;
def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
          (v1f64 (REV64v4i16 FPR64:$src))>;
}
def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
def : Pat<(v1f64 (bitconvert (f64   FPR64:$src))), (v1f64 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v8i8  FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (f64   FPR64:$src))), (v2f32 FPR64:$src)>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
          (v2f32 (REV32v4i16 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))),
          (v2f32 (REV32v8i8 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))),
          (v2f32 (REV64v2i32 FPR64:$src))>;
def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
          (v2f32 (REV32v4i16 FPR64:$src))>;
}
def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
let Predicates = [IsLE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
          (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                          (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                          (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
                          (REV64v8i16 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
          (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
                          (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
          (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
                          (REV64v16i8 FPR128:$src), (i32 8)))>;
}
let Predicates = [IsLE] in {
def : Pat<(v2f64 (bitconvert (f128  FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))),
          (v2f64 (EXTv16i8 FPR128:$src,
                           FPR128:$src, (i32 8)))>;
def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
          (v2f64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
          (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
          (v2f64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
          (v2f64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
          (v2f64 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v4f32 (bitconvert (f128  FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))),
          (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                           (REV64v4i32 FPR128:$src), (i32 8)))>;
def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
          (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
          (v4f32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
          (v4f32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
          (v4f32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
          (v4f32 (REV64v4i32 FPR128:$src))>;
}
def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v2i64 (bitconvert (f128  FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))),
          (v2i64 (EXTv16i8 FPR128:$src,
                           FPR128:$src, (i32 8)))>;
def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
          (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
          (v2i64 (REV64v8i16 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
          (v2i64 (REV64v16i8 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
          (v2i64 (REV64v4i32 FPR128:$src))>;
def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
          (v2i64 (REV64v8i16 FPR128:$src))>;
}
def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v4i32 (bitconvert (f128  FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))),
          (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
                           (REV64v4i32 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
          (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
          (v4i32 (REV32v8i16 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
          (v4i32 (REV32v16i8 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
          (v4i32 (REV64v4i32 FPR128:$src))>;
def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
          (v4i32 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v8i16 (bitconvert (f128  FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))),
          (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                           (REV64v8i16 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
          (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
          (v8i16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
          (v8i16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
          (v8i16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
          (v8i16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v8f16 (bitconvert (f128  FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))),
          (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
                           (REV64v8i16 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
          (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
          (v8f16 (REV32v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
          (v8f16 (REV16v16i8 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
          (v8f16 (REV64v8i16 FPR128:$src))>;
def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
          (v8f16 (REV32v8i16 FPR128:$src))>;
}
def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
let Predicates = [IsLE] in {
def : Pat<(v16i8 (bitconvert (f128  FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
}
let Predicates = [IsBE] in {
def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))),
          (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
                           (REV64v16i8 FPR128:$src),
                           (i32 8)))>;
def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
          (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
          (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
          (v16i8 (REV16v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
          (v16i8 (REV64v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
          (v16i8 (REV32v16i8 FPR128:$src))>;
def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
          (v16i8 (REV16v16i8 FPR128:$src))>;
}
def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
          (EXTRACT_SUBREG V128:$Rn, dsub)>;
def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
          (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
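
// For illustration: extracting the high half this way emits
//   dup v0.2d, v0.d[1]
// and then reads the result as a D register, avoiding a round trip through
// the general-purpose registers.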
// A 64-bit subvector insert to the first 128-bit vector position
// is a subregister copy that needs no instruction.
multiclass InsertSubvectorUndef<ValueType Ty> {
  def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
  def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
            (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
}

defm : InsertSubvectorUndef<i32>;
defm : InsertSubvectorUndef<i64>;
// Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
// and v2f32.
def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
                    (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
          (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
                     (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
          (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
// vector_extract on 64-bit vectors gets promoted to a 128 bit vector,
// so we match on v4f32 here, not v2f32. This will also catch adding
// the low two lanes of a true v4f32 vector.
def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
                (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
          (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
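
// For illustration: summing the two lanes of a v2f64, e.g.
//   %a = extractelement <2 x double> %v, i64 0
//   %b = extractelement <2 x double> %v, i64 1
//   %s = fadd double %a, %b
// selects the single pairwise add "faddp d0, v0.2d".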
// Scalar 64-bit shifts in FPR64 registers.
def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
          (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
// Patterns for nontemporal/no-allocate stores.
// We have to resort to tricks to turn a single-input store into a store pair,
// because there is no single-input nontemporal store, only STNP.
let Predicates = [IsLE] in {
let AddedComplexity = 15 in {
class NTStore128Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR128:$Rt),
        (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
      (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
              (CPYi64 FPR128:$Rt, (i64 1)),
              GPR64sp:$Rn, simm7s8:$offset)>;

def : NTStore128Pat<v2i64>;
def : NTStore128Pat<v4i32>;
def : NTStore128Pat<v8i16>;
def : NTStore128Pat<v16i8>;

class NTStore64Pat<ValueType VT> :
  Pat<(nontemporalstore (VT FPR64:$Rt),
        (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
      (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
              (CPYi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
              GPR64sp:$Rn, simm7s4:$offset)>;

// FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
def : NTStore64Pat<v1f64>;
def : NTStore64Pat<v1i64>;
def : NTStore64Pat<v2i32>;
def : NTStore64Pat<v4i16>;
def : NTStore64Pat<v8i8>;

def : Pat<(nontemporalstore GPR64:$Rt,
            (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
          (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
                  (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
                  GPR64sp:$Rn, simm7s4:$offset)>;
} // AddedComplexity = 15
} // Predicates = [IsLE]
// Tail call return handling. These are all compiler pseudo-instructions,
// so no encoding information or anything like that.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
  def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
                   Sched<[WriteBrReg]>;
  // Indirect tail-call with any register allowed, used by MachineOutliner when
  // this is proven safe.
  // FIXME: If we have to add any more hacks like this, we should instead relax
  // some verifier checks for outlined functions.
  def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
  // Indirect tail-call limited to x16 and x17, the only registers from which
  // an indirect branch may target a "BTI c" instruction.
  def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
                      Sched<[WriteBrReg]>;
}

def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
      Requires<[NotUseBTI]>;
def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
          (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
      Requires<[UseBTI]>;
def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
          (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;

def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;
// Extracting lane zero is a special case where we can just use a plain
// EXTRACT_SUBREG instruction, which will become FMOV. This is easier for the
// rest of the compiler, especially the register allocator and copy
// propagation, to reason about, so is preferred when it's possible to use it.
let AddedComplexity = 10 in {
  def : Pat<(i64 (extractelt (v2i64 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, dsub)>;
  def : Pat<(i32 (extractelt (v4i32 V128:$V), (i64 0))), (EXTRACT_SUBREG V128:$V, ssub)>;
  def : Pat<(i32 (extractelt (v2i32 V64:$V), (i64 0))), (EXTRACT_SUBREG V64:$V, ssub)>;
}
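
// For illustration: (i32 (extractelt (v4i32 %v), 0)) becomes a plain ssub
// subregister read, so any needed GPR copy is "fmov w0, s0" rather than
// "umov w0, v0.s[0]".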
// dot_v4i8
class mul_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm, node:$offset),
          (mul (ldop (add node:$Rn, node:$offset)),
               (ldop (add node:$Rm, node:$offset)))>;
class mulz_v4i8<SDPatternOperator ldop> :
  PatFrag<(ops node:$Rn, node:$Rm),
          (mul (ldop node:$Rn), (ldop node:$Rm))>;

def load_v4i8 :
  OutPatFrag<(ops node:$R),
             (INSERT_SUBREG
              (v2i32 (IMPLICIT_DEF)),
               (i32 (COPY_TO_REGCLASS (LDRWui node:$R, (i64 0)), FPR32)),
              ssub)>;

class dot_v4i8<Instruction DOT, SDPatternOperator ldop> :
  Pat<(i32 (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 3)),
                (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 2)),
                     (add (mul_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm, (i64 1)),
                          (mulz_v4i8<ldop> GPR64sp:$Rn, GPR64sp:$Rm))))),
      (EXTRACT_SUBREG (i64 (DOT (DUPv2i32gpr WZR),
                                (load_v4i8 GPR64sp:$Rn),
                                (load_v4i8 GPR64sp:$Rm))),
                      sub_32)>, Requires<[HasDotProd]>;
// dot_v8i8
class ee_v8i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K),
          (v4i16 (extract_subvector (v8i16 (extend node:$V)), node:$K))>;

class mul_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K),
          (mulop (v4i16 (ee_v8i8<extend> node:$M, node:$K)),
                 (v4i16 (ee_v8i8<extend> node:$N, node:$K)))>;

class idot_v8i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 0)),
                 (mul_v8i8<mulop, extend> node:$M, node:$N, (i64 4))))),
           (i64 0)))>;

// vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
def VADDV_32 : OutPatFrag<(ops node:$R), (ADDPv2i32 node:$R, node:$R)>;

class odot_v8i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (EXTRACT_SUBREG
              (VADDV_32
               (i64 (DOT (DUPv2i32gpr WZR),
                         (v8i8 node:$Vm),
                         (v8i8 node:$Vn)))),
              sub_32)>;

class dot_v8i8<Instruction DOT, SDPatternOperator mulop,
               SDPatternOperator extend> :
  Pat<(idot_v8i8<mulop, extend> V64:$Vm, V64:$Vn),
      (odot_v8i8<DOT> V64:$Vm, V64:$Vn)>,
  Requires<[HasDotProd]>;
// dot_v16i8
class ee_v16i8<SDPatternOperator extend> :
  PatFrag<(ops node:$V, node:$K1, node:$K2),
          (v4i16 (extract_subvector
           (v8i16 (extend
            (v8i8 (extract_subvector node:$V, node:$K1)))), node:$K2))>;

class mul_v16i8<SDPatternOperator mulop, SDPatternOperator extend> :
  PatFrag<(ops node:$M, node:$N, node:$K1, node:$K2),
          (v4i32
           (mulop (v4i16 (ee_v16i8<extend> node:$M, node:$K1, node:$K2)),
                  (v4i16 (ee_v16i8<extend> node:$N, node:$K1, node:$K2))))>;

class idot_v16i8<SDPatternOperator m, SDPatternOperator x> :
  PatFrag<(ops node:$M, node:$N),
          (i32 (extractelt
           (v4i32 (AArch64uaddv
            (add
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 0)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 0))),
             (add (mul_v16i8<m, x> node:$M, node:$N, (i64 0), (i64 4)),
                  (mul_v16i8<m, x> node:$M, node:$N, (i64 8), (i64 4)))))),
           (i64 0)))>;

class odot_v16i8<Instruction DOT> :
  OutPatFrag<(ops node:$Vm, node:$Vn),
             (i32 (ADDVv4i32v
              (DOT (DUPv4i32gpr WZR), node:$Vm, node:$Vn)))>;

class dot_v16i8<Instruction DOT, SDPatternOperator mulop,
                SDPatternOperator extend> :
  Pat<(idot_v16i8<mulop, extend> V128:$Vm, V128:$Vn),
      (odot_v16i8<DOT> V128:$Vm, V128:$Vn)>,
  Requires<[HasDotProd]>;
let AddedComplexity = 10 in {
  def : dot_v4i8<SDOTv8i8, sextloadi8>;
  def : dot_v4i8<UDOTv8i8, zextloadi8>;
  def : dot_v8i8<SDOTv8i8, AArch64smull, sext>;
  def : dot_v8i8<UDOTv8i8, AArch64umull, zext>;
  def : dot_v16i8<SDOTv16i8, AArch64smull, sext>;
  def : dot_v16i8<UDOTv16i8, AArch64umull, zext>;
}
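
// For illustration (a rough sketch; register choices arbitrary): the v8i8
// sum-of-byte-products idiom matched by dot_v8i8 selects approximately
//   dup  v2.2s, wzr
//   udot v2.2s, v0.8b, v1.8b
//   addp v2.2s, v2.2s, v2.2s
//   fmov w0, s2
// on targets with the dot-product extension.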
// FIXME: add patterns to generate vector by element dot product.
// FIXME: add SVE dot-product patterns.

include "AArch64InstrAtomics.td"
include "AArch64SVEInstrInfo.td"