//=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// AArch64 Instruction definitions.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// AArch64 Instruction Predicate Definitions.
//
17 def HasV8_1a : Predicate<"Subtarget->hasV8_1aOps()">,
18 AssemblerPredicate<"HasV8_1aOps", "armv8.1a">;
19 def HasV8_2a : Predicate<"Subtarget->hasV8_2aOps()">,
20 AssemblerPredicate<"HasV8_2aOps", "armv8.2a">;
21 def HasV8_3a : Predicate<"Subtarget->hasV8_3aOps()">,
22 AssemblerPredicate<"HasV8_3aOps", "armv8.3a">;
23 def HasV8_4a : Predicate<"Subtarget->hasV8_4aOps()">,
24 AssemblerPredicate<"HasV8_4aOps", "armv8.4a">;
25 def HasV8_5a : Predicate<"Subtarget->hasV8_5aOps()">,
26 AssemblerPredicate<"HasV8_5aOps", "armv8.5a">;
27 def HasVH : Predicate<"Subtarget->hasVH()">,
28 AssemblerPredicate<"FeatureVH", "vh">;
30 def HasLOR : Predicate<"Subtarget->hasLOR()">,
31 AssemblerPredicate<"FeatureLOR", "lor">;
33 def HasPA : Predicate<"Subtarget->hasPA()">,
34 AssemblerPredicate<"FeaturePA", "pa">;
36 def HasJS : Predicate<"Subtarget->hasJS()">,
37 AssemblerPredicate<"FeatureJS", "jsconv">;
39 def HasCCIDX : Predicate<"Subtarget->hasCCIDX()">,
40 AssemblerPredicate<"FeatureCCIDX", "ccidx">;
42 def HasComplxNum : Predicate<"Subtarget->hasComplxNum()">,
43 AssemblerPredicate<"FeatureComplxNum", "complxnum">;
45 def HasNV : Predicate<"Subtarget->hasNV()">,
46 AssemblerPredicate<"FeatureNV", "nv">;
48 def HasRASv8_4 : Predicate<"Subtarget->hasRASv8_4()">,
49 AssemblerPredicate<"FeatureRASv8_4", "rasv8_4">;
51 def HasMPAM : Predicate<"Subtarget->hasMPAM()">,
52 AssemblerPredicate<"FeatureMPAM", "mpam">;
54 def HasDIT : Predicate<"Subtarget->hasDIT()">,
55 AssemblerPredicate<"FeatureDIT", "dit">;
57 def HasTRACEV8_4 : Predicate<"Subtarget->hasTRACEV8_4()">,
58 AssemblerPredicate<"FeatureTRACEV8_4", "tracev8.4">;
60 def HasAM : Predicate<"Subtarget->hasAM()">,
61 AssemblerPredicate<"FeatureAM", "am">;
63 def HasSEL2 : Predicate<"Subtarget->hasSEL2()">,
64 AssemblerPredicate<"FeatureSEL2", "sel2">;
66 def HasTLB_RMI : Predicate<"Subtarget->hasTLB_RMI()">,
67 AssemblerPredicate<"FeatureTLB_RMI", "tlb-rmi">;
69 def HasFMI : Predicate<"Subtarget->hasFMI()">,
70 AssemblerPredicate<"FeatureFMI", "fmi">;
72 def HasRCPC_IMMO : Predicate<"Subtarget->hasRCPCImm()">,
73 AssemblerPredicate<"FeatureRCPC_IMMO", "rcpc-immo">;
75 def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8()">,
76 AssemblerPredicate<"FeatureFPARMv8", "fp-armv8">;
77 def HasNEON : Predicate<"Subtarget->hasNEON()">,
78 AssemblerPredicate<"FeatureNEON", "neon">;
79 def HasCrypto : Predicate<"Subtarget->hasCrypto()">,
80 AssemblerPredicate<"FeatureCrypto", "crypto">;
81 def HasSM4 : Predicate<"Subtarget->hasSM4()">,
82 AssemblerPredicate<"FeatureSM4", "sm4">;
83 def HasSHA3 : Predicate<"Subtarget->hasSHA3()">,
84 AssemblerPredicate<"FeatureSHA3", "sha3">;
85 def HasSHA2 : Predicate<"Subtarget->hasSHA2()">,
86 AssemblerPredicate<"FeatureSHA2", "sha2">;
87 def HasAES : Predicate<"Subtarget->hasAES()">,
88 AssemblerPredicate<"FeatureAES", "aes">;
89 def HasDotProd : Predicate<"Subtarget->hasDotProd()">,
90 AssemblerPredicate<"FeatureDotProd", "dotprod">;
91 def HasCRC : Predicate<"Subtarget->hasCRC()">,
92 AssemblerPredicate<"FeatureCRC", "crc">;
93 def HasLSE : Predicate<"Subtarget->hasLSE()">,
94 AssemblerPredicate<"FeatureLSE", "lse">;
95 def HasRAS : Predicate<"Subtarget->hasRAS()">,
96 AssemblerPredicate<"FeatureRAS", "ras">;
97 def HasRDM : Predicate<"Subtarget->hasRDM()">,
98 AssemblerPredicate<"FeatureRDM", "rdm">;
99 def HasPerfMon : Predicate<"Subtarget->hasPerfMon()">;
100 def HasFullFP16 : Predicate<"Subtarget->hasFullFP16()">,
101 AssemblerPredicate<"FeatureFullFP16", "fullfp16">;
102 def HasFP16FML : Predicate<"Subtarget->hasFP16FML()">,
103 AssemblerPredicate<"FeatureFP16FML", "fp16fml">;
104 def HasSPE : Predicate<"Subtarget->hasSPE()">,
105 AssemblerPredicate<"FeatureSPE", "spe">;
106 def HasFuseAES : Predicate<"Subtarget->hasFuseAES()">,
                       AssemblerPredicate<"FeatureFuseAES",
                                          "fuse-aes">;
109 def HasSVE : Predicate<"Subtarget->hasSVE()">,
110 AssemblerPredicate<"FeatureSVE", "sve">;
111 def HasRCPC : Predicate<"Subtarget->hasRCPC()">,
112 AssemblerPredicate<"FeatureRCPC", "rcpc">;
113 def HasAltNZCV : Predicate<"Subtarget->hasAlternativeNZCV()">,
114 AssemblerPredicate<"FeatureAltFPCmp", "altnzcv">;
115 def HasFRInt3264 : Predicate<"Subtarget->hasFRInt3264()">,
116 AssemblerPredicate<"FeatureFRInt3264", "frint3264">;
117 def HasSB : Predicate<"Subtarget->hasSB()">,
118 AssemblerPredicate<"FeatureSB", "sb">;
119 def HasPredRes : Predicate<"Subtarget->hasPredRes()">,
120 AssemblerPredicate<"FeaturePredRes", "predres">;
121 def HasCCDP : Predicate<"Subtarget->hasCCDP()">,
122 AssemblerPredicate<"FeatureCacheDeepPersist", "ccdp">;
123 def HasBTI : Predicate<"Subtarget->hasBTI()">,
124 AssemblerPredicate<"FeatureBranchTargetId", "bti">;
125 def HasMTE : Predicate<"Subtarget->hasMTE()">,
126 AssemblerPredicate<"FeatureMTE", "mte">;
127 def IsLE : Predicate<"Subtarget->isLittleEndian()">;
128 def IsBE : Predicate<"!Subtarget->isLittleEndian()">;
129 def UseAlternateSExtLoadCVTF32
130 : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;
132 def UseNegativeImmediates
133 : Predicate<"false">, AssemblerPredicate<"!FeatureNoNegativeImmediates",
134 "NegativeImmediates">;
136 def AArch64LocalRecover : SDNode<"ISD::LOCAL_RECOVER",
                                  SDTypeProfile<1, 1, [SDTCisSameAs<0, 1>,
                                                       SDTCisInt<1>]>>;
141 //===----------------------------------------------------------------------===//
142 // AArch64-specific DAG Nodes.
145 // SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
                                               [SDTCisSameAs<0, 2>,
                                                SDTCisSameAs<0, 3>,
                                                SDTCisInt<0>, SDTCisVT<1, i32>]>;
151 // SDTBinaryArithWithFlagsIn - RES1, FLAGS = op LHS, RHS, FLAGS
152 def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
158 // SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
159 def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
166 def SDT_AArch64Brcond : SDTypeProfile<0, 3,
167 [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
169 def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
170 def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
171 SDTCisVT<2, OtherVT>]>;
174 def SDT_AArch64CSel : SDTypeProfile<1, 4,
179 def SDT_AArch64CCMP : SDTypeProfile<1, 5,
186 def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
193 def SDT_AArch64FCmp : SDTypeProfile<0, 2,
                                   [SDTCisFP<0>,
                                    SDTCisSameAs<0, 1>]>;
196 def SDT_AArch64Dup : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
197 def SDT_AArch64DupLane : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
198 def SDT_AArch64Zip : SDTypeProfile<1, 2, [SDTCisVec<0>,
                                            SDTCisSameAs<0, 1>,
                                            SDTCisSameAs<0, 2>]>;
201 def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
202 def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
203 def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
204 SDTCisInt<2>, SDTCisInt<3>]>;
205 def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
206 def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
207 SDTCisSameAs<0,2>, SDTCisInt<3>]>;
208 def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;
210 def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
211 def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
212 def SDT_AArch64fcmp : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>]>;
def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
                                             SDTCisSameAs<0,2>,
                                             SDTCisSameAs<0,3>]>;
218 def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
219 def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;
221 def SDT_AArch64ITOF : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;
def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
                                                   SDTCisPtrTy<1>]>;
226 // Generates the general dynamic sequences, i.e.
227 // adrp x0, :tlsdesc:var
228 // ldr x1, [x0, #:tlsdesc_lo12:var]
//  add   x0, x0, #:tlsdesc_lo12:var
//  .tlsdesccall var
//  blr   x1
//
// (the TPIDR_EL0 offset is put directly in X0, hence no "result" here)
// number of operands (the variable)
def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0,1,
                                              [SDTCisPtrTy<0>]>;
238 def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
239 [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
240 SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
241 SDTCisSameAs<1, 4>]>;
245 def AArch64adrp : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
246 def AArch64adr : SDNode<"AArch64ISD::ADR", SDTIntUnaryOp, []>;
247 def AArch64addlow : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
248 def AArch64LOADgot : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
249 def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
                                  SDCallSeqStart<[ SDTCisVT<0, i32>,
                                                   SDTCisVT<1, i32> ]>,
                                  [SDNPHasChain, SDNPOutGlue]>;
253 def AArch64callseq_end : SDNode<"ISD::CALLSEQ_END",
                                  SDCallSeqEnd<[ SDTCisVT<0, i32>,
                                                 SDTCisVT<1, i32> ]>,
                                  [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
257 def AArch64call : SDNode<"AArch64ISD::CALL",
258 SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
                                  [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                                   SDNPVariadic]>;
def AArch64brcond        : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
                                  [SDNPHasChain]>;
def AArch64cbz           : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
                                  [SDNPHasChain]>;
def AArch64cbnz          : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
                                  [SDNPHasChain]>;
def AArch64tbz           : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
                                  [SDNPHasChain]>;
def AArch64tbnz          : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
                                  [SDNPHasChain]>;
273 def AArch64csel : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
274 def AArch64csinv : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
275 def AArch64csneg : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
276 def AArch64csinc : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
277 def AArch64retflag : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
278 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
279 def AArch64adc : SDNode<"AArch64ISD::ADC", SDTBinaryArithWithFlagsIn >;
280 def AArch64sbc : SDNode<"AArch64ISD::SBC", SDTBinaryArithWithFlagsIn>;
def AArch64add_flag      : SDNode<"AArch64ISD::ADDS",  SDTBinaryArithWithFlagsOut,
                                  [SDNPCommutative]>;
def AArch64sub_flag      : SDNode<"AArch64ISD::SUBS",  SDTBinaryArithWithFlagsOut>;
def AArch64and_flag      : SDNode<"AArch64ISD::ANDS",  SDTBinaryArithWithFlagsOut,
                                  [SDNPCommutative]>;
286 def AArch64adc_flag : SDNode<"AArch64ISD::ADCS", SDTBinaryArithWithFlagsInOut>;
287 def AArch64sbc_flag : SDNode<"AArch64ISD::SBCS", SDTBinaryArithWithFlagsInOut>;
289 def AArch64ccmp : SDNode<"AArch64ISD::CCMP", SDT_AArch64CCMP>;
290 def AArch64ccmn : SDNode<"AArch64ISD::CCMN", SDT_AArch64CCMP>;
291 def AArch64fccmp : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;
293 def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;
295 def AArch64fcmp : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
297 def AArch64dup : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
298 def AArch64duplane8 : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
299 def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
300 def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
301 def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;
303 def AArch64zip1 : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
304 def AArch64zip2 : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
305 def AArch64uzp1 : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
306 def AArch64uzp2 : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
307 def AArch64trn1 : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
308 def AArch64trn2 : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;
310 def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
311 def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
312 def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
313 def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
314 def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
315 def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
316 def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;
318 def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
319 def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
320 def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
321 def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;
323 def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
324 def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
325 def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
326 def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
327 def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
328 def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
329 def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
330 def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
332 def AArch64not: SDNode<"AArch64ISD::NOT", SDT_AArch64unvec>;
333 def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
334 def AArch64bsl: SDNode<"AArch64ISD::BSL", SDT_AArch64trivec>;
336 def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
337 def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
338 def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
339 def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
340 def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;
342 def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
343 def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
344 def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;
346 def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
347 def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
348 def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
349 def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
350 def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
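// There is no dedicated CMTST node; it is expressed via the identity
// cmtst(a, b) == not(cmeq((a & b), 0)), as the PatFrag below shows.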
351 def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
352 (AArch64not (AArch64cmeqz (and node:$LHS, node:$RHS)))>;
354 def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
355 def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
356 def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
357 def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
358 def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;
360 def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
361 def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;
363 def AArch64neg : SDNode<"AArch64ISD::NEG", SDT_AArch64unvec>;
365 def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
366 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
368 def AArch64Prefetch : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
369 [SDNPHasChain, SDNPSideEffect]>;
371 def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
372 def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;
374 def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
375 SDT_AArch64TLSDescCallSeq,
                                    [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
                                     SDNPVariadic]>;
380 def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
381 SDT_AArch64WrapperLarge>;
383 def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;
385 def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
386 SDTCisSameAs<1, 2>]>;
387 def AArch64smull : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
388 def AArch64umull : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;
390 def AArch64frecpe : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
391 def AArch64frecps : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
392 def AArch64frsqrte : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
393 def AArch64frsqrts : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;
395 def AArch64saddv : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
396 def AArch64uaddv : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
397 def AArch64sminv : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
398 def AArch64uminv : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
399 def AArch64smaxv : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
400 def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;
402 //===----------------------------------------------------------------------===//
404 //===----------------------------------------------------------------------===//
406 // AArch64 Instruction Predicate Definitions.
407 // We could compute these on a per-module basis but doing so requires accessing
408 // the Function object through the <Target>Subtarget and objections were raised
409 // to that (see post-commit review comments for r301750).
410 let RecomputePerFunction = 1 in {
411 def ForCodeSize : Predicate<"MF->getFunction().optForSize()">;
412 def NotForCodeSize : Predicate<"!MF->getFunction().optForSize()">;
413 // Avoid generating STRQro if it is slow, unless we're optimizing for code size.
414 def UseSTRQro : Predicate<"!Subtarget->isSTRQroSlow() || MF->getFunction().optForSize()">;
416 def UseBTI : Predicate<[{ MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>;
def NotUseBTI : Predicate<[{ !MF->getFunction().hasFnAttribute("branch-target-enforcement") }]>;
} // RecomputePerFunction = 1
420 include "AArch64InstrFormats.td"
421 include "SVEInstrFormats.td"
423 //===----------------------------------------------------------------------===//
425 //===----------------------------------------------------------------------===//
426 // Miscellaneous instructions.
427 //===----------------------------------------------------------------------===//
429 let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
430 // We set Sched to empty list because we expect these instructions to simply get
431 // removed in most cases.
432 def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(AArch64callseq_start timm:$amt1, timm:$amt2)]>,
                              Sched<[]>;
435 def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
                            Sched<[]>;
438 } // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1
440 let isReMaterializable = 1, isCodeGenOnly = 1 in {
441 // FIXME: The following pseudo instructions are only needed because remat
442 // cannot handle multiple instructions. When that changes, they can be
443 // removed, along with the AArch64Wrapper node.
445 let AddedComplexity = 10 in
446 def LOADgot : Pseudo<(outs GPR64:$dst), (ins i64imm:$addr),
                     [(set GPR64:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
              Sched<[WriteLDAdr]>;
// The MOVaddr instruction should match only when the add is not folded
// into a load or store address.
def MOVaddr
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
                                              tglobaladdr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrJT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
                                              tjumptable:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrCP
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
                                              tconstpool:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrBA
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
                                              tblockaddress:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrTLS
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
                                              tglobaltlsaddr:$low))]>,
      Sched<[WriteAdrAdr]>;
def MOVaddrEXT
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
                                              texternalsym:$low))]>,
      Sched<[WriteAdrAdr]>;
// Normally AArch64addlow either gets folded into a following ldr/str,
// or together with an adrp into MOVaddr above. For cases with TLS, it
// might appear without either of them, so allow lowering it into a plain
// ADD.
def ADDlowTLS
    : Pseudo<(outs GPR64:$dst), (ins GPR64:$src, i64imm:$low),
             [(set GPR64:$dst, (AArch64addlow GPR64:$src,
                                              tglobaltlsaddr:$low))]>,
      Sched<[WriteAdr]>;
492 } // isReMaterializable, isCodeGenOnly
494 def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
495 (LOADgot tglobaltlsaddr:$addr)>;
497 def : Pat<(AArch64LOADgot texternalsym:$addr),
498 (LOADgot texternalsym:$addr)>;
500 def : Pat<(AArch64LOADgot tconstpool:$addr),
501 (LOADgot tconstpool:$addr)>;
503 // 32-bit jump table destination is actually only 2 instructions since we can
504 // use the table itself as a PC-relative base. But optimization occurs after
505 // branch relaxation so be pessimistic.
506 let Size = 12, Constraints = "@earlyclobber $dst,@earlyclobber $scratch" in {
def JumpTableDest32 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest16 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                             (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                      Sched<[]>;
def JumpTableDest8 : Pseudo<(outs GPR64:$dst, GPR64sp:$scratch),
                            (ins GPR64:$table, GPR64:$entry, i32imm:$jti), []>,
                     Sched<[]>;
}
518 // Space-consuming pseudo to aid testing of placement and reachability
519 // algorithms. Immediate operand is the number of bytes this "instruction"
// occupies; register operands can be used to enforce dependency and constrain
// the scheduler.
522 let hasSideEffects = 1, mayLoad = 1, mayStore = 1 in
523 def SPACE : Pseudo<(outs GPR64:$Rd), (ins i32imm:$size, GPR64:$Rn),
                   [(set GPR64:$Rd, (int_aarch64_space imm:$size, GPR64:$Rn))]>,
            Sched<[]>;
527 let hasSideEffects = 1, isCodeGenOnly = 1 in {
528 def SpeculationSafeValueX
529 : Pseudo<(outs GPR64:$dst), (ins GPR64:$src), []>, Sched<[]>;
530 def SpeculationSafeValueW
      : Pseudo<(outs GPR32:$dst), (ins GPR32:$src), []>, Sched<[]>;
}
535 //===----------------------------------------------------------------------===//
536 // System instructions.
537 //===----------------------------------------------------------------------===//
539 def HINT : HintI<"hint">;
540 def : InstAlias<"nop", (HINT 0b000)>;
541 def : InstAlias<"yield",(HINT 0b001)>;
542 def : InstAlias<"wfe", (HINT 0b010)>;
543 def : InstAlias<"wfi", (HINT 0b011)>;
544 def : InstAlias<"sev", (HINT 0b100)>;
545 def : InstAlias<"sevl", (HINT 0b101)>;
546 def : InstAlias<"esb", (HINT 0b10000)>, Requires<[HasRAS]>;
547 def : InstAlias<"csdb", (HINT 20)>;
548 def : InstAlias<"bti", (HINT 32)>, Requires<[HasBTI]>;
549 def : InstAlias<"bti $op", (HINT btihint_op:$op)>, Requires<[HasBTI]>;
551 // v8.2a Statistical Profiling extension
552 def : InstAlias<"psb $op", (HINT psbhint_op:$op)>, Requires<[HasSPE]>;
554 // As far as LLVM is concerned this writes to the system's exclusive monitors.
555 let mayLoad = 1, mayStore = 1 in
556 def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;
558 // NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
559 // model patterns with sufficiently fine granularity.
560 let mayLoad = ?, mayStore = ? in {
561 def DMB : CRmSystemI<barrier_op, 0b101, "dmb",
562 [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;
564 def DSB : CRmSystemI<barrier_op, 0b100, "dsb",
565 [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;
567 def ISB : CRmSystemI<barrier_op, 0b110, "isb",
568 [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;
570 def TSB : CRmSystemI<barrier_op, 0b010, "tsb", []> {
  let Predicates = [HasTRACEV8_4];
}
} // let mayLoad = ?, mayStore = ?
577 // ARMv8.2-A Dot Product
578 let Predicates = [HasDotProd] in {
579 defm SDOT : SIMDThreeSameVectorDot<0, "sdot", int_aarch64_neon_sdot>;
580 defm UDOT : SIMDThreeSameVectorDot<1, "udot", int_aarch64_neon_udot>;
581 defm SDOTlane : SIMDThreeSameVectorDotIndex<0, "sdot", int_aarch64_neon_sdot>;
defm UDOTlane : SIMDThreeSameVectorDotIndex<1, "udot", int_aarch64_neon_udot>;
} // Predicates = [HasDotProd]
585 // ARMv8.2-A FP16 Fused Multiply-Add Long
586 let Predicates = [HasNEON, HasFP16FML] in {
587 defm FMLAL : SIMDThreeSameVectorFML<0, 1, 0b001, "fmlal", int_aarch64_neon_fmlal>;
588 defm FMLSL : SIMDThreeSameVectorFML<0, 1, 0b101, "fmlsl", int_aarch64_neon_fmlsl>;
589 defm FMLAL2 : SIMDThreeSameVectorFML<1, 0, 0b001, "fmlal2", int_aarch64_neon_fmlal2>;
590 defm FMLSL2 : SIMDThreeSameVectorFML<1, 0, 0b101, "fmlsl2", int_aarch64_neon_fmlsl2>;
591 defm FMLALlane : SIMDThreeSameVectorFMLIndex<0, 0b0000, "fmlal", int_aarch64_neon_fmlal>;
592 defm FMLSLlane : SIMDThreeSameVectorFMLIndex<0, 0b0100, "fmlsl", int_aarch64_neon_fmlsl>;
593 defm FMLAL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1000, "fmlal2", int_aarch64_neon_fmlal2>;
defm FMLSL2lane : SIMDThreeSameVectorFMLIndex<1, 0b1100, "fmlsl2", int_aarch64_neon_fmlsl2>;
} // Predicates = [HasNEON, HasFP16FML]
597 // Armv8.2-A Crypto extensions
598 let Predicates = [HasSHA3] in {
599 def SHA512H : CryptoRRRTied<0b0, 0b00, "sha512h">;
600 def SHA512H2 : CryptoRRRTied<0b0, 0b01, "sha512h2">;
601 def SHA512SU0 : CryptoRRTied_2D<0b0, 0b00, "sha512su0">;
602 def SHA512SU1 : CryptoRRRTied_2D<0b0, 0b10, "sha512su1">;
603 def RAX1 : CryptoRRR_2D<0b0,0b11, "rax1">;
604 def EOR3 : CryptoRRRR_16B<0b00, "eor3">;
605 def BCAX : CryptoRRRR_16B<0b01, "bcax">;
def XAR : CryptoRRRi6<"xar">;
} // Predicates = [HasSHA3]
609 let Predicates = [HasSM4] in {
610 def SM3TT1A : CryptoRRRi2Tied<0b0, 0b00, "sm3tt1a">;
611 def SM3TT1B : CryptoRRRi2Tied<0b0, 0b01, "sm3tt1b">;
612 def SM3TT2A : CryptoRRRi2Tied<0b0, 0b10, "sm3tt2a">;
613 def SM3TT2B : CryptoRRRi2Tied<0b0, 0b11, "sm3tt2b">;
614 def SM3SS1 : CryptoRRRR_4S<0b10, "sm3ss1">;
615 def SM3PARTW1 : CryptoRRRTied_4S<0b1, 0b00, "sm3partw1">;
616 def SM3PARTW2 : CryptoRRRTied_4S<0b1, 0b01, "sm3partw2">;
617 def SM4ENCKEY : CryptoRRR_4S<0b1, 0b10, "sm4ekey">;
def SM4E : CryptoRRTied_4S<0b0, 0b01, "sm4e">;
} // Predicates = [HasSM4]
621 let Predicates = [HasRCPC] in {
622 // v8.3 Release Consistent Processor Consistent support, optional in v8.2.
623 def LDAPRB : RCPCLoad<0b00, "ldaprb", GPR32>;
624 def LDAPRH : RCPCLoad<0b01, "ldaprh", GPR32>;
625 def LDAPRW : RCPCLoad<0b10, "ldapr", GPR32>;
def LDAPRX  : RCPCLoad<0b11, "ldapr", GPR64>;
} // Predicates = [HasRCPC]
// v8.3a complex add and multiply-accumulate. No predicate here; that is done
// inside the multiclass, as the FP16 versions need different predicates.
defm FCMLA : SIMDThreeSameVectorTiedComplexHSD<1, 0b110, complexrotateop,
                                               "fcmla", null_frag>;
defm FCADD : SIMDThreeSameVectorComplexHSD<1, 0b111, complexrotateopodd,
                                           "fcadd", null_frag>;
defm FCMLA : SIMDIndexedTiedComplexHSD<1, 0, 1, complexrotateop, "fcmla",
                                       null_frag>;
638 // v8.3a Pointer Authentication
// These instructions inhabit part of the hint space and so can be used for
// armv8 targets.
641 let Uses = [LR], Defs = [LR] in {
642 def PACIAZ : SystemNoOperands<0b000, "paciaz">;
643 def PACIBZ : SystemNoOperands<0b010, "pacibz">;
644 def AUTIAZ : SystemNoOperands<0b100, "autiaz">;
  def AUTIBZ  : SystemNoOperands<0b110, "autibz">;
}
647 let Uses = [LR, SP], Defs = [LR] in {
648 def PACIASP : SystemNoOperands<0b001, "paciasp">;
649 def PACIBSP : SystemNoOperands<0b011, "pacibsp">;
650 def AUTIASP : SystemNoOperands<0b101, "autiasp">;
  def AUTIBSP : SystemNoOperands<0b111, "autibsp">;
}
653 let Uses = [X16, X17], Defs = [X17], CRm = 0b0001 in {
654 def PACIA1716 : SystemNoOperands<0b000, "pacia1716">;
655 def PACIB1716 : SystemNoOperands<0b010, "pacib1716">;
656 def AUTIA1716 : SystemNoOperands<0b100, "autia1716">;
  def AUTIB1716 : SystemNoOperands<0b110, "autib1716">;
}
660 let Uses = [LR], Defs = [LR], CRm = 0b0000 in {
  def XPACLRI   : SystemNoOperands<0b111, "xpaclri">;
}
// These pointer authentication instructions require armv8.3a
665 let Predicates = [HasPA] in {
666 multiclass SignAuth<bits<3> prefix, bits<3> prefix_z, string asm> {
667 def IA : SignAuthOneData<prefix, 0b00, !strconcat(asm, "ia")>;
668 def IB : SignAuthOneData<prefix, 0b01, !strconcat(asm, "ib")>;
669 def DA : SignAuthOneData<prefix, 0b10, !strconcat(asm, "da")>;
670 def DB : SignAuthOneData<prefix, 0b11, !strconcat(asm, "db")>;
671 def IZA : SignAuthZero<prefix_z, 0b00, !strconcat(asm, "iza")>;
672 def DZA : SignAuthZero<prefix_z, 0b10, !strconcat(asm, "dza")>;
673 def IZB : SignAuthZero<prefix_z, 0b01, !strconcat(asm, "izb")>;
    def DZB  : SignAuthZero<prefix_z,  0b11, !strconcat(asm, "dzb")>;
  }
677 defm PAC : SignAuth<0b000, 0b010, "pac">;
678 defm AUT : SignAuth<0b001, 0b011, "aut">;
680 def XPACI : SignAuthZero<0b100, 0b00, "xpaci">;
681 def XPACD : SignAuthZero<0b100, 0b01, "xpacd">;
682 def PACGA : SignAuthTwoOperand<0b1100, "pacga", null_frag>;
684 // Combined Instructions
685 def BRAA : AuthBranchTwoOperands<0, 0, "braa">;
686 def BRAB : AuthBranchTwoOperands<0, 1, "brab">;
687 def BLRAA : AuthBranchTwoOperands<1, 0, "blraa">;
688 def BLRAB : AuthBranchTwoOperands<1, 1, "blrab">;
690 def BRAAZ : AuthOneOperand<0b000, 0, "braaz">;
691 def BRABZ : AuthOneOperand<0b000, 1, "brabz">;
692 def BLRAAZ : AuthOneOperand<0b001, 0, "blraaz">;
693 def BLRABZ : AuthOneOperand<0b001, 1, "blrabz">;
695 let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
696 def RETAA : AuthReturn<0b010, 0, "retaa">;
697 def RETAB : AuthReturn<0b010, 1, "retab">;
698 def ERETAA : AuthReturn<0b100, 0, "eretaa">;
    def ERETAB : AuthReturn<0b100, 1, "eretab">;
  }
702 defm LDRAA : AuthLoad<0, "ldraa", simm10Scaled>;
  defm LDRAB  : AuthLoad<1, "ldrab", simm10Scaled>;
}
707 // v8.3a floating point conversion for javascript
708 let Predicates = [HasJS, HasFPARMv8] in
def FJCVTZS  : BaseFPToIntegerUnscaled<0b01, 0b11, 0b110, FPR64, GPR32,
                                       "fjcvtzs", []> {
  let Inst{31} = 0;
} // HasJS, HasFPARMv8
714 // v8.4 Flag manipulation instructions
715 let Predicates = [HasFMI] in {
716 def CFINV : SimpleSystemI<0, (ins), "cfinv", "">, Sched<[WriteSys]> {
  let Inst{20-5} = 0b0000001000000000;
}
719 def SETF8 : BaseFlagManipulation<0, 0, (ins GPR32:$Rn), "setf8", "{\t$Rn}">;
720 def SETF16 : BaseFlagManipulation<0, 1, (ins GPR32:$Rn), "setf16", "{\t$Rn}">;
721 def RMIF : FlagRotate<(ins GPR64:$Rn, uimm6:$imm, imm0_15:$mask), "rmif",
722 "{\t$Rn, $imm, $mask}">;
725 // v8.5 flag manipulation instructions
726 let Predicates = [HasAltNZCV], Uses = [NZCV], Defs = [NZCV] in {
728 def XAFLAG : PstateWriteSimple<(ins), "xaflag", "">, Sched<[WriteSys]> {
729 let Inst{18-16} = 0b000;
730 let Inst{11-8} = 0b0000;
731 let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b001;
}
735 def AXFLAG : PstateWriteSimple<(ins), "axflag", "">, Sched<[WriteSys]> {
736 let Inst{18-16} = 0b000;
737 let Inst{11-8} = 0b0000;
738 let Unpredictable{11-8} = 0b1111;
  let Inst{7-5} = 0b010;
}
} // HasAltNZCV
744 // Armv8.5-A speculation barrier
745 def SB : SimpleSystemI<0, (ins), "sb", "">, Sched<[]> {
746 let Inst{20-5} = 0b0001100110000111;
747 let Unpredictable{11-8} = 0b1111;
748 let Predicates = [HasSB];
  let hasSideEffects = 1;
}
752 def : InstAlias<"clrex", (CLREX 0xf)>;
753 def : InstAlias<"isb", (ISB 0xf)>;
754 def : InstAlias<"ssbb", (DSB 0)>;
755 def : InstAlias<"pssbb", (DSB 4)>;
759 def MSRpstateImm1 : MSRpstateImm0_1;
760 def MSRpstateImm4 : MSRpstateImm0_15;
// The thread pointer (on Linux, at least, where this has been implemented) is
// TPIDR_EL0.
764 def MOVbaseTLS : Pseudo<(outs GPR64:$dst), (ins),
765 [(set GPR64:$dst, AArch64threadpointer)]>, Sched<[WriteSys]>;
767 // The cycle counter PMC register is PMCCNTR_EL0.
768 let Predicates = [HasPerfMon] in
769 def : Pat<(readcyclecounter), (MRS 0xdce8)>;
772 def : Pat<(i64 (int_aarch64_get_fpcr)), (MRS 0xda20)>;
774 // Generic system instructions
775 def SYSxt : SystemXtI<0, "sys">;
776 def SYSLxt : SystemLXtI<1, "sysl">;
778 def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
779 (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
780 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;
782 //===----------------------------------------------------------------------===//
783 // Move immediate instructions.
784 //===----------------------------------------------------------------------===//
786 defm MOVK : InsertImmediate<0b11, "movk">;
787 defm MOVN : MoveImmediate<0b00, "movn">;
789 let PostEncoderMethod = "fixMOVZ" in
790 defm MOVZ : MoveImmediate<0b10, "movz">;
792 // First group of aliases covers an implicit "lsl #0".
793 def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, imm0_65535:$imm, 0), 0>;
794 def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, imm0_65535:$imm, 0), 0>;
795 def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, imm0_65535:$imm, 0)>;
796 def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, imm0_65535:$imm, 0)>;
797 def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, imm0_65535:$imm, 0)>;
798 def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, imm0_65535:$imm, 0)>;
800 // Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
801 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
802 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
803 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
804 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;
806 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
807 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
808 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
809 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;
811 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g3:$sym, 48), 0>;
812 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g2:$sym, 32), 0>;
813 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g1:$sym, 16), 0>;
814 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g0:$sym, 0), 0>;
816 def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
817 def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
819 def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
820 def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
822 def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g1:$sym, 16), 0>;
823 def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g0:$sym, 0), 0>;
825 // Final group of aliases covers true "mov $Rd, $imm" cases.
multiclass movw_mov_alias<string basename, Instruction INST, RegisterClass GPR,
                          int width, int shift> {
  def _asmoperand : AsmOperandClass {
    let Name = basename # width # "_lsl" # shift # "MovAlias";
    let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
                               # shift # ">";
    let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
  }

  def _movimm : Operand<i32> {
    let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
  }

  def : InstAlias<"mov $Rd, $imm",
                  (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
}
843 defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
844 defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
846 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
847 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
848 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
849 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
851 defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
852 defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
854 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
855 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
856 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
857 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
859 let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
860 isAsCheapAsAMove = 1 in {
861 // FIXME: The following pseudo instructions are only needed because remat
862 // cannot handle multiple instructions. When that changes, we can select
863 // directly to the real instructions and get rid of these pseudos.
def MOVi32imm
    : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
             [(set GPR32:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
def MOVi64imm
    : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
             [(set GPR64:$dst, imm:$src)]>,
      Sched<[WriteImm]>;
} // isReMaterializable, isCodeGenOnly
875 // If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
876 // eventual expansion code fewer bits to worry about getting right. Marshalling
877 // the types is a little tricky though:
878 def i64imm_32bit : ImmLeaf<i64, [{
  return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
}]>;
882 def s64imm_32bit : ImmLeaf<i64, [{
883 int64_t Imm64 = static_cast<int64_t>(Imm);
884 return Imm64 >= std::numeric_limits<int32_t>::min() &&
         Imm64 <= std::numeric_limits<int32_t>::max();
}]>;
888 def trunc_imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
}]>;
892 def gi_trunc_imm : GICustomOperandRenderer<"renderTruncImm">,
893 GISDNodeXFormEquiv<trunc_imm>;
895 def : Pat<(i64 i64imm_32bit:$src),
896 (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
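// Writing a W register implicitly zeroes the top 32 bits of the X register,
// so a 64-bit constant whose high half is zero only needs the 32-bit move.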
898 // Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
899 def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
900 return CurDAG->getTargetConstant(
    N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
}]>;
904 def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
905 return CurDAG->getTargetConstant(
    N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
}]>;
910 def : Pat<(f32 fpimm:$in),
911 (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
912 def : Pat<(f64 fpimm:$in),
913 (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;
// Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
// sequences.
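// Each symbol is materialized as a MOVZ of the low 16 bits followed by three
// MOVKs that insert the remaining 16-bit chunks, one relocation per
// instruction, as the patterns below show.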
918 def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
919 tglobaladdr:$g1, tglobaladdr:$g0),
920 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g0, 0),
921 tglobaladdr:$g1, 16),
922 tglobaladdr:$g2, 32),
923 tglobaladdr:$g3, 48)>;
925 def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
926 tblockaddress:$g1, tblockaddress:$g0),
927 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g0, 0),
928 tblockaddress:$g1, 16),
929 tblockaddress:$g2, 32),
930 tblockaddress:$g3, 48)>;
932 def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
933 tconstpool:$g1, tconstpool:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g0, 0),
                                  tconstpool:$g1, 16),
                          tconstpool:$g2, 32),
                  tconstpool:$g3, 48)>;
939 def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
940 tjumptable:$g1, tjumptable:$g0),
          (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g0, 0),
                                  tjumptable:$g1, 16),
                          tjumptable:$g2, 32),
                  tjumptable:$g3, 48)>;
947 //===----------------------------------------------------------------------===//
948 // Arithmetic instructions.
949 //===----------------------------------------------------------------------===//
951 // Add/subtract with carry.
952 defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
953 defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;
955 def : InstAlias<"ngc $dst, $src", (SBCWr GPR32:$dst, WZR, GPR32:$src)>;
956 def : InstAlias<"ngc $dst, $src", (SBCXr GPR64:$dst, XZR, GPR64:$src)>;
957 def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
958 def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;
961 defm ADD : AddSub<0, "add", "sub", add>;
962 defm SUB : AddSub<1, "sub", "add">;
964 def : InstAlias<"mov $dst, $src",
965 (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
966 def : InstAlias<"mov $dst, $src",
967 (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
968 def : InstAlias<"mov $dst, $src",
969 (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
970 def : InstAlias<"mov $dst, $src",
971 (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;
973 defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
974 defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;
976 // Use SUBS instead of SUB to enable CSE between SUBS and SUB.
977 def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
978 (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
979 def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
980 (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
981 def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
982 (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
983 def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
984 (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
985 def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
986 (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
987 def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
988 (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
989 let AddedComplexity = 1 in {
990 def : Pat<(sub GPR32sp:$R2, arith_extended_reg32<i32>:$R3),
991 (SUBSWrx GPR32sp:$R2, arith_extended_reg32<i32>:$R3)>;
992 def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3),
          (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3)>;
}
996 // Because of the immediate format for add/sub-imm instructions, the
997 // expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
998 // These patterns capture that transformation.
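// For example, (add GPR32:$x, -5) is selected as (SUBSWri $x, 5); the flags
// result is simply left unused.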
999 let AddedComplexity = 1 in {
1000 def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1001 (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1002 def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1003 (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1004 def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1005 (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1006 def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}
1010 // Because of the immediate format for add/sub-imm instructions, the
1011 // expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
1012 // These patterns capture that transformation.
1013 let AddedComplexity = 1 in {
1014 def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1015 (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1016 def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
1017 (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
1018 def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
1019 (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
1020 def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
          (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
}
1024 def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1025 def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1026 def : InstAlias<"neg $dst, $src$shift",
1027 (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1028 def : InstAlias<"neg $dst, $src$shift",
1029 (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1031 def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
1032 def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
1033 def : InstAlias<"negs $dst, $src$shift",
1034 (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
1035 def : InstAlias<"negs $dst, $src$shift",
1036 (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
1039 // Unsigned/Signed divide
1040 defm UDIV : Div<0, "udiv", udiv>;
1041 defm SDIV : Div<1, "sdiv", sdiv>;
1043 def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr GPR32:$Rn, GPR32:$Rm)>;
1044 def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr GPR64:$Rn, GPR64:$Rm)>;
1045 def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr GPR32:$Rn, GPR32:$Rm)>;
1046 def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr GPR64:$Rn, GPR64:$Rm)>;
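// Variable shifts (shift amount taken from a register).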
1049 defm ASRV : Shift<0b10, "asr", sra>;
1050 defm LSLV : Shift<0b00, "lsl", shl>;
1051 defm LSRV : Shift<0b01, "lsr", srl>;
1052 defm RORV : Shift<0b11, "ror", rotr>;
1054 def : ShiftAlias<"asrv", ASRVWr, GPR32>;
1055 def : ShiftAlias<"asrv", ASRVXr, GPR64>;
1056 def : ShiftAlias<"lslv", LSLVWr, GPR32>;
1057 def : ShiftAlias<"lslv", LSLVXr, GPR64>;
1058 def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
1059 def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
1060 def : ShiftAlias<"rorv", RORVWr, GPR32>;
1061 def : ShiftAlias<"rorv", RORVXr, GPR64>;
1064 let AddedComplexity = 5 in {
1065 defm MADD : MulAccum<0, "madd", add>;
1066 defm MSUB : MulAccum<1, "msub", sub>;
1068 def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
1069 (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1070 def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
1071 (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1073 def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
1074 (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1075 def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
1076 (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1077 def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
1078 (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
1079 def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
1080 (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
1081 } // AddedComplexity = 5
1083 let AddedComplexity = 5 in {
1084 def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
1085 def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
1086 def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
1087 def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
1089 def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
1090 (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1091 def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
1092 (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1094 def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
1095 (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1096 def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
1097 (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
1099 def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
1100 (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1101 def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
1102 (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1103 def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
1104 (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1105 (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1107 def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1108 (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1109 def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1110 (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1111 def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
1112 (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1113 (MOVi32imm (trunc_imm imm:$C)), XZR)>;
1115 def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
1116 (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1117 def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
1118 (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
                    GPR64:$Ra)),
1121 (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1122 (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1124 def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
1125 (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1126 def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
1127 (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1128 def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
1129 (s64imm_32bit:$C)))),
1130 (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
1131 (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
1132 } // AddedComplexity = 5
1134 def : MulAccumWAlias<"mul", MADDWrrr>;
1135 def : MulAccumXAlias<"mul", MADDXrrr>;
1136 def : MulAccumWAlias<"mneg", MSUBWrrr>;
1137 def : MulAccumXAlias<"mneg", MSUBXrrr>;
1138 def : WideMulAccumAlias<"smull", SMADDLrrr>;
1139 def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
1140 def : WideMulAccumAlias<"umull", UMADDLrrr>;
1141 def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
1144 def SMULHrr : MulHi<0b010, "smulh", mulhs>;
1145 def UMULHrr : MulHi<0b110, "umulh", mulhu>;
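// CRC-32 and CRC-32C checksum instructions.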
1148 def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
1149 def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
1150 def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
1151 def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;
1153 def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
1154 def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
1155 def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
1156 def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;
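// v8.1a LSE compare-and-swap (plain, acquire, release, acquire-release).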
1159 defm CAS : CompareAndSwap<0, 0, "">;
1160 defm CASA : CompareAndSwap<1, 0, "a">;
1161 defm CASL : CompareAndSwap<0, 1, "l">;
1162 defm CASAL : CompareAndSwap<1, 1, "al">;
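// v8.1a LSE compare-and-swap on a pair of registers.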
1165 defm CASP : CompareAndSwapPair<0, 0, "">;
1166 defm CASPA : CompareAndSwapPair<1, 0, "a">;
1167 defm CASPL : CompareAndSwapPair<0, 1, "l">;
1168 defm CASPAL : CompareAndSwapPair<1, 1, "al">;
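// v8.1a LSE atomic swap.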
1171 defm SWP : Swap<0, 0, "">;
1172 defm SWPA : Swap<1, 0, "a">;
1173 defm SWPL : Swap<0, 1, "l">;
1174 defm SWPAL : Swap<1, 1, "al">;
1176 // v8.1 atomic LD<OP>(register). Performs load and then ST<OP>(register)
1177 defm LDADD : LDOPregister<0b000, "add", 0, 0, "">;
1178 defm LDADDA : LDOPregister<0b000, "add", 1, 0, "a">;
1179 defm LDADDL : LDOPregister<0b000, "add", 0, 1, "l">;
1180 defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;
1182 defm LDCLR : LDOPregister<0b001, "clr", 0, 0, "">;
1183 defm LDCLRA : LDOPregister<0b001, "clr", 1, 0, "a">;
1184 defm LDCLRL : LDOPregister<0b001, "clr", 0, 1, "l">;
1185 defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;
1187 defm LDEOR : LDOPregister<0b010, "eor", 0, 0, "">;
1188 defm LDEORA : LDOPregister<0b010, "eor", 1, 0, "a">;
1189 defm LDEORL : LDOPregister<0b010, "eor", 0, 1, "l">;
1190 defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;
1192 defm LDSET : LDOPregister<0b011, "set", 0, 0, "">;
1193 defm LDSETA : LDOPregister<0b011, "set", 1, 0, "a">;
1194 defm LDSETL : LDOPregister<0b011, "set", 0, 1, "l">;
1195 defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;
1197 defm LDSMAX : LDOPregister<0b100, "smax", 0, 0, "">;
1198 defm LDSMAXA : LDOPregister<0b100, "smax", 1, 0, "a">;
1199 defm LDSMAXL : LDOPregister<0b100, "smax", 0, 1, "l">;
1200 defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;
1202 defm LDSMIN : LDOPregister<0b101, "smin", 0, 0, "">;
1203 defm LDSMINA : LDOPregister<0b101, "smin", 1, 0, "a">;
1204 defm LDSMINL : LDOPregister<0b101, "smin", 0, 1, "l">;
1205 defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;
1207 defm LDUMAX : LDOPregister<0b110, "umax", 0, 0, "">;
1208 defm LDUMAXA : LDOPregister<0b110, "umax", 1, 0, "a">;
1209 defm LDUMAXL : LDOPregister<0b110, "umax", 0, 1, "l">;
1210 defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;
1212 defm LDUMIN : LDOPregister<0b111, "umin", 0, 0, "">;
1213 defm LDUMINA : LDOPregister<0b111, "umin", 1, 0, "a">;
1214 defm LDUMINL : LDOPregister<0b111, "umin", 0, 1, "l">;
1215 defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;
1217 // v8.1 atomic ST<OP>(register) as aliases to "LD<OP>(register) when Rt=xZR"
1218 defm : STOPregister<"stadd","LDADD">; // STADDx
1219 defm : STOPregister<"stclr","LDCLR">; // STCLRx
1220 defm : STOPregister<"steor","LDEOR">; // STEORx
1221 defm : STOPregister<"stset","LDSET">; // STSETx
1222 defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
1223 defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
1224 defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
1225 defm : STOPregister<"stumin","LDUMIN">;// STUMINx
1227 // v8.5 Memory Tagging Extension
1228 let Predicates = [HasMTE] in {
1230 def IRG : BaseTwoOperand<0b0100, GPR64sp, "irg", null_frag, GPR64sp, GPR64>,
1234 def GMI : BaseTwoOperand<0b0101, GPR64, "gmi", null_frag, GPR64sp>, Sched<[]>{
  let isNotDuplicable = 1;
}
1238 def ADDG : AddSubG<0, "addg", null_frag>;
1239 def SUBG : AddSubG<1, "subg", null_frag>;
1241 def : InstAlias<"irg $dst, $src", (IRG GPR64sp:$dst, GPR64sp:$src, XZR), 1>;
1243 def SUBP : SUBP<0, "subp", null_frag>, Sched<[]>;
def SUBPS : SUBP<1, "subps", null_frag>, Sched<[]>{
  let Defs = [NZCV];
}
1248 def : InstAlias<"cmpp $lhs, $rhs", (SUBPS XZR, GPR64sp:$lhs, GPR64sp:$rhs), 0>;
1250 def LDG : MemTagLoad<"ldg", "\t$Rt, [$Rn, $offset]">;
1251 def : InstAlias<"ldg $Rt, [$Rn]", (LDG GPR64:$Rt, GPR64sp:$Rn, 0), 1>;
1253 def LDGV : MemTagVector<1, "ldgv", "\t$Rt, [$Rn]!",
1254 (outs GPR64sp:$wback, GPR64:$Rt), (ins GPR64sp:$Rn)> {
  let DecoderMethod = "DecodeLoadAllocTagArrayInstruction";
}
1257 def STGV : MemTagVector<0, "stgv", "\t$Rt, [$Rn]!",
1258 (outs GPR64sp:$wback), (ins GPR64:$Rt, GPR64sp:$Rn)>;
1260 defm STG : MemTagStore<0b00, "stg">;
1261 defm STZG : MemTagStore<0b01, "stzg">;
1262 defm ST2G : MemTagStore<0b10, "st2g">;
1263 defm STZ2G : MemTagStore<0b11, "stz2g">;
1265 defm STGP : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;
1266 def STGPpre : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;
1267 def STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;
1269 } // Predicates = [HasMTE]
1271 //===----------------------------------------------------------------------===//
1272 // Logical instructions.
1273 //===----------------------------------------------------------------------===//
1276 defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
1277 defm AND : LogicalImm<0b00, "and", and, "bic">;
1278 defm EOR : LogicalImm<0b10, "eor", xor, "eon">;
1279 defm ORR : LogicalImm<0b01, "orr", or, "orn">;
1281 // FIXME: these aliases *are* canonical sometimes (when movz can't be
1282 // used). Actually, it seems to be working right now, but putting logical_immXX
1283 // here is a bit dodgy on the AsmParser side too.
1284 def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
1285 logical_imm32:$imm), 0>;
1286 def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
1287 logical_imm64:$imm), 0>;
1291 defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
1292 defm BICS : LogicalRegS<0b11, 1, "bics",
1293 BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
1294 defm AND : LogicalReg<0b00, 0, "and", and>;
1295 defm BIC : LogicalReg<0b00, 1, "bic",
1296 BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
1297 defm EON : LogicalReg<0b10, 1, "eon",
1298 BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
1299 defm EOR : LogicalReg<0b10, 0, "eor", xor>;
1300 defm ORN : LogicalReg<0b01, 1, "orn",
1301 BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
1302 defm ORR : LogicalReg<0b01, 0, "orr", or>;
1304 def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
1305 def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;
1307 def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
1308 def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;
1310 def : InstAlias<"mvn $Wd, $Wm$sh",
1311 (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
1312 def : InstAlias<"mvn $Xd, $Xm$sh",
1313 (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;
1315 def : InstAlias<"tst $src1, $src2",
1316 (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
1317 def : InstAlias<"tst $src1, $src2",
1318 (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;
1320 def : InstAlias<"tst $src1, $src2",
1321 (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
1322 def : InstAlias<"tst $src1, $src2",
1323 (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;
1325 def : InstAlias<"tst $src1, $src2$sh",
1326 (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
1327 def : InstAlias<"tst $src1, $src2$sh",
1328 (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;
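// There is no dedicated bitwise NOT instruction; select it as ORN with the
// zero register.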
1331 def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
1332 def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
1335 //===----------------------------------------------------------------------===//
1336 // One operand data processing instructions.
1337 //===----------------------------------------------------------------------===//
1339 defm CLS : OneOperandData<0b101, "cls">;
1340 defm CLZ : OneOperandData<0b100, "clz", ctlz>;
1341 defm RBIT : OneOperandData<0b000, "rbit", bitreverse>;
1343 def REV16Wr : OneWRegData<0b001, "rev16",
1344 UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
1345 def REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
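// There is no CTTZ instruction; count trailing zeros as CLZ(RBIT(x)).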
1347 def : Pat<(cttz GPR32:$Rn),
1348 (CLZWr (RBITWr GPR32:$Rn))>;
1349 def : Pat<(cttz GPR64:$Rn),
1350 (CLZXr (RBITXr GPR64:$Rn))>;
def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
                    (i32 1))),
          (CLSWr GPR32:$Rn)>;
def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
                    (i64 1))),
          (CLSXr GPR64:$Rn)>;
// Unlike the other one operand instructions, the instructions with the "rev"
// mnemonic do *not* just differ in the size bit, but actually use different
// opcode bits for the different sizes.
1361 def REVWr : OneWRegData<0b010, "rev", bswap>;
1362 def REVXr : OneXRegData<0b011, "rev", bswap>;
1363 def REV32Xr : OneXRegData<0b010, "rev32",
1364 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;
1366 def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;
// The bswap commutes with the rotr, so we want a pattern for both possible
// operand orders.
1370 def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
1371 def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
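// Worked example: swapping the bytes within each halfword is the same as a
// full bswap combined with a 16-bit rotate, in either order, e.g.
//   bswap(0xAABBCCDD) = 0xDDCCBBAA,  rotr(0xDDCCBBAA, 16) = 0xBBAADDCC,
// which is exactly rev16 of 0xAABBCCDD.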
1373 //===----------------------------------------------------------------------===//
1374 // Bitfield immediate extraction instruction.
1375 //===----------------------------------------------------------------------===//
1376 let hasSideEffects = 0 in
1377 defm EXTR : ExtractImm<"extr">;
1378 def : InstAlias<"ror $dst, $src, $shift",
1379 (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
1380 def : InstAlias<"ror $dst, $src, $shift",
1381 (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;
1383 def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
1384 (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
1385 def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
1386 (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
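// Note: a rotate right by an immediate is just an EXTR with both source
// registers equal, e.g.
//   ror w0, w1, #8   ==   extr w0, w1, w1, #8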
1388 //===----------------------------------------------------------------------===//
1389 // Other bitfield immediate instructions.
1390 //===----------------------------------------------------------------------===//
1391 let hasSideEffects = 0 in {
1392 defm BFM : BitfieldImmWith2RegArgs<0b01, "bfm">;
1393 defm SBFM : BitfieldImm<0b00, "sbfm">;
defm UBFM : BitfieldImm<0b10, "ubfm">;
}
1397 def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
1398 uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
1402 def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
1403 uint64_t enc = 31 - N->getZExtValue();
return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
1407 // min(7, 31 - shift_amt)
1408 def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
1409 uint64_t enc = 31 - N->getZExtValue();
1410 enc = enc > 7 ? 7 : enc;
return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
1414 // min(15, 31 - shift_amt)
1415 def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
1416 uint64_t enc = 31 - N->getZExtValue();
1417 enc = enc > 15 ? 15 : enc;
return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
1421 def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
1422 uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
1426 def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
1427 uint64_t enc = 63 - N->getZExtValue();
return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
1431 // min(7, 63 - shift_amt)
1432 def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
1433 uint64_t enc = 63 - N->getZExtValue();
1434 enc = enc > 7 ? 7 : enc;
return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
1438 // min(15, 63 - shift_amt)
1439 def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
1440 uint64_t enc = 63 - N->getZExtValue();
1441 enc = enc > 15 ? 15 : enc;
return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
1445 // min(31, 63 - shift_amt)
1446 def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
1447 uint64_t enc = 63 - N->getZExtValue();
1448 enc = enc > 31 ? 31 : enc;
return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
}]>;
1452 def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
1453 (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
1454 (i64 (i32shift_b imm0_31:$imm)))>;
1455 def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
1456 (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
1457 (i64 (i64shift_b imm0_63:$imm)))>;
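// Worked example: for a 32-bit left shift the transforms above compute
// immr = (32 - shift) & 31 and imms = 31 - shift, e.g.
//   shl w0, w1, #4  ->  UBFMWri w0, w1, #28, #27   (i.e. "lsl w0, w1, #4")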
1459 let AddedComplexity = 10 in {
1460 def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
1461 (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
1462 def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
          (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
}
1466 def : InstAlias<"asr $dst, $src, $shift",
1467 (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
1468 def : InstAlias<"asr $dst, $src, $shift",
1469 (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
1470 def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
1471 def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
1472 def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
1473 def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
1474 def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
1476 def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
1477 (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
1478 def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
1479 (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
1481 def : InstAlias<"lsr $dst, $src, $shift",
1482 (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
1483 def : InstAlias<"lsr $dst, $src, $shift",
1484 (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
1485 def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
1486 def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
1487 def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
1488 def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
1489 def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
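// Note: the extend aliases are plain bitfield moves that take bits [7:0],
// [15:0] or [31:0] starting at bit 0, e.g.
//   sxtb w0, w1   ==   sbfm w0, w1, #0, #7
//   uxth w0, w1   ==   ubfm w0, w1, #0, #15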
1491 //===----------------------------------------------------------------------===//
1492 // Conditional comparison instructions.
1493 //===----------------------------------------------------------------------===//
1494 defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
1495 defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;
1497 //===----------------------------------------------------------------------===//
1498 // Conditional select instructions.
1499 //===----------------------------------------------------------------------===//
1500 defm CSEL : CondSelect<0, 0b00, "csel">;
1502 def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
1503 defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
1504 defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
1505 defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;
1507 def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1508 (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1509 def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1510 (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1511 def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1512 (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1513 def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1514 (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1515 def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1516 (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1517 def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1518 (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1520 def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
1521 (CSINCWr WZR, WZR, (i32 imm:$cc))>;
1522 def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
1523 (CSINCXr XZR, XZR, (i32 imm:$cc))>;
1524 def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
1525 (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
1526 def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
1527 (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
1528 def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
1529 (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1530 def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
1531 (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1532 def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
1533 (CSINVWr WZR, WZR, (i32 imm:$cc))>;
1534 def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
1535 (CSINVXr XZR, XZR, (i32 imm:$cc))>;
1536 def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
1537 (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
1538 def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
1539 (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
1540 def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
1541 (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1542 def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
1543 (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1545 // The inverse of the condition code from the alias instruction is what is used
// in the aliased instruction. The parser already inverts the condition code
1547 // for these aliases.
1548 def : InstAlias<"cset $dst, $cc",
1549 (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
1550 def : InstAlias<"cset $dst, $cc",
1551 (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
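// Example: "cset w0, eq" is accepted by the parser and encoded as
// "csinc w0, wzr, wzr, ne", which produces 1 when EQ holds and 0 otherwise.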
1553 def : InstAlias<"csetm $dst, $cc",
1554 (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
1555 def : InstAlias<"csetm $dst, $cc",
1556 (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
1558 def : InstAlias<"cinc $dst, $src, $cc",
1559 (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1560 def : InstAlias<"cinc $dst, $src, $cc",
1561 (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1563 def : InstAlias<"cinv $dst, $src, $cc",
1564 (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1565 def : InstAlias<"cinv $dst, $src, $cc",
1566 (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1568 def : InstAlias<"cneg $dst, $src, $cc",
1569 (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1570 def : InstAlias<"cneg $dst, $src, $cc",
1571 (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1573 //===----------------------------------------------------------------------===//
1574 // PC-relative instructions.
1575 //===----------------------------------------------------------------------===//
1576 let isReMaterializable = 1 in {
1577 let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
1578 def ADR : ADRI<0, "adr", adrlabel,
1579 [(set GPR64:$Xd, (AArch64adr tglobaladdr:$label))]>;
1580 } // hasSideEffects = 0
1582 def ADRP : ADRI<1, "adrp", adrplabel,
1583 [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
1584 } // isReMaterializable = 1
1586 // page address of a constant pool entry, block address
1587 def : Pat<(AArch64adr tconstpool:$cp), (ADR tconstpool:$cp)>;
1588 def : Pat<(AArch64adr tblockaddress:$cp), (ADR tblockaddress:$cp)>;
1589 def : Pat<(AArch64adr texternalsym:$sym), (ADR texternalsym:$sym)>;
1590 def : Pat<(AArch64adr tjumptable:$sym), (ADR tjumptable:$sym)>;
1591 def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
1592 def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
1593 def : Pat<(AArch64adrp texternalsym:$sym), (ADRP texternalsym:$sym)>;
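// Usage sketch: ADRP yields the 4KiB page address of a symbol within +/-4GiB
// of the PC; the low 12 bits are added separately, so a typical
// global-address materialization is
//   adrp x0, var
//   add  x0, x0, :lo12:var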
1595 //===----------------------------------------------------------------------===//
1596 // Unconditional branch (register) instructions.
1597 //===----------------------------------------------------------------------===//
1599 let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
1600 def RET : BranchReg<0b0010, "ret", []>;
1601 def DRPS : SpecialReturn<0b0101, "drps">;
1602 def ERET : SpecialReturn<0b0100, "eret">;
1603 } // isReturn = 1, isTerminator = 1, isBarrier = 1
1605 // Default to the LR register.
1606 def : InstAlias<"ret", (RET LR)>;
1608 let isCall = 1, Defs = [LR], Uses = [SP] in {
def BLR : BranchReg<0b0001, "blr", [(AArch64call GPR64:$Rn)]>;
} // isCall
1612 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
1613 def BR : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
1614 } // isBranch, isTerminator, isBarrier, isIndirectBranch
1616 // Create a separate pseudo-instruction for codegen to use so that we don't
1617 // flag lr as used in every function. It'll be restored before the RET by the
1618 // epilogue if it's legitimately used.
1619 def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
1620 Sched<[WriteBrReg]> {
  let isTerminator = 1;
  let isBarrier = 1;
  let isReturn = 1;
}
1626 // This is a directive-like pseudo-instruction. The purpose is to insert an
1627 // R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
1628 // (which in the usual case is a BLR).
1629 let hasSideEffects = 1 in
1630 def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
  let AsmString = ".tlsdesccall $sym";
}
1634 // Pseudo instruction to tell the streamer to emit a 'B' character into the
1635 // augmentation string.
1636 def EMITBKEY : Pseudo<(outs), (ins), []>, Sched<[]> {}
1638 // FIXME: maybe the scratch register used shouldn't be fixed to X1?
// FIXME: can "hasSideEffects" be dropped?
1640 let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
1641 isCodeGenOnly = 1 in
def TLSDESC_CALLSEQ
    : Pseudo<(outs), (ins i64imm:$sym),
1644 [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
1645 Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
1646 def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
1647 (TLSDESC_CALLSEQ texternalsym:$sym)>;
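// Illustrative expansion: a TLS-descriptor access built around this call
// sequence typically looks like
//   adrp x0, :tlsdesc:var
//   ldr  x1, [x0, :tlsdesc_lo12:var]
//   add  x0, x0, :tlsdesc_lo12:var
//   .tlsdesccall var
//   blr  x1
// which is why the pseudo is modelled as defining LR, X0 and X1.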
1649 //===----------------------------------------------------------------------===//
1650 // Conditional branch (immediate) instruction.
1651 //===----------------------------------------------------------------------===//
1652 def Bcc : BranchCond;
1654 //===----------------------------------------------------------------------===//
1655 // Compare-and-branch instructions.
1656 //===----------------------------------------------------------------------===//
1657 defm CBZ : CmpBranch<0, "cbz", AArch64cbz>;
1658 defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;
1660 //===----------------------------------------------------------------------===//
1661 // Test-bit-and-branch instructions.
1662 //===----------------------------------------------------------------------===//
1663 defm TBZ : TestBranch<0, "tbz", AArch64tbz>;
1664 defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;
1666 //===----------------------------------------------------------------------===//
1667 // Unconditional branch (immediate) instructions.
1668 //===----------------------------------------------------------------------===//
1669 let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
1670 def B : BranchImm<0, "b", [(br bb:$addr)]>;
1671 } // isBranch, isTerminator, isBarrier
1673 let isCall = 1, Defs = [LR], Uses = [SP] in {
def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
} // isCall
1676 def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;
1678 //===----------------------------------------------------------------------===//
1679 // Exception generation instructions.
1680 //===----------------------------------------------------------------------===//
1682 def BRK : ExceptionGeneration<0b001, 0b00, "brk">;
1684 def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
1685 def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
1686 def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">;
1687 def HLT : ExceptionGeneration<0b010, 0b00, "hlt">;
1688 def HVC : ExceptionGeneration<0b000, 0b10, "hvc">;
1689 def SMC : ExceptionGeneration<0b000, 0b11, "smc">;
1690 def SVC : ExceptionGeneration<0b000, 0b01, "svc">;
1692 // DCPSn defaults to an immediate operand of zero if unspecified.
1693 def : InstAlias<"dcps1", (DCPS1 0)>;
1694 def : InstAlias<"dcps2", (DCPS2 0)>;
1695 def : InstAlias<"dcps3", (DCPS3 0)>;
1697 def UDF : UDFType<0, "udf">;
1699 //===----------------------------------------------------------------------===//
1700 // Load instructions.
1701 //===----------------------------------------------------------------------===//
1703 // Pair (indexed, offset)
1704 defm LDPW : LoadPairOffset<0b00, 0, GPR32z, simm7s4, "ldp">;
1705 defm LDPX : LoadPairOffset<0b10, 0, GPR64z, simm7s8, "ldp">;
1706 defm LDPS : LoadPairOffset<0b00, 1, FPR32Op, simm7s4, "ldp">;
1707 defm LDPD : LoadPairOffset<0b01, 1, FPR64Op, simm7s8, "ldp">;
1708 defm LDPQ : LoadPairOffset<0b10, 1, FPR128Op, simm7s16, "ldp">;
1710 defm LDPSW : LoadPairOffset<0b01, 0, GPR64z, simm7s4, "ldpsw">;
1712 // Pair (pre-indexed)
1713 def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
1714 def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
1715 def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
1716 def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
1717 def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;
1719 def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;
1721 // Pair (post-indexed)
1722 def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32z, simm7s4, "ldp">;
1723 def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64z, simm7s8, "ldp">;
1724 def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32Op, simm7s4, "ldp">;
1725 def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64Op, simm7s8, "ldp">;
1726 def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128Op, simm7s16, "ldp">;
1728 def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64z, simm7s4, "ldpsw">;
1731 // Pair (no allocate)
1732 defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32z, simm7s4, "ldnp">;
1733 defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64z, simm7s8, "ldnp">;
1734 defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32Op, simm7s4, "ldnp">;
1735 defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64Op, simm7s8, "ldnp">;
1736 defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128Op, simm7s16, "ldnp">;
1739 // (register offset)
1743 defm LDRBB : Load8RO<0b00, 0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
1744 defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
1745 defm LDRW : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
1746 defm LDRX : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;
1749 defm LDRB : Load8RO<0b00, 1, 0b01, FPR8Op, "ldr", untyped, load>;
1750 defm LDRH : Load16RO<0b01, 1, 0b01, FPR16Op, "ldr", f16, load>;
1751 defm LDRS : Load32RO<0b10, 1, 0b01, FPR32Op, "ldr", f32, load>;
1752 defm LDRD : Load64RO<0b11, 1, 0b01, FPR64Op, "ldr", f64, load>;
1753 defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128Op, "ldr", f128, load>;
1755 // Load sign-extended half-word
1756 defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
1757 defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;
1759 // Load sign-extended byte
1760 defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
1761 defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;
1763 // Load sign-extended word
1764 defm LDRSW : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;
1767 defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;
// Regular loads have no alignment requirement, so it is safe to map vector
// loads with the interesting addressing modes directly onto these
// instructions.
1772 // FIXME: We could do the same for bitconvert to floating point vectors.
1773 multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
1774 ValueType ScalTy, ValueType VecTy,
                              Instruction LOADW, Instruction LOADX,
                              SubRegIndex sub> {
1777 def : Pat<(VecTy (scalar_to_vector (ScalTy
1778 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
1779 (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                 (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
                 sub)>;
1783 def : Pat<(VecTy (scalar_to_vector (ScalTy
1784 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
1785 (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
                 (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
                 sub)>;
}
1790 let AddedComplexity = 10 in {
1791 defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v8i8, LDRBroW, LDRBroX, bsub>;
1792 defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v16i8, LDRBroW, LDRBroX, bsub>;
1794 defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
1795 defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
1797 defm : ScalToVecROLoadPat<ro16, load, i32, v4f16, LDRHroW, LDRHroX, hsub>;
1798 defm : ScalToVecROLoadPat<ro16, load, i32, v8f16, LDRHroW, LDRHroX, hsub>;
1800 defm : ScalToVecROLoadPat<ro32, load, i32, v2i32, LDRSroW, LDRSroX, ssub>;
1801 defm : ScalToVecROLoadPat<ro32, load, i32, v4i32, LDRSroW, LDRSroX, ssub>;
1803 defm : ScalToVecROLoadPat<ro32, load, f32, v2f32, LDRSroW, LDRSroX, ssub>;
1804 defm : ScalToVecROLoadPat<ro32, load, f32, v4f32, LDRSroW, LDRSroX, ssub>;
1806 defm : ScalToVecROLoadPat<ro64, load, i64, v2i64, LDRDroW, LDRDroX, dsub>;
1808 defm : ScalToVecROLoadPat<ro64, load, f64, v2f64, LDRDroW, LDRDroX, dsub>;
1811 def : Pat <(v1i64 (scalar_to_vector (i64
1812 (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
1813 ro_Wextend64:$extend))))),
1814 (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
1816 def : Pat <(v1i64 (scalar_to_vector (i64
1817 (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
1818 ro_Xextend64:$extend))))),
1819 (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
// Match all 64-bit-wide loads whose type is compatible with FPR64
1823 multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
1824 Instruction LOADW, Instruction LOADX> {
1826 def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
1827 (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
1829 def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
1833 let AddedComplexity = 10 in {
1834 let Predicates = [IsLE] in {
1835 // We must do vector loads with LD1 in big-endian.
1836 defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
1837 defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
1838 defm : VecROLoadPat<ro64, v8i8, LDRDroW, LDRDroX>;
1839 defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
1840 defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
1843 defm : VecROLoadPat<ro64, v1i64, LDRDroW, LDRDroX>;
1844 defm : VecROLoadPat<ro64, v1f64, LDRDroW, LDRDroX>;
// Match all 128-bit-wide loads whose type is compatible with FPR128
1847 let Predicates = [IsLE] in {
1848 // We must do vector loads with LD1 in big-endian.
1849 defm : VecROLoadPat<ro128, v2i64, LDRQroW, LDRQroX>;
1850 defm : VecROLoadPat<ro128, v2f64, LDRQroW, LDRQroX>;
1851 defm : VecROLoadPat<ro128, v4i32, LDRQroW, LDRQroX>;
1852 defm : VecROLoadPat<ro128, v4f32, LDRQroW, LDRQroX>;
1853 defm : VecROLoadPat<ro128, v8i16, LDRQroW, LDRQroX>;
1854 defm : VecROLoadPat<ro128, v8f16, LDRQroW, LDRQroX>;
1855 defm : VecROLoadPat<ro128, v16i8, LDRQroW, LDRQroX>;
1857 } // AddedComplexity = 10
1860 multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
1861 Instruction INSTW, Instruction INSTX> {
1862 def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
1863 (SUBREG_TO_REG (i64 0),
                           (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
                           sub_32)>;
1867 def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
1868 (SUBREG_TO_REG (i64 0),
                           (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
                           sub_32)>;
}
1873 let AddedComplexity = 10 in {
1874 defm : ExtLoadTo64ROPat<ro8, zextloadi8, LDRBBroW, LDRBBroX>;
1875 defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
1876 defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW, LDRWroX>;
1878 // zextloadi1 -> zextloadi8
1879 defm : ExtLoadTo64ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
1881 // extload -> zextload
1882 defm : ExtLoadTo64ROPat<ro8, extloadi8, LDRBBroW, LDRBBroX>;
1883 defm : ExtLoadTo64ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
1884 defm : ExtLoadTo64ROPat<ro32, extloadi32, LDRWroW, LDRWroX>;
1886 // extloadi1 -> zextloadi8
1887 defm : ExtLoadTo64ROPat<ro8, extloadi1, LDRBBroW, LDRBBroX>;
1892 multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
1893 Instruction INSTW, Instruction INSTX> {
1894 def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
1895 (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
1897 def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
            (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
1902 let AddedComplexity = 10 in {
1903 // extload -> zextload
1904 defm : ExtLoadTo32ROPat<ro8, extloadi8, LDRBBroW, LDRBBroX>;
1905 defm : ExtLoadTo32ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
1906 defm : ExtLoadTo32ROPat<ro32, extloadi32, LDRWroW, LDRWroX>;
1908 // zextloadi1 -> zextloadi8
1909 defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
1913 // (unsigned immediate)
1915 defm LDRX : LoadUI<0b11, 0, 0b01, GPR64z, uimm12s8, "ldr",
1917 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
1918 defm LDRW : LoadUI<0b10, 0, 0b01, GPR32z, uimm12s4, "ldr",
1920 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
1921 defm LDRB : LoadUI<0b00, 1, 0b01, FPR8Op, uimm12s1, "ldr",
1923 (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
1924 defm LDRH : LoadUI<0b01, 1, 0b01, FPR16Op, uimm12s2, "ldr",
1925 [(set (f16 FPR16Op:$Rt),
1926 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
1927 defm LDRS : LoadUI<0b10, 1, 0b01, FPR32Op, uimm12s4, "ldr",
1928 [(set (f32 FPR32Op:$Rt),
1929 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
1930 defm LDRD : LoadUI<0b11, 1, 0b01, FPR64Op, uimm12s8, "ldr",
1931 [(set (f64 FPR64Op:$Rt),
1932 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
1933 defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128Op, uimm12s16, "ldr",
1934 [(set (f128 FPR128Op:$Rt),
1935 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;
// Regular loads have no alignment requirement, so it is safe to map vector
// loads with the interesting addressing modes directly onto these
// instructions.
1940 // FIXME: We could do the same for bitconvert to floating point vectors.
1941 def : Pat <(v8i8 (scalar_to_vector (i32
1942 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
1943 (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
1944 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
1945 def : Pat <(v16i8 (scalar_to_vector (i32
1946 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
1947 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
1948 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
1949 def : Pat <(v4i16 (scalar_to_vector (i32
1950 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
1951 (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
1952 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
1953 def : Pat <(v8i16 (scalar_to_vector (i32
1954 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
1955 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
1956 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
1957 def : Pat <(v2i32 (scalar_to_vector (i32
1958 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
1959 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
1960 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
1961 def : Pat <(v4i32 (scalar_to_vector (i32
1962 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
1963 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
1964 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
1965 def : Pat <(v1i64 (scalar_to_vector (i64
1966 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
1967 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1968 def : Pat <(v2i64 (scalar_to_vector (i64
1969 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
1970 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
1971 (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;
// Match all 64-bit-wide loads whose type is compatible with FPR64
1974 let Predicates = [IsLE] in {
1975 // We must use LD1 to perform vector loads in big-endian.
1976 def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1977 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1978 def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1979 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1980 def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1981 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1982 def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1983 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1984 def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1985 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1987 def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1988 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1989 def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1990 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
// Match all 128-bit-wide loads whose type is compatible with FPR128
1993 let Predicates = [IsLE] in {
1994 // We must use LD1 to perform vector loads in big-endian.
1995 def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1996 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1997 def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1998 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1999 def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2000 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2001 def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2002 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2003 def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2004 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2005 def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2006 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2007 def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2008 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2010 def : Pat<(f128 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
2011 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
2013 defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
2015 (zextloadi16 (am_indexed16 GPR64sp:$Rn,
2016 uimm12s2:$offset)))]>;
2017 defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
2019 (zextloadi8 (am_indexed8 GPR64sp:$Rn,
2020 uimm12s1:$offset)))]>;
2022 def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2023 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2024 def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2025 (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
2027 // zextloadi1 -> zextloadi8
2028 def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2029 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2030 def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2031 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2033 // extload -> zextload
2034 def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2035 (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
2036 def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2037 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2038 def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2039 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
2040 def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
2041 (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
2042 def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
2043 (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
2044 def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2045 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
2046 def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
2047 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
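// Note: an extload may extend with any value, so it is always correct to
// select the zero-extending form; e.g. an i32 extloadi8 simply becomes
// LDRBBui, and the upper 24 bits happen to be zero.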
2049 // load sign-extended half-word
2050 defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
2052 (sextloadi16 (am_indexed16 GPR64sp:$Rn,
2053 uimm12s2:$offset)))]>;
2054 defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
2056 (sextloadi16 (am_indexed16 GPR64sp:$Rn,
2057 uimm12s2:$offset)))]>;
2059 // load sign-extended byte
2060 defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
2062 (sextloadi8 (am_indexed8 GPR64sp:$Rn,
2063 uimm12s1:$offset)))]>;
2064 defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
2066 (sextloadi8 (am_indexed8 GPR64sp:$Rn,
2067 uimm12s1:$offset)))]>;
2069 // load sign-extended word
2070 defm LDRSW : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
2072 (sextloadi32 (am_indexed32 GPR64sp:$Rn,
2073 uimm12s4:$offset)))]>;
2075 // load zero-extended word
2076 def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
2077 (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
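// Note: writing a W register implicitly zeroes bits [63:32] of the
// corresponding X register, so a 64-bit zextloadi32 is just a 32-bit LDR
// wrapped in SUBREG_TO_REG, e.g. "ldr w8, [x0]" leaves x8 = zext(i32 value).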
2080 def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
2081 [(AArch64Prefetch imm:$Rt,
2082 (am_indexed64 GPR64sp:$Rn,
2083 uimm12s8:$offset))]>;
2085 def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;
// (literal)

def alignedglobal : PatLeaf<(iPTR iPTR:$label), [{
2091 if (auto *G = dyn_cast<GlobalAddressSDNode>(N)) {
2092 const DataLayout &DL = MF->getDataLayout();
2093 unsigned Align = G->getGlobal()->getPointerAlignment(DL);
    return Align >= 4 && G->getOffset() % 4 == 0;
  }
  if (auto *C = dyn_cast<ConstantPoolSDNode>(N))
    return C->getAlignment() >= 4 && C->getOffset() % 4 == 0;
  return false;
}]>;
2101 def LDRWl : LoadLiteral<0b00, 0, GPR32z, "ldr",
2102 [(set GPR32z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
2103 def LDRXl : LoadLiteral<0b01, 0, GPR64z, "ldr",
2104 [(set GPR64z:$Rt, (load (AArch64adr alignedglobal:$label)))]>;
2105 def LDRSl : LoadLiteral<0b00, 1, FPR32Op, "ldr",
2106 [(set (f32 FPR32Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2107 def LDRDl : LoadLiteral<0b01, 1, FPR64Op, "ldr",
2108 [(set (f64 FPR64Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2109 def LDRQl : LoadLiteral<0b10, 1, FPR128Op, "ldr",
2110 [(set (f128 FPR128Op:$Rt), (load (AArch64adr alignedglobal:$label)))]>;
2112 // load sign-extended word
2113 def LDRSWl : LoadLiteral<0b10, 0, GPR64z, "ldrsw",
2114 [(set GPR64z:$Rt, (sextloadi32 (AArch64adr alignedglobal:$label)))]>;
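// Note: a literal (PC-relative) LDR encodes a 19-bit word-scaled offset,
// i.e. a +/-1MiB range, and the target must be 4-byte aligned; alignedglobal
// above checks the alignment before these patterns are used.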
2116 let AddedComplexity = 20 in {
2117 def : Pat<(i64 (zextloadi32 (AArch64adr alignedglobal:$label))),
2118 (SUBREG_TO_REG (i64 0), (LDRWl $label), sub_32)>;
2122 def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
2123 // [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;
2126 // (unscaled immediate)
2127 defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64z, "ldur",
2129 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
2130 defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32z, "ldur",
2132 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
2133 defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8Op, "ldur",
2135 (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
2136 defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16Op, "ldur",
2138 (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
2139 defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32Op, "ldur",
2140 [(set (f32 FPR32Op:$Rt),
2141 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
2142 defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64Op, "ldur",
2143 [(set (f64 FPR64Op:$Rt),
2144 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
2145 defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128Op, "ldur",
2146 [(set (f128 FPR128Op:$Rt),
2147 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURHH
    : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
                   [(set GPR32:$Rt,
                         (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURBB
    : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
                   [(set GPR32:$Rt,
                         (zextloadi8 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
// Match all 64-bit-wide loads whose type is compatible with FPR64
2159 let Predicates = [IsLE] in {
2160 def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2161 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2162 def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2163 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2164 def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2165 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2166 def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2167 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2168 def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2169 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2171 def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2172 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
2173 def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
2174 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
// Match all 128-bit-wide loads whose type is compatible with FPR128
2177 let Predicates = [IsLE] in {
2178 def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2179 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2180 def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2181 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2182 def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2183 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2184 def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2185 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2186 def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2187 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2188 def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2189 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2190 def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
2191 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
2195 def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2196 (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
2197 def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2198 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2199 def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2200 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2201 def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
2202 (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2203 def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2204 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2205 def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2206 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2207 def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2208 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2210 def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2211 (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
2212 def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2213 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2214 def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2215 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
2216 def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
2217 (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2218 def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2219 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2220 def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2221 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2222 def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2223 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2227 // LDR mnemonics fall back to LDUR for negative or unaligned offsets.
// Define new assembler match classes as we only want to match these when
// they don't otherwise match the scaled addressing mode for LDR/STR. Don't
2231 // associate a DiagnosticType either, as we want the diagnostic for the
2232 // canonical form (the scaled operand) to take precedence.
2233 class SImm9OffsetOperand<int Width> : AsmOperandClass {
2234 let Name = "SImm9OffsetFB" # Width;
2235 let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
2236 let RenderMethod = "addImmOperands";
2239 def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
2240 def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
2241 def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
2242 def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
2243 def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;
2245 def simm9_offset_fb8 : Operand<i64> {
2246 let ParserMatchClass = SImm9OffsetFB8Operand;
2248 def simm9_offset_fb16 : Operand<i64> {
2249 let ParserMatchClass = SImm9OffsetFB16Operand;
2251 def simm9_offset_fb32 : Operand<i64> {
2252 let ParserMatchClass = SImm9OffsetFB32Operand;
2254 def simm9_offset_fb64 : Operand<i64> {
2255 let ParserMatchClass = SImm9OffsetFB64Operand;
2257 def simm9_offset_fb128 : Operand<i64> {
2258 let ParserMatchClass = SImm9OffsetFB128Operand;
2261 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2262 (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2263 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2264 (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2265 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2266 (LDURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2267 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2268 (LDURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2269 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2270 (LDURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2271 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2272 (LDURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2273 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
2274 (LDURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
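// Example: an offset that is in range for LDUR but not a multiple of the
// access size is matched by these aliases, e.g.
//   ldr x0, [x1, #1]   assembles as   ldur x0, [x1, #1]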
2277 def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
2278 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2279 def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
2280 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
2282 // load sign-extended half-word
defm LDURSHW
    : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
                   [(set GPR32:$Rt,
                         (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSHX
    : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
                   [(set GPR64:$Rt,
                         (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended byte
defm LDURSBW
    : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
                   [(set GPR32:$Rt,
                         (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
defm LDURSBX
    : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
                   [(set GPR64:$Rt,
                         (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;

// load sign-extended word
defm LDURSW
    : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
                   [(set GPR64:$Rt,
                         (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
// Zero- and sign-extending aliases from the generic LDR* mnemonics to LDUR*.
2309 def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
2310 (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2311 def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
2312 (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2313 def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
2314 (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2315 def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
2316 (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2317 def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
2318 (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2319 def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
2320 (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2321 def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
2322 (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2325 defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
2326 [(AArch64Prefetch imm:$Rt,
2327 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
2330 // (unscaled immediate, unprivileged)
2331 defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
2332 defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;
2334 defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
2335 defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;
2337 // load sign-extended half-word
2338 defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
2339 defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;
2341 // load sign-extended byte
2342 defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
2343 defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;
2345 // load sign-extended word
2346 defm LDTRSW : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;
2349 // (immediate pre-indexed)
2350 def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32z, "ldr">;
2351 def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64z, "ldr">;
2352 def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8Op, "ldr">;
2353 def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
2354 def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
2355 def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
2356 def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128Op, "ldr">;
2358 // load sign-extended half-word
2359 def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
2360 def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;
2362 // load sign-extended byte
2363 def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
2364 def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;
2366 // load zero-extended byte
2367 def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
2368 def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32z, "ldrh">;
2370 // load sign-extended word
2371 def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
2374 // (immediate post-indexed)
2375 def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32z, "ldr">;
2376 def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64z, "ldr">;
2377 def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8Op, "ldr">;
2378 def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16Op, "ldr">;
2379 def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32Op, "ldr">;
2380 def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64Op, "ldr">;
2381 def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128Op, "ldr">;
2383 // load sign-extended half-word
2384 def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32z, "ldrsh">;
2385 def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64z, "ldrsh">;
2387 // load sign-extended byte
2388 def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32z, "ldrsb">;
2389 def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64z, "ldrsb">;
2391 // load zero-extended byte
2392 def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32z, "ldrb">;
2393 def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32z, "ldrh">;
2395 // load sign-extended word
2396 def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64z, "ldrsw">;
2398 //===----------------------------------------------------------------------===//
2399 // Store instructions.
2400 //===----------------------------------------------------------------------===//
2402 // Pair (indexed, offset)
2403 // FIXME: Use dedicated range-checked addressing mode operand here.
2404 defm STPW : StorePairOffset<0b00, 0, GPR32z, simm7s4, "stp">;
2405 defm STPX : StorePairOffset<0b10, 0, GPR64z, simm7s8, "stp">;
2406 defm STPS : StorePairOffset<0b00, 1, FPR32Op, simm7s4, "stp">;
2407 defm STPD : StorePairOffset<0b01, 1, FPR64Op, simm7s8, "stp">;
2408 defm STPQ : StorePairOffset<0b10, 1, FPR128Op, simm7s16, "stp">;
2410 // Pair (pre-indexed)
2411 def STPWpre : StorePairPreIdx<0b00, 0, GPR32z, simm7s4, "stp">;
2412 def STPXpre : StorePairPreIdx<0b10, 0, GPR64z, simm7s8, "stp">;
2413 def STPSpre : StorePairPreIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
2414 def STPDpre : StorePairPreIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
2415 def STPQpre : StorePairPreIdx<0b10, 1, FPR128Op, simm7s16, "stp">;
// Pair (post-indexed)
2418 def STPWpost : StorePairPostIdx<0b00, 0, GPR32z, simm7s4, "stp">;
2419 def STPXpost : StorePairPostIdx<0b10, 0, GPR64z, simm7s8, "stp">;
2420 def STPSpost : StorePairPostIdx<0b00, 1, FPR32Op, simm7s4, "stp">;
2421 def STPDpost : StorePairPostIdx<0b01, 1, FPR64Op, simm7s8, "stp">;
2422 def STPQpost : StorePairPostIdx<0b10, 1, FPR128Op, simm7s16, "stp">;
2424 // Pair (no allocate)
2425 defm STNPW : StorePairNoAlloc<0b00, 0, GPR32z, simm7s4, "stnp">;
2426 defm STNPX : StorePairNoAlloc<0b10, 0, GPR64z, simm7s8, "stnp">;
2427 defm STNPS : StorePairNoAlloc<0b00, 1, FPR32Op, simm7s4, "stnp">;
2428 defm STNPD : StorePairNoAlloc<0b01, 1, FPR64Op, simm7s8, "stnp">;
2429 defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128Op, simm7s16, "stnp">;
2432 // (Register offset)
2435 defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
2436 defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
2437 defm STRW : Store32RO<0b10, 0, 0b00, GPR32, "str", i32, store>;
2438 defm STRX : Store64RO<0b11, 0, 0b00, GPR64, "str", i64, store>;
2442 defm STRB : Store8RO< 0b00, 1, 0b00, FPR8Op, "str", untyped, store>;
2443 defm STRH : Store16RO<0b01, 1, 0b00, FPR16Op, "str", f16, store>;
2444 defm STRS : Store32RO<0b10, 1, 0b00, FPR32Op, "str", f32, store>;
2445 defm STRD : Store64RO<0b11, 1, 0b00, FPR64Op, "str", f64, store>;
2446 defm STRQ : Store128RO<0b00, 1, 0b10, FPR128Op, "str", f128, store>;
2448 let Predicates = [UseSTRQro], AddedComplexity = 10 in {
2449 def : Pat<(store (f128 FPR128:$Rt),
2450 (ro_Windexed128 GPR64sp:$Rn, GPR32:$Rm,
2451 ro_Wextend128:$extend)),
2452 (STRQroW FPR128:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro_Wextend128:$extend)>;
2453 def : Pat<(store (f128 FPR128:$Rt),
2454 (ro_Xindexed128 GPR64sp:$Rn, GPR64:$Rm,
2455 ro_Xextend128:$extend)),
          (STRQroX FPR128:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro_Xextend128:$extend)>;
2459 multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
2460 Instruction STRW, Instruction STRX> {
2462 def : Pat<(storeop GPR64:$Rt,
2463 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2464 (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
2465 GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2467 def : Pat<(storeop GPR64:$Rt,
2468 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
2469 (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
              GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
2473 let AddedComplexity = 10 in {
2475 defm : TruncStoreFrom64ROPat<ro8, truncstorei8, STRBBroW, STRBBroX>;
2476 defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
2477 defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW, STRWroX>;
2480 multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
2481 Instruction STRW, Instruction STRX> {
2482 def : Pat<(store (VecTy FPR:$Rt),
2483 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2484 (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2486 def : Pat<(store (VecTy FPR:$Rt),
2487 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
            (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
2491 let AddedComplexity = 10 in {
// Match all 64-bit-wide stores whose type is compatible with FPR64
2493 let Predicates = [IsLE] in {
2494 // We must use ST1 to store vectors in big-endian.
2495 defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
2496 defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
2497 defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
2498 defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
2499 defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
2502 defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
2503 defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;
// Match all 128-bit-wide stores whose type is compatible with FPR128
2506 let Predicates = [IsLE, UseSTRQro] in {
2507 // We must use ST1 to store vectors in big-endian.
2508 defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
2509 defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
2510 defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
2511 defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
2512 defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
2513 defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
2514 defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
2516 } // AddedComplexity = 10
2518 // Match stores from lane 0 to the appropriate subreg's store.
2519 multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
2520 ValueType VecTy, ValueType STy,
2521 SubRegIndex SubRegIdx,
2522 Instruction STRW, Instruction STRX> {
2524 def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
2525 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2526 (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
2527 GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2529 def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
2530 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
2531 (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
              GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
}
2535 let AddedComplexity = 19 in {
2536 defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
2537 defm : VecROStoreLane0Pat<ro16, store, v8f16, f16, hsub, STRHroW, STRHroX>;
2538 defm : VecROStoreLane0Pat<ro32, store, v4i32, i32, ssub, STRSroW, STRSroX>;
2539 defm : VecROStoreLane0Pat<ro32, store, v4f32, f32, ssub, STRSroW, STRSroX>;
2540 defm : VecROStoreLane0Pat<ro64, store, v2i64, i64, dsub, STRDroW, STRDroX>;
2541 defm : VecROStoreLane0Pat<ro64, store, v2f64, f64, dsub, STRDroW, STRDroX>;
2545 // (unsigned immediate)
2546 defm STRX : StoreUIz<0b11, 0, 0b00, GPR64z, uimm12s8, "str",
2548 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
2549 defm STRW : StoreUIz<0b10, 0, 0b00, GPR32z, uimm12s4, "str",
2551 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
2552 defm STRB : StoreUI<0b00, 1, 0b00, FPR8Op, uimm12s1, "str",
2554 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
2555 defm STRH : StoreUI<0b01, 1, 0b00, FPR16Op, uimm12s2, "str",
2556 [(store (f16 FPR16Op:$Rt),
2557 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
2558 defm STRS : StoreUI<0b10, 1, 0b00, FPR32Op, uimm12s4, "str",
2559 [(store (f32 FPR32Op:$Rt),
2560 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
2561 defm STRD : StoreUI<0b11, 1, 0b00, FPR64Op, uimm12s8, "str",
2562 [(store (f64 FPR64Op:$Rt),
2563 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
2564 defm STRQ : StoreUI<0b00, 1, 0b10, FPR128Op, uimm12s16, "str", []>;
2566 defm STRHH : StoreUIz<0b01, 0, 0b00, GPR32z, uimm12s2, "strh",
2567 [(truncstorei16 GPR32z:$Rt,
2568 (am_indexed16 GPR64sp:$Rn,
2569 uimm12s2:$offset))]>;
2570 defm STRBB : StoreUIz<0b00, 0, 0b00, GPR32z, uimm12s1, "strb",
2571 [(truncstorei8 GPR32z:$Rt,
2572 (am_indexed8 GPR64sp:$Rn,
2573 uimm12s1:$offset))]>;
2575 let AddedComplexity = 10 in {
// Match all 64-bit-wide stores whose type is compatible with FPR64
2578 def : Pat<(store (v1i64 FPR64:$Rt),
2579 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2580 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2581 def : Pat<(store (v1f64 FPR64:$Rt),
2582 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2583 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2585 let Predicates = [IsLE] in {
2586 // We must use ST1 to store vectors in big-endian.
2587 def : Pat<(store (v2f32 FPR64:$Rt),
2588 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2589 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2590 def : Pat<(store (v8i8 FPR64:$Rt),
2591 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2592 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2593 def : Pat<(store (v4i16 FPR64:$Rt),
2594 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2595 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2596 def : Pat<(store (v2i32 FPR64:$Rt),
2597 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2598 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2599 def : Pat<(store (v4f16 FPR64:$Rt),
2600 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2601 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
// Match all 128-bit-wide stores whose type is compatible with FPR128
2605 def : Pat<(store (f128 FPR128:$Rt),
2606 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2607 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2609 let Predicates = [IsLE] in {
2610 // We must use ST1 to store vectors in big-endian.
2611 def : Pat<(store (v4f32 FPR128:$Rt),
2612 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2613 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2614 def : Pat<(store (v2f64 FPR128:$Rt),
2615 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2616 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2617 def : Pat<(store (v16i8 FPR128:$Rt),
2618 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2619 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2620 def : Pat<(store (v8i16 FPR128:$Rt),
2621 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2622 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2623 def : Pat<(store (v4i32 FPR128:$Rt),
2624 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2625 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2626 def : Pat<(store (v2i64 FPR128:$Rt),
2627 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2628 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2629 def : Pat<(store (v8f16 FPR128:$Rt),
2630 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2631 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2635 def : Pat<(truncstorei32 GPR64:$Rt,
2636 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
2637 (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
2638 def : Pat<(truncstorei16 GPR64:$Rt,
2639 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
2640 (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
2641 def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
2642 (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;
2644 } // AddedComplexity = 10
2646 // Match stores from lane 0 to the appropriate subreg's store.
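// For illustration (a sketch, not from the original): lane 0 of a 128-bit
// vector lives in the low bits of the register, so storing
//   (vector_extract (v4i32 VecListOne128:$Vt), 0)
// can use "str s0, [x0]" on the ssub subregister instead of
// "st1 { v0.s }[0], [x0]".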
2647 multiclass VecStoreLane0Pat<Operand UIAddrMode, SDPatternOperator storeop,
2648 ValueType VTy, ValueType STy,
2649 SubRegIndex SubRegIdx, Operand IndexType,
2650 Instruction STR> {
2651 def : Pat<(storeop (STy (vector_extract (VTy VecListOne128:$Vt), 0)),
2652 (UIAddrMode GPR64sp:$Rn, IndexType:$offset)),
2653 (STR (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
2654 GPR64sp:$Rn, IndexType:$offset)>;
2655 }
2657 let AddedComplexity = 19 in {
2658 defm : VecStoreLane0Pat<am_indexed16, truncstorei16, v8i16, i32, hsub, uimm12s2, STRHui>;
2659 defm : VecStoreLane0Pat<am_indexed16, store, v8f16, f16, hsub, uimm12s2, STRHui>;
2660 defm : VecStoreLane0Pat<am_indexed32, store, v4i32, i32, ssub, uimm12s4, STRSui>;
2661 defm : VecStoreLane0Pat<am_indexed32, store, v4f32, f32, ssub, uimm12s4, STRSui>;
2662 defm : VecStoreLane0Pat<am_indexed64, store, v2i64, i64, dsub, uimm12s8, STRDui>;
2663 defm : VecStoreLane0Pat<am_indexed64, store, v2f64, f64, dsub, uimm12s8, STRDui>;
2664 }
2667 // (unscaled immediate)
2668 defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64z, "stur",
2669 [(store GPR64z:$Rt,
2670 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
2671 defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32z, "stur",
2672 [(store GPR32z:$Rt,
2673 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
2674 defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8Op, "stur",
2675 [(store (untyped FPR8Op:$Rt),
2676 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
2677 defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16Op, "stur",
2678 [(store (f16 FPR16Op:$Rt),
2679 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
2680 defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32Op, "stur",
2681 [(store (f32 FPR32Op:$Rt),
2682 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
2683 defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64Op, "stur",
2684 [(store (f64 FPR64Op:$Rt),
2685 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
2686 defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128Op, "stur",
2687 [(store (f128 FPR128Op:$Rt),
2688 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
2689 defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32z, "sturh",
2690 [(truncstorei16 GPR32z:$Rt,
2691 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
2692 defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32z, "sturb",
2693 [(truncstorei8 GPR32z:$Rt,
2694 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
2696 // Armv8.4 Weaker Release Consistency enhancements
2697 // LDAPR & STLR with Immediate Offset instructions
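// For illustration (register choices are arbitrary): these forms add an
// unscaled signed 9-bit immediate to the acquire/release accesses, e.g.
//   ldapur w0, [x1, #4]     // load-acquire with offset
//   stlur  w0, [x1, #-8]    // store-release with offset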
2698 let Predicates = [HasRCPC_IMMO] in {
2699 defm STLURB : BaseStoreUnscaleV84<"stlurb", 0b00, 0b00, GPR32>;
2700 defm STLURH : BaseStoreUnscaleV84<"stlurh", 0b01, 0b00, GPR32>;
2701 defm STLURW : BaseStoreUnscaleV84<"stlur", 0b10, 0b00, GPR32>;
2702 defm STLURX : BaseStoreUnscaleV84<"stlur", 0b11, 0b00, GPR64>;
2703 defm LDAPURB : BaseLoadUnscaleV84<"ldapurb", 0b00, 0b01, GPR32>;
2704 defm LDAPURSBW : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b11, GPR32>;
2705 defm LDAPURSBX : BaseLoadUnscaleV84<"ldapursb", 0b00, 0b10, GPR64>;
2706 defm LDAPURH : BaseLoadUnscaleV84<"ldapurh", 0b01, 0b01, GPR32>;
2707 defm LDAPURSHW : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b11, GPR32>;
2708 defm LDAPURSHX : BaseLoadUnscaleV84<"ldapursh", 0b01, 0b10, GPR64>;
2709 defm LDAPUR : BaseLoadUnscaleV84<"ldapur", 0b10, 0b01, GPR32>;
2710 defm LDAPURSW : BaseLoadUnscaleV84<"ldapursw", 0b10, 0b10, GPR64>;
2711 defm LDAPURX : BaseLoadUnscaleV84<"ldapur", 0b11, 0b01, GPR64>;
2712 }
2714 // Match all 64-bit-wide stores whose type is compatible with FPR64.
2715 def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2716 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2717 def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2718 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2720 let AddedComplexity = 10 in {
2722 let Predicates = [IsLE] in {
2723 // We must use ST1 to store vectors in big-endian.
2724 def : Pat<(store (v2f32 FPR64:$Rt),
2725 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2726 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2727 def : Pat<(store (v8i8 FPR64:$Rt),
2728 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2729 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2730 def : Pat<(store (v4i16 FPR64:$Rt),
2731 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2732 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2733 def : Pat<(store (v2i32 FPR64:$Rt),
2734 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2735 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2736 def : Pat<(store (v4f16 FPR64:$Rt),
2737 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2738 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2739 }
2741 // Match all 128-bit-wide stores whose type is compatible with FPR128.
2742 def : Pat<(store (f128 FPR128:$Rt), (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2743 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2745 let Predicates = [IsLE] in {
2746 // We must use ST1 to store vectors in big-endian.
2747 def : Pat<(store (v4f32 FPR128:$Rt),
2748 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2749 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2750 def : Pat<(store (v2f64 FPR128:$Rt),
2751 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2752 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2753 def : Pat<(store (v16i8 FPR128:$Rt),
2754 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2755 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2756 def : Pat<(store (v8i16 FPR128:$Rt),
2757 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2758 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2759 def : Pat<(store (v4i32 FPR128:$Rt),
2760 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2761 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2762 def : Pat<(store (v2i64 FPR128:$Rt),
2763 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2764 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2768 def : Pat<(store (v8f16 FPR128:$Rt),
2769 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2770 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2771 }
2773 } // AddedComplexity = 10
2775 // unscaled i64 truncating stores
2776 def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
2777 (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
2778 def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
2779 (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
2780 def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
2781 (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
2783 // Match stores from lane 0 to the appropriate subreg's store.
2784 multiclass VecStoreULane0Pat<SDPatternOperator StoreOp,
2785 ValueType VTy, ValueType STy,
2786 SubRegIndex SubRegIdx, Instruction STR> {
2787 defm : VecStoreLane0Pat<am_unscaled128, StoreOp, VTy, STy, SubRegIdx, simm9, STR>;
2788 }
2790 let AddedComplexity = 19 in {
2791 defm : VecStoreULane0Pat<truncstorei16, v8i16, i32, hsub, STURHi>;
2792 defm : VecStoreULane0Pat<store, v8f16, f16, hsub, STURHi>;
2793 defm : VecStoreULane0Pat<store, v4i32, i32, ssub, STURSi>;
2794 defm : VecStoreULane0Pat<store, v4f32, f32, ssub, STURSi>;
2795 defm : VecStoreULane0Pat<store, v2i64, i64, dsub, STURDi>;
2796 defm : VecStoreULane0Pat<store, v2f64, f64, dsub, STURDi>;
2797 }
2800 // STR mnemonics fall back to STUR for negative or unaligned offsets.
2801 def : InstAlias<"str $Rt, [$Rn, $offset]",
2802 (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2803 def : InstAlias<"str $Rt, [$Rn, $offset]",
2804 (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2805 def : InstAlias<"str $Rt, [$Rn, $offset]",
2806 (STURBi FPR8Op:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2807 def : InstAlias<"str $Rt, [$Rn, $offset]",
2808 (STURHi FPR16Op:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2809 def : InstAlias<"str $Rt, [$Rn, $offset]",
2810 (STURSi FPR32Op:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2811 def : InstAlias<"str $Rt, [$Rn, $offset]",
2812 (STURDi FPR64Op:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2813 def : InstAlias<"str $Rt, [$Rn, $offset]",
2814 (STURQi FPR128Op:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
2816 def : InstAlias<"strb $Rt, [$Rn, $offset]",
2817 (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2818 def : InstAlias<"strh $Rt, [$Rn, $offset]",
2819 (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
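// For example (illustrative): "str x0, [x1, #-8]" cannot be encoded with the
// scaled uimm12 STR form because the offset is negative, so these aliases let
// the assembler fall back to the unscaled encoding, i.e. "stur x0, [x1, #-8]",
// without the programmer having to write STUR explicitly.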
2822 // (unscaled immediate, unprivileged)
2823 defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
2824 defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;
2826 defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
2827 defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;
2830 // (immediate pre-indexed)
2831 def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32z, "str", pre_store, i32>;
2832 def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64z, "str", pre_store, i64>;
2833 def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8Op, "str", pre_store, untyped>;
2834 def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16Op, "str", pre_store, f16>;
2835 def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32Op, "str", pre_store, f32>;
2836 def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64Op, "str", pre_store, f64>;
2837 def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128Op, "str", pre_store, f128>;
2839 def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32z, "strb", pre_truncsti8, i32>;
2840 def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32z, "strh", pre_truncsti16, i32>;
2843 def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2844 (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2845 simm9:$off)>;
2846 def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2847 (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2848 simm9:$off)>;
2849 def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2850 (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2851 simm9:$off)>;
2853 def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2854 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2855 def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2856 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2857 def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2858 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2859 def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2860 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2861 def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2862 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2863 def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2864 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2865 def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2866 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2868 def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2869 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2870 def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2871 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2872 def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2873 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2874 def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2875 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2876 def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2877 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2878 def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2879 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2880 def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2881 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2884 // (immediate post-indexed)
2885 def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32z, "str", post_store, i32>;
2886 def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64z, "str", post_store, i64>;
2887 def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8Op, "str", post_store, untyped>;
2888 def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16Op, "str", post_store, f16>;
2889 def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32Op, "str", post_store, f32>;
2890 def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64Op, "str", post_store, f64>;
2891 def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128Op, "str", post_store, f128>;
2893 def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32z, "strb", post_truncsti8, i32>;
2894 def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32z, "strh", post_truncsti16, i32>;
2897 def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2898 (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2899 simm9:$off)>;
2900 def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2901 (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2902 simm9:$off)>;
2903 def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2904 (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2905 simm9:$off)>;
2907 def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2908 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2909 def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2910 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2911 def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2912 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2913 def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2914 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2915 def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2916 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2917 def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2918 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2919 def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2920 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2922 def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2923 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2924 def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2925 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2926 def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2927 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2928 def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2929 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2930 def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2931 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2932 def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2933 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2934 def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2935 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2937 //===----------------------------------------------------------------------===//
2938 // Load/store exclusive instructions.
2939 //===----------------------------------------------------------------------===//
2941 def LDARW : LoadAcquire <0b10, 1, 1, 0, 1, GPR32, "ldar">;
2942 def LDARX : LoadAcquire <0b11, 1, 1, 0, 1, GPR64, "ldar">;
2943 def LDARB : LoadAcquire <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
2944 def LDARH : LoadAcquire <0b01, 1, 1, 0, 1, GPR32, "ldarh">;
2946 def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
2947 def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
2948 def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
2949 def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;
2951 def LDXRW : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
2952 def LDXRX : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
2953 def LDXRB : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
2954 def LDXRH : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;
2956 def STLRW : StoreRelease <0b10, 1, 0, 0, 1, GPR32, "stlr">;
2957 def STLRX : StoreRelease <0b11, 1, 0, 0, 1, GPR64, "stlr">;
2958 def STLRB : StoreRelease <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
2959 def STLRH : StoreRelease <0b01, 1, 0, 0, 1, GPR32, "stlrh">;
2961 def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
2962 def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
2963 def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
2964 def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;
2966 def STXRW : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
2967 def STXRX : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
2968 def STXRB : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
2969 def STXRH : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;
2971 def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
2972 def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;
2974 def LDXPW : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
2975 def LDXPX : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;
2977 def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
2978 def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;
2980 def STXPW : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
2981 def STXPX : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
2983 let Predicates = [HasLOR] in {
2984 // v8.1a "Limited Order Region" extension load-acquire instructions
2985 def LDLARW : LoadAcquire <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
2986 def LDLARX : LoadAcquire <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
2987 def LDLARB : LoadAcquire <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
2988 def LDLARH : LoadAcquire <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;
2990 // v8.1a "Limited Order Region" extension store-release instructions
2991 def STLLRW : StoreRelease <0b10, 1, 0, 0, 0, GPR32, "stllr">;
2992 def STLLRX : StoreRelease <0b11, 1, 0, 0, 0, GPR64, "stllr">;
2993 def STLLRB : StoreRelease <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
2994 def STLLRH : StoreRelease <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
2995 }
2997 //===----------------------------------------------------------------------===//
2998 // Scaled floating point to integer conversion instructions.
2999 //===----------------------------------------------------------------------===//
3001 defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
3002 defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
3003 defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
3004 defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
3005 defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
3006 defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
3007 defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
3008 defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
3009 defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
3010 defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
3011 defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
3012 defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
3014 multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
3015 def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
3016 def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
3017 def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
3018 def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
3019 def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
3020 def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;
3022 def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
3023 (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
3024 def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
3025 (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
3026 def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
3027 (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
3028 def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
3029 (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
3030 def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
3031 (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
3032 def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
3033 (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
3034 }
3036 defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
3037 defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
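// Illustrative sketch (constants chosen for the example): the multiply by a
// power of two feeding the fcvtzs/fcvtzu intrinsic is folded into the
// fixed-point #fbits operand, e.g. a multiply by 65536.0 followed by the
// convert selects "fcvtzs w0, s0, #16", which scales by 2^16 and truncates
// toward zero in a single instruction.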
3039 multiclass FPToIntegerPats<SDNode to_int, SDNode round, string INST> {
3040 def : Pat<(i32 (to_int (round f32:$Rn))),
3041 (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
3042 def : Pat<(i64 (to_int (round f32:$Rn))),
3043 (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
3044 def : Pat<(i32 (to_int (round f64:$Rn))),
3045 (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
3046 def : Pat<(i64 (to_int (round f64:$Rn))),
3047 (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
3048 }
3050 defm : FPToIntegerPats<fp_to_sint, fceil, "FCVTPS">;
3051 defm : FPToIntegerPats<fp_to_uint, fceil, "FCVTPU">;
3052 defm : FPToIntegerPats<fp_to_sint, ffloor, "FCVTMS">;
3053 defm : FPToIntegerPats<fp_to_uint, ffloor, "FCVTMU">;
3054 defm : FPToIntegerPats<fp_to_sint, ftrunc, "FCVTZS">;
3055 defm : FPToIntegerPats<fp_to_uint, ftrunc, "FCVTZU">;
3056 defm : FPToIntegerPats<fp_to_sint, fround, "FCVTAS">;
3057 defm : FPToIntegerPats<fp_to_uint, fround, "FCVTAU">;
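// For illustration: each pairing folds the rounding step into the convert,
// e.g. (i32 (fp_to_sint (fceil f32:$x))) selects a single "fcvtps w0, s0"
// (round toward +infinity, then convert) instead of a separate frintp
// followed by fcvtzs.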
3059 //===----------------------------------------------------------------------===//
3060 // Scaled integer to floating point conversion instructions.
3061 //===----------------------------------------------------------------------===//
3063 defm SCVTF : IntegerToFP<0, "scvtf", sint_to_fp>;
3064 defm UCVTF : IntegerToFP<1, "ucvtf", uint_to_fp>;
3066 //===----------------------------------------------------------------------===//
3067 // Unscaled integer to floating point conversion instruction.
3068 //===----------------------------------------------------------------------===//
3070 defm FMOV : UnscaledConversion<"fmov">;
3072 // Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
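// (Sketch of the intent, not from the original: rematerializing an FP zero
//  where it is needed is cheaper than spilling and reloading it; the pseudos
//  are expected to expand to moves from the integer zero register, e.g.
//  "fmov s0, wzr" or "fmov d0, xzr", matching the aliases below.)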
3073 let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
3074 def FMOVH0 : Pseudo<(outs FPR16:$Rd), (ins), [(set f16:$Rd, (fpimm0))]>,
3075 Sched<[WriteF]>, Requires<[HasFullFP16]>;
3076 def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
3077 Sched<[WriteF]>;
3078 def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
3079 Sched<[WriteF]>;
3080 }
3081 // Similarly add aliases
3082 def : InstAlias<"fmov $Rd, #0.0", (FMOVWHr FPR16:$Rd, WZR), 0>,
3083 Requires<[HasFullFP16]>;
3084 def : InstAlias<"fmov $Rd, #0.0", (FMOVWSr FPR32:$Rd, WZR), 0>;
3085 def : InstAlias<"fmov $Rd, #0.0", (FMOVXDr FPR64:$Rd, XZR), 0>;
3087 //===----------------------------------------------------------------------===//
3088 // Floating point conversion instruction.
3089 //===----------------------------------------------------------------------===//
3091 defm FCVT : FPConversion<"fcvt">;
3093 //===----------------------------------------------------------------------===//
3094 // Floating point single operand instructions.
3095 //===----------------------------------------------------------------------===//
3097 defm FABS : SingleOperandFPData<0b0001, "fabs", fabs>;
3098 defm FMOV : SingleOperandFPData<0b0000, "fmov">;
3099 defm FNEG : SingleOperandFPData<0b0010, "fneg", fneg>;
3100 defm FRINTA : SingleOperandFPData<0b1100, "frinta", fround>;
3101 defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
3102 defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
3103 defm FRINTN : SingleOperandFPData<0b1000, "frintn", int_aarch64_neon_frintn>;
3104 defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;
3106 def : Pat<(v1f64 (int_aarch64_neon_frintn (v1f64 FPR64:$Rn))),
3107 (FRINTNDr FPR64:$Rn)>;
3109 defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
3110 defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;
3112 let SchedRW = [WriteFDiv] in {
3113 defm FSQRT : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
3114 }
3116 let Predicates = [HasFRInt3264] in {
3117 defm FRINT32Z : FRIntNNT<0b00, "frint32z">;
3118 defm FRINT64Z : FRIntNNT<0b10, "frint64z">;
3119 defm FRINT32X : FRIntNNT<0b01, "frint32x">;
3120 defm FRINT64X : FRIntNNT<0b11, "frint64x">;
3121 }
3123 //===----------------------------------------------------------------------===//
3124 // Floating point two operand instructions.
3125 //===----------------------------------------------------------------------===//
3127 defm FADD : TwoOperandFPData<0b0010, "fadd", fadd>;
3128 let SchedRW = [WriteFDiv] in {
3129 defm FDIV : TwoOperandFPData<0b0001, "fdiv", fdiv>;
3130 }
3131 defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
3132 defm FMAX : TwoOperandFPData<0b0100, "fmax", fmaximum>;
3133 defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
3134 defm FMIN : TwoOperandFPData<0b0101, "fmin", fminimum>;
3135 let SchedRW = [WriteFMul] in {
3136 defm FMUL : TwoOperandFPData<0b0000, "fmul", fmul>;
3137 defm FNMUL : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
3138 }
3139 defm FSUB : TwoOperandFPData<0b0011, "fsub", fsub>;
3141 def : Pat<(v1f64 (fmaximum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3142 (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
3143 def : Pat<(v1f64 (fminimum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3144 (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
3145 def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3146 (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
3147 def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3148 (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;
3150 //===----------------------------------------------------------------------===//
3151 // Floating point three operand instructions.
3152 //===----------------------------------------------------------------------===//
3154 defm FMADD : ThreeOperandFPData<0, 0, "fmadd", fma>;
3155 defm FMSUB : ThreeOperandFPData<0, 1, "fmsub",
3156 TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
3157 defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
3158 TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
3159 defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
3160 TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;
3162 // The following def pats catch the case where the LHS of an FMA is negated.
3163 // The TriOpFrag above catches the case where the middle operand is negated.
3165 // N.b. FMSUB etc have the accumulator at the *end* of (outs), unlike
3166 // the NEON variant.
3167 def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
3168 (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
3170 def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
3171 (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
3173 // We handled -(a + b*c) for FNMADD above, now it's time for "(-a) + (-b)*c" and
3174 // "(-a) + b*(-c)".
3175 def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
3176 (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
3178 def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
3179 (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
3181 def : Pat<(f32 (fma FPR32:$Rn, (fneg FPR32:$Rm), (fneg FPR32:$Ra))),
3182 (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
3184 def : Pat<(f64 (fma FPR64:$Rn, (fneg FPR64:$Rm), (fneg FPR64:$Ra))),
3185 (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
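// For illustration (a, b, c are arbitrary scalars):
//   fmaf(-a, b, c)  == c - a*b     -> fmsub  s0, s1, s2, s3
//   fmaf(-a, b, -c) == -(c + a*b)  -> fnmadd s0, s1, s2, s3
// so a negated multiplicand (and optionally a negated addend) still selects
// a single fused multiply instruction.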
3187 //===----------------------------------------------------------------------===//
3188 // Floating point comparison instructions.
3189 //===----------------------------------------------------------------------===//
3191 defm FCMPE : FPComparison<1, "fcmpe">;
3192 defm FCMP : FPComparison<0, "fcmp", AArch64fcmp>;
3194 //===----------------------------------------------------------------------===//
3195 // Floating point conditional comparison instructions.
3196 //===----------------------------------------------------------------------===//
3198 defm FCCMPE : FPCondComparison<1, "fccmpe">;
3199 defm FCCMP : FPCondComparison<0, "fccmp", AArch64fccmp>;
3201 //===----------------------------------------------------------------------===//
3202 // Floating point conditional select instruction.
3203 //===----------------------------------------------------------------------===//
3205 defm FCSEL : FPCondSelect<"fcsel">;
3207 // CSEL instructions providing f128 types need to be handled by a
3208 // pseudo-instruction since the eventual code will need to introduce basic
3209 // blocks and control flow.
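// (Illustrative sketch: a source-level select such as
//    __float128 r = ok ? x : y;
//  cannot use FCSEL, which only exists for H/S/D registers, so the custom
//  inserter later expands this pseudo into a conditional branch over a copy
//  of the 128-bit value.)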
3210 def F128CSEL : Pseudo<(outs FPR128:$Rd),
3211 (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
3212 [(set (f128 FPR128:$Rd),
3213 (AArch64csel FPR128:$Rn, FPR128:$Rm,
3214 (i32 imm:$cond), NZCV))]> {
3216 let usesCustomInserter = 1;
3217 let hasNoSchedulingInfo = 1;
3218 }
3220 //===----------------------------------------------------------------------===//
3221 // Instructions used for emitting unwind opcodes on ARM64 Windows.
3222 //===----------------------------------------------------------------------===//
3223 let isPseudo = 1 in {
3224 def SEH_StackAlloc : Pseudo<(outs), (ins i32imm:$size), []>, Sched<[]>;
3225 def SEH_SaveFPLR : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
3226 def SEH_SaveFPLR_X : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
3227 def SEH_SaveReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
3228 def SEH_SaveReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
3229 def SEH_SaveRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
3230 def SEH_SaveRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
3231 def SEH_SaveFReg : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
3232 def SEH_SaveFReg_X : Pseudo<(outs), (ins i32imm:$reg, i32imm:$offs), []>, Sched<[]>;
3233 def SEH_SaveFRegP : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
3234 def SEH_SaveFRegP_X : Pseudo<(outs), (ins i32imm:$reg0, i32imm:$reg1, i32imm:$offs), []>, Sched<[]>;
3235 def SEH_SetFP : Pseudo<(outs), (ins), []>, Sched<[]>;
3236 def SEH_AddFP : Pseudo<(outs), (ins i32imm:$offs), []>, Sched<[]>;
3237 def SEH_Nop : Pseudo<(outs), (ins), []>, Sched<[]>;
3238 def SEH_PrologEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
3239 def SEH_EpilogStart : Pseudo<(outs), (ins), []>, Sched<[]>;
3240 def SEH_EpilogEnd : Pseudo<(outs), (ins), []>, Sched<[]>;
3241 }
3242 //===----------------------------------------------------------------------===//
3243 // Pseudo instructions for Windows EH
3244 //===----------------------------------------------------------------------===//
3245 let isTerminator = 1, hasSideEffects = 1, isBarrier = 1, hasCtrlDep = 1,
3246 isCodeGenOnly = 1, isReturn = 1, isEHScopeReturn = 1, isPseudo = 1 in {
3247 def CLEANUPRET : Pseudo<(outs), (ins), [(cleanupret)]>, Sched<[]>;
3248 let usesCustomInserter = 1 in
3249 def CATCHRET : Pseudo<(outs), (ins am_brcond:$dst, am_brcond:$src), [(catchret bb:$dst, bb:$src)]>,
3250 Sched<[]>;
3251 }
3253 let hasSideEffects = 1, hasCtrlDep = 1, isCodeGenOnly = 1,
3254 usesCustomInserter = 1 in
3255 def CATCHPAD : Pseudo<(outs), (ins), [(catchpad)]>, Sched<[]>;
3257 //===----------------------------------------------------------------------===//
3258 // Floating point immediate move.
3259 //===----------------------------------------------------------------------===//
3261 let isReMaterializable = 1 in {
3262 defm FMOV : FPMoveImmediate<"fmov">;
3263 }
3265 //===----------------------------------------------------------------------===//
3266 // Advanced SIMD two vector instructions.
3267 //===----------------------------------------------------------------------===//
3269 defm UABDL : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
3270 int_aarch64_neon_uabd>;
3271 // Match UABDL in log2-shuffle patterns.
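// (Background sketch, assuming the usual abs expansion for v8i16: abs(x) is
//  commonly rewritten as
//    sign = x >> 15;  abs = (x + sign) ^ sign;
//  and the xor/add/vashr patterns below recognize that form when x is
//  zext(a) - zext(b), so the whole expression still selects a single uabdl.)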
3272 def : Pat<(abs (v8i16 (sub (zext (v8i8 V64:$opA)),
3273 (zext (v8i8 V64:$opB))))),
3274 (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
3275 def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
3276 (v8i16 (add (sub (zext (v8i8 V64:$opA)),
3277 (zext (v8i8 V64:$opB))),
3278 (AArch64vashr v8i16:$src, (i32 15))))),
3279 (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
3280 def : Pat<(abs (v8i16 (sub (zext (extract_high_v16i8 V128:$opA)),
3281 (zext (extract_high_v16i8 V128:$opB))))),
3282 (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
3283 def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
3284 (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
3285 (zext (extract_high_v16i8 V128:$opB))),
3286 (AArch64vashr v8i16:$src, (i32 15))))),
3287 (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
3288 def : Pat<(abs (v4i32 (sub (zext (v4i16 V64:$opA)),
3289 (zext (v4i16 V64:$opB))))),
3290 (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
3291 def : Pat<(abs (v4i32 (sub (zext (extract_high_v8i16 V128:$opA)),
3292 (zext (extract_high_v8i16 V128:$opB))))),
3293 (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
3294 def : Pat<(abs (v2i64 (sub (zext (v2i32 V64:$opA)),
3295 (zext (v2i32 V64:$opB))))),
3296 (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
3297 def : Pat<(abs (v2i64 (sub (zext (extract_high_v4i32 V128:$opA)),
3298 (zext (extract_high_v4i32 V128:$opB))))),
3299 (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
3301 defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", abs>;
3302 defm CLS : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
3303 defm CLZ : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
3304 defm CMEQ : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
3305 defm CMGE : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
3306 defm CMGT : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
3307 defm CMLE : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
3308 defm CMLT : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
3309 defm CNT : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
3310 defm FABS : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;
3312 defm FCMEQ : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
3313 defm FCMGE : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
3314 defm FCMGT : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
3315 defm FCMLE : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
3316 defm FCMLT : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
3317 defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
3318 defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
3319 defm FCVTL : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
3320 def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
3321 (FCVTLv4i16 V64:$Rn)>;
3322 def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
3323 (i64 4))),
3324 (FCVTLv8i16 V128:$Rn)>;
3325 def : Pat<(v2f64 (fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;
3326 def : Pat<(v2f64 (fpextend (v2f32 (extract_subvector (v4f32 V128:$Rn),
3327 (i64 2)))),
3328 (FCVTLv4i32 V128:$Rn)>;
3330 def : Pat<(v4f32 (fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;
3331 def : Pat<(v4f32 (fpextend (v4f16 (extract_subvector (v8f16 V128:$Rn),
3332 (i64 4)))),
3333 (FCVTLv8i16 V128:$Rn)>;
3335 defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
3336 defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
3337 defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
3338 defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
3339 defm FCVTN : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
3340 def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
3341 (FCVTNv4i16 V128:$Rn)>;
3342 def : Pat<(concat_vectors V64:$Rd,
3343 (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
3344 (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
3345 def : Pat<(v2f32 (fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
3346 def : Pat<(v4f16 (fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
3347 def : Pat<(concat_vectors V64:$Rd, (v2f32 (fpround (v2f64 V128:$Rn)))),
3348 (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
3349 defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
3350 defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
3351 defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
3352 int_aarch64_neon_fcvtxn>;
3353 defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
3354 defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;
3356 def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
3357 def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
3358 def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
3359 def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
3360 def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;
3362 def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
3363 def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
3364 def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
3365 def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
3366 def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;
3368 defm FNEG : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
3369 defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
3370 defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", fround>;
3371 defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
3372 defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
3373 defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", int_aarch64_neon_frintn>;
3374 defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
3375 defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
3376 defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;
3378 let Predicates = [HasFRInt3264] in {
3379 defm FRINT32Z : FRIntNNTVector<0, 0, "frint32z">;
3380 defm FRINT64Z : FRIntNNTVector<0, 1, "frint64z">;
3381 defm FRINT32X : FRIntNNTVector<1, 0, "frint32x">;
3382 defm FRINT64X : FRIntNNTVector<1, 1, "frint64x">;
3383 }
3385 defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
3386 defm FSQRT : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
3387 defm NEG : SIMDTwoVectorBHSD<1, 0b01011, "neg",
3388 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
3389 defm NOT : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
3390 // Aliases for MVN -> NOT.
3391 def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
3392 (NOTv8i8 V64:$Vd, V64:$Vn)>;
3393 def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
3394 (NOTv16i8 V128:$Vd, V128:$Vn)>;
3396 def : Pat<(AArch64neg (v8i8 V64:$Rn)), (NEGv8i8 V64:$Rn)>;
3397 def : Pat<(AArch64neg (v16i8 V128:$Rn)), (NEGv16i8 V128:$Rn)>;
3398 def : Pat<(AArch64neg (v4i16 V64:$Rn)), (NEGv4i16 V64:$Rn)>;
3399 def : Pat<(AArch64neg (v8i16 V128:$Rn)), (NEGv8i16 V128:$Rn)>;
3400 def : Pat<(AArch64neg (v2i32 V64:$Rn)), (NEGv2i32 V64:$Rn)>;
3401 def : Pat<(AArch64neg (v4i32 V128:$Rn)), (NEGv4i32 V128:$Rn)>;
3402 def : Pat<(AArch64neg (v2i64 V128:$Rn)), (NEGv2i64 V128:$Rn)>;
3404 def : Pat<(AArch64not (v8i8 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
3405 def : Pat<(AArch64not (v16i8 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3406 def : Pat<(AArch64not (v4i16 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
3407 def : Pat<(AArch64not (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3408 def : Pat<(AArch64not (v2i32 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
3409 def : Pat<(AArch64not (v1i64 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
3410 def : Pat<(AArch64not (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3411 def : Pat<(AArch64not (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3413 def : Pat<(vnot (v4i16 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
3414 def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3415 def : Pat<(vnot (v2i32 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
3416 def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3417 def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
3419 defm RBIT : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", int_aarch64_neon_rbit>;
3420 defm REV16 : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
3421 defm REV32 : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
3422 defm REV64 : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
3423 defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
3424 BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
3425 defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
3426 defm SCVTF : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
3427 defm SHLL : SIMDVectorLShiftLongBySizeBHS;
3428 defm SQABS : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
3429 defm SQNEG : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
3430 defm SQXTN : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
3431 defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
3432 defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
3433 defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
3434 BinOpFrag<(add node:$LHS, (int_aarch64_neon_uaddlp node:$RHS))> >;
3435 defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp",
3436 int_aarch64_neon_uaddlp>;
3437 defm UCVTF : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
3438 defm UQXTN : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
3439 defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
3440 defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
3441 defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
3442 defm XTN : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
3444 def : Pat<(v4f16 (AArch64rev32 V64:$Rn)), (REV32v4i16 V64:$Rn)>;
3445 def : Pat<(v4f16 (AArch64rev64 V64:$Rn)), (REV64v4i16 V64:$Rn)>;
3446 def : Pat<(v8f16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
3447 def : Pat<(v8f16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
3448 def : Pat<(v2f32 (AArch64rev64 V64:$Rn)), (REV64v2i32 V64:$Rn)>;
3449 def : Pat<(v4f32 (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
3451 // Patterns for vector long shift (by element width). These need to match all
3452 // three of zext, sext and anyext so it's easier to pull the patterns out of the
3453 // tree.
3454 multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
3455 def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
3456 (SHLLv8i8 V64:$Rn)>;
3457 def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
3458 (SHLLv16i8 V128:$Rn)>;
3459 def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
3460 (SHLLv4i16 V64:$Rn)>;
3461 def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
3462 (SHLLv8i16 V128:$Rn)>;
3463 def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
3464 (SHLLv2i32 V64:$Rn)>;
3465 def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
3466 (SHLLv4i32 V128:$Rn)>;
3467 }
3469 defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
3470 defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
3471 defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
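// For illustration: a widening shift by exactly the source element width,
// e.g. (v8i16 (zext (v8i8 V64:$x))) shifted left by 8, selects the single
// "shll v0.8h, v0.8b, #8"; the extension bits are shifted out again and the
// low bits are zero-filled, so zext, sext and anyext all give the same value.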
3473 //===----------------------------------------------------------------------===//
3474 // Advanced SIMD three vector instructions.
3475 //===----------------------------------------------------------------------===//
3477 defm ADD : SIMDThreeSameVector<0, 0b10000, "add", add>;
3478 defm ADDP : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
3479 defm CMEQ : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
3480 defm CMGE : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
3481 defm CMGT : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
3482 defm CMHI : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
3483 defm CMHS : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
3484 defm CMTST : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
3485 defm FABD : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
3486 let Predicates = [HasNEON] in {
3487 foreach VT = [ v2f32, v4f32, v2f64 ] in
3488 def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
3489 }
3490 let Predicates = [HasNEON, HasFullFP16] in {
3491 foreach VT = [ v4f16, v8f16 ] in
3492 def : Pat<(fabs (fsub VT:$Rn, VT:$Rm)), (!cast<Instruction>("FABD"#VT) VT:$Rn, VT:$Rm)>;
3493 }
3494 defm FACGE : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
3495 defm FACGT : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
3496 defm FADDP : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_addp>;
3497 defm FADD : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>;
3498 defm FCMEQ : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
3499 defm FCMGE : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
3500 defm FCMGT : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
3501 defm FDIV : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>;
3502 defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
3503 defm FMAXNM : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>;
3504 defm FMAXP : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
3505 defm FMAX : SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaximum>;
3506 defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
3507 defm FMINNM : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>;
3508 defm FMINP : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
3509 defm FMIN : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminimum>;
3511 // NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
3512 // instruction expects the addend first, while the fma intrinsic puts it last.
3513 defm FMLA : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
3514 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
3515 defm FMLS : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
3516 TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
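// For illustration (sketch): "fmla v0.4s, v1.4s, v2.4s" computes
// v0 + v1 * v2, i.e. the tied first operand of the instruction is the
// addend, whereas (fma a, b, c) takes the addend last; that is why the
// TriOpFrag operands above are reordered.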
3518 // The following def pats catch the case where the LHS of an FMA is negated.
3519 // The TriOpFrag above catches the case where the middle operand is negated.
3520 def : Pat<(v2f32 (fma (fneg V64:$Rn), V64:$Rm, V64:$Rd)),
3521 (FMLSv2f32 V64:$Rd, V64:$Rn, V64:$Rm)>;
3523 def : Pat<(v4f32 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
3524 (FMLSv4f32 V128:$Rd, V128:$Rn, V128:$Rm)>;
3526 def : Pat<(v2f64 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
3527 (FMLSv2f64 V128:$Rd, V128:$Rn, V128:$Rm)>;
3529 defm FMULX : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
3530 defm FMUL : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
3531 defm FRECPS : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
3532 defm FRSQRTS : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
3533 defm FSUB : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;
3534 defm MLA : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla",
3535 TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))> >;
3536 defm MLS : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls",
3537 TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))> >;
3538 defm MUL : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
3539 defm PMUL : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
3540 defm SABA : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
3541 TriOpFrag<(add node:$LHS, (int_aarch64_neon_sabd node:$MHS, node:$RHS))> >;
3542 defm SABD : SIMDThreeSameVectorBHS<0,0b01110,"sabd", int_aarch64_neon_sabd>;
3543 defm SHADD : SIMDThreeSameVectorBHS<0,0b00000,"shadd", int_aarch64_neon_shadd>;
3544 defm SHSUB : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
3545 defm SMAXP : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
3546 defm SMAX : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
3547 defm SMINP : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
3548 defm SMIN : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
3549 defm SQADD : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
3550 defm SQDMULH : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
3551 defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
3552 defm SQRSHL : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
3553 defm SQSHL : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
3554 defm SQSUB : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
3555 defm SRHADD : SIMDThreeSameVectorBHS<0,0b00010,"srhadd",int_aarch64_neon_srhadd>;
3556 defm SRSHL : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
3557 defm SSHL : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
3558 defm SUB : SIMDThreeSameVector<1,0b10000,"sub", sub>;
3559 defm UABA : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
3560 TriOpFrag<(add node:$LHS, (int_aarch64_neon_uabd node:$MHS, node:$RHS))> >;
3561 defm UABD : SIMDThreeSameVectorBHS<1,0b01110,"uabd", int_aarch64_neon_uabd>;
3562 defm UHADD : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", int_aarch64_neon_uhadd>;
3563 defm UHSUB : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
3564 defm UMAXP : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
3565 defm UMAX : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
3566 defm UMINP : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
3567 defm UMIN : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
3568 defm UQADD : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
3569 defm UQRSHL : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
3570 defm UQSHL : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
3571 defm UQSUB : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
3572 defm URHADD : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", int_aarch64_neon_urhadd>;
3573 defm URSHL : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
3574 defm USHL : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
3575 defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
3576 int_aarch64_neon_sqadd>;
3577 defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
3578 int_aarch64_neon_sqsub>;
3580 defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
3581 defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
3582 BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
3583 defm BIF : SIMDLogicalThreeVector<1, 0b11, "bif">;
3584 defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
3585 defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl",
3586 TriOpFrag<(or (and node:$LHS, node:$MHS), (and (vnot node:$LHS), node:$RHS))>>;
3587 defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
3588 defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
3589 BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
3590 defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;
3593 def : Pat<(AArch64bsl (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
3594 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
3595 def : Pat<(AArch64bsl (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
3596 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
3597 def : Pat<(AArch64bsl (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
3598 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
3599 def : Pat<(AArch64bsl (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
3600 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
3602 def : Pat<(AArch64bsl (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
3603 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
3604 def : Pat<(AArch64bsl (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
3605 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
3606 def : Pat<(AArch64bsl (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
3607 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
3608 def : Pat<(AArch64bsl (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
3609 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
3611 def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
3612 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
3613 def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
3614 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
3615 def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
3616 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
3617 def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
3618 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
3620 def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
3621 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
3622 def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
3623 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
3624 def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
3625 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
3626 def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
3627 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
3629 def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
3630 "|cmls.8b\t$dst, $src1, $src2}",
3631 (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
3632 def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
3633 "|cmls.16b\t$dst, $src1, $src2}",
3634 (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
3635 def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
3636 "|cmls.4h\t$dst, $src1, $src2}",
3637 (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
3638 def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
3639 "|cmls.8h\t$dst, $src1, $src2}",
3640 (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
3641 def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
3642 "|cmls.2s\t$dst, $src1, $src2}",
3643 (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
3644 def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
3645 "|cmls.4s\t$dst, $src1, $src2}",
3646 (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
3647 def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
3648 "|cmls.2d\t$dst, $src1, $src2}",
3649 (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
3651 def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
3652 "|cmlo.8b\t$dst, $src1, $src2}",
3653 (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
3654 def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
3655 "|cmlo.16b\t$dst, $src1, $src2}",
3656 (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
3657 def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
3658 "|cmlo.4h\t$dst, $src1, $src2}",
3659 (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
3660 def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
3661 "|cmlo.8h\t$dst, $src1, $src2}",
3662 (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
3663 def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
3664 "|cmlo.2s\t$dst, $src1, $src2}",
3665 (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
3666 def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
3667 "|cmlo.4s\t$dst, $src1, $src2}",
3668 (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
3669 def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
3670 "|cmlo.2d\t$dst, $src1, $src2}",
3671 (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
3673 def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
3674 "|cmle.8b\t$dst, $src1, $src2}",
3675 (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
3676 def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
3677 "|cmle.16b\t$dst, $src1, $src2}",
3678 (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
3679 def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
3680 "|cmle.4h\t$dst, $src1, $src2}",
3681 (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
3682 def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
3683 "|cmle.8h\t$dst, $src1, $src2}",
3684 (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
3685 def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
3686 "|cmle.2s\t$dst, $src1, $src2}",
3687 (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
3688 def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
3689 "|cmle.4s\t$dst, $src1, $src2}",
3690 (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
3691 def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
3692 "|cmle.2d\t$dst, $src1, $src2}",
3693 (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
3695 def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
3696 "|cmlt.8b\t$dst, $src1, $src2}",
3697 (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
3698 def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
3699 "|cmlt.16b\t$dst, $src1, $src2}",
3700 (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
3701 def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
3702 "|cmlt.4h\t$dst, $src1, $src2}",
3703 (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
3704 def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
3705 "|cmlt.8h\t$dst, $src1, $src2}",
3706 (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
3707 def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
3708 "|cmlt.2s\t$dst, $src1, $src2}",
3709 (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
3710 def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
3711 "|cmlt.4s\t$dst, $src1, $src2}",
3712 (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
3713 def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
3714 "|cmlt.2d\t$dst, $src1, $src2}",
3715 (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
3717 let Predicates = [HasNEON, HasFullFP16] in {
3718 def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
3719 "|fcmle.4h\t$dst, $src1, $src2}",
3720 (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
3721 def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
3722 "|fcmle.8h\t$dst, $src1, $src2}",
3723 (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
3724 }
3725 def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
3726 "|fcmle.2s\t$dst, $src1, $src2}",
3727 (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
3728 def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
3729 "|fcmle.4s\t$dst, $src1, $src2}",
3730 (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
3731 def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
3732 "|fcmle.2d\t$dst, $src1, $src2}",
3733 (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
3735 let Predicates = [HasNEON, HasFullFP16] in {
3736 def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
3737 "|fcmlt.4h\t$dst, $src1, $src2}",
3738 (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
3739 def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
3740 "|fcmlt.8h\t$dst, $src1, $src2}",
3741 (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
3742 }
3743 def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
3744 "|fcmlt.2s\t$dst, $src1, $src2}",
3745 (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
3746 def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
3747 "|fcmlt.4s\t$dst, $src1, $src2}",
3748 (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
3749 def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
3750 "|fcmlt.2d\t$dst, $src1, $src2}",
3751 (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
3753 let Predicates = [HasNEON, HasFullFP16] in {
3754 def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
3755 "|facle.4h\t$dst, $src1, $src2}",
3756 (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
3757 def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
3758 "|facle.8h\t$dst, $src1, $src2}",
3759 (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
3760 }
3761 def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
3762 "|facle.2s\t$dst, $src1, $src2}",
3763 (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
3764 def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
3765 "|facle.4s\t$dst, $src1, $src2}",
3766 (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
3767 def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
3768 "|facle.2d\t$dst, $src1, $src2}",
3769 (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
3771 let Predicates = [HasNEON, HasFullFP16] in {
3772 def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
3773 "|faclt.4h\t$dst, $src1, $src2}",
3774 (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
3775 def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
3776 "|faclt.8h\t$dst, $src1, $src2}",
3777 (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
3778 }
3779 def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
3780 "|faclt.2s\t$dst, $src1, $src2}",
3781 (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
3782 def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
3783 "|faclt.4s\t$dst, $src1, $src2}",
3784 (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
3785 def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
3786 "|faclt.2d\t$dst, $src1, $src2}",
3787 (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
3789 //===----------------------------------------------------------------------===//
3790 // Advanced SIMD three scalar instructions.
3791 //===----------------------------------------------------------------------===//
3793 defm ADD : SIMDThreeScalarD<0, 0b10000, "add", add>;
3794 defm CMEQ : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
3795 defm CMGE : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
3796 defm CMGT : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
3797 defm CMHI : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
3798 defm CMHS : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
3799 defm CMTST : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
3800 defm FABD : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
3801 def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3802 (FABD64 FPR64:$Rn, FPR64:$Rm)>;
3803 let Predicates = [HasFullFP16] in {
3804 def : Pat<(fabs (fsub f16:$Rn, f16:$Rm)), (FABD16 f16:$Rn, f16:$Rm)>;
3805 }
3806 def : Pat<(fabs (fsub f32:$Rn, f32:$Rm)), (FABD32 f32:$Rn, f32:$Rm)>;
3807 def : Pat<(fabs (fsub f64:$Rn, f64:$Rm)), (FABD64 f64:$Rn, f64:$Rm)>;
3808 defm FACGE : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
3809 int_aarch64_neon_facge>;
3810 defm FACGT : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
3811 int_aarch64_neon_facgt>;
3812 defm FCMEQ : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
3813 defm FCMGE : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
3814 defm FCMGT : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
3815 defm FMULX : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx>;
3816 defm FRECPS : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps>;
3817 defm FRSQRTS : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts>;
3818 defm SQADD : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
3819 defm SQDMULH : SIMDThreeScalarHS< 0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
3820 defm SQRDMULH : SIMDThreeScalarHS< 1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
3821 defm SQRSHL : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
3822 defm SQSHL : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
3823 defm SQSUB : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
3824 defm SRSHL : SIMDThreeScalarD< 0, 0b01010, "srshl", int_aarch64_neon_srshl>;
3825 defm SSHL : SIMDThreeScalarD< 0, 0b01000, "sshl", int_aarch64_neon_sshl>;
3826 defm SUB : SIMDThreeScalarD< 1, 0b10000, "sub", sub>;
3827 defm UQADD : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
3828 defm UQRSHL : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
3829 defm UQSHL : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
3830 defm UQSUB : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
3831 defm URSHL : SIMDThreeScalarD< 1, 0b01010, "urshl", int_aarch64_neon_urshl>;
3832 defm USHL : SIMDThreeScalarD< 1, 0b01000, "ushl", int_aarch64_neon_ushl>;
3833 let Predicates = [HasRDM] in {
3834 defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
3835 defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
3836 def : Pat<(i32 (int_aarch64_neon_sqadd
3837 (i32 FPR32:$Rd),
3838 (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
3839 (i32 FPR32:$Rm))))),
3840 (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
3841 def : Pat<(i32 (int_aarch64_neon_sqsub
3842 (i32 FPR32:$Rd),
3843 (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
3844 (i32 FPR32:$Rm))))),
3845 (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
3846 }
3848 def : InstAlias<"cmls $dst, $src1, $src2",
3849 (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3850 def : InstAlias<"cmle $dst, $src1, $src2",
3851 (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3852 def : InstAlias<"cmlo $dst, $src1, $src2",
3853 (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3854 def : InstAlias<"cmlt $dst, $src1, $src2",
3855 (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3856 def : InstAlias<"fcmle $dst, $src1, $src2",
3857 (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
3858 def : InstAlias<"fcmle $dst, $src1, $src2",
3859 (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3860 def : InstAlias<"fcmlt $dst, $src1, $src2",
3861 (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
3862 def : InstAlias<"fcmlt $dst, $src1, $src2",
3863 (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3864 def : InstAlias<"facle $dst, $src1, $src2",
3865 (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
3866 def : InstAlias<"facle $dst, $src1, $src2",
3867 (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3868 def : InstAlias<"faclt $dst, $src1, $src2",
3869 (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
3870 def : InstAlias<"faclt $dst, $src1, $src2",
3871 (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3873 //===----------------------------------------------------------------------===//
3874 // Advanced SIMD three scalar instructions (mixed operands).
3875 //===----------------------------------------------------------------------===//
3876 defm SQDMULL : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
3877 int_aarch64_neon_sqdmulls_scalar>;
3878 defm SQDMLAL : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
3879 defm SQDMLSL : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;
3881 def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
3882 (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
3883 (i32 FPR32:$Rm))))),
3884 (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
3885 def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
3886 (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
3887 (i32 FPR32:$Rm))))),
3888 (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
3890 //===----------------------------------------------------------------------===//
3891 // Advanced SIMD two scalar instructions.
3892 //===----------------------------------------------------------------------===//
3894 defm ABS : SIMDTwoScalarD< 0, 0b01011, "abs", abs>;
3895 defm CMEQ : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
3896 defm CMGE : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
3897 defm CMGT : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
3898 defm CMLE : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
3899 defm CMLT : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
3900 defm FCMEQ : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
3901 defm FCMGE : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
3902 defm FCMGT : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
3903 defm FCMLE : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
3904 defm FCMLT : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
3905 defm FCVTAS : SIMDFPTwoScalar< 0, 0, 0b11100, "fcvtas">;
3906 defm FCVTAU : SIMDFPTwoScalar< 1, 0, 0b11100, "fcvtau">;
3907 defm FCVTMS : SIMDFPTwoScalar< 0, 0, 0b11011, "fcvtms">;
3908 defm FCVTMU : SIMDFPTwoScalar< 1, 0, 0b11011, "fcvtmu">;
3909 defm FCVTNS : SIMDFPTwoScalar< 0, 0, 0b11010, "fcvtns">;
3910 defm FCVTNU : SIMDFPTwoScalar< 1, 0, 0b11010, "fcvtnu">;
3911 defm FCVTPS : SIMDFPTwoScalar< 0, 1, 0b11010, "fcvtps">;
3912 defm FCVTPU : SIMDFPTwoScalar< 1, 1, 0b11010, "fcvtpu">;
3913 def FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
3914 defm FCVTZS : SIMDFPTwoScalar< 0, 1, 0b11011, "fcvtzs">;
3915 defm FCVTZU : SIMDFPTwoScalar< 1, 1, 0b11011, "fcvtzu">;
3916 defm FRECPE : SIMDFPTwoScalar< 0, 1, 0b11101, "frecpe">;
3917 defm FRECPX : SIMDFPTwoScalar< 0, 1, 0b11111, "frecpx">;
3918 defm FRSQRTE : SIMDFPTwoScalar< 1, 1, 0b11101, "frsqrte">;
3919 defm NEG : SIMDTwoScalarD< 1, 0b01011, "neg",
3920 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
3921 defm SCVTF : SIMDFPTwoScalarCVT< 0, 0, 0b11101, "scvtf", AArch64sitof>;
3922 defm SQABS : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
3923 defm SQNEG : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
3924 defm SQXTN : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
3925 defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
3926 defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
3927 int_aarch64_neon_suqadd>;
3928 defm UCVTF : SIMDFPTwoScalarCVT< 1, 0, 0b11101, "ucvtf", AArch64uitof>;
3929 defm UQXTN : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
3930 defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
3931 int_aarch64_neon_usqadd>;
3933 def : Pat<(AArch64neg (v1i64 V64:$Rn)), (NEGv1i64 V64:$Rn)>;
3935 def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
3936 (FCVTASv1i64 FPR64:$Rn)>;
3937 def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
3938 (FCVTAUv1i64 FPR64:$Rn)>;
3939 def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
3940 (FCVTMSv1i64 FPR64:$Rn)>;
3941 def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
3942 (FCVTMUv1i64 FPR64:$Rn)>;
3943 def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
3944 (FCVTNSv1i64 FPR64:$Rn)>;
3945 def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
3946 (FCVTNUv1i64 FPR64:$Rn)>;
3947 def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
3948 (FCVTPSv1i64 FPR64:$Rn)>;
3949 def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
3950 (FCVTPUv1i64 FPR64:$Rn)>;
3952 def : Pat<(f16 (int_aarch64_neon_frecpe (f16 FPR16:$Rn))),
3953 (FRECPEv1f16 FPR16:$Rn)>;
3954 def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
3955 (FRECPEv1i32 FPR32:$Rn)>;
3956 def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
3957 (FRECPEv1i64 FPR64:$Rn)>;
3958 def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
3959 (FRECPEv1i64 FPR64:$Rn)>;
3961 def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
3962 (FRECPEv1i32 FPR32:$Rn)>;
3963 def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
3964 (FRECPEv2f32 V64:$Rn)>;
3965 def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
3966 (FRECPEv4f32 FPR128:$Rn)>;
3967 def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
3968 (FRECPEv1i64 FPR64:$Rn)>;
3969 def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
3970 (FRECPEv1i64 FPR64:$Rn)>;
3971 def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
3972 (FRECPEv2f64 FPR128:$Rn)>;
3974 def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
3975 (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
3976 def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
3977 (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
3978 def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
3979 (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
3980 def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
3981 (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
3982 def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
3983 (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;
3985 def : Pat<(f16 (int_aarch64_neon_frecpx (f16 FPR16:$Rn))),
3986 (FRECPXv1f16 FPR16:$Rn)>;
3987 def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
3988 (FRECPXv1i32 FPR32:$Rn)>;
3989 def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
3990 (FRECPXv1i64 FPR64:$Rn)>;
3992 def : Pat<(f16 (int_aarch64_neon_frsqrte (f16 FPR16:$Rn))),
3993 (FRSQRTEv1f16 FPR16:$Rn)>;
3994 def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
3995 (FRSQRTEv1i32 FPR32:$Rn)>;
3996 def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
3997 (FRSQRTEv1i64 FPR64:$Rn)>;
3998 def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
3999 (FRSQRTEv1i64 FPR64:$Rn)>;
4001 def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
4002 (FRSQRTEv1i32 FPR32:$Rn)>;
4003 def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
4004 (FRSQRTEv2f32 V64:$Rn)>;
4005 def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
4006 (FRSQRTEv4f32 FPR128:$Rn)>;
4007 def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
4008 (FRSQRTEv1i64 FPR64:$Rn)>;
4009 def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
4010 (FRSQRTEv1i64 FPR64:$Rn)>;
4011 def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
4012 (FRSQRTEv2f64 FPR128:$Rn)>;
4014 def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
4015 (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
4016 def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
4017 (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
4018 def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
4019 (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
4020 def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
4021 (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
4022 def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
4023 (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;
4025 // If an integer is about to be converted to a floating point value,
4026 // just load it on the floating point unit.
4027 // Here are the patterns for 8 and 16-bits to float.
4029 multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
4030 SDPatternOperator loadop, Instruction UCVTF,
4031 ROAddrMode ro, Instruction LDRW, Instruction LDRX,
4032 SubRegIndex sub> {
4033 def : Pat<(DstTy (uint_to_fp (SrcTy
4034 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
4035 ro.Wext:$extend))))),
4036 (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
4037 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
4038 sub))>;
4040 def : Pat<(DstTy (uint_to_fp (SrcTy
4041 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
4042 ro.Wext:$extend))))),
4043 (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
4044 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
4045 sub))>;
4046 }
4048 defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
4049 UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
4050 def : Pat <(f32 (uint_to_fp (i32
4051 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
4052 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
4053 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
4054 def : Pat <(f32 (uint_to_fp (i32
4055 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
4056 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
4057 (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
4058 // 16-bits -> float.
4059 defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
4060 UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
4061 def : Pat <(f32 (uint_to_fp (i32
4062 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
4063 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
4064 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
4065 def : Pat <(f32 (uint_to_fp (i32
4066 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
4067 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
4068 (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
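// Illustrative note (not part of the original patterns): a minimal C sketch of
// the kind of source the 8/16-bit load-and-convert patterns above are meant to
// catch; the function names are invented for this comment.
//   #include <stdint.h>
//   float u8_to_f32(const uint8_t *p)   { return (float)*p; }
//   float u16_to_f32(const uint16_t *p) { return (float)*p; }
// The zero-extending byte/halfword load is done straight into the bsub/hsub
// lane of an FP register and a single scalar ucvtf finishes the conversion,
// avoiding a GPR load followed by a GPR-to-FPR transfer.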
4069 // 32-bits are handled in target specific dag combine:
4070 // performIntToFpCombine.
4071 // 64-bits integer to 32-bits floating point, not possible with
4072 // UCVTF on floating point registers (both source and destination
4073 // must have the same size).
4075 // Here are the patterns for 8, 16, 32, and 64-bits to double.
4076 // 8-bits -> double.
4077 defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
4078 UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
4079 def : Pat <(f64 (uint_to_fp (i32
4080 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
4081 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4082 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
4083 def : Pat <(f64 (uint_to_fp (i32
4084 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
4085 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4086 (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
4087 // 16-bits -> double.
4088 defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
4089 UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
4090 def : Pat <(f64 (uint_to_fp (i32
4091 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
4092 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4093 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
4094 def : Pat <(f64 (uint_to_fp (i32
4095 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
4096 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4097 (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
4098 // 32-bits -> double.
4099 defm : UIntToFPROLoadPat<f64, i32, load,
4100 UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
4101 def : Pat <(f64 (uint_to_fp (i32
4102 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
4103 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4104 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
4105 def : Pat <(f64 (uint_to_fp (i32
4106 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
4107 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4108 (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
4109 // 64-bits -> double are handled in target specific dag combine:
4110 // performIntToFpCombine.
4112 //===----------------------------------------------------------------------===//
4113 // Advanced SIMD three different-sized vector instructions.
4114 //===----------------------------------------------------------------------===//
4116 defm ADDHN : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
4117 defm SUBHN : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
4118 defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
4119 defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
4120 defm PMULL : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
4121 defm SABAL : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
4122 int_aarch64_neon_sabd>;
4123 defm SABDL : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
4124 int_aarch64_neon_sabd>;
4125 defm SADDL : SIMDLongThreeVectorBHS< 0, 0b0000, "saddl",
4126 BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
4127 defm SADDW : SIMDWideThreeVectorBHS< 0, 0b0001, "saddw",
4128 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
4129 defm SMLAL : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
4130 TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
4131 defm SMLSL : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
4132 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
4133 defm SMULL : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
4134 defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
4135 int_aarch64_neon_sqadd>;
4136 defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
4137 int_aarch64_neon_sqsub>;
4138 defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
4139 int_aarch64_neon_sqdmull>;
4140 defm SSUBL : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
4141 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
4142 defm SSUBW : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
4143 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
4144 defm UABAL : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
4145 int_aarch64_neon_uabd>;
4146 defm UADDL : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
4147 BinOpFrag<(add (zext node:$LHS), (zext node:$RHS))>>;
4148 defm UADDW : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
4149 BinOpFrag<(add node:$LHS, (zext node:$RHS))>>;
4150 defm UMLAL : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
4151 TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
4152 defm UMLSL : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
4153 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
4154 defm UMULL : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
4155 defm USUBL : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
4156 BinOpFrag<(sub (zext node:$LHS), (zext node:$RHS))>>;
4157 defm USUBW : SIMDWideThreeVectorBHS< 1, 0b0011, "usubw",
4158 BinOpFrag<(sub node:$LHS, (zext node:$RHS))>>;
4160 // Additional patterns for SMULL and UMULL
4161 multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
4162 Instruction INST8B, Instruction INST4H, Instruction INST2S> {
4163 def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
4164 (INST8B V64:$Rn, V64:$Rm)>;
4165 def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
4166 (INST4H V64:$Rn, V64:$Rm)>;
4167 def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
4168 (INST2S V64:$Rn, V64:$Rm)>;
4169 }
4171 defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
4172 SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
4173 defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
4174 UMULLv4i16_v4i32, UMULLv2i32_v2i64>;
4176 // Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
4177 multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
4178 Instruction INST8B, Instruction INST4H, Instruction INST2S> {
4179 def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
4180 (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
4181 def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
4182 (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
4183 def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
4184 (INST2S V128:$Rd, V64:$Rn, V64:$Rm)>;
4185 }
4187 defm : Neon_mulacc_widen_patterns<
4188 TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
4189 SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
4190 defm : Neon_mulacc_widen_patterns<
4191 TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
4192 UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
4193 defm : Neon_mulacc_widen_patterns<
4194 TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
4195 SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
4196 defm : Neon_mulacc_widen_patterns<
4197 TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
4198 UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
4200 // Patterns for 64-bit pmull
4201 def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
4202 (PMULLv1i64 V64:$Rn, V64:$Rm)>;
4203 def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
4204 (extractelt (v2i64 V128:$Rm), (i64 1))),
4205 (PMULLv2i64 V128:$Rn, V128:$Rm)>;
4207 // CodeGen patterns for addhn and subhn instructions, which can actually be
4208 // written in LLVM IR without too much difficulty.
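// Hedged illustration (not from the original file; names invented): the v8i8
// ADDHN pattern below corresponds to per-lane C along the lines of
//   #include <stdint.h>
//   void addhn8(uint8_t r[8], const uint16_t a[8], const uint16_t b[8]) {
//     for (int i = 0; i < 8; ++i)
//       r[i] = (uint8_t)((uint16_t)(a[i] + b[i]) >> 8);
//   }
// i.e. add, logical shift right by half the element width, then truncate,
// which is exactly the (trunc (AArch64vlshr (add ...), (i32 N))) DAG shape;
// SUBHN is the same with a subtract.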
4211 def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
4212 (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
4213 def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
4214 (i32 16))))),
4215 (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
4216 def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
4217 (i32 32))))),
4218 (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
4219 def : Pat<(concat_vectors (v8i8 V64:$Rd),
4220 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
4221 (i32 8))))),
4222 (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
4223 V128:$Rn, V128:$Rm)>;
4224 def : Pat<(concat_vectors (v4i16 V64:$Rd),
4225 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
4226 (i32 16))))),
4227 (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
4228 V128:$Rn, V128:$Rm)>;
4229 def : Pat<(concat_vectors (v2i32 V64:$Rd),
4230 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
4231 (i32 32))))),
4232 (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
4233 V128:$Rn, V128:$Rm)>;
4236 def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
4237 (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
4238 def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
4239 (i32 16))))),
4240 (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
4241 def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
4242 (i32 32))))),
4243 (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
4244 def : Pat<(concat_vectors (v8i8 V64:$Rd),
4245 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
4246 (i32 8))))),
4247 (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
4248 V128:$Rn, V128:$Rm)>;
4249 def : Pat<(concat_vectors (v4i16 V64:$Rd),
4250 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
4251 (i32 16))))),
4252 (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
4253 V128:$Rn, V128:$Rm)>;
4254 def : Pat<(concat_vectors (v2i32 V64:$Rd),
4255 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
4256 (i32 32))))),
4257 (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
4258 V128:$Rn, V128:$Rm)>;
4260 //----------------------------------------------------------------------------
4261 // AdvSIMD bitwise extract from vector instruction.
4262 //----------------------------------------------------------------------------
4264 defm EXT : SIMDBitwiseExtract<"ext">;
4266 def AdjustExtImm : SDNodeXForm<imm, [{
4267 return CurDAG->getTargetConstant(8 + N->getZExtValue(), SDLoc(N), MVT::i32);
4268 }]>;
4269 multiclass ExtPat<ValueType VT64, ValueType VT128, int N> {
4270 def : Pat<(VT64 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
4271 (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
4272 def : Pat<(VT128 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
4273 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
4274 // We use EXT to handle extract_subvector to copy the upper 64-bits of a
4275 // 128-bit vector.
4276 def : Pat<(VT64 (extract_subvector V128:$Rn, (i64 N))),
4277 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
4278 // A 64-bit EXT of two halves of the same 128-bit register can be done as a
4279 // single 128-bit EXT.
4280 def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 0)),
4281 (extract_subvector V128:$Rn, (i64 N)),
4282 (i32 imm:$imm))),
4283 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, imm:$imm), dsub)>;
4284 // A 64-bit EXT of the high half of a 128-bit register can be done using a
4285 // 128-bit EXT of the whole register with an adjustment to the immediate. The
4286 // top half of the other operand will be unset, but that doesn't matter as it
4287 // will not be used.
4288 def : Pat<(VT64 (AArch64ext (extract_subvector V128:$Rn, (i64 N)),
4289 V64:$Rm,
4290 (i32 imm:$imm))),
4291 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn,
4292 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
4293 (AdjustExtImm imm:$imm)), dsub)>;
4294 }
4296 defm : ExtPat<v8i8, v16i8, 8>;
4297 defm : ExtPat<v4i16, v8i16, 4>;
4298 defm : ExtPat<v4f16, v8f16, 4>;
4299 defm : ExtPat<v2i32, v4i32, 2>;
4300 defm : ExtPat<v2f32, v4f32, 2>;
4301 defm : ExtPat<v1i64, v2i64, 1>;
4302 defm : ExtPat<v1f64, v2f64, 1>;
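// Worked example (illustrative only): with the ExtPat rules above, taking the
// high v8i8 half of a v16i8 value is a self-EXT by 8 bytes, roughly
// "ext v0.16b, v0.16b, v0.16b, #8" followed by reading the dsub subregister,
// instead of copying the elements one by one.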
4304 //----------------------------------------------------------------------------
4305 // AdvSIMD zip vector
4306 //----------------------------------------------------------------------------
4308 defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
4309 defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
4310 defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
4311 defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
4312 defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
4313 defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;
4315 //----------------------------------------------------------------------------
4316 // AdvSIMD TBL/TBX instructions
4317 //----------------------------------------------------------------------------
4319 defm TBL : SIMDTableLookup< 0, "tbl">;
4320 defm TBX : SIMDTableLookupTied<1, "tbx">;
4322 def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
4323 (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
4324 def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
4325 (TBLv16i8One V128:$Ri, V128:$Rn)>;
4327 def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
4328 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
4329 (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
4330 def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
4331 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
4332 (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;
4335 //----------------------------------------------------------------------------
4336 // AdvSIMD scalar CPY instruction
4337 //----------------------------------------------------------------------------
4339 defm CPY : SIMDScalarCPY<"cpy">;
4341 //----------------------------------------------------------------------------
4342 // AdvSIMD scalar pairwise instructions
4343 //----------------------------------------------------------------------------
4345 defm ADDP : SIMDPairwiseScalarD<0, 0b11011, "addp">;
4346 defm FADDP : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
4347 defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
4348 defm FMAXP : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
4349 defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
4350 defm FMINP : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;
4351 def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
4352 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
4353 def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
4354 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
4355 def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
4356 (FADDPv2i32p V64:$Rn)>;
4357 def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
4358 (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
4359 def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
4360 (FADDPv2i64p V128:$Rn)>;
4361 def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
4362 (FMAXNMPv2i32p V64:$Rn)>;
4363 def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
4364 (FMAXNMPv2i64p V128:$Rn)>;
4365 def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
4366 (FMAXPv2i32p V64:$Rn)>;
4367 def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
4368 (FMAXPv2i64p V128:$Rn)>;
4369 def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
4370 (FMINNMPv2i32p V64:$Rn)>;
4371 def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
4372 (FMINNMPv2i64p V128:$Rn)>;
4373 def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
4374 (FMINPv2i32p V64:$Rn)>;
4375 def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
4376 (FMINPv2i64p V128:$Rn)>;
4378 //----------------------------------------------------------------------------
4379 // AdvSIMD INS/DUP instructions
4380 //----------------------------------------------------------------------------
4382 def DUPv8i8gpr : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
4383 def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
4384 def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
4385 def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
4386 def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
4387 def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
4388 def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;
4390 def DUPv2i64lane : SIMDDup64FromElement;
4391 def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
4392 def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
4393 def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
4394 def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
4395 def DUPv8i8lane : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
4396 def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;
4398 // DUP from a 64-bit register to a 64-bit register is just a copy
4399 def : Pat<(v1i64 (AArch64dup (i64 GPR64:$Rn))),
4400 (COPY_TO_REGCLASS GPR64:$Rn, FPR64)>;
4401 def : Pat<(v1f64 (AArch64dup (f64 FPR64:$Rn))),
4402 (COPY_TO_REGCLASS FPR64:$Rn, FPR64)>;
4404 def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
4405 (v2f32 (DUPv2i32lane
4406 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
4407 (i64 0)))>;
4408 def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
4409 (v4f32 (DUPv4i32lane
4410 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
4411 (i64 0)))>;
4412 def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
4413 (v2f64 (DUPv2i64lane
4414 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
4415 (i64 0)))>;
4416 def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
4417 (v4f16 (DUPv4i16lane
4418 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
4419 (i64 0)))>;
4420 def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
4421 (v8f16 (DUPv8i16lane
4422 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
4423 (i64 0)))>;
4425 def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
4426 (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
4427 def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
4428 (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;
4430 def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
4431 (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
4432 def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
4433 (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
4434 def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
4435 (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;
4437 // If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
4438 // instruction even if the types don't match: we just have to remap the lane
4439 // carefully. N.b. this trick only applies to truncations.
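// For instance (illustrative, not from the original comment): a dup of the i8
// truncation of lane 1 of a v8i16 can still use DUPv8i8lane, provided the lane
// number is rescaled to the narrower element size (lane 1 of .8h corresponds
// to byte lane 2), which is what the VecIndex_x2/x4/x8 transforms below
// compute.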
4440 def VecIndex_x2 : SDNodeXForm<imm, [{
4441 return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
4442 }]>;
4443 def VecIndex_x4 : SDNodeXForm<imm, [{
4444 return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
4445 }]>;
4446 def VecIndex_x8 : SDNodeXForm<imm, [{
4447 return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
4448 }]>;
4450 multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
4451 ValueType Src128VT, ValueType ScalVT,
4452 Instruction DUP, SDNodeXForm IdxXFORM> {
4453 def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
4454 imm:$idx)))),
4455 (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
4457 def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
4458 imm:$idx)))),
4459 (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
4460 }
4462 defm : DUPWithTruncPats<v8i8, v4i16, v8i16, i32, DUPv8i8lane, VecIndex_x2>;
4463 defm : DUPWithTruncPats<v8i8, v2i32, v4i32, i32, DUPv8i8lane, VecIndex_x4>;
4464 defm : DUPWithTruncPats<v4i16, v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;
4466 defm : DUPWithTruncPats<v16i8, v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
4467 defm : DUPWithTruncPats<v16i8, v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
4468 defm : DUPWithTruncPats<v8i16, v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
4470 multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
4471 SDNodeXForm IdxXFORM> {
4472 def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
4473 imm:$idx))))),
4474 (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
4476 def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
4477 imm:$idx))))),
4478 (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
4479 }
4481 defm : DUPWithTrunci64Pats<v8i8, DUPv8i8lane, VecIndex_x8>;
4482 defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane, VecIndex_x4>;
4483 defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane, VecIndex_x2>;
4485 defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
4486 defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
4487 defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;
4489 // SMOV and UMOV definitions, with some extra patterns for convenience
4490 defm SMOV : SMov;
4491 defm UMOV : UMov;
4493 def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
4494 (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
4495 def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
4496 (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
4497 def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
4498 (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
4499 def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
4500 (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
4501 def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
4502 (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
4503 def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
4504 (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;
4506 def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
4507 VectorIndexB:$idx)))), i8),
4508 (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
4509 def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
4510 VectorIndexH:$idx)))), i16),
4511 (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
4513 // Extracting i8 or i16 elements will have the zero-extend transformed to
4514 // an 'and' mask by type legalization since neither i8 nor i16 are legal types
4515 // for AArch64. Match these patterns here since UMOV already zeroes out the high
4516 // bits of the destination register.
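// A small C sketch of the shape being matched (illustrative; the function name
// is invented):
//   #include <arm_neon.h>
//   #include <stdint.h>
//   uint32_t byte3(uint8x16_t v) { return vgetq_lane_u8(v, 3); }
// After legalization the i8 zero-extend appears as an extract followed by an
// 'and' with 0xff; umov already zeroes the high bits of the destination, so
// the patterns below fold the mask away.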
4517 def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
4518 (i32 0xff)),
4519 (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
4520 def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
4521 (i32 0xffff)),
4522 (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
4524 defm INS : SIMDIns;
4526 def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
4527 (SUBREG_TO_REG (i32 0),
4528 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
4529 def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
4530 (SUBREG_TO_REG (i32 0),
4531 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
4533 def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
4534 (SUBREG_TO_REG (i32 0),
4535 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
4536 def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
4537 (SUBREG_TO_REG (i32 0),
4538 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
4540 def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
4541 (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
4542 def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
4543 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
4545 def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
4546 (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
4547 (i32 FPR32:$Rn), ssub))>;
4548 def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
4549 (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
4550 (i32 FPR32:$Rn), ssub))>;
4552 def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
4553 (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
4554 (i64 FPR64:$Rn), dsub))>;
4556 def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
4557 (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
4558 def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
4559 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
4561 def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
4562 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
4563 def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
4564 (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
4566 def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
4567 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;
4569 def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
4570 (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
4571 (EXTRACT_SUBREG
4572 (INSvi16lane
4573 (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
4574 VectorIndexS:$imm,
4575 (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
4576 (i64 0)),
4577 dsub)>;
4579 def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
4580 (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
4581 (INSvi16lane
4582 V128:$Rn, VectorIndexH:$imm,
4583 (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
4584 (i64 0))>;
4586 def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
4587 (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
4588 (EXTRACT_SUBREG
4589 (INSvi32lane
4590 (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
4591 VectorIndexS:$imm,
4592 (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
4593 (i64 0)),
4594 dsub)>;
4595 def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
4596 (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
4597 (INSvi32lane
4598 V128:$Rn, VectorIndexS:$imm,
4599 (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
4600 (i64 0))>;
4601 def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
4602 (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
4603 (INSvi64lane
4604 V128:$Rn, VectorIndexD:$imm,
4605 (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
4606 (i64 0))>;
4608 // Copy an element at a constant index in one vector into a constant indexed
4609 // element of another.
4610 // FIXME refactor to a shared class/dev parameterized on vector type, vector
4611 // index type and INS extension
4612 def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
4613 (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
4614 VectorIndexB:$idx2)),
4615 (v16i8 (INSvi8lane
4616 V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
4617 )>;
4618 def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
4619 (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
4620 VectorIndexH:$idx2)),
4621 (v8i16 (INSvi16lane
4622 V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
4623 )>;
4624 def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
4625 (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
4626 VectorIndexS:$idx2)),
4627 (v4i32 (INSvi32lane
4628 V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
4629 )>;
4630 def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
4631 (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
4632 VectorIndexD:$idx2)),
4633 (v2i64 (INSvi64lane
4634 V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
4635 )>;
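// Usage-level note (hedged, not from the original): this is the lane-to-lane
// copy described by the ACLE vcopyq_laneq_* intrinsics, e.g. copying lane 2 of
// one v4i32 into lane 0 of another, which maps onto a single INS (element)
// instruction instead of an extract followed by an insert.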
4637 multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
4638 ValueType VTScal, Instruction INS> {
4639 def : Pat<(VT128 (vector_insert V128:$src,
4640 (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
4641 imm:$Immd)),
4642 (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;
4644 def : Pat<(VT128 (vector_insert V128:$src,
4645 (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
4646 imm:$Immd)),
4647 (INS V128:$src, imm:$Immd,
4648 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;
4650 def : Pat<(VT64 (vector_insert V64:$src,
4651 (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
4652 imm:$Immd)),
4653 (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
4654 imm:$Immd, V128:$Rn, imm:$Immn),
4655 dsub)>;
4657 def : Pat<(VT64 (vector_insert V64:$src,
4658 (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
4659 imm:$Immd)),
4660 (EXTRACT_SUBREG
4661 (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
4662 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
4663 dsub)>;
4664 }
4666 defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
4667 defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
4668 defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
4671 // Floating point vector extractions are codegen'd as either a sequence of
4672 // subregister extractions, or a MOV (aka CPY here, alias for DUP) if
4673 // the lane number is anything other than zero.
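// Concretely (illustrative only): extracting lane 0 of a v4f32 is just a read
// of the ssub subregister and needs no instruction, while extracting lane 2
// becomes "mov s0, v0.s[2]", i.e. the CPYi32 form used below.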
4674 def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
4675 (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
4676 def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
4677 (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
4678 def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
4679 (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
4681 def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
4682 (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
4683 def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
4684 (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
4685 def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
4686 (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
4688 // All concat_vectors operations are canonicalised to act on i64 vectors for
4689 // AArch64. In the general case we need an instruction, which had just as well be
4690 // INS.
4691 class ConcatPat<ValueType DstTy, ValueType SrcTy>
4692 : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
4693 (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
4694 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;
4696 def : ConcatPat<v2i64, v1i64>;
4697 def : ConcatPat<v2f64, v1f64>;
4698 def : ConcatPat<v4i32, v2i32>;
4699 def : ConcatPat<v4f32, v2f32>;
4700 def : ConcatPat<v8i16, v4i16>;
4701 def : ConcatPat<v8f16, v4f16>;
4702 def : ConcatPat<v16i8, v8i8>;
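// Usage note (sketch, not from the original file): a NEON-level vcombine such
// as vcombine_f32(lo, hi) takes this ConcatPat route: both halves are placed
// in the dsub subregister of 128-bit temporaries and a single INS of element 1
// merges them into one register.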
4704 // If the high lanes are undef, though, we can just ignore them:
4705 class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
4706 : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
4707 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;
4709 def : ConcatUndefPat<v2i64, v1i64>;
4710 def : ConcatUndefPat<v2f64, v1f64>;
4711 def : ConcatUndefPat<v4i32, v2i32>;
4712 def : ConcatUndefPat<v4f32, v2f32>;
4713 def : ConcatUndefPat<v8i16, v4i16>;
4714 def : ConcatUndefPat<v16i8, v8i8>;
4716 //----------------------------------------------------------------------------
4717 // AdvSIMD across lanes instructions
4718 //----------------------------------------------------------------------------
4720 defm ADDV : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
4721 defm SMAXV : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
4722 defm SMINV : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
4723 defm UMAXV : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
4724 defm UMINV : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
4725 defm SADDLV : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
4726 defm UADDLV : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
4727 defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
4728 defm FMAXV : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
4729 defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
4730 defm FMINV : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;
4732 // Patterns for across-vector intrinsics, that have a node equivalent, that
4733 // returns a vector (with only the low lane defined) instead of a scalar.
4734 // In effect, opNode is the same as (scalar_to_vector (IntNode)).
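// In other words (illustrative): for a reduction like vaddv_u8, the node form
// yields a v8i8 whose lane 0 holds the result; the multiclass below matches
// either that whole-vector form directly or the vector_extract of lane 0 that
// usually follows it, so a single across-lanes instruction is emitted in both
// cases.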
4735 multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
4736 SDPatternOperator opNode> {
4737 // If a lane instruction caught the vector_extract around opNode, we can
4738 // directly match the latter to the instruction.
4739 def : Pat<(v8i8 (opNode V64:$Rn)),
4740 (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
4741 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
4742 def : Pat<(v16i8 (opNode V128:$Rn)),
4743 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4744 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
4745 def : Pat<(v4i16 (opNode V64:$Rn)),
4746 (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
4747 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
4748 def : Pat<(v8i16 (opNode V128:$Rn)),
4749 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
4750 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
4751 def : Pat<(v4i32 (opNode V128:$Rn)),
4752 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
4753 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;
4756 // If none did, fallback to the explicit patterns, consuming the vector_extract.
4757 def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
4758 (i32 0)), (i64 0))),
4759 (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
4760 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
4761 bsub), ssub)>;
4762 def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
4763 (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4764 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
4765 bsub), ssub)>;
4766 def : Pat<(i32 (vector_extract (insert_subvector undef,
4767 (v4i16 (opNode V64:$Rn)), (i32 0)), (i64 0))),
4768 (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
4769 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
4770 hsub), ssub)>;
4771 def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
4772 (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
4773 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
4774 hsub), ssub)>;
4775 def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
4776 (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
4777 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
4778 ssub), ssub)>;
4779 }
4782 multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
4783 SDPatternOperator opNode>
4784 : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
4785 // If there is a sign extension after this intrinsic, consume it as smov already
4787 def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
4788 (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), i8)),
4789 (i32 (SMOVvi8to32
4790 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4791 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
4792 (i64 0)))>;
4793 def : Pat<(i32 (sext_inreg (i32 (vector_extract
4794 (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
4795 (i32 (SMOVvi8to32
4796 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4797 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
4798 (i64 0)))>;
4799 def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
4800 (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), i16)),
4801 (i32 (SMOVvi16to32
4802 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4803 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
4804 (i64 0)))>;
4805 def : Pat<(i32 (sext_inreg (i32 (vector_extract
4806 (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
4807 (i32 (SMOVvi16to32
4808 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4809 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
4810 (i64 0)))>;
4811 }
4813 multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
4814 SDPatternOperator opNode>
4815 : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
4816 // If there is a masking operation keeping only what has been actually
4817 // generated, consume it.
4818 def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
4819 (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), maski8_or_more)),
4820 (i32 (EXTRACT_SUBREG
4821 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4822 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
4823 ssub))>;
4824 def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
4825 maski8_or_more)),
4826 (i32 (EXTRACT_SUBREG
4827 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4828 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
4829 ssub))>;
4830 def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
4831 (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), maski16_or_more)),
4832 (i32 (EXTRACT_SUBREG
4833 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4834 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
4835 ssub))>;
4836 def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
4837 maski16_or_more)),
4838 (i32 (EXTRACT_SUBREG
4839 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4840 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
4841 ssub))>;
4842 }
4844 defm : SIMDAcrossLanesSignedIntrinsic<"ADDV", AArch64saddv>;
4845 // vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
4846 def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
4847 (ADDPv2i32 V64:$Rn, V64:$Rn)>;
4849 defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
4850 // vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
4851 def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
4852 (ADDPv2i32 V64:$Rn, V64:$Rn)>;
4854 defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
4855 def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
4856 (SMAXPv2i32 V64:$Rn, V64:$Rn)>;
4858 defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
4859 def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
4860 (SMINPv2i32 V64:$Rn, V64:$Rn)>;
4862 defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
4863 def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
4864 (UMAXPv2i32 V64:$Rn, V64:$Rn)>;
4866 defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
4867 def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
4868 (UMINPv2i32 V64:$Rn, V64:$Rn)>;
4870 multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
4871 def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
4872 (i32 (SMOVvi16to32
4873 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4874 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
4875 (i64 0)))>;
4876 def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
4877 (i32 (SMOVvi16to32
4878 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4879 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
4880 (i64 0)))>;
4882 def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
4883 (i32 (EXTRACT_SUBREG
4884 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4885 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
4886 ssub))>;
4887 def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
4888 (i32 (EXTRACT_SUBREG
4889 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4890 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
4891 ssub))>;
4893 def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
4894 (i64 (EXTRACT_SUBREG
4895 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4896 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
4897 dsub))>;
4898 }
4900 multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
4901 Intrinsic intOp> {
4902 def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
4903 (i32 (EXTRACT_SUBREG
4904 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4905 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
4906 ssub))>;
4907 def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
4908 (i32 (EXTRACT_SUBREG
4909 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4910 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
4911 ssub))>;
4913 def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
4914 (i32 (EXTRACT_SUBREG
4915 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4916 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
4917 ssub))>;
4918 def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
4919 (i32 (EXTRACT_SUBREG
4920 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4921 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
4922 ssub))>;
4924 def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
4925 (i64 (EXTRACT_SUBREG
4926 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4927 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
4928 dsub))>;
4929 }
4931 defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
4932 defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;
4934 // The vaddlv_s32 intrinsic gets mapped to SADDLP.
4935 def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
4936 (i64 (EXTRACT_SUBREG
4937 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4938 (SADDLPv2i32_v1i64 V64:$Rn), dsub),
4939 dsub))>;
4940 // The vaddlv_u32 intrinsic gets mapped to UADDLP.
4941 def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
4942 (i64 (EXTRACT_SUBREG
4943 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4944 (UADDLPv2i32_v1i64 V64:$Rn), dsub),
4945 dsub))>;
4947 //------------------------------------------------------------------------------
4948 // AdvSIMD modified immediate instructions
4949 //------------------------------------------------------------------------------
4952 defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
4954 defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;
4956 def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd, imm0_255:$imm, 0)>;
4957 def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
4958 def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd, imm0_255:$imm, 0)>;
4959 def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
4961 def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd, imm0_255:$imm, 0)>;
4962 def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
4963 def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd, imm0_255:$imm, 0)>;
4964 def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
4966 def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd, imm0_255:$imm, 0)>;
4967 def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
4968 def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd, imm0_255:$imm, 0)>;
4969 def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
4971 def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd, imm0_255:$imm, 0)>;
4972 def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
4973 def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd, imm0_255:$imm, 0)>;
4974 def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
4977 def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
4979 [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4980 def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64, fpimm8,
4982 [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4983 def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
4985 [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4986 let Predicates = [HasNEON, HasFullFP16] in {
4987 def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64, fpimm8,
4989 [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4990 def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
4992 [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4993 } // Predicates = [HasNEON, HasFullFP16]
4997 // EDIT byte mask: scalar
4998 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
4999 def MOVID : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
5000 [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
5001 // The movi_edit node has the immediate value already encoded, so we use
5002 // a plain imm0_255 here.
5003 def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
5004 (MOVID imm0_255:$shift)>;
5006 // EDIT byte mask: 2d
5008 // The movi_edit node has the immediate value already encoded, so we use
5009 // a plain imm0_255 in the pattern
5010 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
5011 def MOVIv2d_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
5014 [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;
5016 def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
5017 def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
5018 def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
5019 def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;
5021 def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
5022 def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
5023 def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
5024 def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;
5026 // Set 64-bit vectors to all 0/1 by extracting from a 128-bit register as the
5027 // extract is free and this gives better MachineCSE results.
5028 def : Pat<(v1i64 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
5029 def : Pat<(v2i32 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
5030 def : Pat<(v4i16 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
5031 def : Pat<(v8i8 immAllZerosV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 0)), dsub)>;
5033 def : Pat<(v1i64 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
5034 def : Pat<(v2i32 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
5035 def : Pat<(v4i16 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
5036 def : Pat<(v8i8 immAllOnesV), (EXTRACT_SUBREG (MOVIv2d_ns (i32 255)), dsub)>;
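// Illustrative examples (not from the original source): with the patterns
// above, a v4i32 all-zeros constant is materialized as
//   movi v0.2d, #0000000000000000
// and a v8i8 all-ones constant as
//   movi v0.2d, #0xffffffffffffffff
// with the 64-bit result simply read from the low half (dsub) of the Q register.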
5038 // EDIT per word & halfword: 2s, 4h, 4s, & 8h
5039 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
5040 defm MOVI : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;
5042 def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
5043 def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
5044 def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
5045 def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
5047 def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
5048 def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
5049 def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
5050 def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
5052 def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
5053 (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
5054 def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
5055 (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
5056 def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
5057 (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
5058 def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
5059 (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;
5061 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
5062 // EDIT per word: 2s & 4s with MSL shifter
5063 def MOVIv2s_msl : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
5064 [(set (v2i32 V64:$Rd),
5065 (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
5066 def MOVIv4s_msl : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
5067 [(set (v4i32 V128:$Rd),
5068 (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
5070 // Per byte: 8b & 16b
5071 def MOVIv8b_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64, imm0_255,
5073 [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;
5075 def MOVIv16b_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
5077 [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
5082 // EDIT per word & halfword: 2s, 4h, 4s, & 8h
5083 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
5084 defm MVNI : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;
5086 def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
5087 def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
5088 def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
5089 def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
5091 def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
5092 def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
5093 def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
5094 def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
5096 def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
5097 (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
5098 def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
5099 (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
5100 def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
5101 (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
5102 def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
5103 (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;
5105 // EDIT per word: 2s & 4s with MSL shifter
5106 let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
5107 def MVNIv2s_msl : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
5108 [(set (v2i32 V64:$Rd),
5109 (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
5110 def MVNIv4s_msl : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
5111 [(set (v4i32 V128:$Rd),
5112 (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
5115 //----------------------------------------------------------------------------
5116 // AdvSIMD indexed element
5117 //----------------------------------------------------------------------------
5119 let hasSideEffects = 0 in {
5120 defm FMLA : SIMDFPIndexedTied<0, 0b0001, "fmla">;
5121 defm FMLS : SIMDFPIndexedTied<0, 0b0101, "fmls">;
5124 // NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
5125 // instruction expects the addend first, while the intrinsic expects it last.
5127 // On the other hand, there are quite a few valid combinatorial options due to
5128 // the commutativity of multiplication and the fact that (-x) * y = x * (-y).
5129 defm : SIMDFPIndexedTiedPatterns<"FMLA",
5130 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
5131 defm : SIMDFPIndexedTiedPatterns<"FMLA",
5132 TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;
5134 defm : SIMDFPIndexedTiedPatterns<"FMLS",
5135 TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
5136 defm : SIMDFPIndexedTiedPatterns<"FMLS",
5137 TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
5138 defm : SIMDFPIndexedTiedPatterns<"FMLS",
5139 TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
5140 defm : SIMDFPIndexedTiedPatterns<"FMLS",
5141 TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
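// Illustrative example (assumed, not from the original source): a by-lane
// (fma x, y_lane, acc) maps to "fmla v0.4s, v1.4s, v2.s[1]" with the
// accumulator in the tied destination register; the four FMLS TriOpFrags above
// simply enumerate the positions where the fneg can appear after reassociation.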
5143 multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
5144 // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
5145 // and DUP scalar.
5146 def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
5147 (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
5148 VectorIndexS:$idx))),
5149 (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
5150 def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
5151 (v2f32 (AArch64duplane32
5152 (v4f32 (insert_subvector undef,
5153 (v2f32 (fneg V64:$Rm)),
5155 VectorIndexS:$idx)))),
5156 (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
5157 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
5158 VectorIndexS:$idx)>;
5159 def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
5160 (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
5161 (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
5162 (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
5164 // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
5165 // and DUP scalar.
5166 def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
5167 (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
5168 VectorIndexS:$idx))),
5169 (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
5170 VectorIndexS:$idx)>;
5171 def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
5172 (v4f32 (AArch64duplane32
5173 (v4f32 (insert_subvector undef,
5174 (v2f32 (fneg V64:$Rm)),
5176 VectorIndexS:$idx)))),
5177 (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
5178 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
5179 VectorIndexS:$idx)>;
5180 def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
5181 (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
5182 (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
5183 (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
5185 // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
5186 // (DUPLANE from 64-bit would be trivial).
5187 def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
5188 (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
5189 VectorIndexD:$idx))),
5190 (FMLSv2i64_indexed
5191 V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
5192 def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
5193 (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
5194 (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
5195 (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;
5197 // 2 variants for 32-bit scalar version: extract from .2s or from .4s
5198 def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
5199 (vector_extract (v4f32 (fneg V128:$Rm)),
5200 VectorIndexS:$idx))),
5201 (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
5202 V128:$Rm, VectorIndexS:$idx)>;
5203 def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
5204 (vector_extract (v4f32 (insert_subvector undef,
5205 (v2f32 (fneg V64:$Rm)),
5207 VectorIndexS:$idx))),
5208 (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
5209 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;
5211 // 1 variant for 64-bit scalar version: extract from .1d or from .2d
5212 def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
5213 (vector_extract (v2f64 (fneg V128:$Rm)),
5214 VectorIndexS:$idx))),
5215 (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
5216 V128:$Rm, VectorIndexS:$idx)>;
5219 defm : FMLSIndexedAfterNegPatterns<
5220 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
5221 defm : FMLSIndexedAfterNegPatterns<
5222 TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;
5224 defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
5225 defm FMUL : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;
5227 def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
5228 (FMULv2i32_indexed V64:$Rn,
5229 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
5231 def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
5232 (FMULv4i32_indexed V128:$Rn,
5233 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
5235 def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
5236 (FMULv2i64_indexed V128:$Rn,
5237 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
5240 defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
5241 defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
5242 defm MLA : SIMDVectorIndexedHSTied<1, 0b0000, "mla",
5243 TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))>>;
5244 defm MLS : SIMDVectorIndexedHSTied<1, 0b0100, "mls",
5245 TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))>>;
5246 defm MUL : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
5247 defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
5248 TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
5249 defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
5250 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
5251 defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
5252 int_aarch64_neon_smull>;
5253 defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
5254 int_aarch64_neon_sqadd>;
5255 defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
5256 int_aarch64_neon_sqsub>;
5257 defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
5258 int_aarch64_neon_sqadd>;
5259 defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
5260 int_aarch64_neon_sqsub>;
5261 defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
5262 defm UMLAL : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
5263 TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
5264 defm UMLSL : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
5265 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
5266 defm UMULL : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
5267 int_aarch64_neon_umull>;
5269 // A scalar sqdmull with the second operand being a vector lane can be
5270 // handled directly with the indexed instruction encoding.
5271 def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
5272 (vector_extract (v4i32 V128:$Vm),
5273 VectorIndexS:$idx)),
5274 (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
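// e.g. (illustrative, assuming ACLE naming): vqdmulls_laneq_s32(a, v, 1)
// should select via the pattern above to "sqdmull d0, s1, v2.s[1]" instead of
// first extracting the lane into a scalar register.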
5276 //----------------------------------------------------------------------------
5277 // AdvSIMD scalar shift instructions
5278 //----------------------------------------------------------------------------
5279 defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
5280 defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
5281 defm SCVTF : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
5282 defm UCVTF : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
5283 // Codegen patterns for the above. We don't put these directly on the
5284 // instructions because TableGen's type inference can't handle the truth.
5285 // Having the same base pattern for fp <--> int totally freaks it out.
5286 def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
5287 (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
5288 def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
5289 (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
5290 def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
5291 (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
5292 def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
5293 (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
5294 def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
5296 (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
5297 def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
5299 (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
5300 def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
5301 (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
5302 def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
5303 (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
5304 def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
5306 (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
5307 def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
5308 (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
5309 def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
5311 (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
5312 def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
5313 (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
5315 // Patterns for FP16 intrinsics - these require a register copy to/from FPR32 since i16 is not a legal type.
5317 def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 (sext_inreg FPR32:$Rn, i16)), vecshiftR16:$imm)),
5318 (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
5319 def : Pat<(f16 (int_aarch64_neon_vcvtfxs2fp (i32 FPR32:$Rn), vecshiftR16:$imm)),
5320 (SCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
5321 def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp
5322 (and FPR32:$Rn, (i32 65535)),
5324 (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
5325 def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR16:$imm)),
5326 (UCVTFh (EXTRACT_SUBREG FPR32:$Rn, hsub), vecshiftR16:$imm)>;
5327 def : Pat<(f16 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR16:$imm)),
5328 (UCVTFh (EXTRACT_SUBREG FPR64:$Rn, hsub), vecshiftR16:$imm)>;
5329 def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR32:$imm)),
5331 (i32 (IMPLICIT_DEF)),
5332 (FCVTZSh FPR16:$Rn, vecshiftR32:$imm),
5334 def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f16 FPR16:$Rn), vecshiftR64:$imm)),
5336 (i64 (IMPLICIT_DEF)),
5337 (FCVTZSh FPR16:$Rn, vecshiftR64:$imm),
5339 def : Pat<(i32 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR32:$imm)),
5341 (i32 (IMPLICIT_DEF)),
5342 (FCVTZUh FPR16:$Rn, vecshiftR32:$imm),
5344 def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f16 FPR16:$Rn), vecshiftR64:$imm)),
5346 (i64 (IMPLICIT_DEF)),
5347 (FCVTZUh FPR16:$Rn, vecshiftR64:$imm),
5350 defm SHL : SIMDScalarLShiftD< 0, 0b01010, "shl", AArch64vshl>;
5351 defm SLI : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
5352 defm SQRSHRN : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
5353 int_aarch64_neon_sqrshrn>;
5354 defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
5355 int_aarch64_neon_sqrshrun>;
5356 defm SQSHLU : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
5357 defm SQSHL : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
5358 defm SQSHRN : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
5359 int_aarch64_neon_sqshrn>;
5360 defm SQSHRUN : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
5361 int_aarch64_neon_sqshrun>;
5362 defm SRI : SIMDScalarRShiftDTied< 1, 0b01000, "sri">;
5363 defm SRSHR : SIMDScalarRShiftD< 0, 0b00100, "srshr", AArch64srshri>;
5364 defm SRSRA : SIMDScalarRShiftDTied< 0, 0b00110, "srsra",
5365 TriOpFrag<(add node:$LHS,
5366 (AArch64srshri node:$MHS, node:$RHS))>>;
5367 defm SSHR : SIMDScalarRShiftD< 0, 0b00000, "sshr", AArch64vashr>;
5368 defm SSRA : SIMDScalarRShiftDTied< 0, 0b00010, "ssra",
5369 TriOpFrag<(add node:$LHS,
5370 (AArch64vashr node:$MHS, node:$RHS))>>;
5371 defm UQRSHRN : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
5372 int_aarch64_neon_uqrshrn>;
5373 defm UQSHL : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
5374 defm UQSHRN : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
5375 int_aarch64_neon_uqshrn>;
5376 defm URSHR : SIMDScalarRShiftD< 1, 0b00100, "urshr", AArch64urshri>;
5377 defm URSRA : SIMDScalarRShiftDTied< 1, 0b00110, "ursra",
5378 TriOpFrag<(add node:$LHS,
5379 (AArch64urshri node:$MHS, node:$RHS))>>;
5380 defm USHR : SIMDScalarRShiftD< 1, 0b00000, "ushr", AArch64vlshr>;
5381 defm USRA : SIMDScalarRShiftDTied< 1, 0b00010, "usra",
5382 TriOpFrag<(add node:$LHS,
5383 (AArch64vlshr node:$MHS, node:$RHS))>>;
5385 //----------------------------------------------------------------------------
5386 // AdvSIMD vector shift instructions
5387 //----------------------------------------------------------------------------
5388 defm FCVTZS:SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
5389 defm FCVTZU:SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
5390 defm SCVTF: SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
5391 int_aarch64_neon_vcvtfxs2fp>;
5392 defm RSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
5393 int_aarch64_neon_rshrn>;
5394 defm SHL : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
5395 defm SHRN : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
5396 BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
5397 defm SLI : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", int_aarch64_neon_vsli>;
5398 def : Pat<(v1i64 (int_aarch64_neon_vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
5399 (i32 vecshiftL64:$imm))),
5400 (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
5401 defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
5402 int_aarch64_neon_sqrshrn>;
5403 defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
5404 int_aarch64_neon_sqrshrun>;
5405 defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
5406 defm SQSHL : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
5407 defm SQSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
5408 int_aarch64_neon_sqshrn>;
5409 defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
5410 int_aarch64_neon_sqshrun>;
5411 defm SRI : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", int_aarch64_neon_vsri>;
5412 def : Pat<(v1i64 (int_aarch64_neon_vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
5413 (i32 vecshiftR64:$imm))),
5414 (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
5415 defm SRSHR : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
5416 defm SRSRA : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
5417 TriOpFrag<(add node:$LHS,
5418 (AArch64srshri node:$MHS, node:$RHS))> >;
5419 defm SSHLL : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
5420 BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;
5422 defm SSHR : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
5423 defm SSRA : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
5424 TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
5425 defm UCVTF : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
5426 int_aarch64_neon_vcvtfxu2fp>;
5427 defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
5428 int_aarch64_neon_uqrshrn>;
5429 defm UQSHL : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
5430 defm UQSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
5431 int_aarch64_neon_uqshrn>;
5432 defm URSHR : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
5433 defm URSRA : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
5434 TriOpFrag<(add node:$LHS,
5435 (AArch64urshri node:$MHS, node:$RHS))> >;
5436 defm USHLL : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
5437 BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
5438 defm USHR : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
5439 defm USRA : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
5440 TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
5442 // SHRN patterns for when a logical right shift was used instead of arithmetic
5443 // (the immediate guarantees no sign bits actually end up in the result so it
5444 // doesn't matter which shift is used).
5445 def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
5446 (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
5447 def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
5448 (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
5449 def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
5450 (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;
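// e.g. (illustrative): (v8i8 (trunc (AArch64vlshr (v8i16 x), 5))) can still use
//   shrn v0.8b, v0.8h, #5
// because with a shift amount of 1..8 only original element bits reach the low
// byte, so a logical and an arithmetic shift give the same truncated result.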
5452 def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
5453 (trunc (AArch64vlshr (v8i16 V128:$Rn),
5454 vecshiftR16Narrow:$imm)))),
5455 (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
5456 V128:$Rn, vecshiftR16Narrow:$imm)>;
5457 def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
5458 (trunc (AArch64vlshr (v4i32 V128:$Rn),
5459 vecshiftR32Narrow:$imm)))),
5460 (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
5461 V128:$Rn, vecshiftR32Narrow:$imm)>;
5462 def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
5463 (trunc (AArch64vlshr (v2i64 V128:$Rn),
5464 vecshiftR64Narrow:$imm)))),
5465 (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
5466 V128:$Rn, vecshiftR64Narrow:$imm)>;
5468 // Vector sign and zero extensions are implemented with SSHLL and USHLL.
5469 // Anyexts are implemented as zexts.
5470 def : Pat<(v8i16 (sext (v8i8 V64:$Rn))), (SSHLLv8i8_shift V64:$Rn, (i32 0))>;
5471 def : Pat<(v8i16 (zext (v8i8 V64:$Rn))), (USHLLv8i8_shift V64:$Rn, (i32 0))>;
5472 def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))), (USHLLv8i8_shift V64:$Rn, (i32 0))>;
5473 def : Pat<(v4i32 (sext (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
5474 def : Pat<(v4i32 (zext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
5475 def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
5476 def : Pat<(v2i64 (sext (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
5477 def : Pat<(v2i64 (zext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
5478 def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
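// e.g. (illustrative): (v4i32 (zext (v4i16 V64:$Rn))) becomes
//   ushll v0.4s, v0.4h, #0
// which the assembler also accepts as "uxtl v0.4s, v0.4h" (see the aliases below).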
5479 // Also match an extend from the upper half of a 128-bit source register.
5480 def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
5481 (USHLLv16i8_shift V128:$Rn, (i32 0))>;
5482 def : Pat<(v8i16 (zext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
5483 (USHLLv16i8_shift V128:$Rn, (i32 0))>;
5484 def : Pat<(v8i16 (sext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
5485 (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
5486 def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
5487 (USHLLv8i16_shift V128:$Rn, (i32 0))>;
5488 def : Pat<(v4i32 (zext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
5489 (USHLLv8i16_shift V128:$Rn, (i32 0))>;
5490 def : Pat<(v4i32 (sext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
5491 (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
5492 def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
5493 (USHLLv4i32_shift V128:$Rn, (i32 0))>;
5494 def : Pat<(v2i64 (zext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
5495 (USHLLv4i32_shift V128:$Rn, (i32 0))>;
5496 def : Pat<(v2i64 (sext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
5497 (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
5499 // Vector shift sxtl aliases
5500 def : InstAlias<"sxtl.8h $dst, $src1",
5501 (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
5502 def : InstAlias<"sxtl $dst.8h, $src1.8b",
5503 (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
5504 def : InstAlias<"sxtl.4s $dst, $src1",
5505 (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
5506 def : InstAlias<"sxtl $dst.4s, $src1.4h",
5507 (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
5508 def : InstAlias<"sxtl.2d $dst, $src1",
5509 (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
5510 def : InstAlias<"sxtl $dst.2d, $src1.2s",
5511 (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
5513 // Vector shift sxtl2 aliases
5514 def : InstAlias<"sxtl2.8h $dst, $src1",
5515 (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
5516 def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
5517 (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
5518 def : InstAlias<"sxtl2.4s $dst, $src1",
5519 (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
5520 def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
5521 (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
5522 def : InstAlias<"sxtl2.2d $dst, $src1",
5523 (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
5524 def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
5525 (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
5527 // Vector shift uxtl aliases
5528 def : InstAlias<"uxtl.8h $dst, $src1",
5529 (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
5530 def : InstAlias<"uxtl $dst.8h, $src1.8b",
5531 (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
5532 def : InstAlias<"uxtl.4s $dst, $src1",
5533 (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
5534 def : InstAlias<"uxtl $dst.4s, $src1.4h",
5535 (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
5536 def : InstAlias<"uxtl.2d $dst, $src1",
5537 (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
5538 def : InstAlias<"uxtl $dst.2d, $src1.2s",
5539 (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
5541 // Vector shift uxtl2 aliases
5542 def : InstAlias<"uxtl2.8h $dst, $src1",
5543 (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
5544 def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
5545 (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
5546 def : InstAlias<"uxtl2.4s $dst, $src1",
5547 (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
5548 def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
5549 (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
5550 def : InstAlias<"uxtl2.2d $dst, $src1",
5551 (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
5552 def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
5553 (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
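// Illustrative note (assumption about intent): the sxtl/uxtl aliases above only
// affect assembly parsing and printing; e.g. "uxtl2 v0.2d, v1.4s" is encoded
// exactly as "ushll2 v0.2d, v1.4s, #0".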
5555 // If an integer is about to be converted to a floating point value,
5556 // just load it on the floating point unit.
5557 // These patterns are more complex because floating point loads do not
5558 // support sign extension.
5559 // The sign extension has to be explicitly added and is only supported for
5560 // one step: byte-to-half, half-to-word, word-to-doubleword.
5561 // SCVTF GPR -> FPR is 9 cycles.
5562 // SCVTF FPR -> FPR is 4 cycles.
5563 // (sign extension with lengthening) SXTL FPR -> FPR is 2 cycles.
5564 // Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
5565 // and still be faster.
5566 // However, this is not good for code size.
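// Illustrative trade-off under the (approximate, core-specific) latencies
// above, for an i8 load feeding a float convert:
//   GPR path: ldrsb w8, [x0] ; scvtf s0, w8                     (9-cycle form)
//   FPR path: ldr b0, [x0] ; sshll v0.8h, v0.8b, #0 ;
//             sshll v0.4s, v0.4h, #0 ; scvtf s0, s0             (2+2+4 cycles)
// which is why the patterns below are only used NotForCodeSize (and, for the
// byte case, UseAlternateSExtLoadCVTF32).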
5567 // 8-bits -> float. 2 sizes step-up.
5568 class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
5569 : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
5570 (SCVTFv1i32 (f32 (EXTRACT_SUBREG
5575 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5582 Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
5584 def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
5585 (LDRBroW GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
5586 def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
5587 (LDRBroX GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
5588 def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
5589 (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
5590 def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
5591 (LDURBi GPR64sp:$Rn, simm9:$offset)>;
5593 // 16-bits -> float. 1 size step-up.
5594 class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
5595 : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
5596 (SCVTFv1i32 (f32 (EXTRACT_SUBREG
5598 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5602 ssub)))>, Requires<[NotForCodeSize]>;
5604 def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
5605 (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
5606 def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
5607 (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
5608 def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
5609 (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
5610 def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
5611 (LDURHi GPR64sp:$Rn, simm9:$offset)>;
5613 // 32-bit to 32-bit conversions are handled in the target-specific dag combine:
5614 // performIntToFpCombine.
5615 // Converting a 64-bit integer to a 32-bit floating point value is not possible
5616 // with SCVTF on the floating point registers (both source and destination
5617 // must have the same size).
5619 // Here are the patterns for 8, 16, 32, and 64-bits to double.
5620 // 8-bits -> double. 3 size step-up: give up.
5621 // 16-bits -> double. 2 size step.
5622 class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
5623 : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
5624 (SCVTFv1i64 (f64 (EXTRACT_SUBREG
5629 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5636 Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
5638 def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
5639 (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
5640 def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
5641 (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
5642 def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
5643 (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
5644 def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
5645 (LDURHi GPR64sp:$Rn, simm9:$offset)>;
5646 // 32-bits -> double. 1 size step-up.
5647 class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
5648 : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
5649 (SCVTFv1i64 (f64 (EXTRACT_SUBREG
5651 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5655 dsub)))>, Requires<[NotForCodeSize]>;
5657 def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
5658 (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
5659 def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
5660 (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
5661 def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
5662 (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
5663 def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
5664 (LDURSi GPR64sp:$Rn, simm9:$offset)>;
5666 // 64-bits -> double are handled in target specific dag combine:
5667 // performIntToFpCombine.
5670 //----------------------------------------------------------------------------
5671 // AdvSIMD Load-Store Structure
5672 //----------------------------------------------------------------------------
5673 defm LD1 : SIMDLd1Multiple<"ld1">;
5674 defm LD2 : SIMDLd2Multiple<"ld2">;
5675 defm LD3 : SIMDLd3Multiple<"ld3">;
5676 defm LD4 : SIMDLd4Multiple<"ld4">;
5678 defm ST1 : SIMDSt1Multiple<"st1">;
5679 defm ST2 : SIMDSt2Multiple<"st2">;
5680 defm ST3 : SIMDSt3Multiple<"st3">;
5681 defm ST4 : SIMDSt4Multiple<"st4">;
5683 class Ld1Pat<ValueType ty, Instruction INST>
5684 : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;
5686 def : Ld1Pat<v16i8, LD1Onev16b>;
5687 def : Ld1Pat<v8i16, LD1Onev8h>;
5688 def : Ld1Pat<v4i32, LD1Onev4s>;
5689 def : Ld1Pat<v2i64, LD1Onev2d>;
5690 def : Ld1Pat<v8i8, LD1Onev8b>;
5691 def : Ld1Pat<v4i16, LD1Onev4h>;
5692 def : Ld1Pat<v2i32, LD1Onev2s>;
5693 def : Ld1Pat<v1i64, LD1Onev1d>;
5695 class St1Pat<ValueType ty, Instruction INST>
5696 : Pat<(store ty:$Vt, GPR64sp:$Rn),
5697 (INST ty:$Vt, GPR64sp:$Rn)>;
5699 def : St1Pat<v16i8, ST1Onev16b>;
5700 def : St1Pat<v8i16, ST1Onev8h>;
5701 def : St1Pat<v4i32, ST1Onev4s>;
5702 def : St1Pat<v2i64, ST1Onev2d>;
5703 def : St1Pat<v8i8, ST1Onev8b>;
5704 def : St1Pat<v4i16, ST1Onev4h>;
5705 def : St1Pat<v2i32, ST1Onev2s>;
5706 def : St1Pat<v1i64, ST1Onev1d>;
5712 defm LD1R : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
5713 defm LD2R : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
5714 defm LD3R : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
5715 defm LD4R : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
5716 let mayLoad = 1, hasSideEffects = 0 in {
5717 defm LD1 : SIMDLdSingleBTied<0, 0b000, "ld1", VecListOneb, GPR64pi1>;
5718 defm LD1 : SIMDLdSingleHTied<0, 0b010, 0, "ld1", VecListOneh, GPR64pi2>;
5719 defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes, GPR64pi4>;
5720 defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned, GPR64pi8>;
5721 defm LD2 : SIMDLdSingleBTied<1, 0b000, "ld2", VecListTwob, GPR64pi2>;
5722 defm LD2 : SIMDLdSingleHTied<1, 0b010, 0, "ld2", VecListTwoh, GPR64pi4>;
5723 defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos, GPR64pi8>;
5724 defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod, GPR64pi16>;
5725 defm LD3 : SIMDLdSingleBTied<0, 0b001, "ld3", VecListThreeb, GPR64pi3>;
5726 defm LD3 : SIMDLdSingleHTied<0, 0b011, 0, "ld3", VecListThreeh, GPR64pi6>;
5727 defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
5728 defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
5729 defm LD4 : SIMDLdSingleBTied<1, 0b001, "ld4", VecListFourb, GPR64pi4>;
5730 defm LD4 : SIMDLdSingleHTied<1, 0b011, 0, "ld4", VecListFourh, GPR64pi8>;
5731 defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours, GPR64pi16>;
5732 defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd, GPR64pi32>;
5735 def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
5736 (LD1Rv8b GPR64sp:$Rn)>;
5737 def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
5738 (LD1Rv16b GPR64sp:$Rn)>;
5739 def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
5740 (LD1Rv4h GPR64sp:$Rn)>;
5741 def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
5742 (LD1Rv8h GPR64sp:$Rn)>;
5743 def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
5744 (LD1Rv2s GPR64sp:$Rn)>;
5745 def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
5746 (LD1Rv4s GPR64sp:$Rn)>;
5747 def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
5748 (LD1Rv2d GPR64sp:$Rn)>;
5749 def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
5750 (LD1Rv1d GPR64sp:$Rn)>;
5751 // Grab the floating point version too
5752 def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
5753 (LD1Rv2s GPR64sp:$Rn)>;
5754 def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
5755 (LD1Rv4s GPR64sp:$Rn)>;
5756 def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
5757 (LD1Rv2d GPR64sp:$Rn)>;
5758 def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
5759 (LD1Rv1d GPR64sp:$Rn)>;
5760 def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
5761 (LD1Rv4h GPR64sp:$Rn)>;
5762 def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
5763 (LD1Rv8h GPR64sp:$Rn)>;
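// e.g. (illustrative): vld1q_dup_f32(p), i.e.
//   (v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
// selects to "ld1r { v0.4s }, [x0]" rather than a scalar load followed by DUP.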
5765 class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
5766 ValueType VTy, ValueType STy, Instruction LD1>
5767 : Pat<(vector_insert (VTy VecListOne128:$Rd),
5768 (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
5769 (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;
5771 def : Ld1Lane128Pat<extloadi8, VectorIndexB, v16i8, i32, LD1i8>;
5772 def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
5773 def : Ld1Lane128Pat<load, VectorIndexS, v4i32, i32, LD1i32>;
5774 def : Ld1Lane128Pat<load, VectorIndexS, v4f32, f32, LD1i32>;
5775 def : Ld1Lane128Pat<load, VectorIndexD, v2i64, i64, LD1i64>;
5776 def : Ld1Lane128Pat<load, VectorIndexD, v2f64, f64, LD1i64>;
5777 def : Ld1Lane128Pat<load, VectorIndexH, v8f16, f16, LD1i16>;
5779 class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
5780 ValueType VTy, ValueType STy, Instruction LD1>
5781 : Pat<(vector_insert (VTy VecListOne64:$Rd),
5782 (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
5784 (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
5785 VecIndex:$idx, GPR64sp:$Rn),
5788 def : Ld1Lane64Pat<extloadi8, VectorIndexB, v8i8, i32, LD1i8>;
5789 def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
5790 def : Ld1Lane64Pat<load, VectorIndexS, v2i32, i32, LD1i32>;
5791 def : Ld1Lane64Pat<load, VectorIndexS, v2f32, f32, LD1i32>;
5792 def : Ld1Lane64Pat<load, VectorIndexH, v4f16, f16, LD1i16>;
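// e.g. (illustrative): inserting a loaded scalar into lane 1 of a v4i32
// (vld1q_lane_s32) selects to "ld1 { v0.s }[1], [x0]"; the 64-bit patterns
// above first widen the D register with SUBREG_TO_REG because LD1 (single
// structure) indexes lanes of a 128-bit register.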
5795 defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
5796 defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
5797 defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
5798 defm LD4 : SIMDLdSt4SingleAliases<"ld4">;
5801 defm ST1 : SIMDStSingleB<0, 0b000, "st1", VecListOneb, GPR64pi1>;
5802 defm ST1 : SIMDStSingleH<0, 0b010, 0, "st1", VecListOneh, GPR64pi2>;
5803 defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
5804 defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
5806 let AddedComplexity = 19 in
5807 class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
5808 ValueType VTy, ValueType STy, Instruction ST1>
5810 (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
5812 (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;
5814 def : St1Lane128Pat<truncstorei8, VectorIndexB, v16i8, i32, ST1i8>;
5815 def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
5816 def : St1Lane128Pat<store, VectorIndexS, v4i32, i32, ST1i32>;
5817 def : St1Lane128Pat<store, VectorIndexS, v4f32, f32, ST1i32>;
5818 def : St1Lane128Pat<store, VectorIndexD, v2i64, i64, ST1i64>;
5819 def : St1Lane128Pat<store, VectorIndexD, v2f64, f64, ST1i64>;
5820 def : St1Lane128Pat<store, VectorIndexH, v8f16, f16, ST1i16>;
5822 let AddedComplexity = 19 in
5823 class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
5824 ValueType VTy, ValueType STy, Instruction ST1>
5826 (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
5828 (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
5829 VecIndex:$idx, GPR64sp:$Rn)>;
5831 def : St1Lane64Pat<truncstorei8, VectorIndexB, v8i8, i32, ST1i8>;
5832 def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
5833 def : St1Lane64Pat<store, VectorIndexS, v2i32, i32, ST1i32>;
5834 def : St1Lane64Pat<store, VectorIndexS, v2f32, f32, ST1i32>;
5835 def : St1Lane64Pat<store, VectorIndexH, v4f16, f16, ST1i16>;
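// e.g. (illustrative): storing lane 1 of a v4i32 (vst1q_lane_s32) becomes
//   st1 { v0.s }[1], [x0]
// and the AddedComplexity above makes these lane-store patterns preferred over
// alternative selections of the same store.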
5837 multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
5838 ValueType VTy, ValueType STy, Instruction ST1,
5840 def : Pat<(scalar_store
5841 (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
5842 GPR64sp:$Rn, offset),
5843 (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
5844 VecIndex:$idx, GPR64sp:$Rn, XZR)>;
5846 def : Pat<(scalar_store
5847 (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
5848 GPR64sp:$Rn, GPR64:$Rm),
5849 (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
5850 VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
5853 defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
5854 defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
5856 defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
5857 defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
5858 defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
5859 defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
5860 defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
5862 multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
5863 ValueType VTy, ValueType STy, Instruction ST1,
5865 def : Pat<(scalar_store
5866 (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
5867 GPR64sp:$Rn, offset),
5868 (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;
5870 def : Pat<(scalar_store
5871 (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
5872 GPR64sp:$Rn, GPR64:$Rm),
5873 (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
5876 defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
5878 defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
5880 defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
5881 defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
5882 defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
5883 defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
5884 defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
5886 let mayStore = 1, hasSideEffects = 0 in {
5887 defm ST2 : SIMDStSingleB<1, 0b000, "st2", VecListTwob, GPR64pi2>;
5888 defm ST2 : SIMDStSingleH<1, 0b010, 0, "st2", VecListTwoh, GPR64pi4>;
5889 defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos, GPR64pi8>;
5890 defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod, GPR64pi16>;
5891 defm ST3 : SIMDStSingleB<0, 0b001, "st3", VecListThreeb, GPR64pi3>;
5892 defm ST3 : SIMDStSingleH<0, 0b011, 0, "st3", VecListThreeh, GPR64pi6>;
5893 defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
5894 defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
5895 defm ST4 : SIMDStSingleB<1, 0b001, "st4", VecListFourb, GPR64pi4>;
5896 defm ST4 : SIMDStSingleH<1, 0b011, 0, "st4", VecListFourh, GPR64pi8>;
5897 defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours, GPR64pi16>;
5898 defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd, GPR64pi32>;
5901 defm ST1 : SIMDLdSt1SingleAliases<"st1">;
5902 defm ST2 : SIMDLdSt2SingleAliases<"st2">;
5903 defm ST3 : SIMDLdSt3SingleAliases<"st3">;
5904 defm ST4 : SIMDLdSt4SingleAliases<"st4">;
5906 //----------------------------------------------------------------------------
5907 // Crypto extensions
5908 //----------------------------------------------------------------------------
5910 let Predicates = [HasAES] in {
5911 def AESErr : AESTiedInst<0b0100, "aese", int_aarch64_crypto_aese>;
5912 def AESDrr : AESTiedInst<0b0101, "aesd", int_aarch64_crypto_aesd>;
5913 def AESMCrr : AESInst< 0b0110, "aesmc", int_aarch64_crypto_aesmc>;
5914 def AESIMCrr : AESInst< 0b0111, "aesimc", int_aarch64_crypto_aesimc>;
5917 // Pseudo instructions for AESMCrr/AESIMCrr with a register constraint required
5918 // for AES fusion on some CPUs.
5919 let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
5920 def AESMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
5922 def AESIMCrrTied: Pseudo<(outs V128:$Rd), (ins V128:$Rn), [], "$Rn = $Rd">,
5926 // Only use constrained versions of AES(I)MC instructions if they are paired with
5927 // AESE/AESD.
5928 def : Pat<(v16i8 (int_aarch64_crypto_aesmc
5929 (v16i8 (int_aarch64_crypto_aese (v16i8 V128:$src1),
5930 (v16i8 V128:$src2))))),
5931 (v16i8 (AESMCrrTied (v16i8 (AESErr (v16i8 V128:$src1),
5932 (v16i8 V128:$src2)))))>,
5933 Requires<[HasFuseAES]>;
5935 def : Pat<(v16i8 (int_aarch64_crypto_aesimc
5936 (v16i8 (int_aarch64_crypto_aesd (v16i8 V128:$src1),
5937 (v16i8 V128:$src2))))),
5938 (v16i8 (AESIMCrrTied (v16i8 (AESDrr (v16i8 V128:$src1),
5939 (v16i8 V128:$src2)))))>,
5940 Requires<[HasFuseAES]>;
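// Illustrative example (assumed scheduling intent): for the common pair
//   aese  v0.16b, v1.16b
//   aesmc v0.16b, v0.16b
// the tied pseudos above force the AESMC to read and write the AESE result
// register, so cores with AESE/AESMC fusion see the dependent back-to-back pair.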
5942 let Predicates = [HasSHA2] in {
5943 def SHA1Crrr : SHATiedInstQSV<0b000, "sha1c", int_aarch64_crypto_sha1c>;
5944 def SHA1Prrr : SHATiedInstQSV<0b001, "sha1p", int_aarch64_crypto_sha1p>;
5945 def SHA1Mrrr : SHATiedInstQSV<0b010, "sha1m", int_aarch64_crypto_sha1m>;
5946 def SHA1SU0rrr : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
5947 def SHA256Hrrr : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
5948 def SHA256H2rrr : SHATiedInstQQV<0b101, "sha256h2",int_aarch64_crypto_sha256h2>;
5949 def SHA256SU1rrr :SHATiedInstVVV<0b110, "sha256su1",int_aarch64_crypto_sha256su1>;
5951 def SHA1Hrr : SHAInstSS< 0b0000, "sha1h", int_aarch64_crypto_sha1h>;
5952 def SHA1SU1rr : SHATiedInstVV<0b0001, "sha1su1", int_aarch64_crypto_sha1su1>;
5953 def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_aarch64_crypto_sha256su0>;
5956 //----------------------------------------------------------------------------
5957 // Compiler-pseudos
5958 //----------------------------------------------------------------------------
5959 // FIXME: Like for X86, these should go in their own separate .td file.
5961 def def32 : PatLeaf<(i32 GPR32:$src), [{
5965 // In the case of a 32-bit def that is known to implicitly zero-extend,
5966 // we can use a SUBREG_TO_REG.
5967 def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;
5969 // For an anyext, we don't care what the high bits are, so we can perform an
5970 // INSERT_SUBREG into an IMPLICIT_DEF.
5971 def : Pat<(i64 (anyext GPR32:$src)),
5972 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
5974 // When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
5975 // then assert the extension has happened.
5976 def : Pat<(i64 (zext GPR32:$src)),
5977 (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
5979 // To sign extend, we use a signed bitfield move instruction (SBFM) on the
5980 // containing super-reg.
5981 def : Pat<(i64 (sext GPR32:$src)),
5982 (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
5983 def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
5984 def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
5985 def : Pat<(i64 (sext_inreg GPR64:$src, i8)), (SBFMXri GPR64:$src, 0, 7)>;
5986 def : Pat<(i64 (sext_inreg GPR64:$src, i1)), (SBFMXri GPR64:$src, 0, 0)>;
5987 def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
5988 def : Pat<(i32 (sext_inreg GPR32:$src, i8)), (SBFMWri GPR32:$src, 0, 7)>;
5989 def : Pat<(i32 (sext_inreg GPR32:$src, i1)), (SBFMWri GPR32:$src, 0, 0)>;
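// e.g. (illustrative): (i32 (sext_inreg GPR32:$src, i16)) becomes
//   SBFMWri $src, 0, 15
// which disassembles as "sxth w0, w0".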
5991 def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
5992 (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
5993 (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
5994 def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
5995 (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
5996 (i64 (i64shift_sext_i8 imm0_63:$imm)))>;
5998 def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
5999 (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
6000 (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
6001 def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
6002 (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
6003 (i64 (i64shift_sext_i16 imm0_63:$imm)))>;
6005 def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
6006 (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
6007 (i64 (i64shift_a imm0_63:$imm)),
6008 (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
6010 // sra patterns have an AddedComplexity of 10, so make sure we have a higher
6011 // AddedComplexity for the following patterns since we want to match sext + sra
6012 // patterns before we attempt to match a single sra node.
6013 let AddedComplexity = 20 in {
6014 // We support all sext + sra combinations which preserve at least one bit of the
6015 // original value which is to be sign extended. E.g. we support shifts up to
6016 // bitwidth-1 bits.
6017 def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
6018 (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
6019 def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
6020 (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;
6022 def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
6023 (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
6024 def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
6025 (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;
6027 def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
6028 (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
6029 (i64 imm0_31:$imm), 31)>;
6030 } // AddedComplexity = 20
6032 // To truncate, we can simply extract from a subregister.
6033 def : Pat<(i32 (trunc GPR64sp:$src)),
6034 (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;
6036 // __builtin_trap() uses the BRK instruction on AArch64.
6037 def : Pat<(trap), (BRK 1)>;
6039 // Multiply high patterns which multiply the lower subvector using smull/umull
6040 // and the upper subvector with smull2/umull2. Then shuffle the high parts
6041 // of both results together.
6042 def : Pat<(v16i8 (mulhs V128:$Rn, V128:$Rm)),
6043 (UZP2v16i8
6044 (SMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
6045 (EXTRACT_SUBREG V128:$Rm, dsub)),
6046 (SMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
6047 def : Pat<(v8i16 (mulhs V128:$Rn, V128:$Rm)),
6048 (UZP2v8i16
6049 (SMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
6050 (EXTRACT_SUBREG V128:$Rm, dsub)),
6051 (SMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
6052 def : Pat<(v4i32 (mulhs V128:$Rn, V128:$Rm)),
6053 (UZP2v4i32
6054 (SMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
6055 (EXTRACT_SUBREG V128:$Rm, dsub)),
6056 (SMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
6058 def : Pat<(v16i8 (mulhu V128:$Rn, V128:$Rm)),
6059 (UZP2v16i8
6060 (UMULLv8i8_v8i16 (EXTRACT_SUBREG V128:$Rn, dsub),
6061 (EXTRACT_SUBREG V128:$Rm, dsub)),
6062 (UMULLv16i8_v8i16 V128:$Rn, V128:$Rm))>;
6063 def : Pat<(v8i16 (mulhu V128:$Rn, V128:$Rm)),
6064 (UZP2v8i16
6065 (UMULLv4i16_v4i32 (EXTRACT_SUBREG V128:$Rn, dsub),
6066 (EXTRACT_SUBREG V128:$Rm, dsub)),
6067 (UMULLv8i16_v4i32 V128:$Rn, V128:$Rm))>;
6068 def : Pat<(v4i32 (mulhu V128:$Rn, V128:$Rm)),
6069 (UZP2v4i32
6070 (UMULLv2i32_v2i64 (EXTRACT_SUBREG V128:$Rn, dsub),
6071 (EXTRACT_SUBREG V128:$Rm, dsub)),
6072 (UMULLv4i32_v2i64 V128:$Rn, V128:$Rm))>;
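// e.g. (illustrative): (v8i16 (mulhs a, b)) becomes
//   smull  v2.4s, v0.4h, v1.4h
//   smull2 v3.4s, v0.8h, v1.8h
//   uzp2   v0.8h, v2.8h, v3.8h
// i.e. the high halves of the widened products are gathered by UZP2.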
6074 // Conversions within AdvSIMD types in the same register size are free.
6075 // But because we need a consistent lane ordering, in big endian many
6076 // conversions require one or more REV instructions.
6078 // Consider a simple memory load followed by a bitconvert then a store.
6079 //   v0 = load v2i32
6080 //   v1 = BITCAST v2i32 v0 to v4i16
6081 //        store v4i16 v1
6082 //
6083 // In big endian mode every memory access has an implicit byte swap. LDR and
6084 // STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
6085 // is, they treat the vector as a sequence of elements to be byte-swapped.
6086 // The two pairs of instructions are fundamentally incompatible. We've decided
6087 // to use LD1/ST1 only to simplify compiler implementation.
6089 // LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
6090 // the original code sequence:
6091 //   v0 = load v2i32
6092 //   v1 = REV v2i32                  (implicit)
6093 //   v2 = BITCAST v2i32 v1 to v4i16
6094 //   v3 = REV v4i16 v2               (implicit)
6095 //        store v4i16 v3
6096 //
6097 // But this is now broken - the value stored is different to the value loaded
6098 // due to lane reordering. To fix this, on every BITCAST we must perform two
6099 // other REVs:
6100 //   v0 = load v2i32
6101 //   v1 = REV v2i32                  (implicit)
6102 //   v2 = REV v2i32
6103 //   v3 = BITCAST v2i32 v2 to v4i16
6104 //   v4 = REV v4i16 v3
6105 //   v5 = REV v4i16 v4               (implicit)
6106 //        store v4i16 v5
6107 //
6108 // This means an extra two instructions, but actually in most cases the two REV
6109 // instructions can be combined into one. For example:
6110 // (REV64_2s (REV64_4h X)) === (REV32_4h X)
6112 // There is also no 128-bit REV instruction. This must be synthesized with an
6113 // EXT instruction.
6114 //
6115 // Most bitconverts require some sort of conversion. The only exceptions are:
6116 // a) Identity conversions - vNfX <-> vNiX
6117 // b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
6120 // Natural vector casts (64 bit)
6121 def : Pat<(v8i8 (AArch64NvCast (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
6122 def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
6123 def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
6124 def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
6125 def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
6126 def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
6128 def : Pat<(v8i8 (AArch64NvCast (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
6129 def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
6130 def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
6131 def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
6132 def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
6134 def : Pat<(v8i8 (AArch64NvCast (v8i8 FPR64:$src))), (v8i8 FPR64:$src)>;
6135 def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
6136 def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
6137 def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
6138 def : Pat<(v2f32 (AArch64NvCast (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
6139 def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
6141 def : Pat<(v8i8 (AArch64NvCast (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
6142 def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
6143 def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
6144 def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
6145 def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
6146 def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
6147 def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
6149 def : Pat<(v8i8 (AArch64NvCast (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
6150 def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
6151 def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
6152 def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
6153 def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
6155 // Natural vector casts (128 bit)
6156 def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
6157 def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
6158 def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
6159 def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
6160 def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
6161 def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
6162 def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
6164 def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
6165 def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
6166 def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
6167 def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
6168 def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
6169 def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
6170 def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
6172 def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
6173 def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
6174 def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
6175 def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
6176 def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
6177 def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
6178 def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
6180 def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
6181 def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
6182 def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
6183 def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
6184 def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
6185 def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
6186 def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
6188 def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
6189 def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
6190 def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
6191 def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
6192 def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
6193 def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
6194 def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
6196 def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
6197 def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
6198 def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
6199 def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
6200 def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
6201 def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
6202 def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
6204 let Predicates = [IsLE] in {
6205 def : Pat<(v8i8 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
6206 def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
6207 def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
6208 def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
6209 def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
6211 def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
6212 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
6213 def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
6214 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
6215 def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
6216 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
6217 def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
6218 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
6219 def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
6220 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
6221 def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
6222 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
6223 }
6224 let Predicates = [IsBE] in {
6225 def : Pat<(v8i8 (bitconvert GPR64:$Xn)),
6226 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
6227 def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
6228 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
6229 def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
6230 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
6231 def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
6232 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
6233 def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
6234 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
6236 def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
6237 (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
6238 def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
6239 (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
6240 def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
6241 (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
6242 def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
6243 (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
6244 def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
6245 (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
6246 }
6247 def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
6248 def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
6249 def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
6250 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
6251 def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
6252 (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
6253 def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
6254 (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
6255 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;
6257 def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
6258 (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
6259 def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
6260 (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
6261 def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
6262 (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
6263 def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
6264 (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
6265 def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
6266 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
6268 let Predicates = [IsLE] in {
6269 def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
6270 def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
6271 def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
6272 def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
6273 def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
6274 }
6275 let Predicates = [IsBE] in {
6276 def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
6277 (v1i64 (REV64v2i32 FPR64:$src))>;
6278 def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
6279 (v1i64 (REV64v4i16 FPR64:$src))>;
6280 def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))),
6281 (v1i64 (REV64v8i8 FPR64:$src))>;
6282 def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
6283 (v1i64 (REV64v4i16 FPR64:$src))>;
6284 def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
6285 (v1i64 (REV64v2i32 FPR64:$src))>;
6286 }
6287 def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
6288 def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
6290 let Predicates = [IsLE] in {
6291 def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
6292 def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
6293 def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
6294 def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
6295 def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
6296 def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
6297 }
6298 let Predicates = [IsBE] in {
6299 def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
6300 (v2i32 (REV64v2i32 FPR64:$src))>;
6301 def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
6302 (v2i32 (REV32v4i16 FPR64:$src))>;
6303 def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))),
6304 (v2i32 (REV32v8i8 FPR64:$src))>;
6305 def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))),
6306 (v2i32 (REV64v2i32 FPR64:$src))>;
6307 def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
6308 (v2i32 (REV64v2i32 FPR64:$src))>;
6309 def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
6310 (v2i32 (REV32v4i16 FPR64:$src))>;
6311 }
6312 def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
6314 let Predicates = [IsLE] in {
6315 def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
6316 def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
6317 def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
6318 def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
6319 def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
6320 def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
6321 }
6322 let Predicates = [IsBE] in {
6323 def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
6324 (v4i16 (REV64v4i16 FPR64:$src))>;
6325 def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
6326 (v4i16 (REV32v4i16 FPR64:$src))>;
6327 def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))),
6328 (v4i16 (REV16v8i8 FPR64:$src))>;
6329 def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))),
6330 (v4i16 (REV64v4i16 FPR64:$src))>;
6331 def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
6332 (v4i16 (REV32v4i16 FPR64:$src))>;
6333 def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
6334 (v4i16 (REV64v4i16 FPR64:$src))>;
6335 }
6336 def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
6338 let Predicates = [IsLE] in {
6339 def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
6340 def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
6341 def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
6342 def : Pat<(v4f16 (bitconvert (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
6343 def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
6344 def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;
6345 }
6346 let Predicates = [IsBE] in {
6347 def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
6348 (v4f16 (REV64v4i16 FPR64:$src))>;
6349 def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
6350 (v4f16 (REV32v4i16 FPR64:$src))>;
6351 def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))),
6352 (v4f16 (REV16v8i8 FPR64:$src))>;
6353 def : Pat<(v4f16 (bitconvert (f64 FPR64:$src))),
6354 (v4f16 (REV64v4i16 FPR64:$src))>;
6355 def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
6356 (v4f16 (REV32v4i16 FPR64:$src))>;
6357 def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
6358 (v4f16 (REV64v4i16 FPR64:$src))>;
6359 }
6360 def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
6362 let Predicates = [IsLE] in {
6363 def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))), (v8i8 FPR64:$src)>;
6364 def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
6365 def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
6366 def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
6367 def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
6368 def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))), (v8i8 FPR64:$src)>;
6369 def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))), (v8i8 FPR64:$src)>;
6370 }
6371 let Predicates = [IsBE] in {
6372 def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))),
6373 (v8i8 (REV64v8i8 FPR64:$src))>;
6374 def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))),
6375 (v8i8 (REV32v8i8 FPR64:$src))>;
6376 def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))),
6377 (v8i8 (REV16v8i8 FPR64:$src))>;
6378 def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))),
6379 (v8i8 (REV64v8i8 FPR64:$src))>;
6380 def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))),
6381 (v8i8 (REV32v8i8 FPR64:$src))>;
6382 def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))),
6383 (v8i8 (REV64v8i8 FPR64:$src))>;
6384 def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))),
6385 (v8i8 (REV16v8i8 FPR64:$src))>;
6386 }
6388 let Predicates = [IsLE] in {
6389 def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))), (f64 FPR64:$src)>;
6390 def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))), (f64 FPR64:$src)>;
6391 def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))), (f64 FPR64:$src)>;
6392 def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))), (f64 FPR64:$src)>;
6393 def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))), (f64 FPR64:$src)>;
6394 }
6395 let Predicates = [IsBE] in {
6396 def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))),
6397 (f64 (REV64v2i32 FPR64:$src))>;
6398 def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))),
6399 (f64 (REV64v4i16 FPR64:$src))>;
6400 def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))),
6401 (f64 (REV64v2i32 FPR64:$src))>;
6402 def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))),
6403 (f64 (REV64v8i8 FPR64:$src))>;
6404 def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))),
6405 (f64 (REV64v4i16 FPR64:$src))>;
6406 }
6407 def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
6408 def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
6410 let Predicates = [IsLE] in {
6411 def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
6412 def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
6413 def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))), (v1f64 FPR64:$src)>;
6414 def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
6415 def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
6416 }
6417 let Predicates = [IsBE] in {
6418 def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
6419 (v1f64 (REV64v2i32 FPR64:$src))>;
6420 def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
6421 (v1f64 (REV64v4i16 FPR64:$src))>;
6422 def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))),
6423 (v1f64 (REV64v8i8 FPR64:$src))>;
6424 def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
6425 (v1f64 (REV64v2i32 FPR64:$src))>;
6426 def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
6427 (v1f64 (REV64v4i16 FPR64:$src))>;
6428 }
6429 def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
6430 def : Pat<(v1f64 (bitconvert (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
6432 let Predicates = [IsLE] in {
6433 def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
6434 def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
6435 def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
6436 def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
6437 def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
6438 def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
6439 }
6440 let Predicates = [IsBE] in {
6441 def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
6442 (v2f32 (REV64v2i32 FPR64:$src))>;
6443 def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
6444 (v2f32 (REV32v4i16 FPR64:$src))>;
6445 def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))),
6446 (v2f32 (REV32v8i8 FPR64:$src))>;
6447 def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
6448 (v2f32 (REV64v2i32 FPR64:$src))>;
6449 def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))),
6450 (v2f32 (REV64v2i32 FPR64:$src))>;
6451 def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
6452 (v2f32 (REV32v4i16 FPR64:$src))>;
6453 }
6454 def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
6456 let Predicates = [IsLE] in {
6457 def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
6458 def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
6459 def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
6460 def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
6461 def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
6462 def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
6463 def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
6464 }
6465 let Predicates = [IsBE] in {
6466 def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
6467 (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
6468 def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
6469 (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
6470 (REV64v4i32 FPR128:$src), (i32 8)))>;
6471 def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
6472 (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
6473 (REV64v8i16 FPR128:$src), (i32 8)))>;
6474 def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
6475 (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
6476 (REV64v8i16 FPR128:$src), (i32 8)))>;
6477 def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
6478 (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
6479 def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
6480 (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
6481 (REV64v4i32 FPR128:$src), (i32 8)))>;
6482 def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
6483 (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
6484 (REV64v16i8 FPR128:$src), (i32 8)))>;
6485 }
6487 let Predicates = [IsLE] in {
6488 def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))), (v2f64 FPR128:$src)>;
6489 def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
6490 def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
6491 def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
6492 def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
6493 def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
6494 }
6495 let Predicates = [IsBE] in {
6496 def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))),
6497 (v2f64 (EXTv16i8 FPR128:$src,
6498 FPR128:$src, (i32 8)))>;
6499 def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
6500 (v2f64 (REV64v4i32 FPR128:$src))>;
6501 def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
6502 (v2f64 (REV64v8i16 FPR128:$src))>;
6503 def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
6504 (v2f64 (REV64v8i16 FPR128:$src))>;
6505 def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
6506 (v2f64 (REV64v16i8 FPR128:$src))>;
6507 def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
6508 (v2f64 (REV64v4i32 FPR128:$src))>;
6509 }
6510 def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
6512 let Predicates = [IsLE] in {
6513 def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))), (v4f32 FPR128:$src)>;
6514 def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
6515 def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
6516 def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
6517 def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
6518 def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
6519 }
6520 let Predicates = [IsBE] in {
6521 def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))),
6522 (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
6523 (REV64v4i32 FPR128:$src), (i32 8)))>;
6524 def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
6525 (v4f32 (REV32v8i16 FPR128:$src))>;
6526 def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
6527 (v4f32 (REV32v8i16 FPR128:$src))>;
6528 def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
6529 (v4f32 (REV32v16i8 FPR128:$src))>;
6530 def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
6531 (v4f32 (REV64v4i32 FPR128:$src))>;
6532 def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
6533 (v4f32 (REV64v4i32 FPR128:$src))>;
6534 }
6535 def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
6537 let Predicates = [IsLE] in {
6538 def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))), (v2i64 FPR128:$src)>;
6539 def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
6540 def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
6541 def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
6542 def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
6543 def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
6544 }
6545 let Predicates = [IsBE] in {
6546 def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))),
6547 (v2i64 (EXTv16i8 FPR128:$src,
6548 FPR128:$src, (i32 8)))>;
6549 def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
6550 (v2i64 (REV64v4i32 FPR128:$src))>;
6551 def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
6552 (v2i64 (REV64v8i16 FPR128:$src))>;
6553 def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
6554 (v2i64 (REV64v16i8 FPR128:$src))>;
6555 def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
6556 (v2i64 (REV64v4i32 FPR128:$src))>;
6557 def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
6558 (v2i64 (REV64v8i16 FPR128:$src))>;
6559 }
6560 def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
6562 let Predicates = [IsLE] in {
6563 def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))), (v4i32 FPR128:$src)>;
6564 def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
6565 def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
6566 def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
6567 def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
6568 def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
6569 }
6570 let Predicates = [IsBE] in {
6571 def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))),
6572 (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
6573 (REV64v4i32 FPR128:$src),
6574 (i32 8)))>;
6575 def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
6576 (v4i32 (REV64v4i32 FPR128:$src))>;
6577 def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
6578 (v4i32 (REV32v8i16 FPR128:$src))>;
6579 def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
6580 (v4i32 (REV32v16i8 FPR128:$src))>;
6581 def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
6582 (v4i32 (REV64v4i32 FPR128:$src))>;
6583 def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
6584 (v4i32 (REV32v8i16 FPR128:$src))>;
6585 }
6586 def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
6588 let Predicates = [IsLE] in {
6589 def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))), (v8i16 FPR128:$src)>;
6590 def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
6591 def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
6592 def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
6593 def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
6594 def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
6595 }
6596 let Predicates = [IsBE] in {
6597 def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))),
6598 (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
6599 (REV64v8i16 FPR128:$src),
6600 (i32 8)))>;
6601 def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
6602 (v8i16 (REV64v8i16 FPR128:$src))>;
6603 def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
6604 (v8i16 (REV32v8i16 FPR128:$src))>;
6605 def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
6606 (v8i16 (REV16v16i8 FPR128:$src))>;
6607 def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
6608 (v8i16 (REV64v8i16 FPR128:$src))>;
6609 def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
6610 (v8i16 (REV32v8i16 FPR128:$src))>;
6611 }
6612 def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
6614 let Predicates = [IsLE] in {
6615 def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))), (v8f16 FPR128:$src)>;
6616 def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
6617 def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
6618 def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
6619 def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
6620 def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
6621 }
6622 let Predicates = [IsBE] in {
6623 def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))),
6624 (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
6625 (REV64v8i16 FPR128:$src),
6626 (i32 8)))>;
6627 def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
6628 (v8f16 (REV64v8i16 FPR128:$src))>;
6629 def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
6630 (v8f16 (REV32v8i16 FPR128:$src))>;
6631 def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
6632 (v8f16 (REV16v16i8 FPR128:$src))>;
6633 def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
6634 (v8f16 (REV64v8i16 FPR128:$src))>;
6635 def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
6636 (v8f16 (REV32v8i16 FPR128:$src))>;
6637 }
6638 def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
6640 let Predicates = [IsLE] in {
6641 def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), (v16i8 FPR128:$src)>;
6642 def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
6643 def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
6644 def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
6645 def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
6646 def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
6647 def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
6648 }
6649 let Predicates = [IsBE] in {
6650 def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))),
6651 (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
6652 (REV64v16i8 FPR128:$src),
6653 (i32 8)))>;
6654 def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
6655 (v16i8 (REV64v16i8 FPR128:$src))>;
6656 def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
6657 (v16i8 (REV32v16i8 FPR128:$src))>;
6658 def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
6659 (v16i8 (REV16v16i8 FPR128:$src))>;
6660 def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
6661 (v16i8 (REV64v16i8 FPR128:$src))>;
6662 def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
6663 (v16i8 (REV32v16i8 FPR128:$src))>;
6664 def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
6665 (v16i8 (REV16v16i8 FPR128:$src))>;
6666 }
6668 def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
6669 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6670 def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
6671 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6672 def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
6673 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6674 def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
6675 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6676 def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
6677 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6678 def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
6679 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6680 def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
6681 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6683 def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
6684 (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
6685 def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
6686 (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
6687 def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
6688 (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
6689 def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
6690 (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
6692 // A 64-bit subvector insert to the first 128-bit vector position
6693 // is a subregister copy that needs no instruction.
6694 multiclass InsertSubvectorUndef<ValueType Ty> {
6695 def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (Ty 0)),
6696 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6697 def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (Ty 0)),
6698 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6699 def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (Ty 0)),
6700 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6701 def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (Ty 0)),
6702 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6703 def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (Ty 0)),
6704 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6705 def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (Ty 0)),
6706 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6707 def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (Ty 0)),
6708 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6709 }
6711 defm : InsertSubvectorUndef<i32>;
6712 defm : InsertSubvectorUndef<i64>;
6714 // Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
6715 // and the low two lanes of v4f32 (see the note below).
6716 def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
6717 (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
6718 (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
6719 def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
6720 (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
6721 (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
6722 // vector_extract on 64-bit vectors gets promoted to a 128-bit vector,
6723 // so we match on v4f32 here, not v2f32. This will also catch adding
6724 // the low two lanes of a true v4f32 vector.
6725 def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
6726 (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
6727 (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
6729 // Scalar 64-bit shifts in FPR64 registers.
6730 def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
6731 (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
6732 def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
6733 (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
6734 def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
6735 (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
6736 def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
6737 (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
6739 // Patterns for nontemporal/no-allocate stores.
6740 // We have to resort to tricks to turn a single-input store into a store pair,
6741 // because there is no single-input nontemporal store, only STNP.
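// As an illustration of the trick (see the patterns below): a 128-bit
// nontemporal vector store is emitted as an STNP of the vector's two 64-bit
// halves, and a nontemporal i64 GPR store as an STNP of its two 32-bit halves.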
6742 let Predicates = [IsLE] in {
6743 let AddedComplexity = 15 in {
6744 class NTStore128Pat<ValueType VT> :
6745 Pat<(nontemporalstore (VT FPR128:$Rt),
6746 (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
6747 (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
6748 (CPYi64 FPR128:$Rt, (i64 1)),
6749 GPR64sp:$Rn, simm7s8:$offset)>;
6751 def : NTStore128Pat<v2i64>;
6752 def : NTStore128Pat<v4i32>;
6753 def : NTStore128Pat<v8i16>;
6754 def : NTStore128Pat<v16i8>;
6756 class NTStore64Pat<ValueType VT> :
6757 Pat<(nontemporalstore (VT FPR64:$Rt),
6758 (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
6759 (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
6760 (CPYi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
6761 GPR64sp:$Rn, simm7s4:$offset)>;
6763 // FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
6764 def : NTStore64Pat<v1f64>;
6765 def : NTStore64Pat<v1i64>;
6766 def : NTStore64Pat<v2i32>;
6767 def : NTStore64Pat<v4i16>;
6768 def : NTStore64Pat<v8i8>;
6770 def : Pat<(nontemporalstore GPR64:$Rt,
6771 (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
6772 (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
6773 (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
6774 GPR64sp:$Rn, simm7s4:$offset)>;
6775 } // AddedComplexity=15
6776 } // Predicates = [IsLE]
6778 // Tail call return handling. These are all compiler pseudo-instructions,
6779 // so no encoding information or anything like that.
6780 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
6781 def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
6782 Sched<[WriteBrReg]>;
6783 def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
6784 Sched<[WriteBrReg]>;
6785 // Indirect tail-call with any register allowed, used by MachineOutliner when
6786 // this is proven safe.
6787 // FIXME: If we have to add any more hacks like this, we should instead relax
6788 // some verifier checks for outlined functions.
6789 def TCRETURNriALL : Pseudo<(outs), (ins GPR64:$dst, i32imm:$FPDiff), []>,
6790 Sched<[WriteBrReg]>;
6791 // Indirect tail-call restricted to the registers (x16 and x17) that are
6792 // allowed to tail-call a "BTI c" instruction.
6793 def TCRETURNriBTI : Pseudo<(outs), (ins rtcGPR64:$dst, i32imm:$FPDiff), []>,
6794 Sched<[WriteBrReg]>;
6795 }
6797 def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
6798 (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>,
6799 Requires<[NotUseBTI]>;
6800 def : Pat<(AArch64tcret rtcGPR64:$dst, (i32 timm:$FPDiff)),
6801 (TCRETURNriBTI rtcGPR64:$dst, imm:$FPDiff)>,
6802 Requires<[UseBTI]>;
6803 def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
6804 (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
6805 def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
6806 (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
6808 def MOVMCSym : Pseudo<(outs GPR64:$dst), (ins i64imm:$sym), []>, Sched<[]>;
6809 def : Pat<(i64 (AArch64LocalRecover mcsym:$sym)), (MOVMCSym mcsym:$sym)>;
6811 include "AArch64InstrAtomics.td"
6812 include "AArch64SVEInstrInfo.td"