1 //=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // AArch64 Instruction definitions.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // ARM Instruction Predicate Definitions.
17 def HasV8_1a : Predicate<"Subtarget->hasV8_1aOps()">,
18 AssemblerPredicate<"HasV8_1aOps", "armv8.1a">;
19 def HasV8_2a : Predicate<"Subtarget->hasV8_2aOps()">,
20 AssemblerPredicate<"HasV8_2aOps", "armv8.2a">;
21 def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8()">,
22 AssemblerPredicate<"FeatureFPARMv8", "fp-armv8">;
23 def HasNEON : Predicate<"Subtarget->hasNEON()">,
24 AssemblerPredicate<"FeatureNEON", "neon">;
25 def HasCrypto : Predicate<"Subtarget->hasCrypto()">,
26 AssemblerPredicate<"FeatureCrypto", "crypto">;
27 def HasCRC : Predicate<"Subtarget->hasCRC()">,
28 AssemblerPredicate<"FeatureCRC", "crc">;
29 def HasRAS : Predicate<"Subtarget->hasRAS()">,
30 AssemblerPredicate<"FeatureRAS", "ras">;
31 def HasPerfMon : Predicate<"Subtarget->hasPerfMon()">;
32 def HasFullFP16 : Predicate<"Subtarget->hasFullFP16()">,
33 AssemblerPredicate<"FeatureFullFP16", "fullfp16">;
34 def HasSPE : Predicate<"Subtarget->hasSPE()">,
35 AssemblerPredicate<"FeatureSPE", "spe">;
37 def IsLE : Predicate<"Subtarget->isLittleEndian()">;
38 def IsBE : Predicate<"!Subtarget->isLittleEndian()">;
39 def UseAlternateSExtLoadCVTF32
40 : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;
42 //===----------------------------------------------------------------------===//
43 // AArch64-specific DAG Nodes.
46 // SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
47 def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
50 SDTCisInt<0>, SDTCisVT<1, i32>]>;
52 // SDTBinaryArithWithFlagsIn - RES1, FLAGS = op LHS, RHS, FLAGS
53 def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
59 // SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
60 def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
67 def SDT_AArch64Brcond : SDTypeProfile<0, 3,
68 [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
70 def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
71 def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
72 SDTCisVT<2, OtherVT>]>;
75 def SDT_AArch64CSel : SDTypeProfile<1, 4,
80 def SDT_AArch64CCMP : SDTypeProfile<1, 5,
87 def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
94 def SDT_AArch64FCmp : SDTypeProfile<0, 2,
97 def SDT_AArch64Dup : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
98 def SDT_AArch64DupLane : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
99 def SDT_AArch64Zip : SDTypeProfile<1, 2, [SDTCisVec<0>,
101 SDTCisSameAs<0, 2>]>;
102 def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
103 def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
104 def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
105 SDTCisInt<2>, SDTCisInt<3>]>;
106 def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
107 def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
108 SDTCisSameAs<0,2>, SDTCisInt<3>]>;
109 def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;
111 def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
112 def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
113 def SDT_AArch64fcmp : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
114 def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
116 def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
119 def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
120 def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;
122 def SDT_AArch64ITOF : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;
124 def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
127 // Generates the general dynamic sequences, i.e.
128 // adrp x0, :tlsdesc:var
129 // ldr x1, [x0, #:tlsdesc_lo12:var]
130 // add x0, x0, #:tlsdesc_lo12:var
134 // (the TPIDR_EL0 offset is put directly in X0, hence no "result" here)
135 // number of operands (the variable)
136 def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0,1,
139 def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
140 [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
141 SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
142 SDTCisSameAs<1, 4>]>;
146 def AArch64adrp : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
147 def AArch64addlow : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
148 def AArch64LOADgot : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
149 def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
150 SDCallSeqStart<[ SDTCisVT<0, i32> ]>,
151 [SDNPHasChain, SDNPOutGlue]>;
152 def AArch64callseq_end : SDNode<"ISD::CALLSEQ_END",
153 SDCallSeqEnd<[ SDTCisVT<0, i32>,
155 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
156 def AArch64call : SDNode<"AArch64ISD::CALL",
157 SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
158 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
160 def AArch64brcond : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
162 def AArch64cbz : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
164 def AArch64cbnz : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
166 def AArch64tbz : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
168 def AArch64tbnz : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
172 def AArch64csel : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
173 def AArch64csinv : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
174 def AArch64csneg : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
175 def AArch64csinc : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
176 def AArch64retflag : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
177 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
178 def AArch64adc : SDNode<"AArch64ISD::ADC", SDTBinaryArithWithFlagsIn >;
179 def AArch64sbc : SDNode<"AArch64ISD::SBC", SDTBinaryArithWithFlagsIn>;
180 def AArch64add_flag : SDNode<"AArch64ISD::ADDS", SDTBinaryArithWithFlagsOut,
182 def AArch64sub_flag : SDNode<"AArch64ISD::SUBS", SDTBinaryArithWithFlagsOut>;
183 def AArch64and_flag : SDNode<"AArch64ISD::ANDS", SDTBinaryArithWithFlagsOut,
185 def AArch64adc_flag : SDNode<"AArch64ISD::ADCS", SDTBinaryArithWithFlagsInOut>;
186 def AArch64sbc_flag : SDNode<"AArch64ISD::SBCS", SDTBinaryArithWithFlagsInOut>;
188 def AArch64ccmp : SDNode<"AArch64ISD::CCMP", SDT_AArch64CCMP>;
189 def AArch64ccmn : SDNode<"AArch64ISD::CCMN", SDT_AArch64CCMP>;
190 def AArch64fccmp : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;
192 def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;
194 def AArch64fcmp : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
196 def AArch64dup : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
197 def AArch64duplane8 : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
198 def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
199 def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
200 def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;
202 def AArch64zip1 : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
203 def AArch64zip2 : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
204 def AArch64uzp1 : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
205 def AArch64uzp2 : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
206 def AArch64trn1 : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
207 def AArch64trn2 : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;
209 def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
210 def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
211 def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
212 def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
213 def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
214 def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
215 def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;
217 def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
218 def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
219 def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
220 def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;
222 def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
223 def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
224 def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
225 def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
226 def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
227 def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
228 def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
229 def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
231 def AArch64not: SDNode<"AArch64ISD::NOT", SDT_AArch64unvec>;
232 def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
233 def AArch64bsl: SDNode<"AArch64ISD::BSL", SDT_AArch64trivec>;
235 def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
236 def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
237 def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
238 def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
239 def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;
241 def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
242 def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
243 def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;
245 def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
246 def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
247 def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
248 def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
249 def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
250 def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
251 (AArch64not (AArch64cmeqz (and node:$LHS, node:$RHS)))>;
253 def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
254 def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
255 def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
256 def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
257 def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;
259 def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
260 def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;
262 def AArch64neg : SDNode<"AArch64ISD::NEG", SDT_AArch64unvec>;
264 def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
265 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
267 def AArch64Prefetch : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
268 [SDNPHasChain, SDNPSideEffect]>;
270 def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
271 def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;
273 def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
274 SDT_AArch64TLSDescCallSeq,
275 [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
279 def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
280 SDT_AArch64WrapperLarge>;
282 def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;
284 def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
285 SDTCisSameAs<1, 2>]>;
286 def AArch64smull : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
287 def AArch64umull : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;
289 def AArch64frecpe : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
290 def AArch64frsqrte : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
292 def AArch64saddv : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
293 def AArch64uaddv : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
294 def AArch64sminv : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
295 def AArch64uminv : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
296 def AArch64smaxv : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
297 def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;
299 //===----------------------------------------------------------------------===//
301 //===----------------------------------------------------------------------===//
303 // AArch64 Instruction Predicate Definitions.
304 def IsDarwin : Predicate<"Subtarget->isTargetDarwin()">;
305 def IsNotDarwin: Predicate<"!Subtarget->isTargetDarwin()">;
306 def ForCodeSize : Predicate<"ForCodeSize">;
307 def NotForCodeSize : Predicate<"!ForCodeSize">;
309 include "AArch64InstrFormats.td"
311 //===----------------------------------------------------------------------===//
313 //===----------------------------------------------------------------------===//
314 // Miscellaneous instructions.
315 //===----------------------------------------------------------------------===//
317 let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
318 // We set Sched to empty list because we expect these instructions to simply get
319 // removed in most cases.
320 def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
321 [(AArch64callseq_start timm:$amt)]>, Sched<[]>;
322 def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
323 [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
325 } // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1
327 let isReMaterializable = 1, isCodeGenOnly = 1 in {
328 // FIXME: The following pseudo instructions are only needed because remat
329 // cannot handle multiple instructions. When that changes, they can be
330 // removed, along with the AArch64Wrapper node.
332 let AddedComplexity = 10 in
333 def LOADgot : Pseudo<(outs GPR64:$dst), (ins i64imm:$addr),
334 [(set GPR64:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
337 // The MOVaddr instruction should match only when the add is not folded
338 // into a load or store address.
340 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
341 [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
342 tglobaladdr:$low))]>,
343 Sched<[WriteAdrAdr]>;
345 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
346 [(set GPR64:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
348 Sched<[WriteAdrAdr]>;
350 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
351 [(set GPR64:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
353 Sched<[WriteAdrAdr]>;
355 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
356 [(set GPR64:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
357 tblockaddress:$low))]>,
358 Sched<[WriteAdrAdr]>;
360 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
361 [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
362 tglobaltlsaddr:$low))]>,
363 Sched<[WriteAdrAdr]>;
365 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
366 [(set GPR64:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
367 texternalsym:$low))]>,
368 Sched<[WriteAdrAdr]>;
370 } // isReMaterializable, isCodeGenOnly
372 def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
373 (LOADgot tglobaltlsaddr:$addr)>;
375 def : Pat<(AArch64LOADgot texternalsym:$addr),
376 (LOADgot texternalsym:$addr)>;
378 def : Pat<(AArch64LOADgot tconstpool:$addr),
379 (LOADgot tconstpool:$addr)>;
381 //===----------------------------------------------------------------------===//
382 // System instructions.
383 //===----------------------------------------------------------------------===//
385 def HINT : HintI<"hint">;
386 def : InstAlias<"nop", (HINT 0b000)>;
387 def : InstAlias<"yield",(HINT 0b001)>;
388 def : InstAlias<"wfe", (HINT 0b010)>;
389 def : InstAlias<"wfi", (HINT 0b011)>;
390 def : InstAlias<"sev", (HINT 0b100)>;
391 def : InstAlias<"sevl", (HINT 0b101)>;
392 def : InstAlias<"esb", (HINT 0b10000)>, Requires<[HasRAS]>;
394 // v8.2a Statistical Profiling extension
395 def : InstAlias<"psb $op", (HINT psbhint_op:$op)>, Requires<[HasSPE]>;
397 // As far as LLVM is concerned this writes to the system's exclusive monitors.
398 let mayLoad = 1, mayStore = 1 in
399 def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;
401 // NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
402 // model patterns with sufficiently fine granularity.
403 let mayLoad = ?, mayStore = ? in {
404 def DMB : CRmSystemI<barrier_op, 0b101, "dmb",
405 [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;
407 def DSB : CRmSystemI<barrier_op, 0b100, "dsb",
408 [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;
410 def ISB : CRmSystemI<barrier_op, 0b110, "isb",
411 [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;
414 def : InstAlias<"clrex", (CLREX 0xf)>;
415 def : InstAlias<"isb", (ISB 0xf)>;
419 def MSRpstateImm1 : MSRpstateImm0_1;
420 def MSRpstateImm4 : MSRpstateImm0_15;
422 // The thread pointer (on Linux, at least, where this has been implemented) is
424 def : Pat<(AArch64threadpointer), (MRS 0xde82)>;
426 // The cycle counter PMC register is PMCCNTR_EL0.
427 let Predicates = [HasPerfMon] in
428 def : Pat<(readcyclecounter), (MRS 0xdce8)>;
430 // Generic system instructions
431 def SYSxt : SystemXtI<0, "sys">;
432 def SYSLxt : SystemLXtI<1, "sysl">;
434 def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
435 (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
436 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;
438 //===----------------------------------------------------------------------===//
439 // Move immediate instructions.
440 //===----------------------------------------------------------------------===//
442 defm MOVK : InsertImmediate<0b11, "movk">;
443 defm MOVN : MoveImmediate<0b00, "movn">;
445 let PostEncoderMethod = "fixMOVZ" in
446 defm MOVZ : MoveImmediate<0b10, "movz">;
448 // First group of aliases covers an implicit "lsl #0".
449 def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, imm0_65535:$imm, 0)>;
450 def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, imm0_65535:$imm, 0)>;
451 def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, imm0_65535:$imm, 0)>;
452 def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, imm0_65535:$imm, 0)>;
453 def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, imm0_65535:$imm, 0)>;
454 def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, imm0_65535:$imm, 0)>;
456 // Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
457 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
458 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
459 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
460 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;
462 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
463 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
464 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
465 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;
467 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g3:$sym, 48)>;
468 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g2:$sym, 32)>;
469 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g1:$sym, 16)>;
470 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g0:$sym, 0)>;
472 def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
473 def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
475 def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
476 def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
478 def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g1:$sym, 16)>;
479 def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g0:$sym, 0)>;
481 // Final group of aliases covers true "mov $Rd, $imm" cases.
482 multiclass movw_mov_alias<string basename,Instruction INST, RegisterClass GPR,
483 int width, int shift> {
484 def _asmoperand : AsmOperandClass {
485 let Name = basename # width # "_lsl" # shift # "MovAlias";
486 let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
488 let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
491 def _movimm : Operand<i32> {
492 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
495 def : InstAlias<"mov $Rd, $imm",
496 (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
499 defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
500 defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
502 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
503 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
504 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
505 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
507 defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
508 defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
510 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
511 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
512 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
513 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
515 let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
516 isAsCheapAsAMove = 1 in {
517 // FIXME: The following pseudo instructions are only needed because remat
518 // cannot handle multiple instructions. When that changes, we can select
519 // directly to the real instructions and get rid of these pseudos.
522 : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
523 [(set GPR32:$dst, imm:$src)]>,
526 : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
527 [(set GPR64:$dst, imm:$src)]>,
529 } // isReMaterializable, isCodeGenOnly
531 // If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
532 // eventual expansion code fewer bits to worry about getting right. Marshalling
533 // the types is a little tricky though:
534 def i64imm_32bit : ImmLeaf<i64, [{
535 return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
538 def s64imm_32bit : ImmLeaf<i64, [{
539 int64_t Imm64 = static_cast<int64_t>(Imm);
540 return Imm64 >= std::numeric_limits<int32_t>::min() &&
541 Imm64 <= std::numeric_limits<int32_t>::max();
544 def trunc_imm : SDNodeXForm<imm, [{
545 return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
548 def : Pat<(i64 i64imm_32bit:$src),
549 (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
551 // Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
552 def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
553 return CurDAG->getTargetConstant(
554 N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
557 def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
558 return CurDAG->getTargetConstant(
559 N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
563 def : Pat<(f32 fpimm:$in),
564 (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
565 def : Pat<(f64 fpimm:$in),
566 (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;
569 // Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
571 def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
572 tglobaladdr:$g1, tglobaladdr:$g0),
573 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g3, 48),
574 tglobaladdr:$g2, 32),
575 tglobaladdr:$g1, 16),
576 tglobaladdr:$g0, 0)>;
578 def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
579 tblockaddress:$g1, tblockaddress:$g0),
580 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g3, 48),
581 tblockaddress:$g2, 32),
582 tblockaddress:$g1, 16),
583 tblockaddress:$g0, 0)>;
585 def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
586 tconstpool:$g1, tconstpool:$g0),
587 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g3, 48),
592 def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
593 tjumptable:$g1, tjumptable:$g0),
594 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g3, 48),
600 //===----------------------------------------------------------------------===//
601 // Arithmetic instructions.
602 //===----------------------------------------------------------------------===//
604 // Add/subtract with carry.
605 defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
606 defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;
608 def : InstAlias<"ngc $dst, $src", (SBCWr GPR32:$dst, WZR, GPR32:$src)>;
609 def : InstAlias<"ngc $dst, $src", (SBCXr GPR64:$dst, XZR, GPR64:$src)>;
610 def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
611 def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;
614 defm ADD : AddSub<0, "add", "sub", add>;
615 defm SUB : AddSub<1, "sub", "add">;
617 def : InstAlias<"mov $dst, $src",
618 (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
619 def : InstAlias<"mov $dst, $src",
620 (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
621 def : InstAlias<"mov $dst, $src",
622 (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
623 def : InstAlias<"mov $dst, $src",
624 (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;
626 defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
627 defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;
629 // Use SUBS instead of SUB to enable CSE between SUBS and SUB.
630 def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
631 (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
632 def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
633 (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
634 def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
635 (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
636 def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
637 (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
638 def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
639 (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
640 def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
641 (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
642 let AddedComplexity = 1 in {
643 def : Pat<(sub GPR32sp:$R2, arith_extended_reg32<i32>:$R3),
644 (SUBSWrx GPR32sp:$R2, arith_extended_reg32<i32>:$R3)>;
645 def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3),
646 (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3)>;
649 // Because of the immediate format for add/sub-imm instructions, the
650 // expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
651 // These patterns capture that transformation.
652 let AddedComplexity = 1 in {
653 def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
654 (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
655 def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
656 (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
657 def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
658 (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
659 def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
660 (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
663 // Because of the immediate format for add/sub-imm instructions, the
664 // expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
665 // These patterns capture that transformation.
666 let AddedComplexity = 1 in {
667 def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
668 (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
669 def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
670 (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
671 def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
672 (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
673 def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
674 (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
677 def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
678 def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
679 def : InstAlias<"neg $dst, $src$shift",
680 (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
681 def : InstAlias<"neg $dst, $src$shift",
682 (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
684 def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
685 def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
686 def : InstAlias<"negs $dst, $src$shift",
687 (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
688 def : InstAlias<"negs $dst, $src$shift",
689 (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
692 // Unsigned/Signed divide
693 defm UDIV : Div<0, "udiv", udiv>;
694 defm SDIV : Div<1, "sdiv", sdiv>;
696 def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr $Rn, $Rm)>;
697 def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr $Rn, $Rm)>;
698 def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr $Rn, $Rm)>;
699 def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr $Rn, $Rm)>;
702 defm ASRV : Shift<0b10, "asr", sra>;
703 defm LSLV : Shift<0b00, "lsl", shl>;
704 defm LSRV : Shift<0b01, "lsr", srl>;
705 defm RORV : Shift<0b11, "ror", rotr>;
707 def : ShiftAlias<"asrv", ASRVWr, GPR32>;
708 def : ShiftAlias<"asrv", ASRVXr, GPR64>;
709 def : ShiftAlias<"lslv", LSLVWr, GPR32>;
710 def : ShiftAlias<"lslv", LSLVXr, GPR64>;
711 def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
712 def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
713 def : ShiftAlias<"rorv", RORVWr, GPR32>;
714 def : ShiftAlias<"rorv", RORVXr, GPR64>;
717 let AddedComplexity = 7 in {
718 defm MADD : MulAccum<0, "madd", add>;
719 defm MSUB : MulAccum<1, "msub", sub>;
721 def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
722 (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
723 def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
724 (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
726 def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
727 (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
728 def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
729 (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
730 def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
731 (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
732 def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
733 (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
734 } // AddedComplexity = 7
736 let AddedComplexity = 5 in {
737 def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
738 def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
739 def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
740 def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
742 def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
743 (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
744 def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
745 (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
747 def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
748 (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
749 def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
750 (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
752 def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
753 (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
754 def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
755 (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
756 def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
757 (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
758 (MOVi32imm (trunc_imm imm:$C)), XZR)>;
760 def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
761 (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
762 def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
763 (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
764 def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
765 (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
766 (MOVi32imm (trunc_imm imm:$C)), XZR)>;
768 def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
769 (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
770 def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
771 (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
772 def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
774 (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
775 (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
777 def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
778 (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
779 def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
780 (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
781 def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
782 (s64imm_32bit:$C)))),
783 (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
784 (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
785 } // AddedComplexity = 5
787 def : MulAccumWAlias<"mul", MADDWrrr>;
788 def : MulAccumXAlias<"mul", MADDXrrr>;
789 def : MulAccumWAlias<"mneg", MSUBWrrr>;
790 def : MulAccumXAlias<"mneg", MSUBXrrr>;
791 def : WideMulAccumAlias<"smull", SMADDLrrr>;
792 def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
793 def : WideMulAccumAlias<"umull", UMADDLrrr>;
794 def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
797 def SMULHrr : MulHi<0b010, "smulh", mulhs>;
798 def UMULHrr : MulHi<0b110, "umulh", mulhu>;
801 def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
802 def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
803 def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
804 def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;
806 def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
807 def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
808 def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
809 def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;
812 defm CAS : CompareAndSwap<0, 0, "">;
813 defm CASA : CompareAndSwap<1, 0, "a">;
814 defm CASL : CompareAndSwap<0, 1, "l">;
815 defm CASAL : CompareAndSwap<1, 1, "al">;
818 defm CASP : CompareAndSwapPair<0, 0, "">;
819 defm CASPA : CompareAndSwapPair<1, 0, "a">;
820 defm CASPL : CompareAndSwapPair<0, 1, "l">;
821 defm CASPAL : CompareAndSwapPair<1, 1, "al">;
824 defm SWP : Swap<0, 0, "">;
825 defm SWPA : Swap<1, 0, "a">;
826 defm SWPL : Swap<0, 1, "l">;
827 defm SWPAL : Swap<1, 1, "al">;
829 // v8.1 atomic LD<OP>(register). Performs load and then ST<OP>(register)
830 defm LDADD : LDOPregister<0b000, "add", 0, 0, "">;
831 defm LDADDA : LDOPregister<0b000, "add", 1, 0, "a">;
832 defm LDADDL : LDOPregister<0b000, "add", 0, 1, "l">;
833 defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;
835 defm LDCLR : LDOPregister<0b001, "clr", 0, 0, "">;
836 defm LDCLRA : LDOPregister<0b001, "clr", 1, 0, "a">;
837 defm LDCLRL : LDOPregister<0b001, "clr", 0, 1, "l">;
838 defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;
840 defm LDEOR : LDOPregister<0b010, "eor", 0, 0, "">;
841 defm LDEORA : LDOPregister<0b010, "eor", 1, 0, "a">;
842 defm LDEORL : LDOPregister<0b010, "eor", 0, 1, "l">;
843 defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;
845 defm LDSET : LDOPregister<0b011, "set", 0, 0, "">;
846 defm LDSETA : LDOPregister<0b011, "set", 1, 0, "a">;
847 defm LDSETL : LDOPregister<0b011, "set", 0, 1, "l">;
848 defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;
850 defm LDSMAX : LDOPregister<0b100, "smax", 0, 0, "">;
851 defm LDSMAXA : LDOPregister<0b100, "smax", 1, 0, "a">;
852 defm LDSMAXL : LDOPregister<0b100, "smax", 0, 1, "l">;
853 defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;
855 defm LDSMIN : LDOPregister<0b101, "smin", 0, 0, "">;
856 defm LDSMINA : LDOPregister<0b101, "smin", 1, 0, "a">;
857 defm LDSMINL : LDOPregister<0b101, "smin", 0, 1, "l">;
858 defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;
860 defm LDUMAX : LDOPregister<0b110, "umax", 0, 0, "">;
861 defm LDUMAXA : LDOPregister<0b110, "umax", 1, 0, "a">;
862 defm LDUMAXL : LDOPregister<0b110, "umax", 0, 1, "l">;
863 defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;
865 defm LDUMIN : LDOPregister<0b111, "umin", 0, 0, "">;
866 defm LDUMINA : LDOPregister<0b111, "umin", 1, 0, "a">;
867 defm LDUMINL : LDOPregister<0b111, "umin", 0, 1, "l">;
868 defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;
870 // v8.1 atomic ST<OP>(register) as aliases to "LD<OP>(register) when Rt=xZR"
871 defm : STOPregister<"stadd","LDADD">; // STADDx
872 defm : STOPregister<"stclr","LDCLR">; // STCLRx
873 defm : STOPregister<"steor","LDEOR">; // STEORx
874 defm : STOPregister<"stset","LDSET">; // STSETx
875 defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
876 defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
877 defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
878 defm : STOPregister<"stumin","LDUMIN">;// STUMINx
880 //===----------------------------------------------------------------------===//
881 // Logical instructions.
882 //===----------------------------------------------------------------------===//
885 defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
886 defm AND : LogicalImm<0b00, "and", and, "bic">;
887 defm EOR : LogicalImm<0b10, "eor", xor, "eon">;
888 defm ORR : LogicalImm<0b01, "orr", or, "orn">;
890 // FIXME: these aliases *are* canonical sometimes (when movz can't be
891 // used). Actually, it seems to be working right now, but putting logical_immXX
892 // here is a bit dodgy on the AsmParser side too.
893 def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
894 logical_imm32:$imm), 0>;
895 def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
896 logical_imm64:$imm), 0>;
900 defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
901 defm BICS : LogicalRegS<0b11, 1, "bics",
902 BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
903 defm AND : LogicalReg<0b00, 0, "and", and>;
904 defm BIC : LogicalReg<0b00, 1, "bic",
905 BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
906 defm EON : LogicalReg<0b10, 1, "eon",
907 BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
908 defm EOR : LogicalReg<0b10, 0, "eor", xor>;
909 defm ORN : LogicalReg<0b01, 1, "orn",
910 BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
911 defm ORR : LogicalReg<0b01, 0, "orr", or>;
913 def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
914 def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;
916 def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
917 def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;
919 def : InstAlias<"mvn $Wd, $Wm$sh",
920 (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
921 def : InstAlias<"mvn $Xd, $Xm$sh",
922 (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;
924 def : InstAlias<"tst $src1, $src2",
925 (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
926 def : InstAlias<"tst $src1, $src2",
927 (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;
929 def : InstAlias<"tst $src1, $src2",
930 (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
931 def : InstAlias<"tst $src1, $src2",
932 (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;
934 def : InstAlias<"tst $src1, $src2$sh",
935 (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
936 def : InstAlias<"tst $src1, $src2$sh",
937 (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;
940 def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
941 def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
944 //===----------------------------------------------------------------------===//
945 // One operand data processing instructions.
946 //===----------------------------------------------------------------------===//
948 defm CLS : OneOperandData<0b101, "cls">;
949 defm CLZ : OneOperandData<0b100, "clz", ctlz>;
950 defm RBIT : OneOperandData<0b000, "rbit">;
952 def : Pat<(int_aarch64_rbit GPR32:$Rn), (RBITWr $Rn)>;
953 def : Pat<(int_aarch64_rbit GPR64:$Rn), (RBITXr $Rn)>;
955 def REV16Wr : OneWRegData<0b001, "rev16",
956 UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
957 def REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
959 def : Pat<(cttz GPR32:$Rn),
960 (CLZWr (RBITWr GPR32:$Rn))>;
961 def : Pat<(cttz GPR64:$Rn),
962 (CLZXr (RBITXr GPR64:$Rn))>;
963 def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
966 def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
970 // Unlike the other one operand instructions, the instructions with the "rev"
971 // mnemonic do *not* just different in the size bit, but actually use different
972 // opcode bits for the different sizes.
973 def REVWr : OneWRegData<0b010, "rev", bswap>;
974 def REVXr : OneXRegData<0b011, "rev", bswap>;
975 def REV32Xr : OneXRegData<0b010, "rev32",
976 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;
978 def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;
980 // The bswap commutes with the rotr so we want a pattern for both possible
982 def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
983 def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
985 //===----------------------------------------------------------------------===//
986 // Bitfield immediate extraction instruction.
987 //===----------------------------------------------------------------------===//
988 let hasSideEffects = 0 in
989 defm EXTR : ExtractImm<"extr">;
990 def : InstAlias<"ror $dst, $src, $shift",
991 (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
992 def : InstAlias<"ror $dst, $src, $shift",
993 (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;
995 def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
996 (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
997 def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
998 (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
1000 //===----------------------------------------------------------------------===//
1001 // Other bitfield immediate instructions.
1002 //===----------------------------------------------------------------------===//
1003 let hasSideEffects = 0 in {
1004 defm BFM : BitfieldImmWith2RegArgs<0b01, "bfm">;
1005 defm SBFM : BitfieldImm<0b00, "sbfm">;
1006 defm UBFM : BitfieldImm<0b10, "ubfm">;
1009 def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
1010 uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
1011 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1014 def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
1015 uint64_t enc = 31 - N->getZExtValue();
1016 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1019 // min(7, 31 - shift_amt)
1020 def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
1021 uint64_t enc = 31 - N->getZExtValue();
1022 enc = enc > 7 ? 7 : enc;
1023 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1026 // min(15, 31 - shift_amt)
1027 def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
1028 uint64_t enc = 31 - N->getZExtValue();
1029 enc = enc > 15 ? 15 : enc;
1030 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1033 def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
1034 uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
1035 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1038 def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
1039 uint64_t enc = 63 - N->getZExtValue();
1040 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1043 // min(7, 63 - shift_amt)
1044 def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
1045 uint64_t enc = 63 - N->getZExtValue();
1046 enc = enc > 7 ? 7 : enc;
1047 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1050 // min(15, 63 - shift_amt)
1051 def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
1052 uint64_t enc = 63 - N->getZExtValue();
1053 enc = enc > 15 ? 15 : enc;
1054 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1057 // min(31, 63 - shift_amt)
1058 def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
1059 uint64_t enc = 63 - N->getZExtValue();
1060 enc = enc > 31 ? 31 : enc;
1061 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1064 def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
1065 (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
1066 (i64 (i32shift_b imm0_31:$imm)))>;
1067 def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
1068 (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
1069 (i64 (i64shift_b imm0_63:$imm)))>;
1071 let AddedComplexity = 10 in {
1072 def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
1073 (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
1074 def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
1075 (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
1078 def : InstAlias<"asr $dst, $src, $shift",
1079 (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
1080 def : InstAlias<"asr $dst, $src, $shift",
1081 (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
1082 def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
1083 def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
1084 def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
1085 def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
1086 def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
1088 def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
1089 (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
1090 def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
1091 (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
1093 def : InstAlias<"lsr $dst, $src, $shift",
1094 (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
1095 def : InstAlias<"lsr $dst, $src, $shift",
1096 (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
1097 def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
1098 def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
1099 def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
1100 def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
1101 def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
1103 //===----------------------------------------------------------------------===//
1104 // Conditional comparison instructions.
1105 //===----------------------------------------------------------------------===//
1106 defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
1107 defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;
1109 //===----------------------------------------------------------------------===//
1110 // Conditional select instructions.
1111 //===----------------------------------------------------------------------===//
1112 defm CSEL : CondSelect<0, 0b00, "csel">;
1114 def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
1115 defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
1116 defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
1117 defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;
1119 def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1120 (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1121 def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1122 (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1123 def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1124 (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1125 def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1126 (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1127 def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1128 (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1129 def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1130 (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1132 def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
1133 (CSINCWr WZR, WZR, (i32 imm:$cc))>;
1134 def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
1135 (CSINCXr XZR, XZR, (i32 imm:$cc))>;
1136 def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
1137 (CSINVWr WZR, WZR, (i32 imm:$cc))>;
1138 def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
1139 (CSINVXr XZR, XZR, (i32 imm:$cc))>;
1140 def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
1141 (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
1142 def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
1143 (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
1144 def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
1145 (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1146 def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
1147 (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1149 // The inverse of the condition code from the alias instruction is what is used
1150 // in the aliased instruction. The parser all ready inverts the condition code
1151 // for these aliases.
1152 def : InstAlias<"cset $dst, $cc",
1153 (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
1154 def : InstAlias<"cset $dst, $cc",
1155 (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
1157 def : InstAlias<"csetm $dst, $cc",
1158 (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
1159 def : InstAlias<"csetm $dst, $cc",
1160 (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
1162 def : InstAlias<"cinc $dst, $src, $cc",
1163 (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1164 def : InstAlias<"cinc $dst, $src, $cc",
1165 (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1167 def : InstAlias<"cinv $dst, $src, $cc",
1168 (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1169 def : InstAlias<"cinv $dst, $src, $cc",
1170 (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1172 def : InstAlias<"cneg $dst, $src, $cc",
1173 (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1174 def : InstAlias<"cneg $dst, $src, $cc",
1175 (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1177 //===----------------------------------------------------------------------===//
1178 // PC-relative instructions.
1179 //===----------------------------------------------------------------------===//
1180 let isReMaterializable = 1 in {
1181 let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
1182 def ADR : ADRI<0, "adr", adrlabel, []>;
1183 } // hasSideEffects = 0
1185 def ADRP : ADRI<1, "adrp", adrplabel,
1186 [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
1187 } // isReMaterializable = 1
1189 // page address of a constant pool entry, block address
1190 def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
1191 def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
1193 //===----------------------------------------------------------------------===//
1194 // Unconditional branch (register) instructions.
1195 //===----------------------------------------------------------------------===//
1197 let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
1198 def RET : BranchReg<0b0010, "ret", []>;
1199 def DRPS : SpecialReturn<0b0101, "drps">;
1200 def ERET : SpecialReturn<0b0100, "eret">;
1201 } // isReturn = 1, isTerminator = 1, isBarrier = 1
1203 // Default to the LR register.
1204 def : InstAlias<"ret", (RET LR)>;
1206 let isCall = 1, Defs = [LR], Uses = [SP] in {
1207 def BLR : BranchReg<0b0001, "blr", [(AArch64call GPR64:$Rn)]>;
1210 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
1211 def BR : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
1212 } // isBranch, isTerminator, isBarrier, isIndirectBranch
1214 // Create a separate pseudo-instruction for codegen to use so that we don't
1215 // flag lr as used in every function. It'll be restored before the RET by the
1216 // epilogue if it's legitimately used.
1217 def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
1218 Sched<[WriteBrReg]> {
1219 let isTerminator = 1;
1224 // This is a directive-like pseudo-instruction. The purpose is to insert an
1225 // R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
1226 // (which in the usual case is a BLR).
1227 let hasSideEffects = 1 in
1228 def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
1229 let AsmString = ".tlsdesccall $sym";
1232 // FIXME: maybe the scratch register used shouldn't be fixed to X1?
1233 // FIXME: can "hasSideEffects be dropped?
1234 let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
1235 isCodeGenOnly = 1 in
1237 : Pseudo<(outs), (ins i64imm:$sym),
1238 [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
1239 Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
1240 def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
1241 (TLSDESC_CALLSEQ texternalsym:$sym)>;
1243 //===----------------------------------------------------------------------===//
1244 // Conditional branch (immediate) instruction.
1245 //===----------------------------------------------------------------------===//
1246 def Bcc : BranchCond;
1248 //===----------------------------------------------------------------------===//
1249 // Compare-and-branch instructions.
1250 //===----------------------------------------------------------------------===//
1251 defm CBZ : CmpBranch<0, "cbz", AArch64cbz>;
1252 defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;
1254 //===----------------------------------------------------------------------===//
1255 // Test-bit-and-branch instructions.
1256 //===----------------------------------------------------------------------===//
1257 defm TBZ : TestBranch<0, "tbz", AArch64tbz>;
1258 defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;
1260 //===----------------------------------------------------------------------===//
1261 // Unconditional branch (immediate) instructions.
1262 //===----------------------------------------------------------------------===//
1263 let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
1264 def B : BranchImm<0, "b", [(br bb:$addr)]>;
1265 } // isBranch, isTerminator, isBarrier
1267 let isCall = 1, Defs = [LR], Uses = [SP] in {
1268 def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
1270 def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;
1272 //===----------------------------------------------------------------------===//
1273 // Exception generation instructions.
1274 //===----------------------------------------------------------------------===//
1275 def BRK : ExceptionGeneration<0b001, 0b00, "brk">;
1276 def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
1277 def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
1278 def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">;
1279 def HLT : ExceptionGeneration<0b010, 0b00, "hlt">;
1280 def HVC : ExceptionGeneration<0b000, 0b10, "hvc">;
1281 def SMC : ExceptionGeneration<0b000, 0b11, "smc">;
1282 def SVC : ExceptionGeneration<0b000, 0b01, "svc">;
1284 // DCPSn defaults to an immediate operand of zero if unspecified.
1285 def : InstAlias<"dcps1", (DCPS1 0)>;
1286 def : InstAlias<"dcps2", (DCPS2 0)>;
1287 def : InstAlias<"dcps3", (DCPS3 0)>;
1289 //===----------------------------------------------------------------------===//
1290 // Load instructions.
1291 //===----------------------------------------------------------------------===//
1293 // Pair (indexed, offset)
1294 defm LDPW : LoadPairOffset<0b00, 0, GPR32, simm7s4, "ldp">;
1295 defm LDPX : LoadPairOffset<0b10, 0, GPR64, simm7s8, "ldp">;
1296 defm LDPS : LoadPairOffset<0b00, 1, FPR32, simm7s4, "ldp">;
1297 defm LDPD : LoadPairOffset<0b01, 1, FPR64, simm7s8, "ldp">;
1298 defm LDPQ : LoadPairOffset<0b10, 1, FPR128, simm7s16, "ldp">;
1300 defm LDPSW : LoadPairOffset<0b01, 0, GPR64, simm7s4, "ldpsw">;
1302 // Pair (pre-indexed)
1303 def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32, simm7s4, "ldp">;
1304 def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64, simm7s8, "ldp">;
1305 def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32, simm7s4, "ldp">;
1306 def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64, simm7s8, "ldp">;
1307 def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128, simm7s16, "ldp">;
1309 def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64, simm7s4, "ldpsw">;
1311 // Pair (post-indexed)
1312 def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32, simm7s4, "ldp">;
1313 def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64, simm7s8, "ldp">;
1314 def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32, simm7s4, "ldp">;
1315 def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64, simm7s8, "ldp">;
1316 def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128, simm7s16, "ldp">;
1318 def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64, simm7s4, "ldpsw">;
1321 // Pair (no allocate)
1322 defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32, simm7s4, "ldnp">;
1323 defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64, simm7s8, "ldnp">;
1324 defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32, simm7s4, "ldnp">;
1325 defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64, simm7s8, "ldnp">;
1326 defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128, simm7s16, "ldnp">;
1329 // (register offset)
1333 defm LDRBB : Load8RO<0b00, 0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
1334 defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
1335 defm LDRW : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
1336 defm LDRX : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;
1339 defm LDRB : Load8RO<0b00, 1, 0b01, FPR8, "ldr", untyped, load>;
1340 defm LDRH : Load16RO<0b01, 1, 0b01, FPR16, "ldr", f16, load>;
1341 defm LDRS : Load32RO<0b10, 1, 0b01, FPR32, "ldr", f32, load>;
1342 defm LDRD : Load64RO<0b11, 1, 0b01, FPR64, "ldr", f64, load>;
1343 defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128, "ldr", f128, load>;
1345 // Load sign-extended half-word
1346 defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
1347 defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;
1349 // Load sign-extended byte
1350 defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
1351 defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;
1353 // Load sign-extended word
1354 defm LDRSW : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;
1357 defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;
1359 // For regular load, we do not have any alignment requirement.
1360 // Thus, it is safe to directly map the vector loads with interesting
1361 // addressing modes.
1362 // FIXME: We could do the same for bitconvert to floating point vectors.
1363 multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
1364 ValueType ScalTy, ValueType VecTy,
1365 Instruction LOADW, Instruction LOADX,
1367 def : Pat<(VecTy (scalar_to_vector (ScalTy
1368 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
1369 (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
1370 (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
1373 def : Pat<(VecTy (scalar_to_vector (ScalTy
1374 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
1375 (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
1376 (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
1380 let AddedComplexity = 10 in {
1381 defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v8i8, LDRBroW, LDRBroX, bsub>;
1382 defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v16i8, LDRBroW, LDRBroX, bsub>;
1384 defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
1385 defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
1387 defm : ScalToVecROLoadPat<ro16, load, i32, v4f16, LDRHroW, LDRHroX, hsub>;
1388 defm : ScalToVecROLoadPat<ro16, load, i32, v8f16, LDRHroW, LDRHroX, hsub>;
1390 defm : ScalToVecROLoadPat<ro32, load, i32, v2i32, LDRSroW, LDRSroX, ssub>;
1391 defm : ScalToVecROLoadPat<ro32, load, i32, v4i32, LDRSroW, LDRSroX, ssub>;
1393 defm : ScalToVecROLoadPat<ro32, load, f32, v2f32, LDRSroW, LDRSroX, ssub>;
1394 defm : ScalToVecROLoadPat<ro32, load, f32, v4f32, LDRSroW, LDRSroX, ssub>;
1396 defm : ScalToVecROLoadPat<ro64, load, i64, v2i64, LDRDroW, LDRDroX, dsub>;
1398 defm : ScalToVecROLoadPat<ro64, load, f64, v2f64, LDRDroW, LDRDroX, dsub>;
1401 def : Pat <(v1i64 (scalar_to_vector (i64
1402 (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
1403 ro_Wextend64:$extend))))),
1404 (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
1406 def : Pat <(v1i64 (scalar_to_vector (i64
1407 (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
1408 ro_Xextend64:$extend))))),
1409 (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
1412 // Match all load 64 bits width whose type is compatible with FPR64
1413 multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
1414 Instruction LOADW, Instruction LOADX> {
1416 def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
1417 (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
1419 def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
1420 (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
1423 let AddedComplexity = 10 in {
1424 let Predicates = [IsLE] in {
1425 // We must do vector loads with LD1 in big-endian.
1426 defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
1427 defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
1428 defm : VecROLoadPat<ro64, v8i8, LDRDroW, LDRDroX>;
1429 defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
1430 defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
1433 defm : VecROLoadPat<ro64, v1i64, LDRDroW, LDRDroX>;
1434 defm : VecROLoadPat<ro64, v1f64, LDRDroW, LDRDroX>;
1436 // Match all load 128 bits width whose type is compatible with FPR128
1437 let Predicates = [IsLE] in {
1438 // We must do vector loads with LD1 in big-endian.
1439 defm : VecROLoadPat<ro128, v2i64, LDRQroW, LDRQroX>;
1440 defm : VecROLoadPat<ro128, v2f64, LDRQroW, LDRQroX>;
1441 defm : VecROLoadPat<ro128, v4i32, LDRQroW, LDRQroX>;
1442 defm : VecROLoadPat<ro128, v4f32, LDRQroW, LDRQroX>;
1443 defm : VecROLoadPat<ro128, v8i16, LDRQroW, LDRQroX>;
1444 defm : VecROLoadPat<ro128, v8f16, LDRQroW, LDRQroX>;
1445 defm : VecROLoadPat<ro128, v16i8, LDRQroW, LDRQroX>;
1447 } // AddedComplexity = 10
1450 multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
1451 Instruction INSTW, Instruction INSTX> {
1452 def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
1453 (SUBREG_TO_REG (i64 0),
1454 (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
1457 def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
1458 (SUBREG_TO_REG (i64 0),
1459 (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
1463 let AddedComplexity = 10 in {
1464 defm : ExtLoadTo64ROPat<ro8, zextloadi8, LDRBBroW, LDRBBroX>;
1465 defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
1466 defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW, LDRWroX>;
1468 // zextloadi1 -> zextloadi8
1469 defm : ExtLoadTo64ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
1471 // extload -> zextload
1472 defm : ExtLoadTo64ROPat<ro8, extloadi8, LDRBBroW, LDRBBroX>;
1473 defm : ExtLoadTo64ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
1474 defm : ExtLoadTo64ROPat<ro32, extloadi32, LDRWroW, LDRWroX>;
1476 // extloadi1 -> zextloadi8
1477 defm : ExtLoadTo64ROPat<ro8, extloadi1, LDRBBroW, LDRBBroX>;
1482 multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
1483 Instruction INSTW, Instruction INSTX> {
1484 def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
1485 (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
1487 def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
1488 (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
1492 let AddedComplexity = 10 in {
1493 // extload -> zextload
1494 defm : ExtLoadTo32ROPat<ro8, extloadi8, LDRBBroW, LDRBBroX>;
1495 defm : ExtLoadTo32ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
1496 defm : ExtLoadTo32ROPat<ro32, extloadi32, LDRWroW, LDRWroX>;
1498 // zextloadi1 -> zextloadi8
1499 defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
1503 // (unsigned immediate)
1505 defm LDRX : LoadUI<0b11, 0, 0b01, GPR64, uimm12s8, "ldr",
1507 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
1508 defm LDRW : LoadUI<0b10, 0, 0b01, GPR32, uimm12s4, "ldr",
1510 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
1511 defm LDRB : LoadUI<0b00, 1, 0b01, FPR8, uimm12s1, "ldr",
1513 (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
1514 defm LDRH : LoadUI<0b01, 1, 0b01, FPR16, uimm12s2, "ldr",
1515 [(set (f16 FPR16:$Rt),
1516 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
1517 defm LDRS : LoadUI<0b10, 1, 0b01, FPR32, uimm12s4, "ldr",
1518 [(set (f32 FPR32:$Rt),
1519 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
1520 defm LDRD : LoadUI<0b11, 1, 0b01, FPR64, uimm12s8, "ldr",
1521 [(set (f64 FPR64:$Rt),
1522 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
1523 defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128, uimm12s16, "ldr",
1524 [(set (f128 FPR128:$Rt),
1525 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;
1527 // For regular load, we do not have any alignment requirement.
1528 // Thus, it is safe to directly map the vector loads with interesting
1529 // addressing modes.
1530 // FIXME: We could do the same for bitconvert to floating point vectors.
1531 def : Pat <(v8i8 (scalar_to_vector (i32
1532 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
1533 (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
1534 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
1535 def : Pat <(v16i8 (scalar_to_vector (i32
1536 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
1537 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
1538 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
1539 def : Pat <(v4i16 (scalar_to_vector (i32
1540 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
1541 (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
1542 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
1543 def : Pat <(v8i16 (scalar_to_vector (i32
1544 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
1545 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
1546 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
1547 def : Pat <(v2i32 (scalar_to_vector (i32
1548 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
1549 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
1550 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
1551 def : Pat <(v4i32 (scalar_to_vector (i32
1552 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
1553 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
1554 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
1555 def : Pat <(v1i64 (scalar_to_vector (i64
1556 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
1557 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1558 def : Pat <(v2i64 (scalar_to_vector (i64
1559 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
1560 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
1561 (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;
1563 // Match all load 64 bits width whose type is compatible with FPR64
1564 let Predicates = [IsLE] in {
1565 // We must use LD1 to perform vector loads in big-endian.
1566 def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1567 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1568 def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1569 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1570 def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1571 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1572 def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1573 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1574 def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1575 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1577 def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1578 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1579 def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1580 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1582 // Match all load 128 bits width whose type is compatible with FPR128
1583 let Predicates = [IsLE] in {
1584 // We must use LD1 to perform vector loads in big-endian.
1585 def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1586 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1587 def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1588 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1589 def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1590 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1591 def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1592 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1593 def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1594 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1595 def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1596 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1597 def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1598 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1600 def : Pat<(f128 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1601 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1603 defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
1605 (zextloadi16 (am_indexed16 GPR64sp:$Rn,
1606 uimm12s2:$offset)))]>;
1607 defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
1609 (zextloadi8 (am_indexed8 GPR64sp:$Rn,
1610 uimm12s1:$offset)))]>;
1612 def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1613 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
1614 def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
1615 (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
1617 // zextloadi1 -> zextloadi8
1618 def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1619 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
1620 def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1621 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
1623 // extload -> zextload
1624 def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
1625 (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
1626 def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1627 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
1628 def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1629 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
1630 def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
1631 (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
1632 def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
1633 (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
1634 def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1635 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
1636 def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1637 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
1639 // load sign-extended half-word
1640 defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
1642 (sextloadi16 (am_indexed16 GPR64sp:$Rn,
1643 uimm12s2:$offset)))]>;
1644 defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
1646 (sextloadi16 (am_indexed16 GPR64sp:$Rn,
1647 uimm12s2:$offset)))]>;
1649 // load sign-extended byte
1650 defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
1652 (sextloadi8 (am_indexed8 GPR64sp:$Rn,
1653 uimm12s1:$offset)))]>;
1654 defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
1656 (sextloadi8 (am_indexed8 GPR64sp:$Rn,
1657 uimm12s1:$offset)))]>;
1659 // load sign-extended word
1660 defm LDRSW : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
1662 (sextloadi32 (am_indexed32 GPR64sp:$Rn,
1663 uimm12s4:$offset)))]>;
1665 // load zero-extended word
1666 def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
1667 (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
1670 def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
1671 [(AArch64Prefetch imm:$Rt,
1672 (am_indexed64 GPR64sp:$Rn,
1673 uimm12s8:$offset))]>;
1675 def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;
1679 def LDRWl : LoadLiteral<0b00, 0, GPR32, "ldr">;
1680 def LDRXl : LoadLiteral<0b01, 0, GPR64, "ldr">;
1681 def LDRSl : LoadLiteral<0b00, 1, FPR32, "ldr">;
1682 def LDRDl : LoadLiteral<0b01, 1, FPR64, "ldr">;
1683 def LDRQl : LoadLiteral<0b10, 1, FPR128, "ldr">;
1685 // load sign-extended word
1686 def LDRSWl : LoadLiteral<0b10, 0, GPR64, "ldrsw">;
1689 def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
1690 // [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;
1693 // (unscaled immediate)
1694 defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64, "ldur",
1696 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
1697 defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32, "ldur",
1699 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
1700 defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8, "ldur",
1702 (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
1703 defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16, "ldur",
1705 (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1706 defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32, "ldur",
1707 [(set (f32 FPR32:$Rt),
1708 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
1709 defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64, "ldur",
1710 [(set (f64 FPR64:$Rt),
1711 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
1712 defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128, "ldur",
1713 [(set (f128 FPR128:$Rt),
1714 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;
1717 : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
1719 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1721 : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
1723 (zextloadi8 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1725 // Match all load 64 bits width whose type is compatible with FPR64
1726 let Predicates = [IsLE] in {
1727 def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1728 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1729 def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1730 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1731 def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1732 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1733 def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1734 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1735 def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1736 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1738 def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1739 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1740 def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1741 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1743 // Match all load 128 bits width whose type is compatible with FPR128
1744 let Predicates = [IsLE] in {
1745 def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1746 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1747 def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1748 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1749 def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1750 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1751 def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1752 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1753 def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1754 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1755 def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1756 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1757 def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1758 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1762 def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1763 (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
1764 def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1765 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
1766 def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1767 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
1768 def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
1769 (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1770 def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1771 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1772 def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1773 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1774 def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1775 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1777 def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1778 (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
1779 def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1780 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
1781 def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1782 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
1783 def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
1784 (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1785 def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1786 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1787 def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1788 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1789 def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1790 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1794 // LDR mnemonics fall back to LDUR for negative or unaligned offsets.
1796 // Define new assembler match classes as we want to only match these when
1797 // the don't otherwise match the scaled addressing mode for LDR/STR. Don't
1798 // associate a DiagnosticType either, as we want the diagnostic for the
1799 // canonical form (the scaled operand) to take precedence.
1800 class SImm9OffsetOperand<int Width> : AsmOperandClass {
1801 let Name = "SImm9OffsetFB" # Width;
1802 let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
1803 let RenderMethod = "addImmOperands";
1806 def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
1807 def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
1808 def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
1809 def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
1810 def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;
1812 def simm9_offset_fb8 : Operand<i64> {
1813 let ParserMatchClass = SImm9OffsetFB8Operand;
1815 def simm9_offset_fb16 : Operand<i64> {
1816 let ParserMatchClass = SImm9OffsetFB16Operand;
1818 def simm9_offset_fb32 : Operand<i64> {
1819 let ParserMatchClass = SImm9OffsetFB32Operand;
1821 def simm9_offset_fb64 : Operand<i64> {
1822 let ParserMatchClass = SImm9OffsetFB64Operand;
1824 def simm9_offset_fb128 : Operand<i64> {
1825 let ParserMatchClass = SImm9OffsetFB128Operand;
1828 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1829 (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
1830 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1831 (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
1832 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1833 (LDURBi FPR8:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
1834 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1835 (LDURHi FPR16:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
1836 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1837 (LDURSi FPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
1838 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1839 (LDURDi FPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
1840 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1841 (LDURQi FPR128:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
1844 def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1845 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1846 def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1847 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1849 // load sign-extended half-word
1851 : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
1853 (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1855 : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
1857 (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1859 // load sign-extended byte
1861 : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
1863 (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
1865 : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
1867 (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
1869 // load sign-extended word
1871 : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
1873 (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
1875 // zero and sign extending aliases from generic LDR* mnemonics to LDUR*.
1876 def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
1877 (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
1878 def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
1879 (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
1880 def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
1881 (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
1882 def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
1883 (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
1884 def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
1885 (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
1886 def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
1887 (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
1888 def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
1889 (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
1892 defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
1893 [(AArch64Prefetch imm:$Rt,
1894 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
1897 // (unscaled immediate, unprivileged)
1898 defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
1899 defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;
1901 defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
1902 defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;
1904 // load sign-extended half-word
1905 defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
1906 defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;
1908 // load sign-extended byte
1909 defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
1910 defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;
1912 // load sign-extended word
1913 defm LDTRSW : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;
1916 // (immediate pre-indexed)
1917 def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32, "ldr">;
1918 def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64, "ldr">;
1919 def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8, "ldr">;
1920 def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16, "ldr">;
1921 def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32, "ldr">;
1922 def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64, "ldr">;
1923 def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128, "ldr">;
1925 // load sign-extended half-word
1926 def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32, "ldrsh">;
1927 def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64, "ldrsh">;
1929 // load sign-extended byte
1930 def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32, "ldrsb">;
1931 def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64, "ldrsb">;
1933 // load zero-extended byte
1934 def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32, "ldrb">;
1935 def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32, "ldrh">;
1937 // load sign-extended word
1938 def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64, "ldrsw">;
1941 // (immediate post-indexed)
1942 def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32, "ldr">;
1943 def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64, "ldr">;
1944 def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8, "ldr">;
1945 def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16, "ldr">;
1946 def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32, "ldr">;
1947 def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64, "ldr">;
1948 def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128, "ldr">;
1950 // load sign-extended half-word
1951 def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32, "ldrsh">;
1952 def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64, "ldrsh">;
1954 // load sign-extended byte
1955 def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32, "ldrsb">;
1956 def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64, "ldrsb">;
1958 // load zero-extended byte
1959 def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32, "ldrb">;
1960 def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32, "ldrh">;
1962 // load sign-extended word
1963 def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64, "ldrsw">;
1965 //===----------------------------------------------------------------------===//
1966 // Store instructions.
1967 //===----------------------------------------------------------------------===//
1969 // Pair (indexed, offset)
1970 // FIXME: Use dedicated range-checked addressing mode operand here.
1971 defm STPW : StorePairOffset<0b00, 0, GPR32, simm7s4, "stp">;
1972 defm STPX : StorePairOffset<0b10, 0, GPR64, simm7s8, "stp">;
1973 defm STPS : StorePairOffset<0b00, 1, FPR32, simm7s4, "stp">;
1974 defm STPD : StorePairOffset<0b01, 1, FPR64, simm7s8, "stp">;
1975 defm STPQ : StorePairOffset<0b10, 1, FPR128, simm7s16, "stp">;
1977 // Pair (pre-indexed)
1978 def STPWpre : StorePairPreIdx<0b00, 0, GPR32, simm7s4, "stp">;
1979 def STPXpre : StorePairPreIdx<0b10, 0, GPR64, simm7s8, "stp">;
1980 def STPSpre : StorePairPreIdx<0b00, 1, FPR32, simm7s4, "stp">;
1981 def STPDpre : StorePairPreIdx<0b01, 1, FPR64, simm7s8, "stp">;
1982 def STPQpre : StorePairPreIdx<0b10, 1, FPR128, simm7s16, "stp">;
1984 // Pair (pre-indexed)
1985 def STPWpost : StorePairPostIdx<0b00, 0, GPR32, simm7s4, "stp">;
1986 def STPXpost : StorePairPostIdx<0b10, 0, GPR64, simm7s8, "stp">;
1987 def STPSpost : StorePairPostIdx<0b00, 1, FPR32, simm7s4, "stp">;
1988 def STPDpost : StorePairPostIdx<0b01, 1, FPR64, simm7s8, "stp">;
1989 def STPQpost : StorePairPostIdx<0b10, 1, FPR128, simm7s16, "stp">;
1991 // Pair (no allocate)
1992 defm STNPW : StorePairNoAlloc<0b00, 0, GPR32, simm7s4, "stnp">;
1993 defm STNPX : StorePairNoAlloc<0b10, 0, GPR64, simm7s8, "stnp">;
1994 defm STNPS : StorePairNoAlloc<0b00, 1, FPR32, simm7s4, "stnp">;
1995 defm STNPD : StorePairNoAlloc<0b01, 1, FPR64, simm7s8, "stnp">;
1996 defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128, simm7s16, "stnp">;
1999 // (Register offset)
2002 defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
2003 defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
2004 defm STRW : Store32RO<0b10, 0, 0b00, GPR32, "str", i32, store>;
2005 defm STRX : Store64RO<0b11, 0, 0b00, GPR64, "str", i64, store>;
2009 defm STRB : Store8RO< 0b00, 1, 0b00, FPR8, "str", untyped, store>;
2010 defm STRH : Store16RO<0b01, 1, 0b00, FPR16, "str", f16, store>;
2011 defm STRS : Store32RO<0b10, 1, 0b00, FPR32, "str", f32, store>;
2012 defm STRD : Store64RO<0b11, 1, 0b00, FPR64, "str", f64, store>;
2013 defm STRQ : Store128RO<0b00, 1, 0b10, FPR128, "str", f128, store>;
2015 multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
2016 Instruction STRW, Instruction STRX> {
2018 def : Pat<(storeop GPR64:$Rt,
2019 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2020 (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
2021 GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2023 def : Pat<(storeop GPR64:$Rt,
2024 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
2025 (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
2026 GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2029 let AddedComplexity = 10 in {
2031 defm : TruncStoreFrom64ROPat<ro8, truncstorei8, STRBBroW, STRBBroX>;
2032 defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
2033 defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW, STRWroX>;
2036 multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
2037 Instruction STRW, Instruction STRX> {
2038 def : Pat<(store (VecTy FPR:$Rt),
2039 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2040 (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2042 def : Pat<(store (VecTy FPR:$Rt),
2043 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
2044 (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2047 let AddedComplexity = 10 in {
2048 // Match all store 64 bits width whose type is compatible with FPR64
2049 let Predicates = [IsLE] in {
2050 // We must use ST1 to store vectors in big-endian.
2051 defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
2052 defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
2053 defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
2054 defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
2055 defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
2058 defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
2059 defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;
2061 // Match all store 128 bits width whose type is compatible with FPR128
2062 let Predicates = [IsLE] in {
2063 // We must use ST1 to store vectors in big-endian.
2064 defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
2065 defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
2066 defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
2067 defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
2068 defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
2069 defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
2070 defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
2072 } // AddedComplexity = 10
2074 // Match stores from lane 0 to the appropriate subreg's store.
2075 multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
2076 ValueType VecTy, ValueType STy,
2077 SubRegIndex SubRegIdx,
2078 Instruction STRW, Instruction STRX> {
2080 def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
2081 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2082 (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
2083 GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2085 def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
2086 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
2087 (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
2088 GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2091 let AddedComplexity = 19 in {
2092 defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
2093 defm : VecROStoreLane0Pat<ro16, store , v8i16, i16, hsub, STRHroW, STRHroX>;
2094 defm : VecROStoreLane0Pat<ro32, truncstorei32, v4i32, i32, ssub, STRSroW, STRSroX>;
2095 defm : VecROStoreLane0Pat<ro32, store , v4i32, i32, ssub, STRSroW, STRSroX>;
2096 defm : VecROStoreLane0Pat<ro32, store , v4f32, f32, ssub, STRSroW, STRSroX>;
2097 defm : VecROStoreLane0Pat<ro64, store , v2i64, i64, dsub, STRDroW, STRDroX>;
2098 defm : VecROStoreLane0Pat<ro64, store , v2f64, f64, dsub, STRDroW, STRDroX>;
2102 // (unsigned immediate)
2103 defm STRX : StoreUI<0b11, 0, 0b00, GPR64, uimm12s8, "str",
2105 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
2106 defm STRW : StoreUI<0b10, 0, 0b00, GPR32, uimm12s4, "str",
2108 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
2109 defm STRB : StoreUI<0b00, 1, 0b00, FPR8, uimm12s1, "str",
2111 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
2112 defm STRH : StoreUI<0b01, 1, 0b00, FPR16, uimm12s2, "str",
2113 [(store (f16 FPR16:$Rt),
2114 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
2115 defm STRS : StoreUI<0b10, 1, 0b00, FPR32, uimm12s4, "str",
2116 [(store (f32 FPR32:$Rt),
2117 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
2118 defm STRD : StoreUI<0b11, 1, 0b00, FPR64, uimm12s8, "str",
2119 [(store (f64 FPR64:$Rt),
2120 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
2121 defm STRQ : StoreUI<0b00, 1, 0b10, FPR128, uimm12s16, "str", []>;
2123 defm STRHH : StoreUI<0b01, 0, 0b00, GPR32, uimm12s2, "strh",
2124 [(truncstorei16 GPR32:$Rt,
2125 (am_indexed16 GPR64sp:$Rn,
2126 uimm12s2:$offset))]>;
2127 defm STRBB : StoreUI<0b00, 0, 0b00, GPR32, uimm12s1, "strb",
2128 [(truncstorei8 GPR32:$Rt,
2129 (am_indexed8 GPR64sp:$Rn,
2130 uimm12s1:$offset))]>;
2132 // Match all store 64 bits width whose type is compatible with FPR64
2133 let AddedComplexity = 10 in {
2134 let Predicates = [IsLE] in {
2135 // We must use ST1 to store vectors in big-endian.
2136 def : Pat<(store (v2f32 FPR64:$Rt),
2137 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2138 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2139 def : Pat<(store (v8i8 FPR64:$Rt),
2140 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2141 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2142 def : Pat<(store (v4i16 FPR64:$Rt),
2143 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2144 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2145 def : Pat<(store (v2i32 FPR64:$Rt),
2146 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2147 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2148 def : Pat<(store (v4f16 FPR64:$Rt),
2149 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2150 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2152 def : Pat<(store (v1f64 FPR64:$Rt),
2153 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2154 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2155 def : Pat<(store (v1i64 FPR64:$Rt),
2156 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2157 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2159 // Match all store 128 bits width whose type is compatible with FPR128
2160 let Predicates = [IsLE] in {
2161 // We must use ST1 to store vectors in big-endian.
2162 def : Pat<(store (v4f32 FPR128:$Rt),
2163 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2164 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2165 def : Pat<(store (v2f64 FPR128:$Rt),
2166 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2167 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2168 def : Pat<(store (v16i8 FPR128:$Rt),
2169 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2170 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2171 def : Pat<(store (v8i16 FPR128:$Rt),
2172 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2173 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2174 def : Pat<(store (v4i32 FPR128:$Rt),
2175 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2176 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2177 def : Pat<(store (v2i64 FPR128:$Rt),
2178 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2179 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2180 def : Pat<(store (v8f16 FPR128:$Rt),
2181 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2182 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2184 def : Pat<(store (f128 FPR128:$Rt),
2185 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2186 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2189 def : Pat<(truncstorei32 GPR64:$Rt,
2190 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
2191 (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
2192 def : Pat<(truncstorei16 GPR64:$Rt,
2193 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
2194 (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
2195 def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
2196 (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;
2198 } // AddedComplexity = 10
2201 // (unscaled immediate)
2202 defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64, "stur",
2204 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
2205 defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32, "stur",
2207 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
2208 defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8, "stur",
2210 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
2211 defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16, "stur",
2212 [(store (f16 FPR16:$Rt),
2213 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
2214 defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32, "stur",
2215 [(store (f32 FPR32:$Rt),
2216 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
2217 defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64, "stur",
2218 [(store (f64 FPR64:$Rt),
2219 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
2220 defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128, "stur",
2221 [(store (f128 FPR128:$Rt),
2222 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
2223 defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32, "sturh",
2224 [(truncstorei16 GPR32:$Rt,
2225 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
2226 defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32, "sturb",
2227 [(truncstorei8 GPR32:$Rt,
2228 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
2230 // Match all store 64 bits width whose type is compatible with FPR64
2231 let Predicates = [IsLE] in {
2232 // We must use ST1 to store vectors in big-endian.
2233 def : Pat<(store (v2f32 FPR64:$Rt),
2234 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2235 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2236 def : Pat<(store (v8i8 FPR64:$Rt),
2237 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2238 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2239 def : Pat<(store (v4i16 FPR64:$Rt),
2240 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2241 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2242 def : Pat<(store (v2i32 FPR64:$Rt),
2243 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2244 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2245 def : Pat<(store (v4f16 FPR64:$Rt),
2246 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2247 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2249 def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2250 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2251 def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2252 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2254 // Match all store 128 bits width whose type is compatible with FPR128
2255 let Predicates = [IsLE] in {
2256 // We must use ST1 to store vectors in big-endian.
2257 def : Pat<(store (v4f32 FPR128:$Rt),
2258 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2259 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2260 def : Pat<(store (v2f64 FPR128:$Rt),
2261 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2262 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2263 def : Pat<(store (v16i8 FPR128:$Rt),
2264 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2265 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2266 def : Pat<(store (v8i16 FPR128:$Rt),
2267 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2268 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2269 def : Pat<(store (v4i32 FPR128:$Rt),
2270 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2271 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2272 def : Pat<(store (v2i64 FPR128:$Rt),
2273 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2274 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2275 def : Pat<(store (v2f64 FPR128:$Rt),
2276 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2277 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2278 def : Pat<(store (v8f16 FPR128:$Rt),
2279 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2280 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2283 // unscaled i64 truncating stores
2284 def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
2285 (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
2286 def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
2287 (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
2288 def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
2289 (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
2292 // STR mnemonics fall back to STUR for negative or unaligned offsets.
2293 def : InstAlias<"str $Rt, [$Rn, $offset]",
2294 (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2295 def : InstAlias<"str $Rt, [$Rn, $offset]",
2296 (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2297 def : InstAlias<"str $Rt, [$Rn, $offset]",
2298 (STURBi FPR8:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2299 def : InstAlias<"str $Rt, [$Rn, $offset]",
2300 (STURHi FPR16:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2301 def : InstAlias<"str $Rt, [$Rn, $offset]",
2302 (STURSi FPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2303 def : InstAlias<"str $Rt, [$Rn, $offset]",
2304 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2305 def : InstAlias<"str $Rt, [$Rn, $offset]",
2306 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
2308 def : InstAlias<"strb $Rt, [$Rn, $offset]",
2309 (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2310 def : InstAlias<"strh $Rt, [$Rn, $offset]",
2311 (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2314 // (unscaled immediate, unprivileged)
2315 defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
2316 defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;
2318 defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
2319 defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;
2322 // (immediate pre-indexed)
2323 def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32, "str", pre_store, i32>;
2324 def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64, "str", pre_store, i64>;
2325 def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8, "str", pre_store, untyped>;
2326 def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16, "str", pre_store, f16>;
2327 def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32, "str", pre_store, f32>;
2328 def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64, "str", pre_store, f64>;
2329 def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128, "str", pre_store, f128>;
2331 def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32, "strb", pre_truncsti8, i32>;
2332 def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32, "strh", pre_truncsti16, i32>;
2335 def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2336 (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2338 def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2339 (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2341 def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2342 (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2345 def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2346 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2347 def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2348 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2349 def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2350 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2351 def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2352 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2353 def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2354 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2355 def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2356 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2357 def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2358 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2360 def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2361 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2362 def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2363 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2364 def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2365 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2366 def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2367 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2368 def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2369 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2370 def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2371 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2372 def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2373 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2376 // (immediate post-indexed)
2377 def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32, "str", post_store, i32>;
2378 def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64, "str", post_store, i64>;
2379 def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8, "str", post_store, untyped>;
2380 def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16, "str", post_store, f16>;
2381 def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32, "str", post_store, f32>;
2382 def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64, "str", post_store, f64>;
2383 def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128, "str", post_store, f128>;
2385 def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32, "strb", post_truncsti8, i32>;
2386 def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32, "strh", post_truncsti16, i32>;
2389 def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2390 (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2392 def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2393 (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2395 def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2396 (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2399 def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2400 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2401 def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2402 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2403 def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2404 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2405 def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2406 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2407 def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2408 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2409 def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2410 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2411 def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2412 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2414 def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2415 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2416 def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2417 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2418 def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2419 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2420 def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2421 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2422 def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2423 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2424 def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2425 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2426 def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2427 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2429 //===----------------------------------------------------------------------===//
2430 // Load/store exclusive instructions.
2431 //===----------------------------------------------------------------------===//
2433 def LDARW : LoadAcquire <0b10, 1, 1, 0, 1, GPR32, "ldar">;
2434 def LDARX : LoadAcquire <0b11, 1, 1, 0, 1, GPR64, "ldar">;
2435 def LDARB : LoadAcquire <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
2436 def LDARH : LoadAcquire <0b01, 1, 1, 0, 1, GPR32, "ldarh">;
2438 def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
2439 def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
2440 def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
2441 def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;
2443 def LDXRW : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
2444 def LDXRX : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
2445 def LDXRB : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
2446 def LDXRH : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;
2448 def STLRW : StoreRelease <0b10, 1, 0, 0, 1, GPR32, "stlr">;
2449 def STLRX : StoreRelease <0b11, 1, 0, 0, 1, GPR64, "stlr">;
2450 def STLRB : StoreRelease <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
2451 def STLRH : StoreRelease <0b01, 1, 0, 0, 1, GPR32, "stlrh">;
2453 def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
2454 def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
2455 def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
2456 def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;
2458 def STXRW : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
2459 def STXRX : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
2460 def STXRB : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
2461 def STXRH : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;
2463 def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
2464 def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;
2466 def LDXPW : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
2467 def LDXPX : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;
2469 def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
2470 def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;
2472 def STXPW : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
2473 def STXPX : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
2475 let Predicates = [HasV8_1a] in {
2476 // v8.1a "Limited Order Region" extension load-acquire instructions
2477 def LDLARW : LoadAcquire <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
2478 def LDLARX : LoadAcquire <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
2479 def LDLARB : LoadAcquire <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
2480 def LDLARH : LoadAcquire <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;
2482 // v8.1a "Limited Order Region" extension store-release instructions
2483 def STLLRW : StoreRelease <0b10, 1, 0, 0, 0, GPR32, "stllr">;
2484 def STLLRX : StoreRelease <0b11, 1, 0, 0, 0, GPR64, "stllr">;
2485 def STLLRB : StoreRelease <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
2486 def STLLRH : StoreRelease <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
2489 //===----------------------------------------------------------------------===//
2490 // Scaled floating point to integer conversion instructions.
2491 //===----------------------------------------------------------------------===//
2493 defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
2494 defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
2495 defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
2496 defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
2497 defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
2498 defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
2499 defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
2500 defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
2501 defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
2502 defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
2503 defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
2504 defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
2506 multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
2507 def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
2508 def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
2509 def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
2510 def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
2511 def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
2512 def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;
2514 def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
2515 (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
2516 def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
2517 (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
2518 def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
2519 (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
2520 def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
2521 (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
2522 def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
2523 (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
2524 def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
2525 (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
2528 defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
2529 defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
2531 multiclass FPToIntegerPats<SDNode to_int, SDNode round, string INST> {
2532 def : Pat<(i32 (to_int (round f32:$Rn))),
2533 (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
2534 def : Pat<(i64 (to_int (round f32:$Rn))),
2535 (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
2536 def : Pat<(i32 (to_int (round f64:$Rn))),
2537 (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
2538 def : Pat<(i64 (to_int (round f64:$Rn))),
2539 (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
2542 defm : FPToIntegerPats<fp_to_sint, fceil, "FCVTPS">;
2543 defm : FPToIntegerPats<fp_to_uint, fceil, "FCVTPU">;
2544 defm : FPToIntegerPats<fp_to_sint, ffloor, "FCVTMS">;
2545 defm : FPToIntegerPats<fp_to_uint, ffloor, "FCVTMU">;
2546 defm : FPToIntegerPats<fp_to_sint, ftrunc, "FCVTZS">;
2547 defm : FPToIntegerPats<fp_to_uint, ftrunc, "FCVTZU">;
2548 defm : FPToIntegerPats<fp_to_sint, frnd, "FCVTAS">;
2549 defm : FPToIntegerPats<fp_to_uint, frnd, "FCVTAU">;
2551 //===----------------------------------------------------------------------===//
2552 // Scaled integer to floating point conversion instructions.
2553 //===----------------------------------------------------------------------===//
2555 defm SCVTF : IntegerToFP<0, "scvtf", sint_to_fp>;
2556 defm UCVTF : IntegerToFP<1, "ucvtf", uint_to_fp>;
2558 //===----------------------------------------------------------------------===//
2559 // Unscaled integer to floating point conversion instruction.
2560 //===----------------------------------------------------------------------===//
2562 defm FMOV : UnscaledConversion<"fmov">;
2564 // Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
2565 let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
2566 def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
2568 def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
2572 //===----------------------------------------------------------------------===//
2573 // Floating point conversion instruction.
2574 //===----------------------------------------------------------------------===//
2576 defm FCVT : FPConversion<"fcvt">;
2578 //===----------------------------------------------------------------------===//
2579 // Floating point single operand instructions.
2580 //===----------------------------------------------------------------------===//
2582 defm FABS : SingleOperandFPData<0b0001, "fabs", fabs>;
2583 defm FMOV : SingleOperandFPData<0b0000, "fmov">;
2584 defm FNEG : SingleOperandFPData<0b0010, "fneg", fneg>;
2585 defm FRINTA : SingleOperandFPData<0b1100, "frinta", frnd>;
2586 defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
2587 defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
2588 defm FRINTN : SingleOperandFPData<0b1000, "frintn", int_aarch64_neon_frintn>;
2589 defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;
2591 def : Pat<(v1f64 (int_aarch64_neon_frintn (v1f64 FPR64:$Rn))),
2592 (FRINTNDr FPR64:$Rn)>;
2594 defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
2595 defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;
2597 let SchedRW = [WriteFDiv] in {
2598 defm FSQRT : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
2601 //===----------------------------------------------------------------------===//
2602 // Floating point two operand instructions.
2603 //===----------------------------------------------------------------------===//
2605 defm FADD : TwoOperandFPData<0b0010, "fadd", fadd>;
2606 let SchedRW = [WriteFDiv] in {
2607 defm FDIV : TwoOperandFPData<0b0001, "fdiv", fdiv>;
2609 defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
2610 defm FMAX : TwoOperandFPData<0b0100, "fmax", fmaxnan>;
2611 defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
2612 defm FMIN : TwoOperandFPData<0b0101, "fmin", fminnan>;
2613 let SchedRW = [WriteFMul] in {
2614 defm FMUL : TwoOperandFPData<0b0000, "fmul", fmul>;
2615 defm FNMUL : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
2617 defm FSUB : TwoOperandFPData<0b0011, "fsub", fsub>;
2619 def : Pat<(v1f64 (fmaxnan (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2620 (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
2621 def : Pat<(v1f64 (fminnan (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2622 (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
2623 def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2624 (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
2625 def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2626 (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;
2628 //===----------------------------------------------------------------------===//
2629 // Floating point three operand instructions.
2630 //===----------------------------------------------------------------------===//
2632 defm FMADD : ThreeOperandFPData<0, 0, "fmadd", fma>;
2633 defm FMSUB : ThreeOperandFPData<0, 1, "fmsub",
2634 TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
2635 defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
2636 TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
2637 defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
2638 TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;
2640 // The following def pats catch the case where the LHS of an FMA is negated.
2641 // The TriOpFrag above catches the case where the middle operand is negated.
2643 // N.b. FMSUB etc have the accumulator at the *end* of (outs), unlike
2644 // the NEON variant.
2645 def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
2646 (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
2648 def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
2649 (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
2651 // We handled -(a + b*c) for FNMADD above, now it's time for "(-a) + (-b)*c" and
2653 def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
2654 (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
2656 def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
2657 (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
2659 def : Pat<(f32 (fma FPR32:$Rn, (fneg FPR32:$Rm), (fneg FPR32:$Ra))),
2660 (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
2662 def : Pat<(f64 (fma FPR64:$Rn, (fneg FPR64:$Rm), (fneg FPR64:$Ra))),
2663 (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
2665 //===----------------------------------------------------------------------===//
2666 // Floating point comparison instructions.
2667 //===----------------------------------------------------------------------===//
2669 defm FCMPE : FPComparison<1, "fcmpe">;
2670 defm FCMP : FPComparison<0, "fcmp", AArch64fcmp>;
2672 //===----------------------------------------------------------------------===//
2673 // Floating point conditional comparison instructions.
2674 //===----------------------------------------------------------------------===//
2676 defm FCCMPE : FPCondComparison<1, "fccmpe">;
2677 defm FCCMP : FPCondComparison<0, "fccmp", AArch64fccmp>;
2679 //===----------------------------------------------------------------------===//
2680 // Floating point conditional select instruction.
2681 //===----------------------------------------------------------------------===//
2683 defm FCSEL : FPCondSelect<"fcsel">;
2685 // CSEL instructions providing f128 types need to be handled by a
2686 // pseudo-instruction since the eventual code will need to introduce basic
2687 // blocks and control flow.
2688 def F128CSEL : Pseudo<(outs FPR128:$Rd),
2689 (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
2690 [(set (f128 FPR128:$Rd),
2691 (AArch64csel FPR128:$Rn, FPR128:$Rm,
2692 (i32 imm:$cond), NZCV))]> {
2694 let usesCustomInserter = 1;
2695 let hasNoSchedulingInfo = 1;
2699 //===----------------------------------------------------------------------===//
2700 // Floating point immediate move.
2701 //===----------------------------------------------------------------------===//
2703 let isReMaterializable = 1 in {
2704 defm FMOV : FPMoveImmediate<"fmov">;
2707 //===----------------------------------------------------------------------===//
2708 // Advanced SIMD two vector instructions.
2709 //===----------------------------------------------------------------------===//
2711 defm UABDL : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
2712 int_aarch64_neon_uabd>;
2713 // Match UABDL in log2-shuffle patterns.
2714 def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
2715 (v8i16 (add (sub (zext (v8i8 V64:$opA)),
2716 (zext (v8i8 V64:$opB))),
2717 (AArch64vashr v8i16:$src, (i32 15))))),
2718 (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
2719 def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
2720 (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
2721 (zext (extract_high_v16i8 V128:$opB))),
2722 (AArch64vashr v8i16:$src, (i32 15))))),
2723 (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
2724 def : Pat<(xor (v4i32 (AArch64vashr v4i32:$src, (i32 31))),
2725 (v4i32 (add (sub (zext (v4i16 V64:$opA)),
2726 (zext (v4i16 V64:$opB))),
2727 (AArch64vashr v4i32:$src, (i32 31))))),
2728 (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
2729 def : Pat<(xor (v4i32 (AArch64vashr v4i32:$src, (i32 31))),
2730 (v4i32 (add (sub (zext (extract_high_v8i16 V128:$opA)),
2731 (zext (extract_high_v8i16 V128:$opB))),
2732 (AArch64vashr v4i32:$src, (i32 31))))),
2733 (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
2734 def : Pat<(xor (v2i64 (AArch64vashr v2i64:$src, (i32 63))),
2735 (v2i64 (add (sub (zext (v2i32 V64:$opA)),
2736 (zext (v2i32 V64:$opB))),
2737 (AArch64vashr v2i64:$src, (i32 63))))),
2738 (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
2739 def : Pat<(xor (v2i64 (AArch64vashr v2i64:$src, (i32 63))),
2740 (v2i64 (add (sub (zext (extract_high_v4i32 V128:$opA)),
2741 (zext (extract_high_v4i32 V128:$opB))),
2742 (AArch64vashr v2i64:$src, (i32 63))))),
2743 (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
2745 defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", int_aarch64_neon_abs>;
2746 def : Pat<(xor (v8i8 (AArch64vashr V64:$src, (i32 7))),
2747 (v8i8 (add V64:$src, (AArch64vashr V64:$src, (i32 7))))),
2748 (ABSv8i8 V64:$src)>;
2749 def : Pat<(xor (v4i16 (AArch64vashr V64:$src, (i32 15))),
2750 (v4i16 (add V64:$src, (AArch64vashr V64:$src, (i32 15))))),
2751 (ABSv4i16 V64:$src)>;
2752 def : Pat<(xor (v2i32 (AArch64vashr V64:$src, (i32 31))),
2753 (v2i32 (add V64:$src, (AArch64vashr V64:$src, (i32 31))))),
2754 (ABSv2i32 V64:$src)>;
2755 def : Pat<(xor (v16i8 (AArch64vashr V128:$src, (i32 7))),
2756 (v16i8 (add V128:$src, (AArch64vashr V128:$src, (i32 7))))),
2757 (ABSv16i8 V128:$src)>;
2758 def : Pat<(xor (v8i16 (AArch64vashr V128:$src, (i32 15))),
2759 (v8i16 (add V128:$src, (AArch64vashr V128:$src, (i32 15))))),
2760 (ABSv8i16 V128:$src)>;
2761 def : Pat<(xor (v4i32 (AArch64vashr V128:$src, (i32 31))),
2762 (v4i32 (add V128:$src, (AArch64vashr V128:$src, (i32 31))))),
2763 (ABSv4i32 V128:$src)>;
2764 def : Pat<(xor (v2i64 (AArch64vashr V128:$src, (i32 63))),
2765 (v2i64 (add V128:$src, (AArch64vashr V128:$src, (i32 63))))),
2766 (ABSv2i64 V128:$src)>;
2768 defm CLS : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
2769 defm CLZ : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
2770 defm CMEQ : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
2771 defm CMGE : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
2772 defm CMGT : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
2773 defm CMLE : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
2774 defm CMLT : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
2775 defm CNT : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
2776 defm FABS : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;
2778 defm FCMEQ : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
2779 defm FCMGE : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
2780 defm FCMGT : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
2781 defm FCMLE : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
2782 defm FCMLT : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
2783 defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
2784 defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
2785 defm FCVTL : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
2786 def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
2787 (FCVTLv4i16 V64:$Rn)>;
2788 def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
2790 (FCVTLv8i16 V128:$Rn)>;
2791 def : Pat<(v2f64 (fextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;
2792 def : Pat<(v2f64 (fextend (v2f32 (extract_subvector (v4f32 V128:$Rn),
2794 (FCVTLv4i32 V128:$Rn)>;
2796 def : Pat<(v4f32 (fextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;
2797 def : Pat<(v4f32 (fextend (v4f16 (extract_subvector (v8f16 V128:$Rn),
2799 (FCVTLv8i16 V128:$Rn)>;
2801 defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
2802 defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
2803 defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
2804 defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
2805 defm FCVTN : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
2806 def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
2807 (FCVTNv4i16 V128:$Rn)>;
2808 def : Pat<(concat_vectors V64:$Rd,
2809 (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
2810 (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
2811 def : Pat<(v2f32 (fround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
2812 def : Pat<(v4f16 (fround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
2813 def : Pat<(concat_vectors V64:$Rd, (v2f32 (fround (v2f64 V128:$Rn)))),
2814 (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
2815 defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
2816 defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
2817 defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
2818 int_aarch64_neon_fcvtxn>;
2819 defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
2820 defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;
2822 def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
2823 def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
2824 def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
2825 def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
2826 def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;
2828 def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
2829 def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
2830 def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
2831 def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
2832 def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;
2834 defm FNEG : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
2835 defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
2836 defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", frnd>;
2837 defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
2838 defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
2839 defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", int_aarch64_neon_frintn>;
2840 defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
2841 defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
2842 defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;
2843 defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
2844 defm FSQRT : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
2845 defm NEG : SIMDTwoVectorBHSD<1, 0b01011, "neg",
2846 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
2847 defm NOT : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
2848 // Aliases for MVN -> NOT.
2849 def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
2850 (NOTv8i8 V64:$Vd, V64:$Vn)>;
2851 def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
2852 (NOTv16i8 V128:$Vd, V128:$Vn)>;
2854 def : Pat<(AArch64neg (v8i8 V64:$Rn)), (NEGv8i8 V64:$Rn)>;
2855 def : Pat<(AArch64neg (v16i8 V128:$Rn)), (NEGv16i8 V128:$Rn)>;
2856 def : Pat<(AArch64neg (v4i16 V64:$Rn)), (NEGv4i16 V64:$Rn)>;
2857 def : Pat<(AArch64neg (v8i16 V128:$Rn)), (NEGv8i16 V128:$Rn)>;
2858 def : Pat<(AArch64neg (v2i32 V64:$Rn)), (NEGv2i32 V64:$Rn)>;
2859 def : Pat<(AArch64neg (v4i32 V128:$Rn)), (NEGv4i32 V128:$Rn)>;
2860 def : Pat<(AArch64neg (v2i64 V128:$Rn)), (NEGv2i64 V128:$Rn)>;
2862 def : Pat<(AArch64not (v8i8 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2863 def : Pat<(AArch64not (v16i8 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2864 def : Pat<(AArch64not (v4i16 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2865 def : Pat<(AArch64not (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2866 def : Pat<(AArch64not (v2i32 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2867 def : Pat<(AArch64not (v1i64 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2868 def : Pat<(AArch64not (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2869 def : Pat<(AArch64not (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2871 def : Pat<(vnot (v4i16 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2872 def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2873 def : Pat<(vnot (v2i32 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2874 def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2875 def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2877 defm RBIT : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", int_aarch64_neon_rbit>;
2878 defm REV16 : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
2879 defm REV32 : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
2880 defm REV64 : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
2881 defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
2882 BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
2883 defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
2884 defm SCVTF : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
2885 defm SHLL : SIMDVectorLShiftLongBySizeBHS;
2886 defm SQABS : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
2887 defm SQNEG : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
2888 defm SQXTN : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
2889 defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
2890 defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
2891 defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
2892 BinOpFrag<(add node:$LHS, (int_aarch64_neon_uaddlp node:$RHS))> >;
2893 defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp",
2894 int_aarch64_neon_uaddlp>;
2895 defm UCVTF : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
2896 defm UQXTN : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
2897 defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
2898 defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
2899 defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
2900 defm XTN : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
2902 def : Pat<(v4f16 (AArch64rev32 V64:$Rn)), (REV32v4i16 V64:$Rn)>;
2903 def : Pat<(v4f16 (AArch64rev64 V64:$Rn)), (REV64v4i16 V64:$Rn)>;
2904 def : Pat<(v8f16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
2905 def : Pat<(v8f16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
2906 def : Pat<(v2f32 (AArch64rev64 V64:$Rn)), (REV64v2i32 V64:$Rn)>;
2907 def : Pat<(v4f32 (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
2909 // Patterns for vector long shift (by element width). These need to match all
2910 // three of zext, sext and anyext so it's easier to pull the patterns out of the
2912 multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
2913 def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
2914 (SHLLv8i8 V64:$Rn)>;
2915 def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
2916 (SHLLv16i8 V128:$Rn)>;
2917 def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
2918 (SHLLv4i16 V64:$Rn)>;
2919 def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
2920 (SHLLv8i16 V128:$Rn)>;
2921 def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
2922 (SHLLv2i32 V64:$Rn)>;
2923 def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
2924 (SHLLv4i32 V128:$Rn)>;
2927 defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
2928 defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
2929 defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
2931 //===----------------------------------------------------------------------===//
2932 // Advanced SIMD three vector instructions.
2933 //===----------------------------------------------------------------------===//
2935 defm ADD : SIMDThreeSameVector<0, 0b10000, "add", add>;
2936 defm ADDP : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
2937 defm CMEQ : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
2938 defm CMGE : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
2939 defm CMGT : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
2940 defm CMHI : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
2941 defm CMHS : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
2942 defm CMTST : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
2943 defm FABD : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
2944 defm FACGE : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
2945 defm FACGT : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
2946 defm FADDP : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_addp>;
2947 defm FADD : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>;
2948 defm FCMEQ : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
2949 defm FCMGE : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
2950 defm FCMGT : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
2951 defm FDIV : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>;
2952 defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
2953 defm FMAXNM : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>;
2954 defm FMAXP : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
2955 defm FMAX : SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaxnan>;
2956 defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
2957 defm FMINNM : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>;
2958 defm FMINP : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
2959 defm FMIN : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminnan>;
2961 // NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
2962 // instruction expects the addend first, while the fma intrinsic puts it last.
2963 defm FMLA : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
2964 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
2965 defm FMLS : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
2966 TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
2968 // The following def pats catch the case where the LHS of an FMA is negated.
2969 // The TriOpFrag above catches the case where the middle operand is negated.
2970 def : Pat<(v2f32 (fma (fneg V64:$Rn), V64:$Rm, V64:$Rd)),
2971 (FMLSv2f32 V64:$Rd, V64:$Rn, V64:$Rm)>;
2973 def : Pat<(v4f32 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
2974 (FMLSv4f32 V128:$Rd, V128:$Rn, V128:$Rm)>;
2976 def : Pat<(v2f64 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
2977 (FMLSv2f64 V128:$Rd, V128:$Rn, V128:$Rm)>;
2979 defm FMULX : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
2980 defm FMUL : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
2981 defm FRECPS : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
2982 defm FRSQRTS : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
2983 defm FSUB : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;
2984 defm MLA : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla",
2985 TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))> >;
2986 defm MLS : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls",
2987 TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))> >;
2988 defm MUL : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
2989 defm PMUL : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
2990 defm SABA : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
2991 TriOpFrag<(add node:$LHS, (int_aarch64_neon_sabd node:$MHS, node:$RHS))> >;
2992 defm SABD : SIMDThreeSameVectorBHS<0,0b01110,"sabd", int_aarch64_neon_sabd>;
2993 defm SHADD : SIMDThreeSameVectorBHS<0,0b00000,"shadd", int_aarch64_neon_shadd>;
2994 defm SHSUB : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
2995 defm SMAXP : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
2996 defm SMAX : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
2997 defm SMINP : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
2998 defm SMIN : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
2999 defm SQADD : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
3000 defm SQDMULH : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
3001 defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
3002 defm SQRSHL : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
3003 defm SQSHL : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
3004 defm SQSUB : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
3005 defm SRHADD : SIMDThreeSameVectorBHS<0,0b00010,"srhadd",int_aarch64_neon_srhadd>;
3006 defm SRSHL : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
3007 defm SSHL : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
3008 defm SUB : SIMDThreeSameVector<1,0b10000,"sub", sub>;
3009 defm UABA : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
3010 TriOpFrag<(add node:$LHS, (int_aarch64_neon_uabd node:$MHS, node:$RHS))> >;
3011 defm UABD : SIMDThreeSameVectorBHS<1,0b01110,"uabd", int_aarch64_neon_uabd>;
3012 defm UHADD : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", int_aarch64_neon_uhadd>;
3013 defm UHSUB : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
3014 defm UMAXP : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
3015 defm UMAX : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
3016 defm UMINP : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
3017 defm UMIN : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
3018 defm UQADD : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
3019 defm UQRSHL : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
3020 defm UQSHL : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
3021 defm UQSUB : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
3022 defm URHADD : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", int_aarch64_neon_urhadd>;
3023 defm URSHL : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
3024 defm USHL : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
3025 defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
3026 int_aarch64_neon_sqadd>;
3027 defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
3028 int_aarch64_neon_sqsub>;
3030 defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
3031 defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
3032 BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
3033 defm BIF : SIMDLogicalThreeVector<1, 0b11, "bif">;
3034 defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
3035 defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl",
3036 TriOpFrag<(or (and node:$LHS, node:$MHS), (and (vnot node:$LHS), node:$RHS))>>;
3037 defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
3038 defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
3039 BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
3040 defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;
3043 def : Pat<(AArch64bsl (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
3044 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
3045 def : Pat<(AArch64bsl (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
3046 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
3047 def : Pat<(AArch64bsl (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
3048 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
3049 def : Pat<(AArch64bsl (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
3050 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
3052 def : Pat<(AArch64bsl (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
3053 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
3054 def : Pat<(AArch64bsl (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
3055 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
3056 def : Pat<(AArch64bsl (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
3057 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
3058 def : Pat<(AArch64bsl (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
3059 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
3061 def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
3062 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
3063 def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
3064 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
3065 def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
3066 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
3067 def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
3068 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
3070 def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
3071 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
3072 def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
3073 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
3074 def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
3075 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
3076 def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
3077 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
3079 def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
3080 "|cmls.8b\t$dst, $src1, $src2}",
3081 (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
3082 def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
3083 "|cmls.16b\t$dst, $src1, $src2}",
3084 (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
3085 def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
3086 "|cmls.4h\t$dst, $src1, $src2}",
3087 (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
3088 def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
3089 "|cmls.8h\t$dst, $src1, $src2}",
3090 (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
3091 def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
3092 "|cmls.2s\t$dst, $src1, $src2}",
3093 (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
3094 def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
3095 "|cmls.4s\t$dst, $src1, $src2}",
3096 (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
3097 def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
3098 "|cmls.2d\t$dst, $src1, $src2}",
3099 (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
3101 def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
3102 "|cmlo.8b\t$dst, $src1, $src2}",
3103 (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
3104 def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
3105 "|cmlo.16b\t$dst, $src1, $src2}",
3106 (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
3107 def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
3108 "|cmlo.4h\t$dst, $src1, $src2}",
3109 (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
3110 def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
3111 "|cmlo.8h\t$dst, $src1, $src2}",
3112 (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
3113 def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
3114 "|cmlo.2s\t$dst, $src1, $src2}",
3115 (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
3116 def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
3117 "|cmlo.4s\t$dst, $src1, $src2}",
3118 (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
3119 def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
3120 "|cmlo.2d\t$dst, $src1, $src2}",
3121 (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
3123 def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
3124 "|cmle.8b\t$dst, $src1, $src2}",
3125 (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
3126 def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
3127 "|cmle.16b\t$dst, $src1, $src2}",
3128 (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
3129 def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
3130 "|cmle.4h\t$dst, $src1, $src2}",
3131 (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
3132 def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
3133 "|cmle.8h\t$dst, $src1, $src2}",
3134 (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
3135 def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
3136 "|cmle.2s\t$dst, $src1, $src2}",
3137 (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
3138 def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
3139 "|cmle.4s\t$dst, $src1, $src2}",
3140 (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
3141 def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
3142 "|cmle.2d\t$dst, $src1, $src2}",
3143 (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
3145 def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
3146 "|cmlt.8b\t$dst, $src1, $src2}",
3147 (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
3148 def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
3149 "|cmlt.16b\t$dst, $src1, $src2}",
3150 (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
3151 def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
3152 "|cmlt.4h\t$dst, $src1, $src2}",
3153 (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
3154 def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
3155 "|cmlt.8h\t$dst, $src1, $src2}",
3156 (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
3157 def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
3158 "|cmlt.2s\t$dst, $src1, $src2}",
3159 (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
3160 def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
3161 "|cmlt.4s\t$dst, $src1, $src2}",
3162 (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
3163 def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
3164 "|cmlt.2d\t$dst, $src1, $src2}",
3165 (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
3167 let Predicates = [HasNEON, HasFullFP16] in {
3168 def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
3169 "|fcmle.4h\t$dst, $src1, $src2}",
3170 (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
3171 def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
3172 "|fcmle.8h\t$dst, $src1, $src2}",
3173 (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
3175 def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
3176 "|fcmle.2s\t$dst, $src1, $src2}",
3177 (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
3178 def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
3179 "|fcmle.4s\t$dst, $src1, $src2}",
3180 (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
3181 def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
3182 "|fcmle.2d\t$dst, $src1, $src2}",
3183 (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
3185 let Predicates = [HasNEON, HasFullFP16] in {
3186 def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
3187 "|fcmlt.4h\t$dst, $src1, $src2}",
3188 (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
3189 def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
3190 "|fcmlt.8h\t$dst, $src1, $src2}",
3191 (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
3193 def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
3194 "|fcmlt.2s\t$dst, $src1, $src2}",
3195 (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
3196 def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
3197 "|fcmlt.4s\t$dst, $src1, $src2}",
3198 (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
3199 def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
3200 "|fcmlt.2d\t$dst, $src1, $src2}",
3201 (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
3203 let Predicates = [HasNEON, HasFullFP16] in {
3204 def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
3205 "|facle.4h\t$dst, $src1, $src2}",
3206 (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
3207 def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
3208 "|facle.8h\t$dst, $src1, $src2}",
3209 (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
3211 def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
3212 "|facle.2s\t$dst, $src1, $src2}",
3213 (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
3214 def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
3215 "|facle.4s\t$dst, $src1, $src2}",
3216 (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
3217 def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
3218 "|facle.2d\t$dst, $src1, $src2}",
3219 (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
3221 let Predicates = [HasNEON, HasFullFP16] in {
3222 def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
3223 "|faclt.4h\t$dst, $src1, $src2}",
3224 (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
3225 def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
3226 "|faclt.8h\t$dst, $src1, $src2}",
3227 (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
3229 def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
3230 "|faclt.2s\t$dst, $src1, $src2}",
3231 (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
3232 def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
3233 "|faclt.4s\t$dst, $src1, $src2}",
3234 (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
3235 def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
3236 "|faclt.2d\t$dst, $src1, $src2}",
3237 (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
3239 //===----------------------------------------------------------------------===//
3240 // Advanced SIMD three scalar instructions.
3241 //===----------------------------------------------------------------------===//
3243 defm ADD : SIMDThreeScalarD<0, 0b10000, "add", add>;
3244 defm CMEQ : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
3245 defm CMGE : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
3246 defm CMGT : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
3247 defm CMHI : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
3248 defm CMHS : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
3249 defm CMTST : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
3250 defm FABD : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
3251 def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3252 (FABD64 FPR64:$Rn, FPR64:$Rm)>;
3253 defm FACGE : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
3254 int_aarch64_neon_facge>;
3255 defm FACGT : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
3256 int_aarch64_neon_facgt>;
3257 defm FCMEQ : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
3258 defm FCMGE : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
3259 defm FCMGT : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
3260 defm FMULX : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx>;
3261 defm FRECPS : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps>;
3262 defm FRSQRTS : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts>;
3263 defm SQADD : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
3264 defm SQDMULH : SIMDThreeScalarHS< 0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
3265 defm SQRDMULH : SIMDThreeScalarHS< 1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
3266 defm SQRSHL : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
3267 defm SQSHL : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
3268 defm SQSUB : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
3269 defm SRSHL : SIMDThreeScalarD< 0, 0b01010, "srshl", int_aarch64_neon_srshl>;
3270 defm SSHL : SIMDThreeScalarD< 0, 0b01000, "sshl", int_aarch64_neon_sshl>;
3271 defm SUB : SIMDThreeScalarD< 1, 0b10000, "sub", sub>;
3272 defm UQADD : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
3273 defm UQRSHL : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
3274 defm UQSHL : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
3275 defm UQSUB : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
3276 defm URSHL : SIMDThreeScalarD< 1, 0b01010, "urshl", int_aarch64_neon_urshl>;
3277 defm USHL : SIMDThreeScalarD< 1, 0b01000, "ushl", int_aarch64_neon_ushl>;
3278 let Predicates = [HasV8_1a] in {
3279 defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
3280 defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
3281 def : Pat<(i32 (int_aarch64_neon_sqadd
3283 (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
3284 (i32 FPR32:$Rm))))),
3285 (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
3286 def : Pat<(i32 (int_aarch64_neon_sqsub
3288 (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
3289 (i32 FPR32:$Rm))))),
3290 (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
3293 def : InstAlias<"cmls $dst, $src1, $src2",
3294 (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3295 def : InstAlias<"cmle $dst, $src1, $src2",
3296 (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3297 def : InstAlias<"cmlo $dst, $src1, $src2",
3298 (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3299 def : InstAlias<"cmlt $dst, $src1, $src2",
3300 (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3301 def : InstAlias<"fcmle $dst, $src1, $src2",
3302 (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
3303 def : InstAlias<"fcmle $dst, $src1, $src2",
3304 (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3305 def : InstAlias<"fcmlt $dst, $src1, $src2",
3306 (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
3307 def : InstAlias<"fcmlt $dst, $src1, $src2",
3308 (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3309 def : InstAlias<"facle $dst, $src1, $src2",
3310 (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
3311 def : InstAlias<"facle $dst, $src1, $src2",
3312 (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3313 def : InstAlias<"faclt $dst, $src1, $src2",
3314 (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
3315 def : InstAlias<"faclt $dst, $src1, $src2",
3316 (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3318 //===----------------------------------------------------------------------===//
3319 // Advanced SIMD three scalar instructions (mixed operands).
3320 //===----------------------------------------------------------------------===//
3321 defm SQDMULL : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
3322 int_aarch64_neon_sqdmulls_scalar>;
3323 defm SQDMLAL : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
3324 defm SQDMLSL : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;
3326 def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
3327 (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
3328 (i32 FPR32:$Rm))))),
3329 (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
3330 def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
3331 (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
3332 (i32 FPR32:$Rm))))),
3333 (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
3335 //===----------------------------------------------------------------------===//
3336 // Advanced SIMD two scalar instructions.
3337 //===----------------------------------------------------------------------===//
3339 defm ABS : SIMDTwoScalarD< 0, 0b01011, "abs", int_aarch64_neon_abs>;
3340 defm CMEQ : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
3341 defm CMGE : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
3342 defm CMGT : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
3343 defm CMLE : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
3344 defm CMLT : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
3345 defm FCMEQ : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
3346 defm FCMGE : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
3347 defm FCMGT : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
3348 defm FCMLE : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
3349 defm FCMLT : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
3350 defm FCVTAS : SIMDFPTwoScalar< 0, 0, 0b11100, "fcvtas">;
3351 defm FCVTAU : SIMDFPTwoScalar< 1, 0, 0b11100, "fcvtau">;
3352 defm FCVTMS : SIMDFPTwoScalar< 0, 0, 0b11011, "fcvtms">;
3353 defm FCVTMU : SIMDFPTwoScalar< 1, 0, 0b11011, "fcvtmu">;
3354 defm FCVTNS : SIMDFPTwoScalar< 0, 0, 0b11010, "fcvtns">;
3355 defm FCVTNU : SIMDFPTwoScalar< 1, 0, 0b11010, "fcvtnu">;
3356 defm FCVTPS : SIMDFPTwoScalar< 0, 1, 0b11010, "fcvtps">;
3357 defm FCVTPU : SIMDFPTwoScalar< 1, 1, 0b11010, "fcvtpu">;
3358 def FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
3359 defm FCVTZS : SIMDFPTwoScalar< 0, 1, 0b11011, "fcvtzs">;
3360 defm FCVTZU : SIMDFPTwoScalar< 1, 1, 0b11011, "fcvtzu">;
3361 defm FRECPE : SIMDFPTwoScalar< 0, 1, 0b11101, "frecpe">;
3362 defm FRECPX : SIMDFPTwoScalar< 0, 1, 0b11111, "frecpx">;
3363 defm FRSQRTE : SIMDFPTwoScalar< 1, 1, 0b11101, "frsqrte">;
3364 defm NEG : SIMDTwoScalarD< 1, 0b01011, "neg",
3365 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
3366 defm SCVTF : SIMDFPTwoScalarCVT< 0, 0, 0b11101, "scvtf", AArch64sitof>;
3367 defm SQABS : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
3368 defm SQNEG : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
3369 defm SQXTN : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
3370 defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
3371 defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
3372 int_aarch64_neon_suqadd>;
3373 defm UCVTF : SIMDFPTwoScalarCVT< 1, 0, 0b11101, "ucvtf", AArch64uitof>;
3374 defm UQXTN : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
3375 defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
3376 int_aarch64_neon_usqadd>;
3378 def : Pat<(AArch64neg (v1i64 V64:$Rn)), (NEGv1i64 V64:$Rn)>;
3380 def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
3381 (FCVTASv1i64 FPR64:$Rn)>;
3382 def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
3383 (FCVTAUv1i64 FPR64:$Rn)>;
3384 def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
3385 (FCVTMSv1i64 FPR64:$Rn)>;
3386 def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
3387 (FCVTMUv1i64 FPR64:$Rn)>;
3388 def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
3389 (FCVTNSv1i64 FPR64:$Rn)>;
3390 def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
3391 (FCVTNUv1i64 FPR64:$Rn)>;
3392 def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
3393 (FCVTPSv1i64 FPR64:$Rn)>;
3394 def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
3395 (FCVTPUv1i64 FPR64:$Rn)>;
3397 def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
3398 (FRECPEv1i32 FPR32:$Rn)>;
3399 def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
3400 (FRECPEv1i64 FPR64:$Rn)>;
3401 def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
3402 (FRECPEv1i64 FPR64:$Rn)>;
3404 def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
3405 (FRECPEv1i32 FPR32:$Rn)>;
3406 def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
3407 (FRECPEv2f32 V64:$Rn)>;
3408 def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
3409 (FRECPEv4f32 FPR128:$Rn)>;
3410 def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
3411 (FRECPEv1i64 FPR64:$Rn)>;
3412 def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
3413 (FRECPEv1i64 FPR64:$Rn)>;
3414 def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
3415 (FRECPEv2f64 FPR128:$Rn)>;
3417 def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
3418 (FRECPXv1i32 FPR32:$Rn)>;
3419 def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
3420 (FRECPXv1i64 FPR64:$Rn)>;
3422 def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
3423 (FRSQRTEv1i32 FPR32:$Rn)>;
3424 def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
3425 (FRSQRTEv1i64 FPR64:$Rn)>;
3426 def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
3427 (FRSQRTEv1i64 FPR64:$Rn)>;
3429 def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
3430 (FRSQRTEv1i32 FPR32:$Rn)>;
3431 def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
3432 (FRSQRTEv2f32 V64:$Rn)>;
3433 def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
3434 (FRSQRTEv4f32 FPR128:$Rn)>;
3435 def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
3436 (FRSQRTEv1i64 FPR64:$Rn)>;
3437 def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
3438 (FRSQRTEv1i64 FPR64:$Rn)>;
3439 def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
3440 (FRSQRTEv2f64 FPR128:$Rn)>;
3442 // If an integer is about to be converted to a floating point value,
3443 // just load it on the floating point unit.
3444 // Here are the patterns for 8 and 16-bits to float.
3446 multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
3447 SDPatternOperator loadop, Instruction UCVTF,
3448 ROAddrMode ro, Instruction LDRW, Instruction LDRX,
3450 def : Pat<(DstTy (uint_to_fp (SrcTy
3451 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
3452 ro.Wext:$extend))))),
3453 (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
3454 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
3457 def : Pat<(DstTy (uint_to_fp (SrcTy
3458 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
3459 ro.Wext:$extend))))),
3460 (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
3461 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
3465 defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
3466 UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
3467 def : Pat <(f32 (uint_to_fp (i32
3468 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
3469 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
3470 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
3471 def : Pat <(f32 (uint_to_fp (i32
3472 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
3473 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
3474 (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
3475 // 16-bits -> float.
3476 defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
3477 UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
3478 def : Pat <(f32 (uint_to_fp (i32
3479 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
3480 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
3481 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
3482 def : Pat <(f32 (uint_to_fp (i32
3483 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
3484 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
3485 (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
3486 // 32-bits are handled in target specific dag combine:
3487 // performIntToFpCombine.
3488 // 64-bits integer to 32-bits floating point, not possible with
3489 // UCVTF on floating point registers (both source and destination
3490 // must have the same size).
3492 // Here are the patterns for 8, 16, 32, and 64-bits to double.
3493 // 8-bits -> double.
3494 defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
3495 UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
3496 def : Pat <(f64 (uint_to_fp (i32
3497 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
3498 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
3499 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
3500 def : Pat <(f64 (uint_to_fp (i32
3501 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
3502 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
3503 (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
3504 // 16-bits -> double.
3505 defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
3506 UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
3507 def : Pat <(f64 (uint_to_fp (i32
3508 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
3509 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
3510 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
3511 def : Pat <(f64 (uint_to_fp (i32
3512 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
3513 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
3514 (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
3515 // 32-bits -> double.
3516 defm : UIntToFPROLoadPat<f64, i32, load,
3517 UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
3518 def : Pat <(f64 (uint_to_fp (i32
3519 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
3520 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
3521 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
3522 def : Pat <(f64 (uint_to_fp (i32
3523 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
3524 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
3525 (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
3526 // 64-bits -> double are handled in target specific dag combine:
3527 // performIntToFpCombine.
3529 //===----------------------------------------------------------------------===//
3530 // Advanced SIMD three different-sized vector instructions.
3531 //===----------------------------------------------------------------------===//
3533 defm ADDHN : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
3534 defm SUBHN : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
3535 defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
3536 defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
3537 defm PMULL : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
3538 defm SABAL : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
3539 int_aarch64_neon_sabd>;
3540 defm SABDL : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
3541 int_aarch64_neon_sabd>;
3542 defm SADDL : SIMDLongThreeVectorBHS< 0, 0b0000, "saddl",
3543 BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
3544 defm SADDW : SIMDWideThreeVectorBHS< 0, 0b0001, "saddw",
3545 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
3546 defm SMLAL : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
3547 TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
3548 defm SMLSL : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
3549 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
3550 defm SMULL : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
3551 defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
3552 int_aarch64_neon_sqadd>;
3553 defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
3554 int_aarch64_neon_sqsub>;
3555 defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
3556 int_aarch64_neon_sqdmull>;
3557 defm SSUBL : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
3558 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
3559 defm SSUBW : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
3560 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
3561 defm UABAL : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
3562 int_aarch64_neon_uabd>;
3563 defm UADDL : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
3564 BinOpFrag<(add (zext node:$LHS), (zext node:$RHS))>>;
3565 defm UADDW : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
3566 BinOpFrag<(add node:$LHS, (zext node:$RHS))>>;
3567 defm UMLAL : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
3568 TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
3569 defm UMLSL : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
3570 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
3571 defm UMULL : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
3572 defm USUBL : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
3573 BinOpFrag<(sub (zext node:$LHS), (zext node:$RHS))>>;
3574 defm USUBW : SIMDWideThreeVectorBHS< 1, 0b0011, "usubw",
3575 BinOpFrag<(sub node:$LHS, (zext node:$RHS))>>;
3577 // Additional patterns for SMULL and UMULL
3578 multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
3579 Instruction INST8B, Instruction INST4H, Instruction INST2S> {
3580 def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
3581 (INST8B V64:$Rn, V64:$Rm)>;
3582 def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
3583 (INST4H V64:$Rn, V64:$Rm)>;
3584 def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
3585 (INST2S V64:$Rn, V64:$Rm)>;
3588 defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
3589 SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
3590 defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
3591 UMULLv4i16_v4i32, UMULLv2i32_v2i64>;
3593 // Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
3594 multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
3595 Instruction INST8B, Instruction INST4H, Instruction INST2S> {
3596 def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
3597 (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
3598 def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
3599 (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
3600 def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
3601 (INST2S V128:$Rd, V64:$Rn, V64:$Rm)>;
3604 defm : Neon_mulacc_widen_patterns<
3605 TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
3606 SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
3607 defm : Neon_mulacc_widen_patterns<
3608 TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
3609 UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
3610 defm : Neon_mulacc_widen_patterns<
3611 TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
3612 SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
3613 defm : Neon_mulacc_widen_patterns<
3614 TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
3615 UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
3617 // Patterns for 64-bit pmull
3618 def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
3619 (PMULLv1i64 V64:$Rn, V64:$Rm)>;
3620 def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
3621 (extractelt (v2i64 V128:$Rm), (i64 1))),
3622 (PMULLv2i64 V128:$Rn, V128:$Rm)>;
3624 // CodeGen patterns for addhn and subhn instructions, which can actually be
3625 // written in LLVM IR without too much difficulty.
3628 def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
3629 (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
3630 def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
3632 (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
3633 def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
3635 (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
3636 def : Pat<(concat_vectors (v8i8 V64:$Rd),
3637 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
3639 (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
3640 V128:$Rn, V128:$Rm)>;
3641 def : Pat<(concat_vectors (v4i16 V64:$Rd),
3642 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
3644 (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
3645 V128:$Rn, V128:$Rm)>;
3646 def : Pat<(concat_vectors (v2i32 V64:$Rd),
3647 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
3649 (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
3650 V128:$Rn, V128:$Rm)>;
3653 def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
3654 (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
3655 def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
3657 (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
3658 def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
3660 (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
3661 def : Pat<(concat_vectors (v8i8 V64:$Rd),
3662 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
3664 (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
3665 V128:$Rn, V128:$Rm)>;
3666 def : Pat<(concat_vectors (v4i16 V64:$Rd),
3667 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
3669 (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
3670 V128:$Rn, V128:$Rm)>;
3671 def : Pat<(concat_vectors (v2i32 V64:$Rd),
3672 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
3674 (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
3675 V128:$Rn, V128:$Rm)>;
3677 //----------------------------------------------------------------------------
3678 // AdvSIMD bitwise extract from vector instruction.
3679 //----------------------------------------------------------------------------
3681 defm EXT : SIMDBitwiseExtract<"ext">;
3683 def : Pat<(v4i16 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
3684 (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
3685 def : Pat<(v8i16 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3686 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3687 def : Pat<(v2i32 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
3688 (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
3689 def : Pat<(v2f32 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
3690 (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
3691 def : Pat<(v4i32 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3692 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3693 def : Pat<(v4f32 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3694 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3695 def : Pat<(v2i64 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3696 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3697 def : Pat<(v2f64 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3698 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3699 def : Pat<(v4f16 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
3700 (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
3701 def : Pat<(v8f16 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3702 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3704 // We use EXT to handle extract_subvector to copy the upper 64-bits of a
3706 def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 8))),
3707 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3708 def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 4))),
3709 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3710 def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 2))),
3711 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3712 def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 1))),
3713 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3714 def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 4))),
3715 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3716 def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 2))),
3717 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3718 def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 1))),
3719 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3722 //----------------------------------------------------------------------------
3723 // AdvSIMD zip vector
3724 //----------------------------------------------------------------------------
3726 defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
3727 defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
3728 defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
3729 defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
3730 defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
3731 defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;
3733 //----------------------------------------------------------------------------
3734 // AdvSIMD TBL/TBX instructions
3735 //----------------------------------------------------------------------------
3737 defm TBL : SIMDTableLookup< 0, "tbl">;
3738 defm TBX : SIMDTableLookupTied<1, "tbx">;
3740 def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
3741 (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
3742 def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
3743 (TBLv16i8One V128:$Ri, V128:$Rn)>;
3745 def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
3746 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
3747 (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
3748 def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
3749 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
3750 (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;
3753 //----------------------------------------------------------------------------
3754 // AdvSIMD scalar CPY instruction
3755 //----------------------------------------------------------------------------
3757 defm CPY : SIMDScalarCPY<"cpy">;
3759 //----------------------------------------------------------------------------
3760 // AdvSIMD scalar pairwise instructions
3761 //----------------------------------------------------------------------------
3763 defm ADDP : SIMDPairwiseScalarD<0, 0b11011, "addp">;
3764 defm FADDP : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
3765 defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
3766 defm FMAXP : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
3767 defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
3768 defm FMINP : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;
3769 def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
3770 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
3771 def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
3772 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
3773 def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
3774 (FADDPv2i32p V64:$Rn)>;
3775 def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
3776 (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
3777 def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
3778 (FADDPv2i64p V128:$Rn)>;
3779 def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
3780 (FMAXNMPv2i32p V64:$Rn)>;
3781 def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
3782 (FMAXNMPv2i64p V128:$Rn)>;
3783 def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
3784 (FMAXPv2i32p V64:$Rn)>;
3785 def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
3786 (FMAXPv2i64p V128:$Rn)>;
3787 def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
3788 (FMINNMPv2i32p V64:$Rn)>;
3789 def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
3790 (FMINNMPv2i64p V128:$Rn)>;
3791 def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
3792 (FMINPv2i32p V64:$Rn)>;
3793 def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
3794 (FMINPv2i64p V128:$Rn)>;
3796 //----------------------------------------------------------------------------
3797 // AdvSIMD INS/DUP instructions
3798 //----------------------------------------------------------------------------
3800 def DUPv8i8gpr : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
3801 def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
3802 def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
3803 def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
3804 def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
3805 def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
3806 def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;
3808 def DUPv2i64lane : SIMDDup64FromElement;
3809 def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
3810 def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
3811 def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
3812 def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
3813 def DUPv8i8lane : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
3814 def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;
3816 def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
3817 (v2f32 (DUPv2i32lane
3818 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
3820 def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
3821 (v4f32 (DUPv4i32lane
3822 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
3824 def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
3825 (v2f64 (DUPv2i64lane
3826 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
3828 def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
3829 (v4f16 (DUPv4i16lane
3830 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
3832 def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
3833 (v8f16 (DUPv8i16lane
3834 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
3837 def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
3838 (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
3839 def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
3840 (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;
3842 def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
3843 (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
3844 def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
3845 (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
3846 def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
3847 (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;
3849 // If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
3850 // instruction even if the types don't match: we just have to remap the lane
3851 // carefully. N.b. this trick only applies to truncations.
3852 def VecIndex_x2 : SDNodeXForm<imm, [{
3853 return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
3855 def VecIndex_x4 : SDNodeXForm<imm, [{
3856 return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
3858 def VecIndex_x8 : SDNodeXForm<imm, [{
3859 return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
3862 multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
3863 ValueType Src128VT, ValueType ScalVT,
3864 Instruction DUP, SDNodeXForm IdxXFORM> {
3865 def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
3867 (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
3869 def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
3871 (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
3874 defm : DUPWithTruncPats<v8i8, v4i16, v8i16, i32, DUPv8i8lane, VecIndex_x2>;
3875 defm : DUPWithTruncPats<v8i8, v2i32, v4i32, i32, DUPv8i8lane, VecIndex_x4>;
3876 defm : DUPWithTruncPats<v4i16, v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;
3878 defm : DUPWithTruncPats<v16i8, v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
3879 defm : DUPWithTruncPats<v16i8, v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
3880 defm : DUPWithTruncPats<v8i16, v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
3882 multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
3883 SDNodeXForm IdxXFORM> {
3884 def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
3886 (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
3888 def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
3890 (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
3893 defm : DUPWithTrunci64Pats<v8i8, DUPv8i8lane, VecIndex_x8>;
3894 defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane, VecIndex_x4>;
3895 defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane, VecIndex_x2>;
3897 defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
3898 defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
3899 defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;
3901 // SMOV and UMOV definitions, with some extra patterns for convenience
3905 def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
3906 (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
3907 def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
3908 (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
3909 def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
3910 (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
3911 def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
3912 (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
3913 def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
3914 (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
3915 def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
3916 (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;
3918 def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
3919 VectorIndexB:$idx)))), i8),
3920 (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
3921 def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
3922 VectorIndexH:$idx)))), i16),
3923 (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
3925 // Extracting i8 or i16 elements will have the zero-extend transformed to
3926 // an 'and' mask by type legalization since neither i8 nor i16 are legal types
3927 // for AArch64. Match these patterns here since UMOV already zeroes out the high
3928 // bits of the destination register.
3929 def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
3931 (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
3932 def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
3934 (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
3938 def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
3939 (SUBREG_TO_REG (i32 0),
3940 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
3941 def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
3942 (SUBREG_TO_REG (i32 0),
3943 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
3945 def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
3946 (SUBREG_TO_REG (i32 0),
3947 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
3948 def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
3949 (SUBREG_TO_REG (i32 0),
3950 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
3952 def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
3953 (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
3954 (i32 FPR32:$Rn), ssub))>;
3955 def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
3956 (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
3957 (i32 FPR32:$Rn), ssub))>;
3958 def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
3959 (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
3960 (i64 FPR64:$Rn), dsub))>;
3962 def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
3963 (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
3964 def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
3965 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
3967 def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
3968 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
3969 def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
3970 (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
3971 def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
3972 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;
3974 def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
3975 (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
3978 (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
3980 (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
3984 def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
3985 (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
3987 V128:$Rn, VectorIndexH:$imm,
3988 (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
3991 def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
3992 (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
3995 (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
3997 (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
4000 def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
4001 (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
4003 V128:$Rn, VectorIndexS:$imm,
4004 (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
4006 def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
4007 (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
4009 V128:$Rn, VectorIndexD:$imm,
4010 (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
4013 // Copy an element at a constant index in one vector into a constant indexed
4014 // element of another.
4015 // FIXME refactor to a shared class/dev parameterized on vector type, vector
4016 // index type and INS extension
4017 def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
4018 (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
4019 VectorIndexB:$idx2)),
4021 V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
4023 def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
4024 (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
4025 VectorIndexH:$idx2)),
4027 V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
4029 def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
4030 (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
4031 VectorIndexS:$idx2)),
4033 V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
4035 def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
4036 (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
4037 VectorIndexD:$idx2)),
4039 V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
4042 multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
4043 ValueType VTScal, Instruction INS> {
4044 def : Pat<(VT128 (vector_insert V128:$src,
4045 (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
4047 (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;
4049 def : Pat<(VT128 (vector_insert V128:$src,
4050 (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
4052 (INS V128:$src, imm:$Immd,
4053 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;
4055 def : Pat<(VT64 (vector_insert V64:$src,
4056 (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
4058 (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
4059 imm:$Immd, V128:$Rn, imm:$Immn),
4062 def : Pat<(VT64 (vector_insert V64:$src,
4063 (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
4066 (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
4067 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
4071 defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
4072 defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
4073 defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
4076 // Floating point vector extractions are codegen'd as either a sequence of
4077 // subregister extractions, or a MOV (aka CPY here, alias for DUP) if
4078 // the lane number is anything other than zero.
4079 def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
4080 (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
4081 def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
4082 (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
4083 def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
4084 (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
4086 def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
4087 (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
4088 def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
4089 (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
4090 def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
4091 (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
4093 // All concat_vectors operations are canonicalised to act on i64 vectors for
4094 // AArch64. In the general case we need an instruction, which had just as well be
4096 class ConcatPat<ValueType DstTy, ValueType SrcTy>
4097 : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
4098 (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
4099 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;
4101 def : ConcatPat<v2i64, v1i64>;
4102 def : ConcatPat<v2f64, v1f64>;
4103 def : ConcatPat<v4i32, v2i32>;
4104 def : ConcatPat<v4f32, v2f32>;
4105 def : ConcatPat<v8i16, v4i16>;
4106 def : ConcatPat<v8f16, v4f16>;
4107 def : ConcatPat<v16i8, v8i8>;
4109 // If the high lanes are undef, though, we can just ignore them:
4110 class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
4111 : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
4112 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;
4114 def : ConcatUndefPat<v2i64, v1i64>;
4115 def : ConcatUndefPat<v2f64, v1f64>;
4116 def : ConcatUndefPat<v4i32, v2i32>;
4117 def : ConcatUndefPat<v4f32, v2f32>;
4118 def : ConcatUndefPat<v8i16, v4i16>;
4119 def : ConcatUndefPat<v16i8, v8i8>;
4121 //----------------------------------------------------------------------------
4122 // AdvSIMD across lanes instructions
4123 //----------------------------------------------------------------------------
4125 defm ADDV : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
4126 defm SMAXV : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
4127 defm SMINV : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
4128 defm UMAXV : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
4129 defm UMINV : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
4130 defm SADDLV : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
4131 defm UADDLV : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
4132 defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
4133 defm FMAXV : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
4134 defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
4135 defm FMINV : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;
4137 // Patterns for across-vector intrinsics, that have a node equivalent, that
4138 // returns a vector (with only the low lane defined) instead of a scalar.
4139 // In effect, opNode is the same as (scalar_to_vector (IntNode)).
4140 multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
4141 SDPatternOperator opNode> {
4142 // If a lane instruction caught the vector_extract around opNode, we can
4143 // directly match the latter to the instruction.
4144 def : Pat<(v8i8 (opNode V64:$Rn)),
4145 (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
4146 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
4147 def : Pat<(v16i8 (opNode V128:$Rn)),
4148 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4149 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
4150 def : Pat<(v4i16 (opNode V64:$Rn)),
4151 (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
4152 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
4153 def : Pat<(v8i16 (opNode V128:$Rn)),
4154 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
4155 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
4156 def : Pat<(v4i32 (opNode V128:$Rn)),
4157 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
4158 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;
4161 // If none did, fallback to the explicit patterns, consuming the vector_extract.
4162 def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
4163 (i32 0)), (i64 0))),
4164 (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
4165 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
4167 def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
4168 (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4169 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
4171 def : Pat<(i32 (vector_extract (insert_subvector undef,
4172 (v4i16 (opNode V64:$Rn)), (i32 0)), (i64 0))),
4173 (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
4174 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
4176 def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
4177 (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
4178 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
4180 def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
4181 (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
4182 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
4187 multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
4188 SDPatternOperator opNode>
4189 : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
4190 // If there is a sign extension after this intrinsic, consume it as smov already
4192 def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
4193 (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), i8)),
4195 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4196 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
4198 def : Pat<(i32 (sext_inreg (i32 (vector_extract
4199 (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
4201 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4202 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
4204 def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
4205 (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), i16)),
4207 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4208 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
4210 def : Pat<(i32 (sext_inreg (i32 (vector_extract
4211 (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
4213 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4214 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
4218 multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
4219 SDPatternOperator opNode>
4220 : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
4221 // If there is a masking operation keeping only what has been actually
4222 // generated, consume it.
4223 def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
4224 (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), maski8_or_more)),
4225 (i32 (EXTRACT_SUBREG
4226 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4227 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
4229 def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
4231 (i32 (EXTRACT_SUBREG
4232 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4233 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
4235 def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
4236 (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), maski16_or_more)),
4237 (i32 (EXTRACT_SUBREG
4238 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4239 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
4241 def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
4243 (i32 (EXTRACT_SUBREG
4244 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4245 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
4249 defm : SIMDAcrossLanesSignedIntrinsic<"ADDV", AArch64saddv>;
4250 // vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
4251 def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
4252 (ADDPv2i32 V64:$Rn, V64:$Rn)>;
4254 defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
4255 // vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
4256 def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
4257 (ADDPv2i32 V64:$Rn, V64:$Rn)>;
4259 defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
4260 def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
4261 (SMAXPv2i32 V64:$Rn, V64:$Rn)>;
4263 defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
4264 def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
4265 (SMINPv2i32 V64:$Rn, V64:$Rn)>;
4267 defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
4268 def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
4269 (UMAXPv2i32 V64:$Rn, V64:$Rn)>;
4271 defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
4272 def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
4273 (UMINPv2i32 V64:$Rn, V64:$Rn)>;
4275 multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
4276 def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
4278 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4279 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
4281 def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
4283 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4284 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
4287 def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
4288 (i32 (EXTRACT_SUBREG
4289 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4290 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
4292 def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
4293 (i32 (EXTRACT_SUBREG
4294 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4295 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
4298 def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
4299 (i64 (EXTRACT_SUBREG
4300 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4301 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
4305 multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
4307 def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
4308 (i32 (EXTRACT_SUBREG
4309 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4310 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
4312 def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
4313 (i32 (EXTRACT_SUBREG
4314 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4315 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
4318 def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
4319 (i32 (EXTRACT_SUBREG
4320 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4321 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
4323 def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
4324 (i32 (EXTRACT_SUBREG
4325 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4326 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
4329 def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
4330 (i64 (EXTRACT_SUBREG
4331 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4332 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
4336 defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
4337 defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;
4339 // The vaddlv_s32 intrinsic gets mapped to SADDLP.
4340 def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
4341 (i64 (EXTRACT_SUBREG
4342 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4343 (SADDLPv2i32_v1i64 V64:$Rn), dsub),
4345 // The vaddlv_u32 intrinsic gets mapped to UADDLP.
4346 def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
4347 (i64 (EXTRACT_SUBREG
4348 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4349 (UADDLPv2i32_v1i64 V64:$Rn), dsub),
4352 //------------------------------------------------------------------------------
4353 // AdvSIMD modified immediate instructions
4354 //------------------------------------------------------------------------------
4357 defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
4359 defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;
4361 def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd, imm0_255:$imm, 0)>;
4362 def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
4363 def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd, imm0_255:$imm, 0)>;
4364 def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
4366 def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
4367 def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
4368 def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
4369 def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
4371 def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd, imm0_255:$imm, 0)>;
4372 def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
4373 def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd, imm0_255:$imm, 0)>;
4374 def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
4376 def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
4377 def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
4378 def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
4379 def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
4382 def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
4384 [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4385 def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64, fpimm8,
4387 [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4388 def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
4390 [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4391 let Predicates = [HasNEON, HasFullFP16] in {
4392 def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64, fpimm8,
4394 [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4395 def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
4397 [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4398 } // Predicates = [HasNEON, HasFullFP16]
4402 // EDIT byte mask: scalar
4403 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
4404 def MOVID : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
4405 [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
4406 // The movi_edit node has the immediate value already encoded, so we use
4407 // a plain imm0_255 here.
4408 def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
4409 (MOVID imm0_255:$shift)>;
4411 def : Pat<(v1i64 immAllZerosV), (MOVID (i32 0))>;
4412 def : Pat<(v2i32 immAllZerosV), (MOVID (i32 0))>;
4413 def : Pat<(v4i16 immAllZerosV), (MOVID (i32 0))>;
4414 def : Pat<(v8i8 immAllZerosV), (MOVID (i32 0))>;
4416 def : Pat<(v1i64 immAllOnesV), (MOVID (i32 255))>;
4417 def : Pat<(v2i32 immAllOnesV), (MOVID (i32 255))>;
4418 def : Pat<(v4i16 immAllOnesV), (MOVID (i32 255))>;
4419 def : Pat<(v8i8 immAllOnesV), (MOVID (i32 255))>;
4421 // EDIT byte mask: 2d
4423 // The movi_edit node has the immediate value already encoded, so we use
4424 // a plain imm0_255 in the pattern
4425 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
4426 def MOVIv2d_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
4429 [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;
4431 def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
4432 def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
4433 def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
4434 def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;
4436 def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
4437 def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
4438 def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
4439 def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;
4441 def : Pat<(v2f64 (AArch64dup (f64 fpimm0))), (MOVIv2d_ns (i32 0))>;
4442 def : Pat<(v4f32 (AArch64dup (f32 fpimm0))), (MOVIv2d_ns (i32 0))>;
4444 // EDIT per word & halfword: 2s, 4h, 4s, & 8h
4445 defm MOVI : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;
4447 def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
4448 def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
4449 def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
4450 def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
4452 def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
4453 def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
4454 def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
4455 def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
4457 def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
4458 (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
4459 def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
4460 (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
4461 def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
4462 (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
4463 def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
4464 (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;
4466 // EDIT per word: 2s & 4s with MSL shifter
4467 def MOVIv2s_msl : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
4468 [(set (v2i32 V64:$Rd),
4469 (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
4470 def MOVIv4s_msl : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
4471 [(set (v4i32 V128:$Rd),
4472 (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
4474 // Per byte: 8b & 16b
4475 def MOVIv8b_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64, imm0_255,
4477 [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;
4478 def MOVIv16b_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
4480 [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
4484 // EDIT per word & halfword: 2s, 4h, 4s, & 8h
4485 defm MVNI : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;
4487 def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
4488 def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
4489 def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
4490 def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
4492 def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
4493 def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
4494 def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
4495 def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
4497 def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
4498 (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
4499 def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
4500 (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
4501 def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
4502 (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
4503 def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
4504 (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;
4506 // EDIT per word: 2s & 4s with MSL shifter
4507 def MVNIv2s_msl : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
4508 [(set (v2i32 V64:$Rd),
4509 (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
4510 def MVNIv4s_msl : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
4511 [(set (v4i32 V128:$Rd),
4512 (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
4514 //----------------------------------------------------------------------------
4515 // AdvSIMD indexed element
4516 //----------------------------------------------------------------------------
4518 let hasSideEffects = 0 in {
4519 defm FMLA : SIMDFPIndexedTied<0, 0b0001, "fmla">;
4520 defm FMLS : SIMDFPIndexedTied<0, 0b0101, "fmls">;
4523 // NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
4524 // instruction expects the addend first, while the intrinsic expects it last.
4526 // On the other hand, there are quite a few valid combinatorial options due to
4527 // the commutativity of multiplication and the fact that (-x) * y = x * (-y).
4528 defm : SIMDFPIndexedTiedPatterns<"FMLA",
4529 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
4530 defm : SIMDFPIndexedTiedPatterns<"FMLA",
4531 TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;
4533 defm : SIMDFPIndexedTiedPatterns<"FMLS",
4534 TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
4535 defm : SIMDFPIndexedTiedPatterns<"FMLS",
4536 TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
4537 defm : SIMDFPIndexedTiedPatterns<"FMLS",
4538 TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
4539 defm : SIMDFPIndexedTiedPatterns<"FMLS",
4540 TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
4542 multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
4543 // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
4545 def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
4546 (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
4547 VectorIndexS:$idx))),
4548 (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
4549 def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
4550 (v2f32 (AArch64duplane32
4551 (v4f32 (insert_subvector undef,
4552 (v2f32 (fneg V64:$Rm)),
4554 VectorIndexS:$idx)))),
4555 (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
4556 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
4557 VectorIndexS:$idx)>;
4558 def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
4559 (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
4560 (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
4561 (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
4563 // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
4565 def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
4566 (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
4567 VectorIndexS:$idx))),
4568 (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
4569 VectorIndexS:$idx)>;
4570 def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
4571 (v4f32 (AArch64duplane32
4572 (v4f32 (insert_subvector undef,
4573 (v2f32 (fneg V64:$Rm)),
4575 VectorIndexS:$idx)))),
4576 (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
4577 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
4578 VectorIndexS:$idx)>;
4579 def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
4580 (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
4581 (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
4582 (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
4584 // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
4585 // (DUPLANE from 64-bit would be trivial).
4586 def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
4587 (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
4588 VectorIndexD:$idx))),
4590 V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
4591 def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
4592 (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
4593 (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
4594 (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;
4596 // 2 variants for 32-bit scalar version: extract from .2s or from .4s
4597 def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
4598 (vector_extract (v4f32 (fneg V128:$Rm)),
4599 VectorIndexS:$idx))),
4600 (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
4601 V128:$Rm, VectorIndexS:$idx)>;
4602 def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
4603 (vector_extract (v4f32 (insert_subvector undef,
4604 (v2f32 (fneg V64:$Rm)),
4606 VectorIndexS:$idx))),
4607 (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
4608 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;
4610 // 1 variant for 64-bit scalar version: extract from .1d or from .2d
4611 def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
4612 (vector_extract (v2f64 (fneg V128:$Rm)),
4613 VectorIndexS:$idx))),
4614 (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
4615 V128:$Rm, VectorIndexS:$idx)>;
4618 defm : FMLSIndexedAfterNegPatterns<
4619 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
4620 defm : FMLSIndexedAfterNegPatterns<
4621 TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;
4623 defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
4624 defm FMUL : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;
4626 def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
4627 (FMULv2i32_indexed V64:$Rn,
4628 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
4630 def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
4631 (FMULv4i32_indexed V128:$Rn,
4632 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
4634 def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
4635 (FMULv2i64_indexed V128:$Rn,
4636 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
4639 defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
4640 defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
4641 defm MLA : SIMDVectorIndexedHSTied<1, 0b0000, "mla",
4642 TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))>>;
4643 defm MLS : SIMDVectorIndexedHSTied<1, 0b0100, "mls",
4644 TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))>>;
4645 defm MUL : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
4646 defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
4647 TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
4648 defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
4649 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
4650 defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
4651 int_aarch64_neon_smull>;
4652 defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
4653 int_aarch64_neon_sqadd>;
4654 defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
4655 int_aarch64_neon_sqsub>;
4656 defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
4657 int_aarch64_neon_sqadd>;
4658 defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
4659 int_aarch64_neon_sqsub>;
4660 defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
4661 defm UMLAL : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
4662 TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
4663 defm UMLSL : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
4664 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
4665 defm UMULL : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
4666 int_aarch64_neon_umull>;
4668 // A scalar sqdmull with the second operand being a vector lane can be
4669 // handled directly with the indexed instruction encoding.
4670 def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
4671 (vector_extract (v4i32 V128:$Vm),
4672 VectorIndexS:$idx)),
4673 (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
4675 //----------------------------------------------------------------------------
4676 // AdvSIMD scalar shift instructions
4677 //----------------------------------------------------------------------------
4678 defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
4679 defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
4680 defm SCVTF : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
4681 defm UCVTF : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
4682 // Codegen patterns for the above. We don't put these directly on the
4683 // instructions because TableGen's type inference can't handle the truth.
4684 // Having the same base pattern for fp <--> int totally freaks it out.
4685 def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
4686 (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
4687 def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
4688 (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
4689 def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
4690 (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
4691 def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
4692 (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
4693 def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
4695 (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
4696 def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
4698 (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
4699 def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
4700 (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
4701 def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
4702 (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
4703 def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
4704 (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
4705 def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
4706 (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
4707 def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
4709 (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
4710 def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
4712 (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
4714 defm SHL : SIMDScalarLShiftD< 0, 0b01010, "shl", AArch64vshl>;
4715 defm SLI : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
4716 defm SQRSHRN : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
4717 int_aarch64_neon_sqrshrn>;
4718 defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
4719 int_aarch64_neon_sqrshrun>;
4720 defm SQSHLU : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
4721 defm SQSHL : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
4722 defm SQSHRN : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
4723 int_aarch64_neon_sqshrn>;
4724 defm SQSHRUN : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
4725 int_aarch64_neon_sqshrun>;
4726 defm SRI : SIMDScalarRShiftDTied< 1, 0b01000, "sri">;
4727 defm SRSHR : SIMDScalarRShiftD< 0, 0b00100, "srshr", AArch64srshri>;
4728 defm SRSRA : SIMDScalarRShiftDTied< 0, 0b00110, "srsra",
4729 TriOpFrag<(add node:$LHS,
4730 (AArch64srshri node:$MHS, node:$RHS))>>;
4731 defm SSHR : SIMDScalarRShiftD< 0, 0b00000, "sshr", AArch64vashr>;
4732 defm SSRA : SIMDScalarRShiftDTied< 0, 0b00010, "ssra",
4733 TriOpFrag<(add node:$LHS,
4734 (AArch64vashr node:$MHS, node:$RHS))>>;
4735 defm UQRSHRN : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
4736 int_aarch64_neon_uqrshrn>;
4737 defm UQSHL : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
4738 defm UQSHRN : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
4739 int_aarch64_neon_uqshrn>;
4740 defm URSHR : SIMDScalarRShiftD< 1, 0b00100, "urshr", AArch64urshri>;
4741 defm URSRA : SIMDScalarRShiftDTied< 1, 0b00110, "ursra",
4742 TriOpFrag<(add node:$LHS,
4743 (AArch64urshri node:$MHS, node:$RHS))>>;
4744 defm USHR : SIMDScalarRShiftD< 1, 0b00000, "ushr", AArch64vlshr>;
4745 defm USRA : SIMDScalarRShiftDTied< 1, 0b00010, "usra",
4746 TriOpFrag<(add node:$LHS,
4747 (AArch64vlshr node:$MHS, node:$RHS))>>;
4749 //----------------------------------------------------------------------------
4750 // AdvSIMD vector shift instructions
4751 //----------------------------------------------------------------------------
4752 defm FCVTZS:SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
4753 defm FCVTZU:SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
4754 defm SCVTF: SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
4755 int_aarch64_neon_vcvtfxs2fp>;
4756 defm RSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
4757 int_aarch64_neon_rshrn>;
4758 defm SHL : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
4759 defm SHRN : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
4760 BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
4761 defm SLI : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", int_aarch64_neon_vsli>;
4762 def : Pat<(v1i64 (int_aarch64_neon_vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
4763 (i32 vecshiftL64:$imm))),
4764 (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
4765 defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
4766 int_aarch64_neon_sqrshrn>;
4767 defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
4768 int_aarch64_neon_sqrshrun>;
4769 defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
4770 defm SQSHL : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
4771 defm SQSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
4772 int_aarch64_neon_sqshrn>;
4773 defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
4774 int_aarch64_neon_sqshrun>;
4775 defm SRI : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", int_aarch64_neon_vsri>;
4776 def : Pat<(v1i64 (int_aarch64_neon_vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
4777 (i32 vecshiftR64:$imm))),
4778 (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
4779 defm SRSHR : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
4780 defm SRSRA : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
4781 TriOpFrag<(add node:$LHS,
4782 (AArch64srshri node:$MHS, node:$RHS))> >;
4783 defm SSHLL : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
4784 BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;
4786 defm SSHR : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
4787 defm SSRA : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
4788 TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
4789 defm UCVTF : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
4790 int_aarch64_neon_vcvtfxu2fp>;
4791 defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
4792 int_aarch64_neon_uqrshrn>;
4793 defm UQSHL : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
4794 defm UQSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
4795 int_aarch64_neon_uqshrn>;
4796 defm URSHR : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
4797 defm URSRA : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
4798 TriOpFrag<(add node:$LHS,
4799 (AArch64urshri node:$MHS, node:$RHS))> >;
4800 defm USHLL : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
4801 BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
4802 defm USHR : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
4803 defm USRA : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
4804 TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
4806 // SHRN patterns for when a logical right shift was used instead of arithmetic
4807 // (the immediate guarantees no sign bits actually end up in the result so it
4809 def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
4810 (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
4811 def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
4812 (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
4813 def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
4814 (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;
4816 def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
4817 (trunc (AArch64vlshr (v8i16 V128:$Rn),
4818 vecshiftR16Narrow:$imm)))),
4819 (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
4820 V128:$Rn, vecshiftR16Narrow:$imm)>;
4821 def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
4822 (trunc (AArch64vlshr (v4i32 V128:$Rn),
4823 vecshiftR32Narrow:$imm)))),
4824 (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
4825 V128:$Rn, vecshiftR32Narrow:$imm)>;
4826 def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
4827 (trunc (AArch64vlshr (v2i64 V128:$Rn),
4828 vecshiftR64Narrow:$imm)))),
4829 (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
4830 V128:$Rn, vecshiftR32Narrow:$imm)>;
4832 // Vector sign and zero extensions are implemented with SSHLL and USSHLL.
4833 // Anyexts are implemented as zexts.
4834 def : Pat<(v8i16 (sext (v8i8 V64:$Rn))), (SSHLLv8i8_shift V64:$Rn, (i32 0))>;
4835 def : Pat<(v8i16 (zext (v8i8 V64:$Rn))), (USHLLv8i8_shift V64:$Rn, (i32 0))>;
4836 def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))), (USHLLv8i8_shift V64:$Rn, (i32 0))>;
4837 def : Pat<(v4i32 (sext (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
4838 def : Pat<(v4i32 (zext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
4839 def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
4840 def : Pat<(v2i64 (sext (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
4841 def : Pat<(v2i64 (zext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
4842 def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
4843 // Also match an extend from the upper half of a 128 bit source register.
4844 def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
4845 (USHLLv16i8_shift V128:$Rn, (i32 0))>;
4846 def : Pat<(v8i16 (zext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
4847 (USHLLv16i8_shift V128:$Rn, (i32 0))>;
4848 def : Pat<(v8i16 (sext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
4849 (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
4850 def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
4851 (USHLLv8i16_shift V128:$Rn, (i32 0))>;
4852 def : Pat<(v4i32 (zext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
4853 (USHLLv8i16_shift V128:$Rn, (i32 0))>;
4854 def : Pat<(v4i32 (sext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
4855 (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
4856 def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
4857 (USHLLv4i32_shift V128:$Rn, (i32 0))>;
4858 def : Pat<(v2i64 (zext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
4859 (USHLLv4i32_shift V128:$Rn, (i32 0))>;
4860 def : Pat<(v2i64 (sext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
4861 (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
4863 // Vector shift sxtl aliases
4864 def : InstAlias<"sxtl.8h $dst, $src1",
4865 (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
4866 def : InstAlias<"sxtl $dst.8h, $src1.8b",
4867 (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
4868 def : InstAlias<"sxtl.4s $dst, $src1",
4869 (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
4870 def : InstAlias<"sxtl $dst.4s, $src1.4h",
4871 (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
4872 def : InstAlias<"sxtl.2d $dst, $src1",
4873 (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
4874 def : InstAlias<"sxtl $dst.2d, $src1.2s",
4875 (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
4877 // Vector shift sxtl2 aliases
4878 def : InstAlias<"sxtl2.8h $dst, $src1",
4879 (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
4880 def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
4881 (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
4882 def : InstAlias<"sxtl2.4s $dst, $src1",
4883 (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
4884 def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
4885 (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
4886 def : InstAlias<"sxtl2.2d $dst, $src1",
4887 (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
4888 def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
4889 (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
4891 // Vector shift uxtl aliases
4892 def : InstAlias<"uxtl.8h $dst, $src1",
4893 (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
4894 def : InstAlias<"uxtl $dst.8h, $src1.8b",
4895 (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
4896 def : InstAlias<"uxtl.4s $dst, $src1",
4897 (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
4898 def : InstAlias<"uxtl $dst.4s, $src1.4h",
4899 (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
4900 def : InstAlias<"uxtl.2d $dst, $src1",
4901 (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
4902 def : InstAlias<"uxtl $dst.2d, $src1.2s",
4903 (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
4905 // Vector shift uxtl2 aliases
4906 def : InstAlias<"uxtl2.8h $dst, $src1",
4907 (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
4908 def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
4909 (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
4910 def : InstAlias<"uxtl2.4s $dst, $src1",
4911 (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
4912 def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
4913 (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
4914 def : InstAlias<"uxtl2.2d $dst, $src1",
4915 (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
4916 def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
4917 (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
4919 // If an integer is about to be converted to a floating point value,
4920 // just load it on the floating point unit.
4921 // These patterns are more complex because floating point loads do not
4922 // support sign extension.
4923 // The sign extension has to be explicitly added and is only supported for
4924 // one step: byte-to-half, half-to-word, word-to-doubleword.
4925 // SCVTF GPR -> FPR is 9 cycles.
4926 // SCVTF FPR -> FPR is 4 cyclces.
4927 // (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
4928 // Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
4929 // and still being faster.
4930 // However, this is not good for code size.
4931 // 8-bits -> float. 2 sizes step-up.
4932 class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
4933 : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
4934 (SCVTFv1i32 (f32 (EXTRACT_SUBREG
4939 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4946 Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
4948 def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
4949 (LDRBroW GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
4950 def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
4951 (LDRBroX GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
4952 def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
4953 (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
4954 def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
4955 (LDURBi GPR64sp:$Rn, simm9:$offset)>;
4957 // 16-bits -> float. 1 size step-up.
4958 class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
4959 : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
4960 (SCVTFv1i32 (f32 (EXTRACT_SUBREG
4962 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4966 ssub)))>, Requires<[NotForCodeSize]>;
4968 def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
4969 (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
4970 def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
4971 (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
4972 def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
4973 (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
4974 def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
4975 (LDURHi GPR64sp:$Rn, simm9:$offset)>;
4977 // 32-bits to 32-bits are handled in target specific dag combine:
4978 // performIntToFpCombine.
4979 // 64-bits integer to 32-bits floating point, not possible with
4980 // SCVTF on floating point registers (both source and destination
4981 // must have the same size).
4983 // Here are the patterns for 8, 16, 32, and 64-bits to double.
4984 // 8-bits -> double. 3 size step-up: give up.
4985 // 16-bits -> double. 2 size step.
4986 class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
4987 : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
4988 (SCVTFv1i64 (f64 (EXTRACT_SUBREG
4993 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5000 Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
5002 def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
5003 (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
5004 def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
5005 (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
5006 def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
5007 (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
5008 def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
5009 (LDURHi GPR64sp:$Rn, simm9:$offset)>;
5010 // 32-bits -> double. 1 size step-up.
5011 class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
5012 : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
5013 (SCVTFv1i64 (f64 (EXTRACT_SUBREG
5015 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5019 dsub)))>, Requires<[NotForCodeSize]>;
5021 def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
5022 (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
5023 def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
5024 (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
5025 def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
5026 (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
5027 def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
5028 (LDURSi GPR64sp:$Rn, simm9:$offset)>;
5030 // 64-bits -> double are handled in target specific dag combine:
5031 // performIntToFpCombine.
5034 //----------------------------------------------------------------------------
5035 // AdvSIMD Load-Store Structure
5036 //----------------------------------------------------------------------------
5037 defm LD1 : SIMDLd1Multiple<"ld1">;
5038 defm LD2 : SIMDLd2Multiple<"ld2">;
5039 defm LD3 : SIMDLd3Multiple<"ld3">;
5040 defm LD4 : SIMDLd4Multiple<"ld4">;
5042 defm ST1 : SIMDSt1Multiple<"st1">;
5043 defm ST2 : SIMDSt2Multiple<"st2">;
5044 defm ST3 : SIMDSt3Multiple<"st3">;
5045 defm ST4 : SIMDSt4Multiple<"st4">;
5047 class Ld1Pat<ValueType ty, Instruction INST>
5048 : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;
5050 def : Ld1Pat<v16i8, LD1Onev16b>;
5051 def : Ld1Pat<v8i16, LD1Onev8h>;
5052 def : Ld1Pat<v4i32, LD1Onev4s>;
5053 def : Ld1Pat<v2i64, LD1Onev2d>;
5054 def : Ld1Pat<v8i8, LD1Onev8b>;
5055 def : Ld1Pat<v4i16, LD1Onev4h>;
5056 def : Ld1Pat<v2i32, LD1Onev2s>;
5057 def : Ld1Pat<v1i64, LD1Onev1d>;
5059 class St1Pat<ValueType ty, Instruction INST>
5060 : Pat<(store ty:$Vt, GPR64sp:$Rn),
5061 (INST ty:$Vt, GPR64sp:$Rn)>;
5063 def : St1Pat<v16i8, ST1Onev16b>;
5064 def : St1Pat<v8i16, ST1Onev8h>;
5065 def : St1Pat<v4i32, ST1Onev4s>;
5066 def : St1Pat<v2i64, ST1Onev2d>;
5067 def : St1Pat<v8i8, ST1Onev8b>;
5068 def : St1Pat<v4i16, ST1Onev4h>;
5069 def : St1Pat<v2i32, ST1Onev2s>;
5070 def : St1Pat<v1i64, ST1Onev1d>;
5076 defm LD1R : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
5077 defm LD2R : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
5078 defm LD3R : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
5079 defm LD4R : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
5080 let mayLoad = 1, hasSideEffects = 0 in {
5081 defm LD1 : SIMDLdSingleBTied<0, 0b000, "ld1", VecListOneb, GPR64pi1>;
5082 defm LD1 : SIMDLdSingleHTied<0, 0b010, 0, "ld1", VecListOneh, GPR64pi2>;
5083 defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes, GPR64pi4>;
5084 defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned, GPR64pi8>;
5085 defm LD2 : SIMDLdSingleBTied<1, 0b000, "ld2", VecListTwob, GPR64pi2>;
5086 defm LD2 : SIMDLdSingleHTied<1, 0b010, 0, "ld2", VecListTwoh, GPR64pi4>;
5087 defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos, GPR64pi8>;
5088 defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod, GPR64pi16>;
5089 defm LD3 : SIMDLdSingleBTied<0, 0b001, "ld3", VecListThreeb, GPR64pi3>;
5090 defm LD3 : SIMDLdSingleHTied<0, 0b011, 0, "ld3", VecListThreeh, GPR64pi6>;
5091 defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
5092 defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
5093 defm LD4 : SIMDLdSingleBTied<1, 0b001, "ld4", VecListFourb, GPR64pi4>;
5094 defm LD4 : SIMDLdSingleHTied<1, 0b011, 0, "ld4", VecListFourh, GPR64pi8>;
5095 defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours, GPR64pi16>;
5096 defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd, GPR64pi32>;
5099 def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
5100 (LD1Rv8b GPR64sp:$Rn)>;
5101 def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
5102 (LD1Rv16b GPR64sp:$Rn)>;
5103 def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
5104 (LD1Rv4h GPR64sp:$Rn)>;
5105 def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
5106 (LD1Rv8h GPR64sp:$Rn)>;
5107 def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
5108 (LD1Rv2s GPR64sp:$Rn)>;
5109 def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
5110 (LD1Rv4s GPR64sp:$Rn)>;
5111 def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
5112 (LD1Rv2d GPR64sp:$Rn)>;
5113 def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
5114 (LD1Rv1d GPR64sp:$Rn)>;
5115 // Grab the floating point version too
5116 def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
5117 (LD1Rv2s GPR64sp:$Rn)>;
5118 def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
5119 (LD1Rv4s GPR64sp:$Rn)>;
5120 def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
5121 (LD1Rv2d GPR64sp:$Rn)>;
5122 def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
5123 (LD1Rv1d GPR64sp:$Rn)>;
5124 def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
5125 (LD1Rv4h GPR64sp:$Rn)>;
5126 def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
5127 (LD1Rv8h GPR64sp:$Rn)>;
5129 class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
5130 ValueType VTy, ValueType STy, Instruction LD1>
5131 : Pat<(vector_insert (VTy VecListOne128:$Rd),
5132 (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
5133 (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;
5135 def : Ld1Lane128Pat<extloadi8, VectorIndexB, v16i8, i32, LD1i8>;
5136 def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
5137 def : Ld1Lane128Pat<load, VectorIndexS, v4i32, i32, LD1i32>;
5138 def : Ld1Lane128Pat<load, VectorIndexS, v4f32, f32, LD1i32>;
5139 def : Ld1Lane128Pat<load, VectorIndexD, v2i64, i64, LD1i64>;
5140 def : Ld1Lane128Pat<load, VectorIndexD, v2f64, f64, LD1i64>;
5141 def : Ld1Lane128Pat<load, VectorIndexH, v8f16, f16, LD1i16>;
5143 class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
5144 ValueType VTy, ValueType STy, Instruction LD1>
5145 : Pat<(vector_insert (VTy VecListOne64:$Rd),
5146 (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
5148 (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
5149 VecIndex:$idx, GPR64sp:$Rn),
5152 def : Ld1Lane64Pat<extloadi8, VectorIndexB, v8i8, i32, LD1i8>;
5153 def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
5154 def : Ld1Lane64Pat<load, VectorIndexS, v2i32, i32, LD1i32>;
5155 def : Ld1Lane64Pat<load, VectorIndexS, v2f32, f32, LD1i32>;
5156 def : Ld1Lane64Pat<load, VectorIndexH, v4f16, f16, LD1i16>;
5159 defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
5160 defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
5161 defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
5162 defm LD4 : SIMDLdSt4SingleAliases<"ld4">;
5165 defm ST1 : SIMDStSingleB<0, 0b000, "st1", VecListOneb, GPR64pi1>;
5166 defm ST1 : SIMDStSingleH<0, 0b010, 0, "st1", VecListOneh, GPR64pi2>;
5167 defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
5168 defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
5170 let AddedComplexity = 19 in
5171 class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
5172 ValueType VTy, ValueType STy, Instruction ST1>
5174 (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
5176 (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;
5178 def : St1Lane128Pat<truncstorei8, VectorIndexB, v16i8, i32, ST1i8>;
5179 def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
5180 def : St1Lane128Pat<store, VectorIndexS, v4i32, i32, ST1i32>;
5181 def : St1Lane128Pat<store, VectorIndexS, v4f32, f32, ST1i32>;
5182 def : St1Lane128Pat<store, VectorIndexD, v2i64, i64, ST1i64>;
5183 def : St1Lane128Pat<store, VectorIndexD, v2f64, f64, ST1i64>;
5184 def : St1Lane128Pat<store, VectorIndexH, v8f16, f16, ST1i16>;
5186 let AddedComplexity = 19 in
5187 class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
5188 ValueType VTy, ValueType STy, Instruction ST1>
5190 (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
5192 (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
5193 VecIndex:$idx, GPR64sp:$Rn)>;
5195 def : St1Lane64Pat<truncstorei8, VectorIndexB, v8i8, i32, ST1i8>;
5196 def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
5197 def : St1Lane64Pat<store, VectorIndexS, v2i32, i32, ST1i32>;
5198 def : St1Lane64Pat<store, VectorIndexS, v2f32, f32, ST1i32>;
5199 def : St1Lane64Pat<store, VectorIndexH, v4f16, f16, ST1i16>;
5201 multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
5202 ValueType VTy, ValueType STy, Instruction ST1,
5204 def : Pat<(scalar_store
5205 (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
5206 GPR64sp:$Rn, offset),
5207 (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
5208 VecIndex:$idx, GPR64sp:$Rn, XZR)>;
5210 def : Pat<(scalar_store
5211 (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
5212 GPR64sp:$Rn, GPR64:$Rm),
5213 (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
5214 VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
5217 defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
5218 defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
5220 defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
5221 defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
5222 defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
5223 defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
5224 defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
5226 multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
5227 ValueType VTy, ValueType STy, Instruction ST1,
5229 def : Pat<(scalar_store
5230 (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
5231 GPR64sp:$Rn, offset),
5232 (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;
5234 def : Pat<(scalar_store
5235 (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
5236 GPR64sp:$Rn, GPR64:$Rm),
5237 (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
5240 defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
5242 defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
5244 defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
5245 defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
5246 defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
5247 defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
5248 defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
5250 let mayStore = 1, hasSideEffects = 0 in {
5251 defm ST2 : SIMDStSingleB<1, 0b000, "st2", VecListTwob, GPR64pi2>;
5252 defm ST2 : SIMDStSingleH<1, 0b010, 0, "st2", VecListTwoh, GPR64pi4>;
5253 defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos, GPR64pi8>;
5254 defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod, GPR64pi16>;
5255 defm ST3 : SIMDStSingleB<0, 0b001, "st3", VecListThreeb, GPR64pi3>;
5256 defm ST3 : SIMDStSingleH<0, 0b011, 0, "st3", VecListThreeh, GPR64pi6>;
5257 defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
5258 defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
5259 defm ST4 : SIMDStSingleB<1, 0b001, "st4", VecListFourb, GPR64pi4>;
5260 defm ST4 : SIMDStSingleH<1, 0b011, 0, "st4", VecListFourh, GPR64pi8>;
5261 defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours, GPR64pi16>;
5262 defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd, GPR64pi32>;
5265 defm ST1 : SIMDLdSt1SingleAliases<"st1">;
5266 defm ST2 : SIMDLdSt2SingleAliases<"st2">;
5267 defm ST3 : SIMDLdSt3SingleAliases<"st3">;
5268 defm ST4 : SIMDLdSt4SingleAliases<"st4">;
5270 //----------------------------------------------------------------------------
5271 // Crypto extensions
5272 //----------------------------------------------------------------------------
5274 def AESErr : AESTiedInst<0b0100, "aese", int_aarch64_crypto_aese>;
5275 def AESDrr : AESTiedInst<0b0101, "aesd", int_aarch64_crypto_aesd>;
5276 def AESMCrr : AESInst< 0b0110, "aesmc", int_aarch64_crypto_aesmc>;
5277 def AESIMCrr : AESInst< 0b0111, "aesimc", int_aarch64_crypto_aesimc>;
5279 def SHA1Crrr : SHATiedInstQSV<0b000, "sha1c", int_aarch64_crypto_sha1c>;
5280 def SHA1Prrr : SHATiedInstQSV<0b001, "sha1p", int_aarch64_crypto_sha1p>;
5281 def SHA1Mrrr : SHATiedInstQSV<0b010, "sha1m", int_aarch64_crypto_sha1m>;
5282 def SHA1SU0rrr : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
5283 def SHA256Hrrr : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
5284 def SHA256H2rrr : SHATiedInstQQV<0b101, "sha256h2",int_aarch64_crypto_sha256h2>;
5285 def SHA256SU1rrr :SHATiedInstVVV<0b110, "sha256su1",int_aarch64_crypto_sha256su1>;
5287 def SHA1Hrr : SHAInstSS< 0b0000, "sha1h", int_aarch64_crypto_sha1h>;
5288 def SHA1SU1rr : SHATiedInstVV<0b0001, "sha1su1", int_aarch64_crypto_sha1su1>;
5289 def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_aarch64_crypto_sha256su0>;
5291 //----------------------------------------------------------------------------
5293 //----------------------------------------------------------------------------
5294 // FIXME: Like for X86, these should go in their own separate .td file.
5296 // Any instruction that defines a 32-bit result leaves the high half of the
5297 // register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
5298 // be copying from a truncate. But any other 32-bit operation will zero-extend
5300 // FIXME: X86 also checks for CMOV here. Do we need something similar?
5301 def def32 : PatLeaf<(i32 GPR32:$src), [{
5302 return N->getOpcode() != ISD::TRUNCATE &&
5303 N->getOpcode() != TargetOpcode::EXTRACT_SUBREG &&
5304 N->getOpcode() != ISD::CopyFromReg;
5307 // In the case of a 32-bit def that is known to implicitly zero-extend,
5308 // we can use a SUBREG_TO_REG.
5309 def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;
5311 // For an anyext, we don't care what the high bits are, so we can perform an
5312 // INSERT_SUBREF into an IMPLICIT_DEF.
5313 def : Pat<(i64 (anyext GPR32:$src)),
5314 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
5316 // When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
5317 // then assert the extension has happened.
5318 def : Pat<(i64 (zext GPR32:$src)),
5319 (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
5321 // To sign extend, we use a signed bitfield move instruction (SBFM) on the
5322 // containing super-reg.
5323 def : Pat<(i64 (sext GPR32:$src)),
5324 (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
5325 def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
5326 def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
5327 def : Pat<(i64 (sext_inreg GPR64:$src, i8)), (SBFMXri GPR64:$src, 0, 7)>;
5328 def : Pat<(i64 (sext_inreg GPR64:$src, i1)), (SBFMXri GPR64:$src, 0, 0)>;
5329 def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
5330 def : Pat<(i32 (sext_inreg GPR32:$src, i8)), (SBFMWri GPR32:$src, 0, 7)>;
5331 def : Pat<(i32 (sext_inreg GPR32:$src, i1)), (SBFMWri GPR32:$src, 0, 0)>;
5333 def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
5334 (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
5335 (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
5336 def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
5337 (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
5338 (i64 (i64shift_sext_i8 imm0_63:$imm)))>;
5340 def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
5341 (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
5342 (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
5343 def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
5344 (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
5345 (i64 (i64shift_sext_i16 imm0_63:$imm)))>;
5347 def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
5348 (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
5349 (i64 (i64shift_a imm0_63:$imm)),
5350 (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
5352 // sra patterns have an AddedComplexity of 10, so make sure we have a higher
5353 // AddedComplexity for the following patterns since we want to match sext + sra
5354 // patterns before we attempt to match a single sra node.
5355 let AddedComplexity = 20 in {
5356 // We support all sext + sra combinations which preserve at least one bit of the
5357 // original value which is to be sign extended. E.g. we support shifts up to
5359 def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
5360 (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
5361 def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
5362 (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;
5364 def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
5365 (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
5366 def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
5367 (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;
5369 def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
5370 (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
5371 (i64 imm0_31:$imm), 31)>;
5372 } // AddedComplexity = 20
5374 // To truncate, we can simply extract from a subregister.
5375 def : Pat<(i32 (trunc GPR64sp:$src)),
5376 (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;
5378 // __builtin_trap() uses the BRK instruction on AArch64.
5379 def : Pat<(trap), (BRK 1)>;
5381 // Conversions within AdvSIMD types in the same register size are free.
5382 // But because we need a consistent lane ordering, in big endian many
5383 // conversions require one or more REV instructions.
5385 // Consider a simple memory load followed by a bitconvert then a store.
5387 // v1 = BITCAST v2i32 v0 to v4i16
5390 // In big endian mode every memory access has an implicit byte swap. LDR and
5391 // STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
5392 // is, they treat the vector as a sequence of elements to be byte-swapped.
5393 // The two pairs of instructions are fundamentally incompatible. We've decided
5394 // to use LD1/ST1 only to simplify compiler implementation.
5396 // LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
5397 // the original code sequence:
5399 // v1 = REV v2i32 (implicit)
5400 // v2 = BITCAST v2i32 v1 to v4i16
5401 // v3 = REV v4i16 v2 (implicit)
5404 // But this is now broken - the value stored is different to the value loaded
5405 // due to lane reordering. To fix this, on every BITCAST we must perform two
5408 // v1 = REV v2i32 (implicit)
5410 // v3 = BITCAST v2i32 v2 to v4i16
5412 // v5 = REV v4i16 v4 (implicit)
5415 // This means an extra two instructions, but actually in most cases the two REV
5416 // instructions can be combined into one. For example:
5417 // (REV64_2s (REV64_4h X)) === (REV32_4h X)
5419 // There is also no 128-bit REV instruction. This must be synthesized with an
5422 // Most bitconverts require some sort of conversion. The only exceptions are:
5423 // a) Identity conversions - vNfX <-> vNiX
5424 // b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
5427 // Natural vector casts (64 bit)
5428 def : Pat<(v8i8 (AArch64NvCast (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
5429 def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
5430 def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
5431 def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
5432 def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
5433 def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
5435 def : Pat<(v8i8 (AArch64NvCast (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
5436 def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
5437 def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
5438 def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
5439 def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
5441 def : Pat<(v8i8 (AArch64NvCast (v8i8 FPR64:$src))), (v8i8 FPR64:$src)>;
5442 def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
5443 def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
5444 def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
5445 def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
5447 def : Pat<(v8i8 (AArch64NvCast (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
5448 def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
5449 def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
5450 def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
5451 def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
5452 def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
5453 def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
5455 def : Pat<(v8i8 (AArch64NvCast (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
5456 def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
5457 def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
5458 def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
5459 def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
5461 // Natural vector casts (128 bit)
5462 def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
5463 def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
5464 def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
5465 def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
5466 def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
5467 def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
5468 def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
5470 def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
5471 def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
5472 def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
5473 def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
5474 def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
5475 def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
5476 def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
5478 def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
5479 def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
5480 def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
5481 def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
5482 def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
5483 def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
5484 def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
5486 def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
5487 def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
5488 def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
5489 def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
5490 def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
5491 def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
5492 def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
5494 def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
5495 def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
5496 def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
5497 def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
5498 def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
5499 def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
5500 def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
5502 def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
5503 def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
5504 def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
5505 def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
5506 def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
5507 def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
5508 def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
5510 let Predicates = [IsLE] in {
5511 def : Pat<(v8i8 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5512 def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5513 def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5514 def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5515 def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5517 def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
5518 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5519 def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
5520 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5521 def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
5522 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5523 def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
5524 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5525 def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
5526 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5527 def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
5528 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5530 let Predicates = [IsBE] in {
5531 def : Pat<(v8i8 (bitconvert GPR64:$Xn)),
5532 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
5533 def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
5534 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
5535 def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
5536 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
5537 def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
5538 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
5539 def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
5540 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
5542 def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
5543 (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
5544 def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
5545 (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
5546 def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
5547 (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
5548 def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
5549 (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
5550 def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
5551 (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
5553 def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5554 def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5555 def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
5556 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5557 def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
5558 (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5559 def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
5560 (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5561 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;
5563 def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
5564 (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
5565 def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
5566 (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
5567 def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
5568 (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5569 def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
5570 (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
5571 def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
5572 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5574 let Predicates = [IsLE] in {
5575 def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
5576 def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
5577 def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
5578 def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
5579 def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
5581 let Predicates = [IsBE] in {
5582 def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
5583 (v1i64 (REV64v2i32 FPR64:$src))>;
5584 def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
5585 (v1i64 (REV64v4i16 FPR64:$src))>;
5586 def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))),
5587 (v1i64 (REV64v8i8 FPR64:$src))>;
5588 def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
5589 (v1i64 (REV64v4i16 FPR64:$src))>;
5590 def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
5591 (v1i64 (REV64v2i32 FPR64:$src))>;
5593 def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
5594 def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
5596 let Predicates = [IsLE] in {
5597 def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
5598 def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
5599 def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
5600 def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
5601 def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
5602 def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
5604 let Predicates = [IsBE] in {
5605 def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
5606 (v2i32 (REV64v2i32 FPR64:$src))>;
5607 def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
5608 (v2i32 (REV32v4i16 FPR64:$src))>;
5609 def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))),
5610 (v2i32 (REV32v8i8 FPR64:$src))>;
5611 def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))),
5612 (v2i32 (REV64v2i32 FPR64:$src))>;
5613 def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
5614 (v2i32 (REV64v2i32 FPR64:$src))>;
5615 def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
5616 (v2i32 (REV64v4i16 FPR64:$src))>;
5618 def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
5620 let Predicates = [IsLE] in {
5621 def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
5622 def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
5623 def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
5624 def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
5625 def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
5626 def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
5627 def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
5629 let Predicates = [IsBE] in {
5630 def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
5631 (v4i16 (REV64v4i16 FPR64:$src))>;
5632 def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
5633 (v4i16 (REV32v4i16 FPR64:$src))>;
5634 def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))),
5635 (v4i16 (REV16v8i8 FPR64:$src))>;
5636 def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))),
5637 (v4i16 (REV64v4i16 FPR64:$src))>;
5638 def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))),
5639 (v4i16 (REV32v4i16 FPR64:$src))>;
5640 def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
5641 (v4i16 (REV32v4i16 FPR64:$src))>;
5642 def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
5643 (v4i16 (REV64v4i16 FPR64:$src))>;
5646 let Predicates = [IsLE] in {
5647 def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
5648 def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
5649 def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
5650 def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
5651 def : Pat<(v4f16 (bitconvert (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
5652 def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
5653 def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;
5655 let Predicates = [IsBE] in {
5656 def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
5657 (v4f16 (REV64v4i16 FPR64:$src))>;
5658 def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
5659 (v4f16 (REV64v4i16 FPR64:$src))>;
5660 def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))),
5661 (v4f16 (REV64v4i16 FPR64:$src))>;
5662 def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))),
5663 (v4f16 (REV16v8i8 FPR64:$src))>;
5664 def : Pat<(v4f16 (bitconvert (f64 FPR64:$src))),
5665 (v4f16 (REV64v4i16 FPR64:$src))>;
5666 def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
5667 (v4f16 (REV64v4i16 FPR64:$src))>;
5668 def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
5669 (v4f16 (REV64v4i16 FPR64:$src))>;
5674 let Predicates = [IsLE] in {
5675 def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))), (v8i8 FPR64:$src)>;
5676 def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
5677 def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
5678 def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
5679 def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
5680 def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))), (v8i8 FPR64:$src)>;
5681 def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))), (v8i8 FPR64:$src)>;
5683 let Predicates = [IsBE] in {
5684 def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))),
5685 (v8i8 (REV64v8i8 FPR64:$src))>;
5686 def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))),
5687 (v8i8 (REV32v8i8 FPR64:$src))>;
5688 def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))),
5689 (v8i8 (REV16v8i8 FPR64:$src))>;
5690 def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))),
5691 (v8i8 (REV64v8i8 FPR64:$src))>;
5692 def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))),
5693 (v8i8 (REV32v8i8 FPR64:$src))>;
5694 def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))),
5695 (v8i8 (REV64v8i8 FPR64:$src))>;
5696 def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))),
5697 (v8i8 (REV16v8i8 FPR64:$src))>;
5700 let Predicates = [IsLE] in {
5701 def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))), (f64 FPR64:$src)>;
5702 def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))), (f64 FPR64:$src)>;
5703 def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))), (f64 FPR64:$src)>;
5704 def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))), (f64 FPR64:$src)>;
5705 def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))), (f64 FPR64:$src)>;
5707 let Predicates = [IsBE] in {
5708 def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))),
5709 (f64 (REV64v2i32 FPR64:$src))>;
5710 def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))),
5711 (f64 (REV64v4i16 FPR64:$src))>;
5712 def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))),
5713 (f64 (REV64v2i32 FPR64:$src))>;
5714 def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))),
5715 (f64 (REV64v8i8 FPR64:$src))>;
5716 def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))),
5717 (f64 (REV64v4i16 FPR64:$src))>;
5719 def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
5720 def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
5722 let Predicates = [IsLE] in {
5723 def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
5724 def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
5725 def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))), (v1f64 FPR64:$src)>;
5726 def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
5727 def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
5729 let Predicates = [IsBE] in {
5730 def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
5731 (v1f64 (REV64v2i32 FPR64:$src))>;
5732 def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
5733 (v1f64 (REV64v4i16 FPR64:$src))>;
5734 def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))),
5735 (v1f64 (REV64v8i8 FPR64:$src))>;
5736 def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
5737 (v1f64 (REV64v2i32 FPR64:$src))>;
5738 def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
5739 (v1f64 (REV64v4i16 FPR64:$src))>;
5741 def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
5742 def : Pat<(v1f64 (bitconvert (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
5744 let Predicates = [IsLE] in {
5745 def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
5746 def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
5747 def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
5748 def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
5749 def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
5750 def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
5752 let Predicates = [IsBE] in {
5753 def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
5754 (v2f32 (REV64v2i32 FPR64:$src))>;
5755 def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
5756 (v2f32 (REV32v4i16 FPR64:$src))>;
5757 def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))),
5758 (v2f32 (REV32v8i8 FPR64:$src))>;
5759 def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
5760 (v2f32 (REV64v2i32 FPR64:$src))>;
5761 def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))),
5762 (v2f32 (REV64v2i32 FPR64:$src))>;
5763 def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
5764 (v2f32 (REV64v4i16 FPR64:$src))>;
5766 def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
5768 let Predicates = [IsLE] in {
5769 def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
5770 def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
5771 def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
5772 def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
5773 def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
5774 def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
5775 def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
5777 let Predicates = [IsBE] in {
5778 def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
5779 (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
5780 def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
5781 (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
5782 (REV64v4i32 FPR128:$src), (i32 8)))>;
5783 def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
5784 (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
5785 (REV64v8i16 FPR128:$src), (i32 8)))>;
5786 def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
5787 (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
5788 (REV64v8i16 FPR128:$src), (i32 8)))>;
5789 def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
5790 (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
5791 def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
5792 (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
5793 (REV64v4i32 FPR128:$src), (i32 8)))>;
5794 def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
5795 (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
5796 (REV64v16i8 FPR128:$src), (i32 8)))>;
5799 let Predicates = [IsLE] in {
5800 def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))), (v2f64 FPR128:$src)>;
5801 def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
5802 def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
5803 def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
5804 def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
5805 def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
5807 let Predicates = [IsBE] in {
5808 def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))),
5809 (v2f64 (EXTv16i8 FPR128:$src,
5810 FPR128:$src, (i32 8)))>;
5811 def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
5812 (v2f64 (REV64v4i32 FPR128:$src))>;
5813 def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
5814 (v2f64 (REV64v8i16 FPR128:$src))>;
5815 def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
5816 (v2f64 (REV64v8i16 FPR128:$src))>;
5817 def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
5818 (v2f64 (REV64v16i8 FPR128:$src))>;
5819 def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
5820 (v2f64 (REV64v4i32 FPR128:$src))>;
5822 def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
5824 let Predicates = [IsLE] in {
5825 def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))), (v4f32 FPR128:$src)>;
5826 def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
5827 def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
5828 def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
5829 def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
5830 def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
5832 let Predicates = [IsBE] in {
5833 def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))),
5834 (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
5835 (REV64v4i32 FPR128:$src), (i32 8)))>;
5836 def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
5837 (v4f32 (REV32v8i16 FPR128:$src))>;
5838 def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
5839 (v4f32 (REV32v8i16 FPR128:$src))>;
5840 def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
5841 (v4f32 (REV32v16i8 FPR128:$src))>;
5842 def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
5843 (v4f32 (REV64v4i32 FPR128:$src))>;
5844 def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
5845 (v4f32 (REV64v4i32 FPR128:$src))>;
5847 def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
5849 let Predicates = [IsLE] in {
5850 def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))), (v2i64 FPR128:$src)>;
5851 def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
5852 def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
5853 def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
5854 def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
5855 def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
5857 let Predicates = [IsBE] in {
5858 def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))),
5859 (v2i64 (EXTv16i8 FPR128:$src,
5860 FPR128:$src, (i32 8)))>;
5861 def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
5862 (v2i64 (REV64v4i32 FPR128:$src))>;
5863 def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
5864 (v2i64 (REV64v8i16 FPR128:$src))>;
5865 def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
5866 (v2i64 (REV64v16i8 FPR128:$src))>;
5867 def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
5868 (v2i64 (REV64v4i32 FPR128:$src))>;
5869 def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
5870 (v2i64 (REV64v8i16 FPR128:$src))>;
5872 def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
5874 let Predicates = [IsLE] in {
5875 def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))), (v4i32 FPR128:$src)>;
5876 def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
5877 def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
5878 def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
5879 def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
5880 def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
5882 let Predicates = [IsBE] in {
5883 def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))),
5884 (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
5885 (REV64v4i32 FPR128:$src),
5887 def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
5888 (v4i32 (REV64v4i32 FPR128:$src))>;
5889 def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
5890 (v4i32 (REV32v8i16 FPR128:$src))>;
5891 def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
5892 (v4i32 (REV32v16i8 FPR128:$src))>;
5893 def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
5894 (v4i32 (REV64v4i32 FPR128:$src))>;
5895 def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
5896 (v4i32 (REV32v8i16 FPR128:$src))>;
5898 def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
5900 let Predicates = [IsLE] in {
5901 def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))), (v8i16 FPR128:$src)>;
5902 def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
5903 def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
5904 def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
5905 def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
5906 def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
5907 def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
5909 let Predicates = [IsBE] in {
5910 def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))),
5911 (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
5912 (REV64v8i16 FPR128:$src),
5914 def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
5915 (v8i16 (REV64v8i16 FPR128:$src))>;
5916 def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
5917 (v8i16 (REV32v8i16 FPR128:$src))>;
5918 def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
5919 (v8i16 (REV16v16i8 FPR128:$src))>;
5920 def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
5921 (v8i16 (REV64v8i16 FPR128:$src))>;
5922 def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
5923 (v8i16 (REV32v8i16 FPR128:$src))>;
5924 def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))),
5925 (v8i16 (REV32v8i16 FPR128:$src))>;
5928 let Predicates = [IsLE] in {
5929 def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))), (v8f16 FPR128:$src)>;
5930 def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
5931 def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
5932 def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
5933 def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
5934 def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
5935 def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
5937 let Predicates = [IsBE] in {
5938 def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))),
5939 (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
5940 (REV64v8i16 FPR128:$src),
5942 def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
5943 (v8f16 (REV64v8i16 FPR128:$src))>;
5944 def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
5945 (v8f16 (REV32v8i16 FPR128:$src))>;
5946 def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))),
5947 (v8f16 (REV64v8i16 FPR128:$src))>;
5948 def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
5949 (v8f16 (REV16v16i8 FPR128:$src))>;
5950 def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
5951 (v8f16 (REV64v8i16 FPR128:$src))>;
5952 def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
5953 (v8f16 (REV32v8i16 FPR128:$src))>;
5956 let Predicates = [IsLE] in {
5957 def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), (v16i8 FPR128:$src)>;
5958 def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
5959 def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
5960 def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
5961 def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
5962 def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
5963 def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
5965 let Predicates = [IsBE] in {
5966 def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))),
5967 (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
5968 (REV64v16i8 FPR128:$src),
5970 def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
5971 (v16i8 (REV64v16i8 FPR128:$src))>;
5972 def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
5973 (v16i8 (REV32v16i8 FPR128:$src))>;
5974 def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
5975 (v16i8 (REV16v16i8 FPR128:$src))>;
5976 def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
5977 (v16i8 (REV64v16i8 FPR128:$src))>;
5978 def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
5979 (v16i8 (REV32v16i8 FPR128:$src))>;
5980 def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
5981 (v16i8 (REV16v16i8 FPR128:$src))>;
5984 def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
5985 (EXTRACT_SUBREG V128:$Rn, dsub)>;
5986 def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
5987 (EXTRACT_SUBREG V128:$Rn, dsub)>;
5988 def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
5989 (EXTRACT_SUBREG V128:$Rn, dsub)>;
5990 def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
5991 (EXTRACT_SUBREG V128:$Rn, dsub)>;
5992 def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
5993 (EXTRACT_SUBREG V128:$Rn, dsub)>;
5994 def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
5995 (EXTRACT_SUBREG V128:$Rn, dsub)>;
5996 def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
5997 (EXTRACT_SUBREG V128:$Rn, dsub)>;
5999 def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
6000 (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
6001 def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
6002 (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
6003 def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
6004 (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
6005 def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
6006 (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
6008 // A 64-bit subvector insert to the first 128-bit vector position
6009 // is a subregister copy that needs no instruction.
6010 def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (i32 0)),
6011 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6012 def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (i32 0)),
6013 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6014 def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (i32 0)),
6015 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6016 def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (i32 0)),
6017 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6018 def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (i32 0)),
6019 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6020 def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (i32 0)),
6021 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6022 def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (i32 0)),
6023 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6025 // Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
6027 def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
6028 (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
6029 (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
6030 def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
6031 (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
6032 (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
6033 // vector_extract on 64-bit vectors gets promoted to a 128 bit vector,
6034 // so we match on v4f32 here, not v2f32. This will also catch adding
6035 // the low two lanes of a true v4f32 vector.
6036 def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
6037 (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
6038 (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
6040 // Scalar 64-bit shifts in FPR64 registers.
6041 def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
6042 (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
6043 def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
6044 (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
6045 def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
6046 (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
6047 def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
6048 (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
6050 // Patterns for nontemporal/no-allocate stores.
6051 // We have to resort to tricks to turn a single-input store into a store pair,
6052 // because there is no single-input nontemporal store, only STNP.
6053 let Predicates = [IsLE] in {
6054 let AddedComplexity = 15 in {
6055 class NTStore128Pat<ValueType VT> :
6056 Pat<(nontemporalstore (VT FPR128:$Rt),
6057 (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
6058 (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
6059 (CPYi64 FPR128:$Rt, (i64 1)),
6060 GPR64sp:$Rn, simm7s8:$offset)>;
6062 def : NTStore128Pat<v2i64>;
6063 def : NTStore128Pat<v4i32>;
6064 def : NTStore128Pat<v8i16>;
6065 def : NTStore128Pat<v16i8>;
6067 class NTStore64Pat<ValueType VT> :
6068 Pat<(nontemporalstore (VT FPR64:$Rt),
6069 (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
6070 (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
6071 (CPYi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
6072 GPR64sp:$Rn, simm7s4:$offset)>;
6074 // FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
6075 def : NTStore64Pat<v1f64>;
6076 def : NTStore64Pat<v1i64>;
6077 def : NTStore64Pat<v2i32>;
6078 def : NTStore64Pat<v4i16>;
6079 def : NTStore64Pat<v8i8>;
6081 def : Pat<(nontemporalstore GPR64:$Rt,
6082 (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
6083 (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
6084 (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
6085 GPR64sp:$Rn, simm7s4:$offset)>;
6086 } // AddedComplexity=10
6087 } // Predicates = [IsLE]
6089 // Tail call return handling. These are all compiler pseudo-instructions,
6090 // so no encoding information or anything like that.
6091 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
6092 def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
6093 Sched<[WriteBrReg]>;
6094 def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
6095 Sched<[WriteBrReg]>;
6098 def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
6099 (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>;
6100 def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
6101 (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
6102 def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
6103 (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
6105 include "AArch64InstrAtomics.td"