1 //=- AArch64InstrInfo.td - Describe the AArch64 Instructions -*- tablegen -*-=//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // AArch64 Instruction definitions.
12 //===----------------------------------------------------------------------===//
14 //===----------------------------------------------------------------------===//
15 // ARM Instruction Predicate Definitions.
17 def HasV8_1a : Predicate<"Subtarget->hasV8_1aOps()">,
18 AssemblerPredicate<"HasV8_1aOps", "armv8.1a">;
19 def HasV8_2a : Predicate<"Subtarget->hasV8_2aOps()">,
20 AssemblerPredicate<"HasV8_2aOps", "armv8.2a">;
21 def HasFPARMv8 : Predicate<"Subtarget->hasFPARMv8()">,
22 AssemblerPredicate<"FeatureFPARMv8", "fp-armv8">;
23 def HasNEON : Predicate<"Subtarget->hasNEON()">,
24 AssemblerPredicate<"FeatureNEON", "neon">;
25 def HasCrypto : Predicate<"Subtarget->hasCrypto()">,
26 AssemblerPredicate<"FeatureCrypto", "crypto">;
27 def HasCRC : Predicate<"Subtarget->hasCRC()">,
28 AssemblerPredicate<"FeatureCRC", "crc">;
29 def HasLSE : Predicate<"Subtarget->hasLSE()">,
30 AssemblerPredicate<"FeatureLSE", "lse">;
31 def HasRAS : Predicate<"Subtarget->hasRAS()">,
32 AssemblerPredicate<"FeatureRAS", "ras">;
33 def HasPerfMon : Predicate<"Subtarget->hasPerfMon()">;
34 def HasFullFP16 : Predicate<"Subtarget->hasFullFP16()">,
35 AssemblerPredicate<"FeatureFullFP16", "fullfp16">;
36 def HasSPE : Predicate<"Subtarget->hasSPE()">,
37 AssemblerPredicate<"FeatureSPE", "spe">;
39 def IsLE : Predicate<"Subtarget->isLittleEndian()">;
40 def IsBE : Predicate<"!Subtarget->isLittleEndian()">;
41 def UseAlternateSExtLoadCVTF32
42 : Predicate<"Subtarget->useAlternateSExtLoadCVTF32Pattern()">;
44 //===----------------------------------------------------------------------===//
45 // AArch64-specific DAG Nodes.
48 // SDTBinaryArithWithFlagsOut - RES1, FLAGS = op LHS, RHS
49 def SDTBinaryArithWithFlagsOut : SDTypeProfile<2, 2,
52 SDTCisInt<0>, SDTCisVT<1, i32>]>;
54 // SDTBinaryArithWithFlagsIn - RES1, FLAGS = op LHS, RHS, FLAGS
55 def SDTBinaryArithWithFlagsIn : SDTypeProfile<1, 3,
61 // SDTBinaryArithWithFlagsInOut - RES1, FLAGS = op LHS, RHS, FLAGS
62 def SDTBinaryArithWithFlagsInOut : SDTypeProfile<2, 3,
69 def SDT_AArch64Brcond : SDTypeProfile<0, 3,
70 [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>,
72 def SDT_AArch64cbz : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisVT<1, OtherVT>]>;
73 def SDT_AArch64tbz : SDTypeProfile<0, 3, [SDTCisInt<0>, SDTCisInt<1>,
74 SDTCisVT<2, OtherVT>]>;
77 def SDT_AArch64CSel : SDTypeProfile<1, 4,
82 def SDT_AArch64CCMP : SDTypeProfile<1, 5,
89 def SDT_AArch64FCCMP : SDTypeProfile<1, 5,
96 def SDT_AArch64FCmp : SDTypeProfile<0, 2,
99 def SDT_AArch64Dup : SDTypeProfile<1, 1, [SDTCisVec<0>]>;
100 def SDT_AArch64DupLane : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisInt<2>]>;
101 def SDT_AArch64Zip : SDTypeProfile<1, 2, [SDTCisVec<0>,
103 SDTCisSameAs<0, 2>]>;
104 def SDT_AArch64MOVIedit : SDTypeProfile<1, 1, [SDTCisInt<1>]>;
105 def SDT_AArch64MOVIshift : SDTypeProfile<1, 2, [SDTCisInt<1>, SDTCisInt<2>]>;
106 def SDT_AArch64vecimm : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
107 SDTCisInt<2>, SDTCisInt<3>]>;
108 def SDT_AArch64UnaryVec: SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
109 def SDT_AArch64ExtVec: SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
110 SDTCisSameAs<0,2>, SDTCisInt<3>]>;
111 def SDT_AArch64vshift : SDTypeProfile<1, 2, [SDTCisSameAs<0,1>, SDTCisInt<2>]>;
113 def SDT_AArch64unvec : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>;
114 def SDT_AArch64fcmpz : SDTypeProfile<1, 1, []>;
115 def SDT_AArch64fcmp : SDTypeProfile<1, 2, [SDTCisSameAs<1,2>]>;
116 def SDT_AArch64binvec : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>,
118 def SDT_AArch64trivec : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>,
121 def SDT_AArch64TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>]>;
122 def SDT_AArch64PREFETCH : SDTypeProfile<0, 2, [SDTCisVT<0, i32>, SDTCisPtrTy<1>]>;
124 def SDT_AArch64ITOF : SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisSameAs<0,1>]>;
126 def SDT_AArch64TLSDescCall : SDTypeProfile<0, -2, [SDTCisPtrTy<0>,
129 // Generates the general dynamic sequences, i.e.
130 // adrp x0, :tlsdesc:var
131 // ldr x1, [x0, #:tlsdesc_lo12:var]
132 // add x0, x0, #:tlsdesc_lo12:var
136 // (the TPIDR_EL0 offset is put directly in X0, hence no "result" here)
137 // number of operands (the variable)
138 def SDT_AArch64TLSDescCallSeq : SDTypeProfile<0,1,
141 def SDT_AArch64WrapperLarge : SDTypeProfile<1, 4,
142 [SDTCisVT<0, i64>, SDTCisVT<1, i32>,
143 SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
144 SDTCisSameAs<1, 4>]>;
148 def AArch64adrp : SDNode<"AArch64ISD::ADRP", SDTIntUnaryOp, []>;
149 def AArch64addlow : SDNode<"AArch64ISD::ADDlow", SDTIntBinOp, []>;
150 def AArch64LOADgot : SDNode<"AArch64ISD::LOADgot", SDTIntUnaryOp>;
151 def AArch64callseq_start : SDNode<"ISD::CALLSEQ_START",
152 SDCallSeqStart<[ SDTCisVT<0, i32> ]>,
153 [SDNPHasChain, SDNPOutGlue]>;
154 def AArch64callseq_end : SDNode<"ISD::CALLSEQ_END",
155 SDCallSeqEnd<[ SDTCisVT<0, i32>,
157 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
158 def AArch64call : SDNode<"AArch64ISD::CALL",
159 SDTypeProfile<0, -1, [SDTCisPtrTy<0>]>,
160 [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
162 def AArch64brcond : SDNode<"AArch64ISD::BRCOND", SDT_AArch64Brcond,
164 def AArch64cbz : SDNode<"AArch64ISD::CBZ", SDT_AArch64cbz,
166 def AArch64cbnz : SDNode<"AArch64ISD::CBNZ", SDT_AArch64cbz,
168 def AArch64tbz : SDNode<"AArch64ISD::TBZ", SDT_AArch64tbz,
170 def AArch64tbnz : SDNode<"AArch64ISD::TBNZ", SDT_AArch64tbz,
174 def AArch64csel : SDNode<"AArch64ISD::CSEL", SDT_AArch64CSel>;
175 def AArch64csinv : SDNode<"AArch64ISD::CSINV", SDT_AArch64CSel>;
176 def AArch64csneg : SDNode<"AArch64ISD::CSNEG", SDT_AArch64CSel>;
177 def AArch64csinc : SDNode<"AArch64ISD::CSINC", SDT_AArch64CSel>;
178 def AArch64retflag : SDNode<"AArch64ISD::RET_FLAG", SDTNone,
179 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
180 def AArch64adc : SDNode<"AArch64ISD::ADC", SDTBinaryArithWithFlagsIn >;
181 def AArch64sbc : SDNode<"AArch64ISD::SBC", SDTBinaryArithWithFlagsIn>;
182 def AArch64add_flag : SDNode<"AArch64ISD::ADDS", SDTBinaryArithWithFlagsOut,
184 def AArch64sub_flag : SDNode<"AArch64ISD::SUBS", SDTBinaryArithWithFlagsOut>;
185 def AArch64and_flag : SDNode<"AArch64ISD::ANDS", SDTBinaryArithWithFlagsOut,
187 def AArch64adc_flag : SDNode<"AArch64ISD::ADCS", SDTBinaryArithWithFlagsInOut>;
188 def AArch64sbc_flag : SDNode<"AArch64ISD::SBCS", SDTBinaryArithWithFlagsInOut>;
190 def AArch64ccmp : SDNode<"AArch64ISD::CCMP", SDT_AArch64CCMP>;
191 def AArch64ccmn : SDNode<"AArch64ISD::CCMN", SDT_AArch64CCMP>;
192 def AArch64fccmp : SDNode<"AArch64ISD::FCCMP", SDT_AArch64FCCMP>;
194 def AArch64threadpointer : SDNode<"AArch64ISD::THREAD_POINTER", SDTPtrLeaf>;
196 def AArch64fcmp : SDNode<"AArch64ISD::FCMP", SDT_AArch64FCmp>;
198 def AArch64dup : SDNode<"AArch64ISD::DUP", SDT_AArch64Dup>;
199 def AArch64duplane8 : SDNode<"AArch64ISD::DUPLANE8", SDT_AArch64DupLane>;
200 def AArch64duplane16 : SDNode<"AArch64ISD::DUPLANE16", SDT_AArch64DupLane>;
201 def AArch64duplane32 : SDNode<"AArch64ISD::DUPLANE32", SDT_AArch64DupLane>;
202 def AArch64duplane64 : SDNode<"AArch64ISD::DUPLANE64", SDT_AArch64DupLane>;
204 def AArch64zip1 : SDNode<"AArch64ISD::ZIP1", SDT_AArch64Zip>;
205 def AArch64zip2 : SDNode<"AArch64ISD::ZIP2", SDT_AArch64Zip>;
206 def AArch64uzp1 : SDNode<"AArch64ISD::UZP1", SDT_AArch64Zip>;
207 def AArch64uzp2 : SDNode<"AArch64ISD::UZP2", SDT_AArch64Zip>;
208 def AArch64trn1 : SDNode<"AArch64ISD::TRN1", SDT_AArch64Zip>;
209 def AArch64trn2 : SDNode<"AArch64ISD::TRN2", SDT_AArch64Zip>;
211 def AArch64movi_edit : SDNode<"AArch64ISD::MOVIedit", SDT_AArch64MOVIedit>;
212 def AArch64movi_shift : SDNode<"AArch64ISD::MOVIshift", SDT_AArch64MOVIshift>;
213 def AArch64movi_msl : SDNode<"AArch64ISD::MOVImsl", SDT_AArch64MOVIshift>;
214 def AArch64mvni_shift : SDNode<"AArch64ISD::MVNIshift", SDT_AArch64MOVIshift>;
215 def AArch64mvni_msl : SDNode<"AArch64ISD::MVNImsl", SDT_AArch64MOVIshift>;
216 def AArch64movi : SDNode<"AArch64ISD::MOVI", SDT_AArch64MOVIedit>;
217 def AArch64fmov : SDNode<"AArch64ISD::FMOV", SDT_AArch64MOVIedit>;
219 def AArch64rev16 : SDNode<"AArch64ISD::REV16", SDT_AArch64UnaryVec>;
220 def AArch64rev32 : SDNode<"AArch64ISD::REV32", SDT_AArch64UnaryVec>;
221 def AArch64rev64 : SDNode<"AArch64ISD::REV64", SDT_AArch64UnaryVec>;
222 def AArch64ext : SDNode<"AArch64ISD::EXT", SDT_AArch64ExtVec>;
224 def AArch64vashr : SDNode<"AArch64ISD::VASHR", SDT_AArch64vshift>;
225 def AArch64vlshr : SDNode<"AArch64ISD::VLSHR", SDT_AArch64vshift>;
226 def AArch64vshl : SDNode<"AArch64ISD::VSHL", SDT_AArch64vshift>;
227 def AArch64sqshli : SDNode<"AArch64ISD::SQSHL_I", SDT_AArch64vshift>;
228 def AArch64uqshli : SDNode<"AArch64ISD::UQSHL_I", SDT_AArch64vshift>;
229 def AArch64sqshlui : SDNode<"AArch64ISD::SQSHLU_I", SDT_AArch64vshift>;
230 def AArch64srshri : SDNode<"AArch64ISD::SRSHR_I", SDT_AArch64vshift>;
231 def AArch64urshri : SDNode<"AArch64ISD::URSHR_I", SDT_AArch64vshift>;
233 def AArch64not: SDNode<"AArch64ISD::NOT", SDT_AArch64unvec>;
234 def AArch64bit: SDNode<"AArch64ISD::BIT", SDT_AArch64trivec>;
235 def AArch64bsl: SDNode<"AArch64ISD::BSL", SDT_AArch64trivec>;
237 def AArch64cmeq: SDNode<"AArch64ISD::CMEQ", SDT_AArch64binvec>;
238 def AArch64cmge: SDNode<"AArch64ISD::CMGE", SDT_AArch64binvec>;
239 def AArch64cmgt: SDNode<"AArch64ISD::CMGT", SDT_AArch64binvec>;
240 def AArch64cmhi: SDNode<"AArch64ISD::CMHI", SDT_AArch64binvec>;
241 def AArch64cmhs: SDNode<"AArch64ISD::CMHS", SDT_AArch64binvec>;
243 def AArch64fcmeq: SDNode<"AArch64ISD::FCMEQ", SDT_AArch64fcmp>;
244 def AArch64fcmge: SDNode<"AArch64ISD::FCMGE", SDT_AArch64fcmp>;
245 def AArch64fcmgt: SDNode<"AArch64ISD::FCMGT", SDT_AArch64fcmp>;
247 def AArch64cmeqz: SDNode<"AArch64ISD::CMEQz", SDT_AArch64unvec>;
248 def AArch64cmgez: SDNode<"AArch64ISD::CMGEz", SDT_AArch64unvec>;
249 def AArch64cmgtz: SDNode<"AArch64ISD::CMGTz", SDT_AArch64unvec>;
250 def AArch64cmlez: SDNode<"AArch64ISD::CMLEz", SDT_AArch64unvec>;
251 def AArch64cmltz: SDNode<"AArch64ISD::CMLTz", SDT_AArch64unvec>;
252 def AArch64cmtst : PatFrag<(ops node:$LHS, node:$RHS),
253 (AArch64not (AArch64cmeqz (and node:$LHS, node:$RHS)))>;
255 def AArch64fcmeqz: SDNode<"AArch64ISD::FCMEQz", SDT_AArch64fcmpz>;
256 def AArch64fcmgez: SDNode<"AArch64ISD::FCMGEz", SDT_AArch64fcmpz>;
257 def AArch64fcmgtz: SDNode<"AArch64ISD::FCMGTz", SDT_AArch64fcmpz>;
258 def AArch64fcmlez: SDNode<"AArch64ISD::FCMLEz", SDT_AArch64fcmpz>;
259 def AArch64fcmltz: SDNode<"AArch64ISD::FCMLTz", SDT_AArch64fcmpz>;
261 def AArch64bici: SDNode<"AArch64ISD::BICi", SDT_AArch64vecimm>;
262 def AArch64orri: SDNode<"AArch64ISD::ORRi", SDT_AArch64vecimm>;
264 def AArch64neg : SDNode<"AArch64ISD::NEG", SDT_AArch64unvec>;
266 def AArch64tcret: SDNode<"AArch64ISD::TC_RETURN", SDT_AArch64TCRET,
267 [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
269 def AArch64Prefetch : SDNode<"AArch64ISD::PREFETCH", SDT_AArch64PREFETCH,
270 [SDNPHasChain, SDNPSideEffect]>;
272 def AArch64sitof: SDNode<"AArch64ISD::SITOF", SDT_AArch64ITOF>;
273 def AArch64uitof: SDNode<"AArch64ISD::UITOF", SDT_AArch64ITOF>;
275 def AArch64tlsdesc_callseq : SDNode<"AArch64ISD::TLSDESC_CALLSEQ",
276 SDT_AArch64TLSDescCallSeq,
277 [SDNPInGlue, SDNPOutGlue, SDNPHasChain,
281 def AArch64WrapperLarge : SDNode<"AArch64ISD::WrapperLarge",
282 SDT_AArch64WrapperLarge>;
284 def AArch64NvCast : SDNode<"AArch64ISD::NVCAST", SDTUnaryOp>;
286 def SDT_AArch64mull : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
287 SDTCisSameAs<1, 2>]>;
288 def AArch64smull : SDNode<"AArch64ISD::SMULL", SDT_AArch64mull>;
289 def AArch64umull : SDNode<"AArch64ISD::UMULL", SDT_AArch64mull>;
291 def AArch64frecpe : SDNode<"AArch64ISD::FRECPE", SDTFPUnaryOp>;
292 def AArch64frecps : SDNode<"AArch64ISD::FRECPS", SDTFPBinOp>;
293 def AArch64frsqrte : SDNode<"AArch64ISD::FRSQRTE", SDTFPUnaryOp>;
294 def AArch64frsqrts : SDNode<"AArch64ISD::FRSQRTS", SDTFPBinOp>;
296 def AArch64saddv : SDNode<"AArch64ISD::SADDV", SDT_AArch64UnaryVec>;
297 def AArch64uaddv : SDNode<"AArch64ISD::UADDV", SDT_AArch64UnaryVec>;
298 def AArch64sminv : SDNode<"AArch64ISD::SMINV", SDT_AArch64UnaryVec>;
299 def AArch64uminv : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
300 def AArch64smaxv : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
301 def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;
303 //===----------------------------------------------------------------------===//
305 //===----------------------------------------------------------------------===//
307 // AArch64 Instruction Predicate Definitions.
308 def IsDarwin : Predicate<"Subtarget->isTargetDarwin()">;
309 def IsNotDarwin: Predicate<"!Subtarget->isTargetDarwin()">;
310 def ForCodeSize : Predicate<"ForCodeSize">;
311 def NotForCodeSize : Predicate<"!ForCodeSize">;
313 include "AArch64InstrFormats.td"
315 //===----------------------------------------------------------------------===//
317 //===----------------------------------------------------------------------===//
318 // Miscellaneous instructions.
319 //===----------------------------------------------------------------------===//
321 let Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1 in {
322 // We set Sched to empty list because we expect these instructions to simply get
323 // removed in most cases.
324 def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt),
325 [(AArch64callseq_start timm:$amt)]>, Sched<[]>;
326 def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
327 [(AArch64callseq_end timm:$amt1, timm:$amt2)]>,
329 } // Defs = [SP], Uses = [SP], hasSideEffects = 1, isCodeGenOnly = 1
331 let isReMaterializable = 1, isCodeGenOnly = 1 in {
332 // FIXME: The following pseudo instructions are only needed because remat
333 // cannot handle multiple instructions. When that changes, they can be
334 // removed, along with the AArch64Wrapper node.
336 let AddedComplexity = 10 in
337 def LOADgot : Pseudo<(outs GPR64:$dst), (ins i64imm:$addr),
338 [(set GPR64:$dst, (AArch64LOADgot tglobaladdr:$addr))]>,
341 // The MOVaddr instruction should match only when the add is not folded
342 // into a load or store address.
344 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
345 [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaladdr:$hi),
346 tglobaladdr:$low))]>,
347 Sched<[WriteAdrAdr]>;
349 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
350 [(set GPR64:$dst, (AArch64addlow (AArch64adrp tjumptable:$hi),
352 Sched<[WriteAdrAdr]>;
354 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
355 [(set GPR64:$dst, (AArch64addlow (AArch64adrp tconstpool:$hi),
357 Sched<[WriteAdrAdr]>;
359 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
360 [(set GPR64:$dst, (AArch64addlow (AArch64adrp tblockaddress:$hi),
361 tblockaddress:$low))]>,
362 Sched<[WriteAdrAdr]>;
364 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
365 [(set GPR64:$dst, (AArch64addlow (AArch64adrp tglobaltlsaddr:$hi),
366 tglobaltlsaddr:$low))]>,
367 Sched<[WriteAdrAdr]>;
369 : Pseudo<(outs GPR64:$dst), (ins i64imm:$hi, i64imm:$low),
370 [(set GPR64:$dst, (AArch64addlow (AArch64adrp texternalsym:$hi),
371 texternalsym:$low))]>,
372 Sched<[WriteAdrAdr]>;
374 } // isReMaterializable, isCodeGenOnly
376 def : Pat<(AArch64LOADgot tglobaltlsaddr:$addr),
377 (LOADgot tglobaltlsaddr:$addr)>;
379 def : Pat<(AArch64LOADgot texternalsym:$addr),
380 (LOADgot texternalsym:$addr)>;
382 def : Pat<(AArch64LOADgot tconstpool:$addr),
383 (LOADgot tconstpool:$addr)>;
385 //===----------------------------------------------------------------------===//
386 // System instructions.
387 //===----------------------------------------------------------------------===//
389 def HINT : HintI<"hint">;
390 def : InstAlias<"nop", (HINT 0b000)>;
391 def : InstAlias<"yield",(HINT 0b001)>;
392 def : InstAlias<"wfe", (HINT 0b010)>;
393 def : InstAlias<"wfi", (HINT 0b011)>;
394 def : InstAlias<"sev", (HINT 0b100)>;
395 def : InstAlias<"sevl", (HINT 0b101)>;
396 def : InstAlias<"esb", (HINT 0b10000)>, Requires<[HasRAS]>;
398 // v8.2a Statistical Profiling extension
399 def : InstAlias<"psb $op", (HINT psbhint_op:$op)>, Requires<[HasSPE]>;
401 // As far as LLVM is concerned this writes to the system's exclusive monitors.
402 let mayLoad = 1, mayStore = 1 in
403 def CLREX : CRmSystemI<imm0_15, 0b010, "clrex">;
405 // NOTE: ideally, this would have mayStore = 0, mayLoad = 0, but we cannot
406 // model patterns with sufficiently fine granularity.
407 let mayLoad = ?, mayStore = ? in {
408 def DMB : CRmSystemI<barrier_op, 0b101, "dmb",
409 [(int_aarch64_dmb (i32 imm32_0_15:$CRm))]>;
411 def DSB : CRmSystemI<barrier_op, 0b100, "dsb",
412 [(int_aarch64_dsb (i32 imm32_0_15:$CRm))]>;
414 def ISB : CRmSystemI<barrier_op, 0b110, "isb",
415 [(int_aarch64_isb (i32 imm32_0_15:$CRm))]>;
418 def : InstAlias<"clrex", (CLREX 0xf)>;
419 def : InstAlias<"isb", (ISB 0xf)>;
423 def MSRpstateImm1 : MSRpstateImm0_1;
424 def MSRpstateImm4 : MSRpstateImm0_15;
426 // The thread pointer (on Linux, at least, where this has been implemented) is
428 def : Pat<(AArch64threadpointer), (MRS 0xde82)>;
430 // The cycle counter PMC register is PMCCNTR_EL0.
431 let Predicates = [HasPerfMon] in
432 def : Pat<(readcyclecounter), (MRS 0xdce8)>;
434 // Generic system instructions
435 def SYSxt : SystemXtI<0, "sys">;
436 def SYSLxt : SystemLXtI<1, "sysl">;
438 def : InstAlias<"sys $op1, $Cn, $Cm, $op2",
439 (SYSxt imm0_7:$op1, sys_cr_op:$Cn,
440 sys_cr_op:$Cm, imm0_7:$op2, XZR)>;
442 //===----------------------------------------------------------------------===//
443 // Move immediate instructions.
444 //===----------------------------------------------------------------------===//
446 defm MOVK : InsertImmediate<0b11, "movk">;
447 defm MOVN : MoveImmediate<0b00, "movn">;
449 let PostEncoderMethod = "fixMOVZ" in
450 defm MOVZ : MoveImmediate<0b10, "movz">;
452 // First group of aliases covers an implicit "lsl #0".
453 def : InstAlias<"movk $dst, $imm", (MOVKWi GPR32:$dst, imm0_65535:$imm, 0)>;
454 def : InstAlias<"movk $dst, $imm", (MOVKXi GPR64:$dst, imm0_65535:$imm, 0)>;
455 def : InstAlias<"movn $dst, $imm", (MOVNWi GPR32:$dst, imm0_65535:$imm, 0)>;
456 def : InstAlias<"movn $dst, $imm", (MOVNXi GPR64:$dst, imm0_65535:$imm, 0)>;
457 def : InstAlias<"movz $dst, $imm", (MOVZWi GPR32:$dst, imm0_65535:$imm, 0)>;
458 def : InstAlias<"movz $dst, $imm", (MOVZXi GPR64:$dst, imm0_65535:$imm, 0)>;
460 // Next, we have various ELF relocations with the ":XYZ_g0:sym" syntax.
461 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
462 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
463 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
464 def : InstAlias<"movz $Rd, $sym", (MOVZXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;
466 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g3:$sym, 48)>;
467 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g2:$sym, 32)>;
468 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g1:$sym, 16)>;
469 def : InstAlias<"movn $Rd, $sym", (MOVNXi GPR64:$Rd, movz_symbol_g0:$sym, 0)>;
471 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g3:$sym, 48)>;
472 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g2:$sym, 32)>;
473 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g1:$sym, 16)>;
474 def : InstAlias<"movk $Rd, $sym", (MOVKXi GPR64:$Rd, movk_symbol_g0:$sym, 0)>;
476 def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
477 def : InstAlias<"movz $Rd, $sym", (MOVZWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
479 def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g1:$sym, 16)>;
480 def : InstAlias<"movn $Rd, $sym", (MOVNWi GPR32:$Rd, movz_symbol_g0:$sym, 0)>;
482 def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g1:$sym, 16)>;
483 def : InstAlias<"movk $Rd, $sym", (MOVKWi GPR32:$Rd, movk_symbol_g0:$sym, 0)>;
485 // Final group of aliases covers true "mov $Rd, $imm" cases.
486 multiclass movw_mov_alias<string basename,Instruction INST, RegisterClass GPR,
487 int width, int shift> {
488 def _asmoperand : AsmOperandClass {
489 let Name = basename # width # "_lsl" # shift # "MovAlias";
490 let PredicateMethod = "is" # basename # "MovAlias<" # width # ", "
492 let RenderMethod = "add" # basename # "MovAliasOperands<" # shift # ">";
495 def _movimm : Operand<i32> {
496 let ParserMatchClass = !cast<AsmOperandClass>(NAME # "_asmoperand");
499 def : InstAlias<"mov $Rd, $imm",
500 (INST GPR:$Rd, !cast<Operand>(NAME # "_movimm"):$imm, shift)>;
503 defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 0>;
504 defm : movw_mov_alias<"MOVZ", MOVZWi, GPR32, 32, 16>;
506 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 0>;
507 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 16>;
508 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 32>;
509 defm : movw_mov_alias<"MOVZ", MOVZXi, GPR64, 64, 48>;
511 defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 0>;
512 defm : movw_mov_alias<"MOVN", MOVNWi, GPR32, 32, 16>;
514 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 0>;
515 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 16>;
516 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 32>;
517 defm : movw_mov_alias<"MOVN", MOVNXi, GPR64, 64, 48>;
519 let isReMaterializable = 1, isCodeGenOnly = 1, isMoveImm = 1,
520 isAsCheapAsAMove = 1 in {
521 // FIXME: The following pseudo instructions are only needed because remat
522 // cannot handle multiple instructions. When that changes, we can select
523 // directly to the real instructions and get rid of these pseudos.
526 : Pseudo<(outs GPR32:$dst), (ins i32imm:$src),
527 [(set GPR32:$dst, imm:$src)]>,
530 : Pseudo<(outs GPR64:$dst), (ins i64imm:$src),
531 [(set GPR64:$dst, imm:$src)]>,
533 } // isReMaterializable, isCodeGenOnly
535 // If possible, we want to use MOVi32imm even for 64-bit moves. This gives the
536 // eventual expansion code fewer bits to worry about getting right. Marshalling
537 // the types is a little tricky though:
538 def i64imm_32bit : ImmLeaf<i64, [{
539 return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
542 def s64imm_32bit : ImmLeaf<i64, [{
543 int64_t Imm64 = static_cast<int64_t>(Imm);
544 return Imm64 >= std::numeric_limits<int32_t>::min() &&
545 Imm64 <= std::numeric_limits<int32_t>::max();
548 def trunc_imm : SDNodeXForm<imm, [{
549 return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i32);
552 def : Pat<(i64 i64imm_32bit:$src),
553 (SUBREG_TO_REG (i64 0), (MOVi32imm (trunc_imm imm:$src)), sub_32)>;
555 // Materialize FP constants via MOVi32imm/MOVi64imm (MachO large code model).
556 def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
557 return CurDAG->getTargetConstant(
558 N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
561 def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
562 return CurDAG->getTargetConstant(
563 N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
567 def : Pat<(f32 fpimm:$in),
568 (COPY_TO_REGCLASS (MOVi32imm (bitcast_fpimm_to_i32 f32:$in)), FPR32)>;
569 def : Pat<(f64 fpimm:$in),
570 (COPY_TO_REGCLASS (MOVi64imm (bitcast_fpimm_to_i64 f64:$in)), FPR64)>;
573 // Deal with the various forms of (ELF) large addressing with MOVZ/MOVK
575 def : Pat<(AArch64WrapperLarge tglobaladdr:$g3, tglobaladdr:$g2,
576 tglobaladdr:$g1, tglobaladdr:$g0),
577 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tglobaladdr:$g3, 48),
578 tglobaladdr:$g2, 32),
579 tglobaladdr:$g1, 16),
580 tglobaladdr:$g0, 0)>;
582 def : Pat<(AArch64WrapperLarge tblockaddress:$g3, tblockaddress:$g2,
583 tblockaddress:$g1, tblockaddress:$g0),
584 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tblockaddress:$g3, 48),
585 tblockaddress:$g2, 32),
586 tblockaddress:$g1, 16),
587 tblockaddress:$g0, 0)>;
589 def : Pat<(AArch64WrapperLarge tconstpool:$g3, tconstpool:$g2,
590 tconstpool:$g1, tconstpool:$g0),
591 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tconstpool:$g3, 48),
596 def : Pat<(AArch64WrapperLarge tjumptable:$g3, tjumptable:$g2,
597 tjumptable:$g1, tjumptable:$g0),
598 (MOVKXi (MOVKXi (MOVKXi (MOVZXi tjumptable:$g3, 48),
604 //===----------------------------------------------------------------------===//
605 // Arithmetic instructions.
606 //===----------------------------------------------------------------------===//
608 // Add/subtract with carry.
609 defm ADC : AddSubCarry<0, "adc", "adcs", AArch64adc, AArch64adc_flag>;
610 defm SBC : AddSubCarry<1, "sbc", "sbcs", AArch64sbc, AArch64sbc_flag>;
612 def : InstAlias<"ngc $dst, $src", (SBCWr GPR32:$dst, WZR, GPR32:$src)>;
613 def : InstAlias<"ngc $dst, $src", (SBCXr GPR64:$dst, XZR, GPR64:$src)>;
614 def : InstAlias<"ngcs $dst, $src", (SBCSWr GPR32:$dst, WZR, GPR32:$src)>;
615 def : InstAlias<"ngcs $dst, $src", (SBCSXr GPR64:$dst, XZR, GPR64:$src)>;
618 defm ADD : AddSub<0, "add", "sub", add>;
619 defm SUB : AddSub<1, "sub", "add">;
621 def : InstAlias<"mov $dst, $src",
622 (ADDWri GPR32sponly:$dst, GPR32sp:$src, 0, 0)>;
623 def : InstAlias<"mov $dst, $src",
624 (ADDWri GPR32sp:$dst, GPR32sponly:$src, 0, 0)>;
625 def : InstAlias<"mov $dst, $src",
626 (ADDXri GPR64sponly:$dst, GPR64sp:$src, 0, 0)>;
627 def : InstAlias<"mov $dst, $src",
628 (ADDXri GPR64sp:$dst, GPR64sponly:$src, 0, 0)>;
630 defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
631 defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;
633 // Use SUBS instead of SUB to enable CSE between SUBS and SUB.
634 def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
635 (SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
636 def : Pat<(sub GPR64sp:$Rn, addsub_shifted_imm64:$imm),
637 (SUBSXri GPR64sp:$Rn, addsub_shifted_imm64:$imm)>;
638 def : Pat<(sub GPR32:$Rn, GPR32:$Rm),
639 (SUBSWrr GPR32:$Rn, GPR32:$Rm)>;
640 def : Pat<(sub GPR64:$Rn, GPR64:$Rm),
641 (SUBSXrr GPR64:$Rn, GPR64:$Rm)>;
642 def : Pat<(sub GPR32:$Rn, arith_shifted_reg32:$Rm),
643 (SUBSWrs GPR32:$Rn, arith_shifted_reg32:$Rm)>;
644 def : Pat<(sub GPR64:$Rn, arith_shifted_reg64:$Rm),
645 (SUBSXrs GPR64:$Rn, arith_shifted_reg64:$Rm)>;
646 let AddedComplexity = 1 in {
647 def : Pat<(sub GPR32sp:$R2, arith_extended_reg32<i32>:$R3),
648 (SUBSWrx GPR32sp:$R2, arith_extended_reg32<i32>:$R3)>;
649 def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3),
650 (SUBSXrx GPR64sp:$R2, arith_extended_reg32to64<i64>:$R3)>;
653 // Because of the immediate format for add/sub-imm instructions, the
654 // expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
655 // These patterns capture that transformation.
656 let AddedComplexity = 1 in {
657 def : Pat<(add GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
658 (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
659 def : Pat<(add GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
660 (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
661 def : Pat<(sub GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
662 (ADDWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
663 def : Pat<(sub GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
664 (ADDXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
667 // Because of the immediate format for add/sub-imm instructions, the
668 // expression (add x, -1) must be transformed to (SUB{W,X}ri x, 1).
669 // These patterns capture that transformation.
670 let AddedComplexity = 1 in {
671 def : Pat<(AArch64add_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
672 (SUBSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
673 def : Pat<(AArch64add_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
674 (SUBSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
675 def : Pat<(AArch64sub_flag GPR32:$Rn, neg_addsub_shifted_imm32:$imm),
676 (ADDSWri GPR32:$Rn, neg_addsub_shifted_imm32:$imm)>;
677 def : Pat<(AArch64sub_flag GPR64:$Rn, neg_addsub_shifted_imm64:$imm),
678 (ADDSXri GPR64:$Rn, neg_addsub_shifted_imm64:$imm)>;
681 def : InstAlias<"neg $dst, $src", (SUBWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
682 def : InstAlias<"neg $dst, $src", (SUBXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
683 def : InstAlias<"neg $dst, $src$shift",
684 (SUBWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
685 def : InstAlias<"neg $dst, $src$shift",
686 (SUBXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
688 def : InstAlias<"negs $dst, $src", (SUBSWrs GPR32:$dst, WZR, GPR32:$src, 0), 3>;
689 def : InstAlias<"negs $dst, $src", (SUBSXrs GPR64:$dst, XZR, GPR64:$src, 0), 3>;
690 def : InstAlias<"negs $dst, $src$shift",
691 (SUBSWrs GPR32:$dst, WZR, GPR32:$src, arith_shift32:$shift), 2>;
692 def : InstAlias<"negs $dst, $src$shift",
693 (SUBSXrs GPR64:$dst, XZR, GPR64:$src, arith_shift64:$shift), 2>;
696 // Unsigned/Signed divide
697 defm UDIV : Div<0, "udiv", udiv>;
698 defm SDIV : Div<1, "sdiv", sdiv>;
700 def : Pat<(int_aarch64_udiv GPR32:$Rn, GPR32:$Rm), (UDIVWr $Rn, $Rm)>;
701 def : Pat<(int_aarch64_udiv GPR64:$Rn, GPR64:$Rm), (UDIVXr $Rn, $Rm)>;
702 def : Pat<(int_aarch64_sdiv GPR32:$Rn, GPR32:$Rm), (SDIVWr $Rn, $Rm)>;
703 def : Pat<(int_aarch64_sdiv GPR64:$Rn, GPR64:$Rm), (SDIVXr $Rn, $Rm)>;
706 defm ASRV : Shift<0b10, "asr", sra>;
707 defm LSLV : Shift<0b00, "lsl", shl>;
708 defm LSRV : Shift<0b01, "lsr", srl>;
709 defm RORV : Shift<0b11, "ror", rotr>;
711 def : ShiftAlias<"asrv", ASRVWr, GPR32>;
712 def : ShiftAlias<"asrv", ASRVXr, GPR64>;
713 def : ShiftAlias<"lslv", LSLVWr, GPR32>;
714 def : ShiftAlias<"lslv", LSLVXr, GPR64>;
715 def : ShiftAlias<"lsrv", LSRVWr, GPR32>;
716 def : ShiftAlias<"lsrv", LSRVXr, GPR64>;
717 def : ShiftAlias<"rorv", RORVWr, GPR32>;
718 def : ShiftAlias<"rorv", RORVXr, GPR64>;
721 let AddedComplexity = 7 in {
722 defm MADD : MulAccum<0, "madd", add>;
723 defm MSUB : MulAccum<1, "msub", sub>;
725 def : Pat<(i32 (mul GPR32:$Rn, GPR32:$Rm)),
726 (MADDWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
727 def : Pat<(i64 (mul GPR64:$Rn, GPR64:$Rm)),
728 (MADDXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
730 def : Pat<(i32 (ineg (mul GPR32:$Rn, GPR32:$Rm))),
731 (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
732 def : Pat<(i64 (ineg (mul GPR64:$Rn, GPR64:$Rm))),
733 (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
734 def : Pat<(i32 (mul (ineg GPR32:$Rn), GPR32:$Rm)),
735 (MSUBWrrr GPR32:$Rn, GPR32:$Rm, WZR)>;
736 def : Pat<(i64 (mul (ineg GPR64:$Rn), GPR64:$Rm)),
737 (MSUBXrrr GPR64:$Rn, GPR64:$Rm, XZR)>;
738 } // AddedComplexity = 7
740 let AddedComplexity = 5 in {
741 def SMADDLrrr : WideMulAccum<0, 0b001, "smaddl", add, sext>;
742 def SMSUBLrrr : WideMulAccum<1, 0b001, "smsubl", sub, sext>;
743 def UMADDLrrr : WideMulAccum<0, 0b101, "umaddl", add, zext>;
744 def UMSUBLrrr : WideMulAccum<1, 0b101, "umsubl", sub, zext>;
746 def : Pat<(i64 (mul (sext GPR32:$Rn), (sext GPR32:$Rm))),
747 (SMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
748 def : Pat<(i64 (mul (zext GPR32:$Rn), (zext GPR32:$Rm))),
749 (UMADDLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
751 def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (sext GPR32:$Rm)))),
752 (SMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
753 def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (zext GPR32:$Rm)))),
754 (UMSUBLrrr GPR32:$Rn, GPR32:$Rm, XZR)>;
756 def : Pat<(i64 (mul (sext GPR32:$Rn), (s64imm_32bit:$C))),
757 (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
758 def : Pat<(i64 (mul (zext GPR32:$Rn), (i64imm_32bit:$C))),
759 (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
760 def : Pat<(i64 (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C))),
761 (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
762 (MOVi32imm (trunc_imm imm:$C)), XZR)>;
764 def : Pat<(i64 (ineg (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
765 (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
766 def : Pat<(i64 (ineg (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
767 (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), XZR)>;
768 def : Pat<(i64 (ineg (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)))),
769 (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
770 (MOVi32imm (trunc_imm imm:$C)), XZR)>;
772 def : Pat<(i64 (add (mul (sext GPR32:$Rn), (s64imm_32bit:$C)), GPR64:$Ra)),
773 (SMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
774 def : Pat<(i64 (add (mul (zext GPR32:$Rn), (i64imm_32bit:$C)), GPR64:$Ra)),
775 (UMADDLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
776 def : Pat<(i64 (add (mul (sext_inreg GPR64:$Rn, i32), (s64imm_32bit:$C)),
778 (SMADDLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
779 (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
781 def : Pat<(i64 (sub GPR64:$Ra, (mul (sext GPR32:$Rn), (s64imm_32bit:$C)))),
782 (SMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
783 def : Pat<(i64 (sub GPR64:$Ra, (mul (zext GPR32:$Rn), (i64imm_32bit:$C)))),
784 (UMSUBLrrr GPR32:$Rn, (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
785 def : Pat<(i64 (sub GPR64:$Ra, (mul (sext_inreg GPR64:$Rn, i32),
786 (s64imm_32bit:$C)))),
787 (SMSUBLrrr (i32 (EXTRACT_SUBREG GPR64:$Rn, sub_32)),
788 (MOVi32imm (trunc_imm imm:$C)), GPR64:$Ra)>;
789 } // AddedComplexity = 5
791 def : MulAccumWAlias<"mul", MADDWrrr>;
792 def : MulAccumXAlias<"mul", MADDXrrr>;
793 def : MulAccumWAlias<"mneg", MSUBWrrr>;
794 def : MulAccumXAlias<"mneg", MSUBXrrr>;
795 def : WideMulAccumAlias<"smull", SMADDLrrr>;
796 def : WideMulAccumAlias<"smnegl", SMSUBLrrr>;
797 def : WideMulAccumAlias<"umull", UMADDLrrr>;
798 def : WideMulAccumAlias<"umnegl", UMSUBLrrr>;
801 def SMULHrr : MulHi<0b010, "smulh", mulhs>;
802 def UMULHrr : MulHi<0b110, "umulh", mulhu>;
805 def CRC32Brr : BaseCRC32<0, 0b00, 0, GPR32, int_aarch64_crc32b, "crc32b">;
806 def CRC32Hrr : BaseCRC32<0, 0b01, 0, GPR32, int_aarch64_crc32h, "crc32h">;
807 def CRC32Wrr : BaseCRC32<0, 0b10, 0, GPR32, int_aarch64_crc32w, "crc32w">;
808 def CRC32Xrr : BaseCRC32<1, 0b11, 0, GPR64, int_aarch64_crc32x, "crc32x">;
810 def CRC32CBrr : BaseCRC32<0, 0b00, 1, GPR32, int_aarch64_crc32cb, "crc32cb">;
811 def CRC32CHrr : BaseCRC32<0, 0b01, 1, GPR32, int_aarch64_crc32ch, "crc32ch">;
812 def CRC32CWrr : BaseCRC32<0, 0b10, 1, GPR32, int_aarch64_crc32cw, "crc32cw">;
813 def CRC32CXrr : BaseCRC32<1, 0b11, 1, GPR64, int_aarch64_crc32cx, "crc32cx">;
816 defm CAS : CompareAndSwap<0, 0, "">;
817 defm CASA : CompareAndSwap<1, 0, "a">;
818 defm CASL : CompareAndSwap<0, 1, "l">;
819 defm CASAL : CompareAndSwap<1, 1, "al">;
822 defm CASP : CompareAndSwapPair<0, 0, "">;
823 defm CASPA : CompareAndSwapPair<1, 0, "a">;
824 defm CASPL : CompareAndSwapPair<0, 1, "l">;
825 defm CASPAL : CompareAndSwapPair<1, 1, "al">;
828 defm SWP : Swap<0, 0, "">;
829 defm SWPA : Swap<1, 0, "a">;
830 defm SWPL : Swap<0, 1, "l">;
831 defm SWPAL : Swap<1, 1, "al">;
833 // v8.1 atomic LD<OP>(register). Performs load and then ST<OP>(register)
834 defm LDADD : LDOPregister<0b000, "add", 0, 0, "">;
835 defm LDADDA : LDOPregister<0b000, "add", 1, 0, "a">;
836 defm LDADDL : LDOPregister<0b000, "add", 0, 1, "l">;
837 defm LDADDAL : LDOPregister<0b000, "add", 1, 1, "al">;
839 defm LDCLR : LDOPregister<0b001, "clr", 0, 0, "">;
840 defm LDCLRA : LDOPregister<0b001, "clr", 1, 0, "a">;
841 defm LDCLRL : LDOPregister<0b001, "clr", 0, 1, "l">;
842 defm LDCLRAL : LDOPregister<0b001, "clr", 1, 1, "al">;
844 defm LDEOR : LDOPregister<0b010, "eor", 0, 0, "">;
845 defm LDEORA : LDOPregister<0b010, "eor", 1, 0, "a">;
846 defm LDEORL : LDOPregister<0b010, "eor", 0, 1, "l">;
847 defm LDEORAL : LDOPregister<0b010, "eor", 1, 1, "al">;
849 defm LDSET : LDOPregister<0b011, "set", 0, 0, "">;
850 defm LDSETA : LDOPregister<0b011, "set", 1, 0, "a">;
851 defm LDSETL : LDOPregister<0b011, "set", 0, 1, "l">;
852 defm LDSETAL : LDOPregister<0b011, "set", 1, 1, "al">;
854 defm LDSMAX : LDOPregister<0b100, "smax", 0, 0, "">;
855 defm LDSMAXA : LDOPregister<0b100, "smax", 1, 0, "a">;
856 defm LDSMAXL : LDOPregister<0b100, "smax", 0, 1, "l">;
857 defm LDSMAXAL : LDOPregister<0b100, "smax", 1, 1, "al">;
859 defm LDSMIN : LDOPregister<0b101, "smin", 0, 0, "">;
860 defm LDSMINA : LDOPregister<0b101, "smin", 1, 0, "a">;
861 defm LDSMINL : LDOPregister<0b101, "smin", 0, 1, "l">;
862 defm LDSMINAL : LDOPregister<0b101, "smin", 1, 1, "al">;
864 defm LDUMAX : LDOPregister<0b110, "umax", 0, 0, "">;
865 defm LDUMAXA : LDOPregister<0b110, "umax", 1, 0, "a">;
866 defm LDUMAXL : LDOPregister<0b110, "umax", 0, 1, "l">;
867 defm LDUMAXAL : LDOPregister<0b110, "umax", 1, 1, "al">;
869 defm LDUMIN : LDOPregister<0b111, "umin", 0, 0, "">;
870 defm LDUMINA : LDOPregister<0b111, "umin", 1, 0, "a">;
871 defm LDUMINL : LDOPregister<0b111, "umin", 0, 1, "l">;
872 defm LDUMINAL : LDOPregister<0b111, "umin", 1, 1, "al">;
874 // v8.1 atomic ST<OP>(register) as aliases to "LD<OP>(register) when Rt=xZR"
875 defm : STOPregister<"stadd","LDADD">; // STADDx
876 defm : STOPregister<"stclr","LDCLR">; // STCLRx
877 defm : STOPregister<"steor","LDEOR">; // STEORx
878 defm : STOPregister<"stset","LDSET">; // STSETx
879 defm : STOPregister<"stsmax","LDSMAX">;// STSMAXx
880 defm : STOPregister<"stsmin","LDSMIN">;// STSMINx
881 defm : STOPregister<"stumax","LDUMAX">;// STUMAXx
882 defm : STOPregister<"stumin","LDUMIN">;// STUMINx
884 //===----------------------------------------------------------------------===//
885 // Logical instructions.
886 //===----------------------------------------------------------------------===//
889 defm ANDS : LogicalImmS<0b11, "ands", AArch64and_flag, "bics">;
890 defm AND : LogicalImm<0b00, "and", and, "bic">;
891 defm EOR : LogicalImm<0b10, "eor", xor, "eon">;
892 defm ORR : LogicalImm<0b01, "orr", or, "orn">;
894 // FIXME: these aliases *are* canonical sometimes (when movz can't be
895 // used). Actually, it seems to be working right now, but putting logical_immXX
896 // here is a bit dodgy on the AsmParser side too.
897 def : InstAlias<"mov $dst, $imm", (ORRWri GPR32sp:$dst, WZR,
898 logical_imm32:$imm), 0>;
899 def : InstAlias<"mov $dst, $imm", (ORRXri GPR64sp:$dst, XZR,
900 logical_imm64:$imm), 0>;
904 defm ANDS : LogicalRegS<0b11, 0, "ands", AArch64and_flag>;
905 defm BICS : LogicalRegS<0b11, 1, "bics",
906 BinOpFrag<(AArch64and_flag node:$LHS, (not node:$RHS))>>;
907 defm AND : LogicalReg<0b00, 0, "and", and>;
908 defm BIC : LogicalReg<0b00, 1, "bic",
909 BinOpFrag<(and node:$LHS, (not node:$RHS))>>;
910 defm EON : LogicalReg<0b10, 1, "eon",
911 BinOpFrag<(not (xor node:$LHS, node:$RHS))>>;
912 defm EOR : LogicalReg<0b10, 0, "eor", xor>;
913 defm ORN : LogicalReg<0b01, 1, "orn",
914 BinOpFrag<(or node:$LHS, (not node:$RHS))>>;
915 defm ORR : LogicalReg<0b01, 0, "orr", or>;
917 def : InstAlias<"mov $dst, $src", (ORRWrs GPR32:$dst, WZR, GPR32:$src, 0), 2>;
918 def : InstAlias<"mov $dst, $src", (ORRXrs GPR64:$dst, XZR, GPR64:$src, 0), 2>;
920 def : InstAlias<"mvn $Wd, $Wm", (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, 0), 3>;
921 def : InstAlias<"mvn $Xd, $Xm", (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, 0), 3>;
923 def : InstAlias<"mvn $Wd, $Wm$sh",
924 (ORNWrs GPR32:$Wd, WZR, GPR32:$Wm, logical_shift32:$sh), 2>;
925 def : InstAlias<"mvn $Xd, $Xm$sh",
926 (ORNXrs GPR64:$Xd, XZR, GPR64:$Xm, logical_shift64:$sh), 2>;
928 def : InstAlias<"tst $src1, $src2",
929 (ANDSWri WZR, GPR32:$src1, logical_imm32:$src2), 2>;
930 def : InstAlias<"tst $src1, $src2",
931 (ANDSXri XZR, GPR64:$src1, logical_imm64:$src2), 2>;
933 def : InstAlias<"tst $src1, $src2",
934 (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, 0), 3>;
935 def : InstAlias<"tst $src1, $src2",
936 (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, 0), 3>;
938 def : InstAlias<"tst $src1, $src2$sh",
939 (ANDSWrs WZR, GPR32:$src1, GPR32:$src2, logical_shift32:$sh), 2>;
940 def : InstAlias<"tst $src1, $src2$sh",
941 (ANDSXrs XZR, GPR64:$src1, GPR64:$src2, logical_shift64:$sh), 2>;
944 def : Pat<(not GPR32:$Wm), (ORNWrr WZR, GPR32:$Wm)>;
945 def : Pat<(not GPR64:$Xm), (ORNXrr XZR, GPR64:$Xm)>;
948 //===----------------------------------------------------------------------===//
949 // One operand data processing instructions.
950 //===----------------------------------------------------------------------===//
952 defm CLS : OneOperandData<0b101, "cls">;
953 defm CLZ : OneOperandData<0b100, "clz", ctlz>;
954 defm RBIT : OneOperandData<0b000, "rbit", bitreverse>;
956 def REV16Wr : OneWRegData<0b001, "rev16",
957 UnOpFrag<(rotr (bswap node:$LHS), (i64 16))>>;
958 def REV16Xr : OneXRegData<0b001, "rev16", null_frag>;
960 def : Pat<(cttz GPR32:$Rn),
961 (CLZWr (RBITWr GPR32:$Rn))>;
962 def : Pat<(cttz GPR64:$Rn),
963 (CLZXr (RBITXr GPR64:$Rn))>;
964 def : Pat<(ctlz (or (shl (xor (sra GPR32:$Rn, (i64 31)), GPR32:$Rn), (i64 1)),
967 def : Pat<(ctlz (or (shl (xor (sra GPR64:$Rn, (i64 63)), GPR64:$Rn), (i64 1)),
971 // Unlike the other one operand instructions, the instructions with the "rev"
972 // mnemonic do *not* just different in the size bit, but actually use different
973 // opcode bits for the different sizes.
974 def REVWr : OneWRegData<0b010, "rev", bswap>;
975 def REVXr : OneXRegData<0b011, "rev", bswap>;
976 def REV32Xr : OneXRegData<0b010, "rev32",
977 UnOpFrag<(rotr (bswap node:$LHS), (i64 32))>>;
979 def : InstAlias<"rev64 $Rd, $Rn", (REVXr GPR64:$Rd, GPR64:$Rn), 0>;
981 // The bswap commutes with the rotr so we want a pattern for both possible
983 def : Pat<(bswap (rotr GPR32:$Rn, (i64 16))), (REV16Wr GPR32:$Rn)>;
984 def : Pat<(bswap (rotr GPR64:$Rn, (i64 32))), (REV32Xr GPR64:$Rn)>;
986 //===----------------------------------------------------------------------===//
987 // Bitfield immediate extraction instruction.
988 //===----------------------------------------------------------------------===//
989 let hasSideEffects = 0 in
990 defm EXTR : ExtractImm<"extr">;
991 def : InstAlias<"ror $dst, $src, $shift",
992 (EXTRWrri GPR32:$dst, GPR32:$src, GPR32:$src, imm0_31:$shift)>;
993 def : InstAlias<"ror $dst, $src, $shift",
994 (EXTRXrri GPR64:$dst, GPR64:$src, GPR64:$src, imm0_63:$shift)>;
996 def : Pat<(rotr GPR32:$Rn, (i64 imm0_31:$imm)),
997 (EXTRWrri GPR32:$Rn, GPR32:$Rn, imm0_31:$imm)>;
998 def : Pat<(rotr GPR64:$Rn, (i64 imm0_63:$imm)),
999 (EXTRXrri GPR64:$Rn, GPR64:$Rn, imm0_63:$imm)>;
1001 //===----------------------------------------------------------------------===//
1002 // Other bitfield immediate instructions.
1003 //===----------------------------------------------------------------------===//
1004 let hasSideEffects = 0 in {
1005 defm BFM : BitfieldImmWith2RegArgs<0b01, "bfm">;
1006 defm SBFM : BitfieldImm<0b00, "sbfm">;
1007 defm UBFM : BitfieldImm<0b10, "ubfm">;
1010 def i32shift_a : Operand<i64>, SDNodeXForm<imm, [{
1011 uint64_t enc = (32 - N->getZExtValue()) & 0x1f;
1012 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1015 def i32shift_b : Operand<i64>, SDNodeXForm<imm, [{
1016 uint64_t enc = 31 - N->getZExtValue();
1017 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1020 // min(7, 31 - shift_amt)
1021 def i32shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
1022 uint64_t enc = 31 - N->getZExtValue();
1023 enc = enc > 7 ? 7 : enc;
1024 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1027 // min(15, 31 - shift_amt)
1028 def i32shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
1029 uint64_t enc = 31 - N->getZExtValue();
1030 enc = enc > 15 ? 15 : enc;
1031 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1034 def i64shift_a : Operand<i64>, SDNodeXForm<imm, [{
1035 uint64_t enc = (64 - N->getZExtValue()) & 0x3f;
1036 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1039 def i64shift_b : Operand<i64>, SDNodeXForm<imm, [{
1040 uint64_t enc = 63 - N->getZExtValue();
1041 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1044 // min(7, 63 - shift_amt)
1045 def i64shift_sext_i8 : Operand<i64>, SDNodeXForm<imm, [{
1046 uint64_t enc = 63 - N->getZExtValue();
1047 enc = enc > 7 ? 7 : enc;
1048 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1051 // min(15, 63 - shift_amt)
1052 def i64shift_sext_i16 : Operand<i64>, SDNodeXForm<imm, [{
1053 uint64_t enc = 63 - N->getZExtValue();
1054 enc = enc > 15 ? 15 : enc;
1055 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1058 // min(31, 63 - shift_amt)
1059 def i64shift_sext_i32 : Operand<i64>, SDNodeXForm<imm, [{
1060 uint64_t enc = 63 - N->getZExtValue();
1061 enc = enc > 31 ? 31 : enc;
1062 return CurDAG->getTargetConstant(enc, SDLoc(N), MVT::i64);
1065 def : Pat<(shl GPR32:$Rn, (i64 imm0_31:$imm)),
1066 (UBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
1067 (i64 (i32shift_b imm0_31:$imm)))>;
1068 def : Pat<(shl GPR64:$Rn, (i64 imm0_63:$imm)),
1069 (UBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
1070 (i64 (i64shift_b imm0_63:$imm)))>;
1072 let AddedComplexity = 10 in {
1073 def : Pat<(sra GPR32:$Rn, (i64 imm0_31:$imm)),
1074 (SBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
1075 def : Pat<(sra GPR64:$Rn, (i64 imm0_63:$imm)),
1076 (SBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
1079 def : InstAlias<"asr $dst, $src, $shift",
1080 (SBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
1081 def : InstAlias<"asr $dst, $src, $shift",
1082 (SBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
1083 def : InstAlias<"sxtb $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
1084 def : InstAlias<"sxtb $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
1085 def : InstAlias<"sxth $dst, $src", (SBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
1086 def : InstAlias<"sxth $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
1087 def : InstAlias<"sxtw $dst, $src", (SBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
1089 def : Pat<(srl GPR32:$Rn, (i64 imm0_31:$imm)),
1090 (UBFMWri GPR32:$Rn, imm0_31:$imm, 31)>;
1091 def : Pat<(srl GPR64:$Rn, (i64 imm0_63:$imm)),
1092 (UBFMXri GPR64:$Rn, imm0_63:$imm, 63)>;
1094 def : InstAlias<"lsr $dst, $src, $shift",
1095 (UBFMWri GPR32:$dst, GPR32:$src, imm0_31:$shift, 31)>;
1096 def : InstAlias<"lsr $dst, $src, $shift",
1097 (UBFMXri GPR64:$dst, GPR64:$src, imm0_63:$shift, 63)>;
1098 def : InstAlias<"uxtb $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 7)>;
1099 def : InstAlias<"uxtb $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 7)>;
1100 def : InstAlias<"uxth $dst, $src", (UBFMWri GPR32:$dst, GPR32:$src, 0, 15)>;
1101 def : InstAlias<"uxth $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 15)>;
1102 def : InstAlias<"uxtw $dst, $src", (UBFMXri GPR64:$dst, GPR64:$src, 0, 31)>;
1104 //===----------------------------------------------------------------------===//
1105 // Conditional comparison instructions.
1106 //===----------------------------------------------------------------------===//
1107 defm CCMN : CondComparison<0, "ccmn", AArch64ccmn>;
1108 defm CCMP : CondComparison<1, "ccmp", AArch64ccmp>;
1110 //===----------------------------------------------------------------------===//
1111 // Conditional select instructions.
1112 //===----------------------------------------------------------------------===//
1113 defm CSEL : CondSelect<0, 0b00, "csel">;
1115 def inc : PatFrag<(ops node:$in), (add node:$in, 1)>;
1116 defm CSINC : CondSelectOp<0, 0b01, "csinc", inc>;
1117 defm CSINV : CondSelectOp<1, 0b00, "csinv", not>;
1118 defm CSNEG : CondSelectOp<1, 0b01, "csneg", ineg>;
1120 def : Pat<(AArch64csinv GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1121 (CSINVWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1122 def : Pat<(AArch64csinv GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1123 (CSINVXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1124 def : Pat<(AArch64csneg GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1125 (CSNEGWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1126 def : Pat<(AArch64csneg GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1127 (CSNEGXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1128 def : Pat<(AArch64csinc GPR32:$tval, GPR32:$fval, (i32 imm:$cc), NZCV),
1129 (CSINCWr GPR32:$tval, GPR32:$fval, (i32 imm:$cc))>;
1130 def : Pat<(AArch64csinc GPR64:$tval, GPR64:$fval, (i32 imm:$cc), NZCV),
1131 (CSINCXr GPR64:$tval, GPR64:$fval, (i32 imm:$cc))>;
1133 def : Pat<(AArch64csel (i32 0), (i32 1), (i32 imm:$cc), NZCV),
1134 (CSINCWr WZR, WZR, (i32 imm:$cc))>;
1135 def : Pat<(AArch64csel (i64 0), (i64 1), (i32 imm:$cc), NZCV),
1136 (CSINCXr XZR, XZR, (i32 imm:$cc))>;
1137 def : Pat<(AArch64csel GPR32:$tval, (i32 1), (i32 imm:$cc), NZCV),
1138 (CSINCWr GPR32:$tval, WZR, (i32 imm:$cc))>;
1139 def : Pat<(AArch64csel GPR64:$tval, (i64 1), (i32 imm:$cc), NZCV),
1140 (CSINCXr GPR64:$tval, XZR, (i32 imm:$cc))>;
1141 def : Pat<(AArch64csel (i32 1), GPR32:$fval, (i32 imm:$cc), NZCV),
1142 (CSINCWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1143 def : Pat<(AArch64csel (i64 1), GPR64:$fval, (i32 imm:$cc), NZCV),
1144 (CSINCXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1145 def : Pat<(AArch64csel (i32 0), (i32 -1), (i32 imm:$cc), NZCV),
1146 (CSINVWr WZR, WZR, (i32 imm:$cc))>;
1147 def : Pat<(AArch64csel (i64 0), (i64 -1), (i32 imm:$cc), NZCV),
1148 (CSINVXr XZR, XZR, (i32 imm:$cc))>;
1149 def : Pat<(AArch64csel GPR32:$tval, (i32 -1), (i32 imm:$cc), NZCV),
1150 (CSINVWr GPR32:$tval, WZR, (i32 imm:$cc))>;
1151 def : Pat<(AArch64csel GPR64:$tval, (i64 -1), (i32 imm:$cc), NZCV),
1152 (CSINVXr GPR64:$tval, XZR, (i32 imm:$cc))>;
1153 def : Pat<(AArch64csel (i32 -1), GPR32:$fval, (i32 imm:$cc), NZCV),
1154 (CSINVWr GPR32:$fval, WZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1155 def : Pat<(AArch64csel (i64 -1), GPR64:$fval, (i32 imm:$cc), NZCV),
1156 (CSINVXr GPR64:$fval, XZR, (i32 (inv_cond_XFORM imm:$cc)))>;
1158 // The inverse of the condition code from the alias instruction is what is used
1159 // in the aliased instruction. The parser all ready inverts the condition code
1160 // for these aliases.
1161 def : InstAlias<"cset $dst, $cc",
1162 (CSINCWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
1163 def : InstAlias<"cset $dst, $cc",
1164 (CSINCXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
1166 def : InstAlias<"csetm $dst, $cc",
1167 (CSINVWr GPR32:$dst, WZR, WZR, inv_ccode:$cc)>;
1168 def : InstAlias<"csetm $dst, $cc",
1169 (CSINVXr GPR64:$dst, XZR, XZR, inv_ccode:$cc)>;
1171 def : InstAlias<"cinc $dst, $src, $cc",
1172 (CSINCWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1173 def : InstAlias<"cinc $dst, $src, $cc",
1174 (CSINCXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1176 def : InstAlias<"cinv $dst, $src, $cc",
1177 (CSINVWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1178 def : InstAlias<"cinv $dst, $src, $cc",
1179 (CSINVXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1181 def : InstAlias<"cneg $dst, $src, $cc",
1182 (CSNEGWr GPR32:$dst, GPR32:$src, GPR32:$src, inv_ccode:$cc)>;
1183 def : InstAlias<"cneg $dst, $src, $cc",
1184 (CSNEGXr GPR64:$dst, GPR64:$src, GPR64:$src, inv_ccode:$cc)>;
1186 //===----------------------------------------------------------------------===//
1187 // PC-relative instructions.
1188 //===----------------------------------------------------------------------===//
1189 let isReMaterializable = 1 in {
1190 let hasSideEffects = 0, mayStore = 0, mayLoad = 0 in {
1191 def ADR : ADRI<0, "adr", adrlabel, []>;
1192 } // hasSideEffects = 0
1194 def ADRP : ADRI<1, "adrp", adrplabel,
1195 [(set GPR64:$Xd, (AArch64adrp tglobaladdr:$label))]>;
1196 } // isReMaterializable = 1
1198 // page address of a constant pool entry, block address
1199 def : Pat<(AArch64adrp tconstpool:$cp), (ADRP tconstpool:$cp)>;
1200 def : Pat<(AArch64adrp tblockaddress:$cp), (ADRP tblockaddress:$cp)>;
1202 //===----------------------------------------------------------------------===//
1203 // Unconditional branch (register) instructions.
1204 //===----------------------------------------------------------------------===//
1206 let isReturn = 1, isTerminator = 1, isBarrier = 1 in {
1207 def RET : BranchReg<0b0010, "ret", []>;
1208 def DRPS : SpecialReturn<0b0101, "drps">;
1209 def ERET : SpecialReturn<0b0100, "eret">;
1210 } // isReturn = 1, isTerminator = 1, isBarrier = 1
1212 // Default to the LR register.
1213 def : InstAlias<"ret", (RET LR)>;
1215 let isCall = 1, Defs = [LR], Uses = [SP] in {
1216 def BLR : BranchReg<0b0001, "blr", [(AArch64call GPR64:$Rn)]>;
1219 let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in {
1220 def BR : BranchReg<0b0000, "br", [(brind GPR64:$Rn)]>;
1221 } // isBranch, isTerminator, isBarrier, isIndirectBranch
1223 // Create a separate pseudo-instruction for codegen to use so that we don't
1224 // flag lr as used in every function. It'll be restored before the RET by the
1225 // epilogue if it's legitimately used.
1226 def RET_ReallyLR : Pseudo<(outs), (ins), [(AArch64retflag)]>,
1227 Sched<[WriteBrReg]> {
1228 let isTerminator = 1;
1233 // This is a directive-like pseudo-instruction. The purpose is to insert an
1234 // R_AARCH64_TLSDESC_CALL relocation at the offset of the following instruction
1235 // (which in the usual case is a BLR).
1236 let hasSideEffects = 1 in
1237 def TLSDESCCALL : Pseudo<(outs), (ins i64imm:$sym), []>, Sched<[]> {
1238 let AsmString = ".tlsdesccall $sym";
1241 // FIXME: maybe the scratch register used shouldn't be fixed to X1?
1242 // FIXME: can "hasSideEffects be dropped?
1243 let isCall = 1, Defs = [LR, X0, X1], hasSideEffects = 1,
1244 isCodeGenOnly = 1 in
1246 : Pseudo<(outs), (ins i64imm:$sym),
1247 [(AArch64tlsdesc_callseq tglobaltlsaddr:$sym)]>,
1248 Sched<[WriteI, WriteLD, WriteI, WriteBrReg]>;
1249 def : Pat<(AArch64tlsdesc_callseq texternalsym:$sym),
1250 (TLSDESC_CALLSEQ texternalsym:$sym)>;
1252 //===----------------------------------------------------------------------===//
1253 // Conditional branch (immediate) instruction.
1254 //===----------------------------------------------------------------------===//
1255 def Bcc : BranchCond;
1257 //===----------------------------------------------------------------------===//
1258 // Compare-and-branch instructions.
1259 //===----------------------------------------------------------------------===//
1260 defm CBZ : CmpBranch<0, "cbz", AArch64cbz>;
1261 defm CBNZ : CmpBranch<1, "cbnz", AArch64cbnz>;
1263 //===----------------------------------------------------------------------===//
1264 // Test-bit-and-branch instructions.
1265 //===----------------------------------------------------------------------===//
1266 defm TBZ : TestBranch<0, "tbz", AArch64tbz>;
1267 defm TBNZ : TestBranch<1, "tbnz", AArch64tbnz>;
1269 //===----------------------------------------------------------------------===//
1270 // Unconditional branch (immediate) instructions.
1271 //===----------------------------------------------------------------------===//
1272 let isBranch = 1, isTerminator = 1, isBarrier = 1 in {
1273 def B : BranchImm<0, "b", [(br bb:$addr)]>;
1274 } // isBranch, isTerminator, isBarrier
1276 let isCall = 1, Defs = [LR], Uses = [SP] in {
1277 def BL : CallImm<1, "bl", [(AArch64call tglobaladdr:$addr)]>;
1279 def : Pat<(AArch64call texternalsym:$func), (BL texternalsym:$func)>;
1281 //===----------------------------------------------------------------------===//
1282 // Exception generation instructions.
1283 //===----------------------------------------------------------------------===//
1284 def BRK : ExceptionGeneration<0b001, 0b00, "brk">;
1285 def DCPS1 : ExceptionGeneration<0b101, 0b01, "dcps1">;
1286 def DCPS2 : ExceptionGeneration<0b101, 0b10, "dcps2">;
1287 def DCPS3 : ExceptionGeneration<0b101, 0b11, "dcps3">;
1288 def HLT : ExceptionGeneration<0b010, 0b00, "hlt">;
1289 def HVC : ExceptionGeneration<0b000, 0b10, "hvc">;
1290 def SMC : ExceptionGeneration<0b000, 0b11, "smc">;
1291 def SVC : ExceptionGeneration<0b000, 0b01, "svc">;
1293 // DCPSn defaults to an immediate operand of zero if unspecified.
1294 def : InstAlias<"dcps1", (DCPS1 0)>;
1295 def : InstAlias<"dcps2", (DCPS2 0)>;
1296 def : InstAlias<"dcps3", (DCPS3 0)>;
1298 //===----------------------------------------------------------------------===//
1299 // Load instructions.
1300 //===----------------------------------------------------------------------===//
1302 // Pair (indexed, offset)
1303 defm LDPW : LoadPairOffset<0b00, 0, GPR32, simm7s4, "ldp">;
1304 defm LDPX : LoadPairOffset<0b10, 0, GPR64, simm7s8, "ldp">;
1305 defm LDPS : LoadPairOffset<0b00, 1, FPR32, simm7s4, "ldp">;
1306 defm LDPD : LoadPairOffset<0b01, 1, FPR64, simm7s8, "ldp">;
1307 defm LDPQ : LoadPairOffset<0b10, 1, FPR128, simm7s16, "ldp">;
1309 defm LDPSW : LoadPairOffset<0b01, 0, GPR64, simm7s4, "ldpsw">;
1311 // Pair (pre-indexed)
1312 def LDPWpre : LoadPairPreIdx<0b00, 0, GPR32, simm7s4, "ldp">;
1313 def LDPXpre : LoadPairPreIdx<0b10, 0, GPR64, simm7s8, "ldp">;
1314 def LDPSpre : LoadPairPreIdx<0b00, 1, FPR32, simm7s4, "ldp">;
1315 def LDPDpre : LoadPairPreIdx<0b01, 1, FPR64, simm7s8, "ldp">;
1316 def LDPQpre : LoadPairPreIdx<0b10, 1, FPR128, simm7s16, "ldp">;
1318 def LDPSWpre : LoadPairPreIdx<0b01, 0, GPR64, simm7s4, "ldpsw">;
1320 // Pair (post-indexed)
1321 def LDPWpost : LoadPairPostIdx<0b00, 0, GPR32, simm7s4, "ldp">;
1322 def LDPXpost : LoadPairPostIdx<0b10, 0, GPR64, simm7s8, "ldp">;
1323 def LDPSpost : LoadPairPostIdx<0b00, 1, FPR32, simm7s4, "ldp">;
1324 def LDPDpost : LoadPairPostIdx<0b01, 1, FPR64, simm7s8, "ldp">;
1325 def LDPQpost : LoadPairPostIdx<0b10, 1, FPR128, simm7s16, "ldp">;
1327 def LDPSWpost : LoadPairPostIdx<0b01, 0, GPR64, simm7s4, "ldpsw">;
1330 // Pair (no allocate)
1331 defm LDNPW : LoadPairNoAlloc<0b00, 0, GPR32, simm7s4, "ldnp">;
1332 defm LDNPX : LoadPairNoAlloc<0b10, 0, GPR64, simm7s8, "ldnp">;
1333 defm LDNPS : LoadPairNoAlloc<0b00, 1, FPR32, simm7s4, "ldnp">;
1334 defm LDNPD : LoadPairNoAlloc<0b01, 1, FPR64, simm7s8, "ldnp">;
1335 defm LDNPQ : LoadPairNoAlloc<0b10, 1, FPR128, simm7s16, "ldnp">;
1338 // (register offset)
1342 defm LDRBB : Load8RO<0b00, 0, 0b01, GPR32, "ldrb", i32, zextloadi8>;
1343 defm LDRHH : Load16RO<0b01, 0, 0b01, GPR32, "ldrh", i32, zextloadi16>;
1344 defm LDRW : Load32RO<0b10, 0, 0b01, GPR32, "ldr", i32, load>;
1345 defm LDRX : Load64RO<0b11, 0, 0b01, GPR64, "ldr", i64, load>;
1348 defm LDRB : Load8RO<0b00, 1, 0b01, FPR8, "ldr", untyped, load>;
1349 defm LDRH : Load16RO<0b01, 1, 0b01, FPR16, "ldr", f16, load>;
1350 defm LDRS : Load32RO<0b10, 1, 0b01, FPR32, "ldr", f32, load>;
1351 defm LDRD : Load64RO<0b11, 1, 0b01, FPR64, "ldr", f64, load>;
1352 defm LDRQ : Load128RO<0b00, 1, 0b11, FPR128, "ldr", f128, load>;
1354 // Load sign-extended half-word
1355 defm LDRSHW : Load16RO<0b01, 0, 0b11, GPR32, "ldrsh", i32, sextloadi16>;
1356 defm LDRSHX : Load16RO<0b01, 0, 0b10, GPR64, "ldrsh", i64, sextloadi16>;
1358 // Load sign-extended byte
1359 defm LDRSBW : Load8RO<0b00, 0, 0b11, GPR32, "ldrsb", i32, sextloadi8>;
1360 defm LDRSBX : Load8RO<0b00, 0, 0b10, GPR64, "ldrsb", i64, sextloadi8>;
1362 // Load sign-extended word
1363 defm LDRSW : Load32RO<0b10, 0, 0b10, GPR64, "ldrsw", i64, sextloadi32>;
1366 defm PRFM : PrefetchRO<0b11, 0, 0b10, "prfm">;
1368 // For regular load, we do not have any alignment requirement.
1369 // Thus, it is safe to directly map the vector loads with interesting
1370 // addressing modes.
1371 // FIXME: We could do the same for bitconvert to floating point vectors.
1372 multiclass ScalToVecROLoadPat<ROAddrMode ro, SDPatternOperator loadop,
1373 ValueType ScalTy, ValueType VecTy,
1374 Instruction LOADW, Instruction LOADX,
1376 def : Pat<(VecTy (scalar_to_vector (ScalTy
1377 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset))))),
1378 (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
1379 (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$offset),
1382 def : Pat<(VecTy (scalar_to_vector (ScalTy
1383 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset))))),
1384 (INSERT_SUBREG (VecTy (IMPLICIT_DEF)),
1385 (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$offset),
1389 let AddedComplexity = 10 in {
1390 defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v8i8, LDRBroW, LDRBroX, bsub>;
1391 defm : ScalToVecROLoadPat<ro8, extloadi8, i32, v16i8, LDRBroW, LDRBroX, bsub>;
1393 defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v4i16, LDRHroW, LDRHroX, hsub>;
1394 defm : ScalToVecROLoadPat<ro16, extloadi16, i32, v8i16, LDRHroW, LDRHroX, hsub>;
1396 defm : ScalToVecROLoadPat<ro16, load, i32, v4f16, LDRHroW, LDRHroX, hsub>;
1397 defm : ScalToVecROLoadPat<ro16, load, i32, v8f16, LDRHroW, LDRHroX, hsub>;
1399 defm : ScalToVecROLoadPat<ro32, load, i32, v2i32, LDRSroW, LDRSroX, ssub>;
1400 defm : ScalToVecROLoadPat<ro32, load, i32, v4i32, LDRSroW, LDRSroX, ssub>;
1402 defm : ScalToVecROLoadPat<ro32, load, f32, v2f32, LDRSroW, LDRSroX, ssub>;
1403 defm : ScalToVecROLoadPat<ro32, load, f32, v4f32, LDRSroW, LDRSroX, ssub>;
1405 defm : ScalToVecROLoadPat<ro64, load, i64, v2i64, LDRDroW, LDRDroX, dsub>;
1407 defm : ScalToVecROLoadPat<ro64, load, f64, v2f64, LDRDroW, LDRDroX, dsub>;
1410 def : Pat <(v1i64 (scalar_to_vector (i64
1411 (load (ro_Windexed64 GPR64sp:$Rn, GPR32:$Rm,
1412 ro_Wextend64:$extend))))),
1413 (LDRDroW GPR64sp:$Rn, GPR32:$Rm, ro_Wextend64:$extend)>;
1415 def : Pat <(v1i64 (scalar_to_vector (i64
1416 (load (ro_Xindexed64 GPR64sp:$Rn, GPR64:$Rm,
1417 ro_Xextend64:$extend))))),
1418 (LDRDroX GPR64sp:$Rn, GPR64:$Rm, ro_Xextend64:$extend)>;
1421 // Match all load 64 bits width whose type is compatible with FPR64
1422 multiclass VecROLoadPat<ROAddrMode ro, ValueType VecTy,
1423 Instruction LOADW, Instruction LOADX> {
1425 def : Pat<(VecTy (load (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
1426 (LOADW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
1428 def : Pat<(VecTy (load (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
1429 (LOADX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
1432 let AddedComplexity = 10 in {
1433 let Predicates = [IsLE] in {
1434 // We must do vector loads with LD1 in big-endian.
1435 defm : VecROLoadPat<ro64, v2i32, LDRDroW, LDRDroX>;
1436 defm : VecROLoadPat<ro64, v2f32, LDRDroW, LDRDroX>;
1437 defm : VecROLoadPat<ro64, v8i8, LDRDroW, LDRDroX>;
1438 defm : VecROLoadPat<ro64, v4i16, LDRDroW, LDRDroX>;
1439 defm : VecROLoadPat<ro64, v4f16, LDRDroW, LDRDroX>;
1442 defm : VecROLoadPat<ro64, v1i64, LDRDroW, LDRDroX>;
1443 defm : VecROLoadPat<ro64, v1f64, LDRDroW, LDRDroX>;
1445 // Match all load 128 bits width whose type is compatible with FPR128
1446 let Predicates = [IsLE] in {
1447 // We must do vector loads with LD1 in big-endian.
1448 defm : VecROLoadPat<ro128, v2i64, LDRQroW, LDRQroX>;
1449 defm : VecROLoadPat<ro128, v2f64, LDRQroW, LDRQroX>;
1450 defm : VecROLoadPat<ro128, v4i32, LDRQroW, LDRQroX>;
1451 defm : VecROLoadPat<ro128, v4f32, LDRQroW, LDRQroX>;
1452 defm : VecROLoadPat<ro128, v8i16, LDRQroW, LDRQroX>;
1453 defm : VecROLoadPat<ro128, v8f16, LDRQroW, LDRQroX>;
1454 defm : VecROLoadPat<ro128, v16i8, LDRQroW, LDRQroX>;
1456 } // AddedComplexity = 10
1459 multiclass ExtLoadTo64ROPat<ROAddrMode ro, SDPatternOperator loadop,
1460 Instruction INSTW, Instruction INSTX> {
1461 def : Pat<(i64 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
1462 (SUBREG_TO_REG (i64 0),
1463 (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
1466 def : Pat<(i64 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
1467 (SUBREG_TO_REG (i64 0),
1468 (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
1472 let AddedComplexity = 10 in {
1473 defm : ExtLoadTo64ROPat<ro8, zextloadi8, LDRBBroW, LDRBBroX>;
1474 defm : ExtLoadTo64ROPat<ro16, zextloadi16, LDRHHroW, LDRHHroX>;
1475 defm : ExtLoadTo64ROPat<ro32, zextloadi32, LDRWroW, LDRWroX>;
1477 // zextloadi1 -> zextloadi8
1478 defm : ExtLoadTo64ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
1480 // extload -> zextload
1481 defm : ExtLoadTo64ROPat<ro8, extloadi8, LDRBBroW, LDRBBroX>;
1482 defm : ExtLoadTo64ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
1483 defm : ExtLoadTo64ROPat<ro32, extloadi32, LDRWroW, LDRWroX>;
1485 // extloadi1 -> zextloadi8
1486 defm : ExtLoadTo64ROPat<ro8, extloadi1, LDRBBroW, LDRBBroX>;
1491 multiclass ExtLoadTo32ROPat<ROAddrMode ro, SDPatternOperator loadop,
1492 Instruction INSTW, Instruction INSTX> {
1493 def : Pat<(i32 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend))),
1494 (INSTW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
1496 def : Pat<(i32 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend))),
1497 (INSTX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
1501 let AddedComplexity = 10 in {
1502 // extload -> zextload
1503 defm : ExtLoadTo32ROPat<ro8, extloadi8, LDRBBroW, LDRBBroX>;
1504 defm : ExtLoadTo32ROPat<ro16, extloadi16, LDRHHroW, LDRHHroX>;
1505 defm : ExtLoadTo32ROPat<ro32, extloadi32, LDRWroW, LDRWroX>;
1507 // zextloadi1 -> zextloadi8
1508 defm : ExtLoadTo32ROPat<ro8, zextloadi1, LDRBBroW, LDRBBroX>;
1512 // (unsigned immediate)
1514 defm LDRX : LoadUI<0b11, 0, 0b01, GPR64, uimm12s8, "ldr",
1516 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
1517 defm LDRW : LoadUI<0b10, 0, 0b01, GPR32, uimm12s4, "ldr",
1519 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
1520 defm LDRB : LoadUI<0b00, 1, 0b01, FPR8, uimm12s1, "ldr",
1522 (load (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)))]>;
1523 defm LDRH : LoadUI<0b01, 1, 0b01, FPR16, uimm12s2, "ldr",
1524 [(set (f16 FPR16:$Rt),
1525 (load (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)))]>;
1526 defm LDRS : LoadUI<0b10, 1, 0b01, FPR32, uimm12s4, "ldr",
1527 [(set (f32 FPR32:$Rt),
1528 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)))]>;
1529 defm LDRD : LoadUI<0b11, 1, 0b01, FPR64, uimm12s8, "ldr",
1530 [(set (f64 FPR64:$Rt),
1531 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)))]>;
1532 defm LDRQ : LoadUI<0b00, 1, 0b11, FPR128, uimm12s16, "ldr",
1533 [(set (f128 FPR128:$Rt),
1534 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)))]>;
1536 // For regular load, we do not have any alignment requirement.
1537 // Thus, it is safe to directly map the vector loads with interesting
1538 // addressing modes.
1539 // FIXME: We could do the same for bitconvert to floating point vectors.
1540 def : Pat <(v8i8 (scalar_to_vector (i32
1541 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
1542 (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
1543 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
1544 def : Pat <(v16i8 (scalar_to_vector (i32
1545 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
1546 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
1547 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub)>;
1548 def : Pat <(v4i16 (scalar_to_vector (i32
1549 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
1550 (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
1551 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
1552 def : Pat <(v8i16 (scalar_to_vector (i32
1553 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
1554 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
1555 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub)>;
1556 def : Pat <(v2i32 (scalar_to_vector (i32
1557 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
1558 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
1559 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
1560 def : Pat <(v4i32 (scalar_to_vector (i32
1561 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
1562 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
1563 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub)>;
1564 def : Pat <(v1i64 (scalar_to_vector (i64
1565 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
1566 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1567 def : Pat <(v2i64 (scalar_to_vector (i64
1568 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))))),
1569 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
1570 (LDRDui GPR64sp:$Rn, uimm12s8:$offset), dsub)>;
1572 // Match all load 64 bits width whose type is compatible with FPR64
1573 let Predicates = [IsLE] in {
1574 // We must use LD1 to perform vector loads in big-endian.
1575 def : Pat<(v2f32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1576 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1577 def : Pat<(v8i8 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1578 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1579 def : Pat<(v4i16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1580 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1581 def : Pat<(v2i32 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1582 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1583 def : Pat<(v4f16 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1584 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1586 def : Pat<(v1f64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1587 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1588 def : Pat<(v1i64 (load (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))),
1589 (LDRDui GPR64sp:$Rn, uimm12s8:$offset)>;
1591 // Match all load 128 bits width whose type is compatible with FPR128
1592 let Predicates = [IsLE] in {
1593 // We must use LD1 to perform vector loads in big-endian.
1594 def : Pat<(v4f32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1595 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1596 def : Pat<(v2f64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1597 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1598 def : Pat<(v16i8 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1599 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1600 def : Pat<(v8i16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1601 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1602 def : Pat<(v4i32 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1603 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1604 def : Pat<(v2i64 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1605 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1606 def : Pat<(v8f16 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1607 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1609 def : Pat<(f128 (load (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset))),
1610 (LDRQui GPR64sp:$Rn, uimm12s16:$offset)>;
1612 defm LDRHH : LoadUI<0b01, 0, 0b01, GPR32, uimm12s2, "ldrh",
1614 (zextloadi16 (am_indexed16 GPR64sp:$Rn,
1615 uimm12s2:$offset)))]>;
1616 defm LDRBB : LoadUI<0b00, 0, 0b01, GPR32, uimm12s1, "ldrb",
1618 (zextloadi8 (am_indexed8 GPR64sp:$Rn,
1619 uimm12s1:$offset)))]>;
1621 def : Pat<(i64 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1622 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
1623 def : Pat<(i64 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
1624 (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
1626 // zextloadi1 -> zextloadi8
1627 def : Pat<(i32 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1628 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
1629 def : Pat<(i64 (zextloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1630 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
1632 // extload -> zextload
1633 def : Pat<(i32 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
1634 (LDRHHui GPR64sp:$Rn, uimm12s2:$offset)>;
1635 def : Pat<(i32 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1636 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
1637 def : Pat<(i32 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1638 (LDRBBui GPR64sp:$Rn, uimm12s1:$offset)>;
1639 def : Pat<(i64 (extloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
1640 (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
1641 def : Pat<(i64 (extloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))),
1642 (SUBREG_TO_REG (i64 0), (LDRHHui GPR64sp:$Rn, uimm12s2:$offset), sub_32)>;
1643 def : Pat<(i64 (extloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1644 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
1645 def : Pat<(i64 (extloadi1 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))),
1646 (SUBREG_TO_REG (i64 0), (LDRBBui GPR64sp:$Rn, uimm12s1:$offset), sub_32)>;
1648 // load sign-extended half-word
1649 defm LDRSHW : LoadUI<0b01, 0, 0b11, GPR32, uimm12s2, "ldrsh",
1651 (sextloadi16 (am_indexed16 GPR64sp:$Rn,
1652 uimm12s2:$offset)))]>;
1653 defm LDRSHX : LoadUI<0b01, 0, 0b10, GPR64, uimm12s2, "ldrsh",
1655 (sextloadi16 (am_indexed16 GPR64sp:$Rn,
1656 uimm12s2:$offset)))]>;
1658 // load sign-extended byte
1659 defm LDRSBW : LoadUI<0b00, 0, 0b11, GPR32, uimm12s1, "ldrsb",
1661 (sextloadi8 (am_indexed8 GPR64sp:$Rn,
1662 uimm12s1:$offset)))]>;
1663 defm LDRSBX : LoadUI<0b00, 0, 0b10, GPR64, uimm12s1, "ldrsb",
1665 (sextloadi8 (am_indexed8 GPR64sp:$Rn,
1666 uimm12s1:$offset)))]>;
1668 // load sign-extended word
1669 defm LDRSW : LoadUI<0b10, 0, 0b10, GPR64, uimm12s4, "ldrsw",
1671 (sextloadi32 (am_indexed32 GPR64sp:$Rn,
1672 uimm12s4:$offset)))]>;
1674 // load zero-extended word
1675 def : Pat<(i64 (zextloadi32 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))),
1676 (SUBREG_TO_REG (i64 0), (LDRWui GPR64sp:$Rn, uimm12s4:$offset), sub_32)>;
1679 def PRFMui : PrefetchUI<0b11, 0, 0b10, "prfm",
1680 [(AArch64Prefetch imm:$Rt,
1681 (am_indexed64 GPR64sp:$Rn,
1682 uimm12s8:$offset))]>;
1684 def : InstAlias<"prfm $Rt, [$Rn]", (PRFMui prfop:$Rt, GPR64sp:$Rn, 0)>;
1688 def LDRWl : LoadLiteral<0b00, 0, GPR32, "ldr">;
1689 def LDRXl : LoadLiteral<0b01, 0, GPR64, "ldr">;
1690 def LDRSl : LoadLiteral<0b00, 1, FPR32, "ldr">;
1691 def LDRDl : LoadLiteral<0b01, 1, FPR64, "ldr">;
1692 def LDRQl : LoadLiteral<0b10, 1, FPR128, "ldr">;
1694 // load sign-extended word
1695 def LDRSWl : LoadLiteral<0b10, 0, GPR64, "ldrsw">;
1698 def PRFMl : PrefetchLiteral<0b11, 0, "prfm", []>;
1699 // [(AArch64Prefetch imm:$Rt, tglobaladdr:$label)]>;
1702 // (unscaled immediate)
1703 defm LDURX : LoadUnscaled<0b11, 0, 0b01, GPR64, "ldur",
1705 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
1706 defm LDURW : LoadUnscaled<0b10, 0, 0b01, GPR32, "ldur",
1708 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
1709 defm LDURB : LoadUnscaled<0b00, 1, 0b01, FPR8, "ldur",
1711 (load (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
1712 defm LDURH : LoadUnscaled<0b01, 1, 0b01, FPR16, "ldur",
1714 (load (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1715 defm LDURS : LoadUnscaled<0b10, 1, 0b01, FPR32, "ldur",
1716 [(set (f32 FPR32:$Rt),
1717 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
1718 defm LDURD : LoadUnscaled<0b11, 1, 0b01, FPR64, "ldur",
1719 [(set (f64 FPR64:$Rt),
1720 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset)))]>;
1721 defm LDURQ : LoadUnscaled<0b00, 1, 0b11, FPR128, "ldur",
1722 [(set (f128 FPR128:$Rt),
1723 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset)))]>;
1726 : LoadUnscaled<0b01, 0, 0b01, GPR32, "ldurh",
1728 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1730 : LoadUnscaled<0b00, 0, 0b01, GPR32, "ldurb",
1732 (zextloadi8 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1734 // Match all load 64 bits width whose type is compatible with FPR64
1735 let Predicates = [IsLE] in {
1736 def : Pat<(v2f32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1737 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1738 def : Pat<(v2i32 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1739 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1740 def : Pat<(v4i16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1741 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1742 def : Pat<(v8i8 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1743 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1744 def : Pat<(v4f16 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1745 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1747 def : Pat<(v1f64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1748 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1749 def : Pat<(v1i64 (load (am_unscaled64 GPR64sp:$Rn, simm9:$offset))),
1750 (LDURDi GPR64sp:$Rn, simm9:$offset)>;
1752 // Match all load 128 bits width whose type is compatible with FPR128
1753 let Predicates = [IsLE] in {
1754 def : Pat<(v2f64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1755 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1756 def : Pat<(v2i64 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1757 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1758 def : Pat<(v4f32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1759 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1760 def : Pat<(v4i32 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1761 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1762 def : Pat<(v8i16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1763 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1764 def : Pat<(v16i8 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1765 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1766 def : Pat<(v8f16 (load (am_unscaled128 GPR64sp:$Rn, simm9:$offset))),
1767 (LDURQi GPR64sp:$Rn, simm9:$offset)>;
1771 def : Pat<(i32 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1772 (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
1773 def : Pat<(i32 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1774 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
1775 def : Pat<(i32 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1776 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
1777 def : Pat<(i64 (extloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
1778 (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1779 def : Pat<(i64 (extloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1780 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1781 def : Pat<(i64 (extloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1782 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1783 def : Pat<(i64 (extloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1784 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1786 def : Pat<(i32 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1787 (LDURHHi GPR64sp:$Rn, simm9:$offset)>;
1788 def : Pat<(i32 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1789 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
1790 def : Pat<(i32 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1791 (LDURBBi GPR64sp:$Rn, simm9:$offset)>;
1792 def : Pat<(i64 (zextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))),
1793 (SUBREG_TO_REG (i64 0), (LDURWi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1794 def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1795 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1796 def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1797 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1798 def : Pat<(i64 (zextloadi1 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1799 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1803 // LDR mnemonics fall back to LDUR for negative or unaligned offsets.
1805 // Define new assembler match classes as we want to only match these when
1806 // the don't otherwise match the scaled addressing mode for LDR/STR. Don't
1807 // associate a DiagnosticType either, as we want the diagnostic for the
1808 // canonical form (the scaled operand) to take precedence.
1809 class SImm9OffsetOperand<int Width> : AsmOperandClass {
1810 let Name = "SImm9OffsetFB" # Width;
1811 let PredicateMethod = "isSImm9OffsetFB<" # Width # ">";
1812 let RenderMethod = "addImmOperands";
1815 def SImm9OffsetFB8Operand : SImm9OffsetOperand<8>;
1816 def SImm9OffsetFB16Operand : SImm9OffsetOperand<16>;
1817 def SImm9OffsetFB32Operand : SImm9OffsetOperand<32>;
1818 def SImm9OffsetFB64Operand : SImm9OffsetOperand<64>;
1819 def SImm9OffsetFB128Operand : SImm9OffsetOperand<128>;
1821 def simm9_offset_fb8 : Operand<i64> {
1822 let ParserMatchClass = SImm9OffsetFB8Operand;
1824 def simm9_offset_fb16 : Operand<i64> {
1825 let ParserMatchClass = SImm9OffsetFB16Operand;
1827 def simm9_offset_fb32 : Operand<i64> {
1828 let ParserMatchClass = SImm9OffsetFB32Operand;
1830 def simm9_offset_fb64 : Operand<i64> {
1831 let ParserMatchClass = SImm9OffsetFB64Operand;
1833 def simm9_offset_fb128 : Operand<i64> {
1834 let ParserMatchClass = SImm9OffsetFB128Operand;
1837 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1838 (LDURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
1839 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1840 (LDURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
1841 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1842 (LDURBi FPR8:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
1843 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1844 (LDURHi FPR16:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
1845 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1846 (LDURSi FPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
1847 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1848 (LDURDi FPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
1849 def : InstAlias<"ldr $Rt, [$Rn, $offset]",
1850 (LDURQi FPR128:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
1853 def : Pat<(i64 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))),
1854 (SUBREG_TO_REG (i64 0), (LDURBBi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1855 def : Pat<(i64 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))),
1856 (SUBREG_TO_REG (i64 0), (LDURHHi GPR64sp:$Rn, simm9:$offset), sub_32)>;
1858 // load sign-extended half-word
1860 : LoadUnscaled<0b01, 0, 0b11, GPR32, "ldursh",
1862 (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1864 : LoadUnscaled<0b01, 0, 0b10, GPR64, "ldursh",
1866 (sextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset)))]>;
1868 // load sign-extended byte
1870 : LoadUnscaled<0b00, 0, 0b11, GPR32, "ldursb",
1872 (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
1874 : LoadUnscaled<0b00, 0, 0b10, GPR64, "ldursb",
1876 (sextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset)))]>;
1878 // load sign-extended word
1880 : LoadUnscaled<0b10, 0, 0b10, GPR64, "ldursw",
1882 (sextloadi32 (am_unscaled32 GPR64sp:$Rn, simm9:$offset)))]>;
1884 // zero and sign extending aliases from generic LDR* mnemonics to LDUR*.
1885 def : InstAlias<"ldrb $Rt, [$Rn, $offset]",
1886 (LDURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
1887 def : InstAlias<"ldrh $Rt, [$Rn, $offset]",
1888 (LDURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
1889 def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
1890 (LDURSBWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
1891 def : InstAlias<"ldrsb $Rt, [$Rn, $offset]",
1892 (LDURSBXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
1893 def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
1894 (LDURSHWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
1895 def : InstAlias<"ldrsh $Rt, [$Rn, $offset]",
1896 (LDURSHXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
1897 def : InstAlias<"ldrsw $Rt, [$Rn, $offset]",
1898 (LDURSWi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
1901 defm PRFUM : PrefetchUnscaled<0b11, 0, 0b10, "prfum",
1902 [(AArch64Prefetch imm:$Rt,
1903 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
1906 // (unscaled immediate, unprivileged)
1907 defm LDTRX : LoadUnprivileged<0b11, 0, 0b01, GPR64, "ldtr">;
1908 defm LDTRW : LoadUnprivileged<0b10, 0, 0b01, GPR32, "ldtr">;
1910 defm LDTRH : LoadUnprivileged<0b01, 0, 0b01, GPR32, "ldtrh">;
1911 defm LDTRB : LoadUnprivileged<0b00, 0, 0b01, GPR32, "ldtrb">;
1913 // load sign-extended half-word
1914 defm LDTRSHW : LoadUnprivileged<0b01, 0, 0b11, GPR32, "ldtrsh">;
1915 defm LDTRSHX : LoadUnprivileged<0b01, 0, 0b10, GPR64, "ldtrsh">;
1917 // load sign-extended byte
1918 defm LDTRSBW : LoadUnprivileged<0b00, 0, 0b11, GPR32, "ldtrsb">;
1919 defm LDTRSBX : LoadUnprivileged<0b00, 0, 0b10, GPR64, "ldtrsb">;
1921 // load sign-extended word
1922 defm LDTRSW : LoadUnprivileged<0b10, 0, 0b10, GPR64, "ldtrsw">;
1925 // (immediate pre-indexed)
1926 def LDRWpre : LoadPreIdx<0b10, 0, 0b01, GPR32, "ldr">;
1927 def LDRXpre : LoadPreIdx<0b11, 0, 0b01, GPR64, "ldr">;
1928 def LDRBpre : LoadPreIdx<0b00, 1, 0b01, FPR8, "ldr">;
1929 def LDRHpre : LoadPreIdx<0b01, 1, 0b01, FPR16, "ldr">;
1930 def LDRSpre : LoadPreIdx<0b10, 1, 0b01, FPR32, "ldr">;
1931 def LDRDpre : LoadPreIdx<0b11, 1, 0b01, FPR64, "ldr">;
1932 def LDRQpre : LoadPreIdx<0b00, 1, 0b11, FPR128, "ldr">;
1934 // load sign-extended half-word
1935 def LDRSHWpre : LoadPreIdx<0b01, 0, 0b11, GPR32, "ldrsh">;
1936 def LDRSHXpre : LoadPreIdx<0b01, 0, 0b10, GPR64, "ldrsh">;
1938 // load sign-extended byte
1939 def LDRSBWpre : LoadPreIdx<0b00, 0, 0b11, GPR32, "ldrsb">;
1940 def LDRSBXpre : LoadPreIdx<0b00, 0, 0b10, GPR64, "ldrsb">;
1942 // load zero-extended byte
1943 def LDRBBpre : LoadPreIdx<0b00, 0, 0b01, GPR32, "ldrb">;
1944 def LDRHHpre : LoadPreIdx<0b01, 0, 0b01, GPR32, "ldrh">;
1946 // load sign-extended word
1947 def LDRSWpre : LoadPreIdx<0b10, 0, 0b10, GPR64, "ldrsw">;
1950 // (immediate post-indexed)
1951 def LDRWpost : LoadPostIdx<0b10, 0, 0b01, GPR32, "ldr">;
1952 def LDRXpost : LoadPostIdx<0b11, 0, 0b01, GPR64, "ldr">;
1953 def LDRBpost : LoadPostIdx<0b00, 1, 0b01, FPR8, "ldr">;
1954 def LDRHpost : LoadPostIdx<0b01, 1, 0b01, FPR16, "ldr">;
1955 def LDRSpost : LoadPostIdx<0b10, 1, 0b01, FPR32, "ldr">;
1956 def LDRDpost : LoadPostIdx<0b11, 1, 0b01, FPR64, "ldr">;
1957 def LDRQpost : LoadPostIdx<0b00, 1, 0b11, FPR128, "ldr">;
1959 // load sign-extended half-word
1960 def LDRSHWpost : LoadPostIdx<0b01, 0, 0b11, GPR32, "ldrsh">;
1961 def LDRSHXpost : LoadPostIdx<0b01, 0, 0b10, GPR64, "ldrsh">;
1963 // load sign-extended byte
1964 def LDRSBWpost : LoadPostIdx<0b00, 0, 0b11, GPR32, "ldrsb">;
1965 def LDRSBXpost : LoadPostIdx<0b00, 0, 0b10, GPR64, "ldrsb">;
1967 // load zero-extended byte
1968 def LDRBBpost : LoadPostIdx<0b00, 0, 0b01, GPR32, "ldrb">;
1969 def LDRHHpost : LoadPostIdx<0b01, 0, 0b01, GPR32, "ldrh">;
1971 // load sign-extended word
1972 def LDRSWpost : LoadPostIdx<0b10, 0, 0b10, GPR64, "ldrsw">;
1974 //===----------------------------------------------------------------------===//
1975 // Store instructions.
1976 //===----------------------------------------------------------------------===//
1978 // Pair (indexed, offset)
1979 // FIXME: Use dedicated range-checked addressing mode operand here.
1980 defm STPW : StorePairOffset<0b00, 0, GPR32, simm7s4, "stp">;
1981 defm STPX : StorePairOffset<0b10, 0, GPR64, simm7s8, "stp">;
1982 defm STPS : StorePairOffset<0b00, 1, FPR32, simm7s4, "stp">;
1983 defm STPD : StorePairOffset<0b01, 1, FPR64, simm7s8, "stp">;
1984 defm STPQ : StorePairOffset<0b10, 1, FPR128, simm7s16, "stp">;
1986 // Pair (pre-indexed)
1987 def STPWpre : StorePairPreIdx<0b00, 0, GPR32, simm7s4, "stp">;
1988 def STPXpre : StorePairPreIdx<0b10, 0, GPR64, simm7s8, "stp">;
1989 def STPSpre : StorePairPreIdx<0b00, 1, FPR32, simm7s4, "stp">;
1990 def STPDpre : StorePairPreIdx<0b01, 1, FPR64, simm7s8, "stp">;
1991 def STPQpre : StorePairPreIdx<0b10, 1, FPR128, simm7s16, "stp">;
1993 // Pair (pre-indexed)
1994 def STPWpost : StorePairPostIdx<0b00, 0, GPR32, simm7s4, "stp">;
1995 def STPXpost : StorePairPostIdx<0b10, 0, GPR64, simm7s8, "stp">;
1996 def STPSpost : StorePairPostIdx<0b00, 1, FPR32, simm7s4, "stp">;
1997 def STPDpost : StorePairPostIdx<0b01, 1, FPR64, simm7s8, "stp">;
1998 def STPQpost : StorePairPostIdx<0b10, 1, FPR128, simm7s16, "stp">;
2000 // Pair (no allocate)
2001 defm STNPW : StorePairNoAlloc<0b00, 0, GPR32, simm7s4, "stnp">;
2002 defm STNPX : StorePairNoAlloc<0b10, 0, GPR64, simm7s8, "stnp">;
2003 defm STNPS : StorePairNoAlloc<0b00, 1, FPR32, simm7s4, "stnp">;
2004 defm STNPD : StorePairNoAlloc<0b01, 1, FPR64, simm7s8, "stnp">;
2005 defm STNPQ : StorePairNoAlloc<0b10, 1, FPR128, simm7s16, "stnp">;
2008 // (Register offset)
2011 defm STRBB : Store8RO< 0b00, 0, 0b00, GPR32, "strb", i32, truncstorei8>;
2012 defm STRHH : Store16RO<0b01, 0, 0b00, GPR32, "strh", i32, truncstorei16>;
2013 defm STRW : Store32RO<0b10, 0, 0b00, GPR32, "str", i32, store>;
2014 defm STRX : Store64RO<0b11, 0, 0b00, GPR64, "str", i64, store>;
2018 defm STRB : Store8RO< 0b00, 1, 0b00, FPR8, "str", untyped, store>;
2019 defm STRH : Store16RO<0b01, 1, 0b00, FPR16, "str", f16, store>;
2020 defm STRS : Store32RO<0b10, 1, 0b00, FPR32, "str", f32, store>;
2021 defm STRD : Store64RO<0b11, 1, 0b00, FPR64, "str", f64, store>;
2022 defm STRQ : Store128RO<0b00, 1, 0b10, FPR128, "str", f128, store>;
2024 multiclass TruncStoreFrom64ROPat<ROAddrMode ro, SDPatternOperator storeop,
2025 Instruction STRW, Instruction STRX> {
2027 def : Pat<(storeop GPR64:$Rt,
2028 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2029 (STRW (EXTRACT_SUBREG GPR64:$Rt, sub_32),
2030 GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2032 def : Pat<(storeop GPR64:$Rt,
2033 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
2034 (STRX (EXTRACT_SUBREG GPR64:$Rt, sub_32),
2035 GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2038 let AddedComplexity = 10 in {
2040 defm : TruncStoreFrom64ROPat<ro8, truncstorei8, STRBBroW, STRBBroX>;
2041 defm : TruncStoreFrom64ROPat<ro16, truncstorei16, STRHHroW, STRHHroX>;
2042 defm : TruncStoreFrom64ROPat<ro32, truncstorei32, STRWroW, STRWroX>;
2045 multiclass VecROStorePat<ROAddrMode ro, ValueType VecTy, RegisterClass FPR,
2046 Instruction STRW, Instruction STRX> {
2047 def : Pat<(store (VecTy FPR:$Rt),
2048 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2049 (STRW FPR:$Rt, GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2051 def : Pat<(store (VecTy FPR:$Rt),
2052 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
2053 (STRX FPR:$Rt, GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2056 let AddedComplexity = 10 in {
2057 // Match all store 64 bits width whose type is compatible with FPR64
2058 let Predicates = [IsLE] in {
2059 // We must use ST1 to store vectors in big-endian.
2060 defm : VecROStorePat<ro64, v2i32, FPR64, STRDroW, STRDroX>;
2061 defm : VecROStorePat<ro64, v2f32, FPR64, STRDroW, STRDroX>;
2062 defm : VecROStorePat<ro64, v4i16, FPR64, STRDroW, STRDroX>;
2063 defm : VecROStorePat<ro64, v8i8, FPR64, STRDroW, STRDroX>;
2064 defm : VecROStorePat<ro64, v4f16, FPR64, STRDroW, STRDroX>;
2067 defm : VecROStorePat<ro64, v1i64, FPR64, STRDroW, STRDroX>;
2068 defm : VecROStorePat<ro64, v1f64, FPR64, STRDroW, STRDroX>;
2070 // Match all store 128 bits width whose type is compatible with FPR128
2071 let Predicates = [IsLE] in {
2072 // We must use ST1 to store vectors in big-endian.
2073 defm : VecROStorePat<ro128, v2i64, FPR128, STRQroW, STRQroX>;
2074 defm : VecROStorePat<ro128, v2f64, FPR128, STRQroW, STRQroX>;
2075 defm : VecROStorePat<ro128, v4i32, FPR128, STRQroW, STRQroX>;
2076 defm : VecROStorePat<ro128, v4f32, FPR128, STRQroW, STRQroX>;
2077 defm : VecROStorePat<ro128, v8i16, FPR128, STRQroW, STRQroX>;
2078 defm : VecROStorePat<ro128, v16i8, FPR128, STRQroW, STRQroX>;
2079 defm : VecROStorePat<ro128, v8f16, FPR128, STRQroW, STRQroX>;
2081 } // AddedComplexity = 10
2083 // Match stores from lane 0 to the appropriate subreg's store.
2084 multiclass VecROStoreLane0Pat<ROAddrMode ro, SDPatternOperator storeop,
2085 ValueType VecTy, ValueType STy,
2086 SubRegIndex SubRegIdx,
2087 Instruction STRW, Instruction STRX> {
2089 def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
2090 (ro.Wpat GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)),
2091 (STRW (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
2092 GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend)>;
2094 def : Pat<(storeop (STy (vector_extract (VecTy VecListOne128:$Vt), 0)),
2095 (ro.Xpat GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)),
2096 (STRX (EXTRACT_SUBREG VecListOne128:$Vt, SubRegIdx),
2097 GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend)>;
2100 let AddedComplexity = 19 in {
2101 defm : VecROStoreLane0Pat<ro16, truncstorei16, v8i16, i32, hsub, STRHroW, STRHroX>;
2102 defm : VecROStoreLane0Pat<ro16, store , v8i16, i16, hsub, STRHroW, STRHroX>;
2103 defm : VecROStoreLane0Pat<ro32, truncstorei32, v4i32, i32, ssub, STRSroW, STRSroX>;
2104 defm : VecROStoreLane0Pat<ro32, store , v4i32, i32, ssub, STRSroW, STRSroX>;
2105 defm : VecROStoreLane0Pat<ro32, store , v4f32, f32, ssub, STRSroW, STRSroX>;
2106 defm : VecROStoreLane0Pat<ro64, store , v2i64, i64, dsub, STRDroW, STRDroX>;
2107 defm : VecROStoreLane0Pat<ro64, store , v2f64, f64, dsub, STRDroW, STRDroX>;
2111 // (unsigned immediate)
2112 defm STRX : StoreUI<0b11, 0, 0b00, GPR64, uimm12s8, "str",
2114 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
2115 defm STRW : StoreUI<0b10, 0, 0b00, GPR32, uimm12s4, "str",
2117 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
2118 defm STRB : StoreUI<0b00, 1, 0b00, FPR8, uimm12s1, "str",
2120 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))]>;
2121 defm STRH : StoreUI<0b01, 1, 0b00, FPR16, uimm12s2, "str",
2122 [(store (f16 FPR16:$Rt),
2123 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))]>;
2124 defm STRS : StoreUI<0b10, 1, 0b00, FPR32, uimm12s4, "str",
2125 [(store (f32 FPR32:$Rt),
2126 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))]>;
2127 defm STRD : StoreUI<0b11, 1, 0b00, FPR64, uimm12s8, "str",
2128 [(store (f64 FPR64:$Rt),
2129 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset))]>;
2130 defm STRQ : StoreUI<0b00, 1, 0b10, FPR128, uimm12s16, "str", []>;
2132 defm STRHH : StoreUI<0b01, 0, 0b00, GPR32, uimm12s2, "strh",
2133 [(truncstorei16 GPR32:$Rt,
2134 (am_indexed16 GPR64sp:$Rn,
2135 uimm12s2:$offset))]>;
2136 defm STRBB : StoreUI<0b00, 0, 0b00, GPR32, uimm12s1, "strb",
2137 [(truncstorei8 GPR32:$Rt,
2138 (am_indexed8 GPR64sp:$Rn,
2139 uimm12s1:$offset))]>;
2141 // Match all store 64 bits width whose type is compatible with FPR64
2142 let AddedComplexity = 10 in {
2143 let Predicates = [IsLE] in {
2144 // We must use ST1 to store vectors in big-endian.
2145 def : Pat<(store (v2f32 FPR64:$Rt),
2146 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2147 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2148 def : Pat<(store (v8i8 FPR64:$Rt),
2149 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2150 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2151 def : Pat<(store (v4i16 FPR64:$Rt),
2152 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2153 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2154 def : Pat<(store (v2i32 FPR64:$Rt),
2155 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2156 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2157 def : Pat<(store (v4f16 FPR64:$Rt),
2158 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2159 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2161 def : Pat<(store (v1f64 FPR64:$Rt),
2162 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2163 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2164 def : Pat<(store (v1i64 FPR64:$Rt),
2165 (am_indexed64 GPR64sp:$Rn, uimm12s8:$offset)),
2166 (STRDui FPR64:$Rt, GPR64sp:$Rn, uimm12s8:$offset)>;
2168 // Match all store 128 bits width whose type is compatible with FPR128
2169 let Predicates = [IsLE] in {
2170 // We must use ST1 to store vectors in big-endian.
2171 def : Pat<(store (v4f32 FPR128:$Rt),
2172 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2173 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2174 def : Pat<(store (v2f64 FPR128:$Rt),
2175 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2176 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2177 def : Pat<(store (v16i8 FPR128:$Rt),
2178 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2179 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2180 def : Pat<(store (v8i16 FPR128:$Rt),
2181 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2182 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2183 def : Pat<(store (v4i32 FPR128:$Rt),
2184 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2185 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2186 def : Pat<(store (v2i64 FPR128:$Rt),
2187 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2188 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2189 def : Pat<(store (v8f16 FPR128:$Rt),
2190 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2191 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2193 def : Pat<(store (f128 FPR128:$Rt),
2194 (am_indexed128 GPR64sp:$Rn, uimm12s16:$offset)),
2195 (STRQui FPR128:$Rt, GPR64sp:$Rn, uimm12s16:$offset)>;
2198 def : Pat<(truncstorei32 GPR64:$Rt,
2199 (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset)),
2200 (STRWui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s4:$offset)>;
2201 def : Pat<(truncstorei16 GPR64:$Rt,
2202 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset)),
2203 (STRHHui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s2:$offset)>;
2204 def : Pat<(truncstorei8 GPR64:$Rt, (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset)),
2205 (STRBBui (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, uimm12s1:$offset)>;
2207 } // AddedComplexity = 10
2210 // (unscaled immediate)
2211 defm STURX : StoreUnscaled<0b11, 0, 0b00, GPR64, "stur",
2213 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
2214 defm STURW : StoreUnscaled<0b10, 0, 0b00, GPR32, "stur",
2216 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
2217 defm STURB : StoreUnscaled<0b00, 1, 0b00, FPR8, "stur",
2219 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
2220 defm STURH : StoreUnscaled<0b01, 1, 0b00, FPR16, "stur",
2221 [(store (f16 FPR16:$Rt),
2222 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
2223 defm STURS : StoreUnscaled<0b10, 1, 0b00, FPR32, "stur",
2224 [(store (f32 FPR32:$Rt),
2225 (am_unscaled32 GPR64sp:$Rn, simm9:$offset))]>;
2226 defm STURD : StoreUnscaled<0b11, 1, 0b00, FPR64, "stur",
2227 [(store (f64 FPR64:$Rt),
2228 (am_unscaled64 GPR64sp:$Rn, simm9:$offset))]>;
2229 defm STURQ : StoreUnscaled<0b00, 1, 0b10, FPR128, "stur",
2230 [(store (f128 FPR128:$Rt),
2231 (am_unscaled128 GPR64sp:$Rn, simm9:$offset))]>;
2232 defm STURHH : StoreUnscaled<0b01, 0, 0b00, GPR32, "sturh",
2233 [(truncstorei16 GPR32:$Rt,
2234 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))]>;
2235 defm STURBB : StoreUnscaled<0b00, 0, 0b00, GPR32, "sturb",
2236 [(truncstorei8 GPR32:$Rt,
2237 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))]>;
2239 // Match all store 64 bits width whose type is compatible with FPR64
2240 let Predicates = [IsLE] in {
2241 // We must use ST1 to store vectors in big-endian.
2242 def : Pat<(store (v2f32 FPR64:$Rt),
2243 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2244 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2245 def : Pat<(store (v8i8 FPR64:$Rt),
2246 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2247 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2248 def : Pat<(store (v4i16 FPR64:$Rt),
2249 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2250 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2251 def : Pat<(store (v2i32 FPR64:$Rt),
2252 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2253 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2254 def : Pat<(store (v4f16 FPR64:$Rt),
2255 (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2256 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2258 def : Pat<(store (v1f64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2259 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2260 def : Pat<(store (v1i64 FPR64:$Rt), (am_unscaled64 GPR64sp:$Rn, simm9:$offset)),
2261 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2263 // Match all store 128 bits width whose type is compatible with FPR128
2264 let Predicates = [IsLE] in {
2265 // We must use ST1 to store vectors in big-endian.
2266 def : Pat<(store (v4f32 FPR128:$Rt),
2267 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2268 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2269 def : Pat<(store (v2f64 FPR128:$Rt),
2270 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2271 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2272 def : Pat<(store (v16i8 FPR128:$Rt),
2273 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2274 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2275 def : Pat<(store (v8i16 FPR128:$Rt),
2276 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2277 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2278 def : Pat<(store (v4i32 FPR128:$Rt),
2279 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2280 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2281 def : Pat<(store (v2i64 FPR128:$Rt),
2282 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2283 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2284 def : Pat<(store (v2f64 FPR128:$Rt),
2285 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2286 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2287 def : Pat<(store (v8f16 FPR128:$Rt),
2288 (am_unscaled128 GPR64sp:$Rn, simm9:$offset)),
2289 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9:$offset)>;
2292 // unscaled i64 truncating stores
2293 def : Pat<(truncstorei32 GPR64:$Rt, (am_unscaled32 GPR64sp:$Rn, simm9:$offset)),
2294 (STURWi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
2295 def : Pat<(truncstorei16 GPR64:$Rt, (am_unscaled16 GPR64sp:$Rn, simm9:$offset)),
2296 (STURHHi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
2297 def : Pat<(truncstorei8 GPR64:$Rt, (am_unscaled8 GPR64sp:$Rn, simm9:$offset)),
2298 (STURBBi (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$Rn, simm9:$offset)>;
2301 // STR mnemonics fall back to STUR for negative or unaligned offsets.
2302 def : InstAlias<"str $Rt, [$Rn, $offset]",
2303 (STURXi GPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2304 def : InstAlias<"str $Rt, [$Rn, $offset]",
2305 (STURWi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2306 def : InstAlias<"str $Rt, [$Rn, $offset]",
2307 (STURBi FPR8:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2308 def : InstAlias<"str $Rt, [$Rn, $offset]",
2309 (STURHi FPR16:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2310 def : InstAlias<"str $Rt, [$Rn, $offset]",
2311 (STURSi FPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb32:$offset), 0>;
2312 def : InstAlias<"str $Rt, [$Rn, $offset]",
2313 (STURDi FPR64:$Rt, GPR64sp:$Rn, simm9_offset_fb64:$offset), 0>;
2314 def : InstAlias<"str $Rt, [$Rn, $offset]",
2315 (STURQi FPR128:$Rt, GPR64sp:$Rn, simm9_offset_fb128:$offset), 0>;
2317 def : InstAlias<"strb $Rt, [$Rn, $offset]",
2318 (STURBBi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb8:$offset), 0>;
2319 def : InstAlias<"strh $Rt, [$Rn, $offset]",
2320 (STURHHi GPR32:$Rt, GPR64sp:$Rn, simm9_offset_fb16:$offset), 0>;
2323 // (unscaled immediate, unprivileged)
2324 defm STTRW : StoreUnprivileged<0b10, 0, 0b00, GPR32, "sttr">;
2325 defm STTRX : StoreUnprivileged<0b11, 0, 0b00, GPR64, "sttr">;
2327 defm STTRH : StoreUnprivileged<0b01, 0, 0b00, GPR32, "sttrh">;
2328 defm STTRB : StoreUnprivileged<0b00, 0, 0b00, GPR32, "sttrb">;
2331 // (immediate pre-indexed)
2332 def STRWpre : StorePreIdx<0b10, 0, 0b00, GPR32, "str", pre_store, i32>;
2333 def STRXpre : StorePreIdx<0b11, 0, 0b00, GPR64, "str", pre_store, i64>;
2334 def STRBpre : StorePreIdx<0b00, 1, 0b00, FPR8, "str", pre_store, untyped>;
2335 def STRHpre : StorePreIdx<0b01, 1, 0b00, FPR16, "str", pre_store, f16>;
2336 def STRSpre : StorePreIdx<0b10, 1, 0b00, FPR32, "str", pre_store, f32>;
2337 def STRDpre : StorePreIdx<0b11, 1, 0b00, FPR64, "str", pre_store, f64>;
2338 def STRQpre : StorePreIdx<0b00, 1, 0b10, FPR128, "str", pre_store, f128>;
2340 def STRBBpre : StorePreIdx<0b00, 0, 0b00, GPR32, "strb", pre_truncsti8, i32>;
2341 def STRHHpre : StorePreIdx<0b01, 0, 0b00, GPR32, "strh", pre_truncsti16, i32>;
2344 def : Pat<(pre_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2345 (STRWpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2347 def : Pat<(pre_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2348 (STRHHpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2350 def : Pat<(pre_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2351 (STRBBpre (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2354 def : Pat<(pre_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2355 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2356 def : Pat<(pre_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2357 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2358 def : Pat<(pre_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2359 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2360 def : Pat<(pre_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2361 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2362 def : Pat<(pre_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2363 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2364 def : Pat<(pre_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2365 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2366 def : Pat<(pre_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2367 (STRDpre FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2369 def : Pat<(pre_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2370 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2371 def : Pat<(pre_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2372 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2373 def : Pat<(pre_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2374 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2375 def : Pat<(pre_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2376 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2377 def : Pat<(pre_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2378 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2379 def : Pat<(pre_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2380 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2381 def : Pat<(pre_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2382 (STRQpre FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2385 // (immediate post-indexed)
2386 def STRWpost : StorePostIdx<0b10, 0, 0b00, GPR32, "str", post_store, i32>;
2387 def STRXpost : StorePostIdx<0b11, 0, 0b00, GPR64, "str", post_store, i64>;
2388 def STRBpost : StorePostIdx<0b00, 1, 0b00, FPR8, "str", post_store, untyped>;
2389 def STRHpost : StorePostIdx<0b01, 1, 0b00, FPR16, "str", post_store, f16>;
2390 def STRSpost : StorePostIdx<0b10, 1, 0b00, FPR32, "str", post_store, f32>;
2391 def STRDpost : StorePostIdx<0b11, 1, 0b00, FPR64, "str", post_store, f64>;
2392 def STRQpost : StorePostIdx<0b00, 1, 0b10, FPR128, "str", post_store, f128>;
2394 def STRBBpost : StorePostIdx<0b00, 0, 0b00, GPR32, "strb", post_truncsti8, i32>;
2395 def STRHHpost : StorePostIdx<0b01, 0, 0b00, GPR32, "strh", post_truncsti16, i32>;
2398 def : Pat<(post_truncsti32 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2399 (STRWpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2401 def : Pat<(post_truncsti16 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2402 (STRHHpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2404 def : Pat<(post_truncsti8 GPR64:$Rt, GPR64sp:$addr, simm9:$off),
2405 (STRBBpost (EXTRACT_SUBREG GPR64:$Rt, sub_32), GPR64sp:$addr,
2408 def : Pat<(post_store (v8i8 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2409 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2410 def : Pat<(post_store (v4i16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2411 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2412 def : Pat<(post_store (v2i32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2413 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2414 def : Pat<(post_store (v2f32 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2415 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2416 def : Pat<(post_store (v1i64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2417 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2418 def : Pat<(post_store (v1f64 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2419 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2420 def : Pat<(post_store (v4f16 FPR64:$Rt), GPR64sp:$addr, simm9:$off),
2421 (STRDpost FPR64:$Rt, GPR64sp:$addr, simm9:$off)>;
2423 def : Pat<(post_store (v16i8 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2424 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2425 def : Pat<(post_store (v8i16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2426 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2427 def : Pat<(post_store (v4i32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2428 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2429 def : Pat<(post_store (v4f32 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2430 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2431 def : Pat<(post_store (v2i64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2432 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2433 def : Pat<(post_store (v2f64 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2434 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2435 def : Pat<(post_store (v8f16 FPR128:$Rt), GPR64sp:$addr, simm9:$off),
2436 (STRQpost FPR128:$Rt, GPR64sp:$addr, simm9:$off)>;
2438 //===----------------------------------------------------------------------===//
2439 // Load/store exclusive instructions.
2440 //===----------------------------------------------------------------------===//
2442 def LDARW : LoadAcquire <0b10, 1, 1, 0, 1, GPR32, "ldar">;
2443 def LDARX : LoadAcquire <0b11, 1, 1, 0, 1, GPR64, "ldar">;
2444 def LDARB : LoadAcquire <0b00, 1, 1, 0, 1, GPR32, "ldarb">;
2445 def LDARH : LoadAcquire <0b01, 1, 1, 0, 1, GPR32, "ldarh">;
2447 def LDAXRW : LoadExclusive <0b10, 0, 1, 0, 1, GPR32, "ldaxr">;
2448 def LDAXRX : LoadExclusive <0b11, 0, 1, 0, 1, GPR64, "ldaxr">;
2449 def LDAXRB : LoadExclusive <0b00, 0, 1, 0, 1, GPR32, "ldaxrb">;
2450 def LDAXRH : LoadExclusive <0b01, 0, 1, 0, 1, GPR32, "ldaxrh">;
2452 def LDXRW : LoadExclusive <0b10, 0, 1, 0, 0, GPR32, "ldxr">;
2453 def LDXRX : LoadExclusive <0b11, 0, 1, 0, 0, GPR64, "ldxr">;
2454 def LDXRB : LoadExclusive <0b00, 0, 1, 0, 0, GPR32, "ldxrb">;
2455 def LDXRH : LoadExclusive <0b01, 0, 1, 0, 0, GPR32, "ldxrh">;
2457 def STLRW : StoreRelease <0b10, 1, 0, 0, 1, GPR32, "stlr">;
2458 def STLRX : StoreRelease <0b11, 1, 0, 0, 1, GPR64, "stlr">;
2459 def STLRB : StoreRelease <0b00, 1, 0, 0, 1, GPR32, "stlrb">;
2460 def STLRH : StoreRelease <0b01, 1, 0, 0, 1, GPR32, "stlrh">;
2462 def STLXRW : StoreExclusive<0b10, 0, 0, 0, 1, GPR32, "stlxr">;
2463 def STLXRX : StoreExclusive<0b11, 0, 0, 0, 1, GPR64, "stlxr">;
2464 def STLXRB : StoreExclusive<0b00, 0, 0, 0, 1, GPR32, "stlxrb">;
2465 def STLXRH : StoreExclusive<0b01, 0, 0, 0, 1, GPR32, "stlxrh">;
2467 def STXRW : StoreExclusive<0b10, 0, 0, 0, 0, GPR32, "stxr">;
2468 def STXRX : StoreExclusive<0b11, 0, 0, 0, 0, GPR64, "stxr">;
2469 def STXRB : StoreExclusive<0b00, 0, 0, 0, 0, GPR32, "stxrb">;
2470 def STXRH : StoreExclusive<0b01, 0, 0, 0, 0, GPR32, "stxrh">;
2472 def LDAXPW : LoadExclusivePair<0b10, 0, 1, 1, 1, GPR32, "ldaxp">;
2473 def LDAXPX : LoadExclusivePair<0b11, 0, 1, 1, 1, GPR64, "ldaxp">;
2475 def LDXPW : LoadExclusivePair<0b10, 0, 1, 1, 0, GPR32, "ldxp">;
2476 def LDXPX : LoadExclusivePair<0b11, 0, 1, 1, 0, GPR64, "ldxp">;
2478 def STLXPW : StoreExclusivePair<0b10, 0, 0, 1, 1, GPR32, "stlxp">;
2479 def STLXPX : StoreExclusivePair<0b11, 0, 0, 1, 1, GPR64, "stlxp">;
2481 def STXPW : StoreExclusivePair<0b10, 0, 0, 1, 0, GPR32, "stxp">;
2482 def STXPX : StoreExclusivePair<0b11, 0, 0, 1, 0, GPR64, "stxp">;
2484 let Predicates = [HasV8_1a] in {
2485 // v8.1a "Limited Order Region" extension load-acquire instructions
2486 def LDLARW : LoadAcquire <0b10, 1, 1, 0, 0, GPR32, "ldlar">;
2487 def LDLARX : LoadAcquire <0b11, 1, 1, 0, 0, GPR64, "ldlar">;
2488 def LDLARB : LoadAcquire <0b00, 1, 1, 0, 0, GPR32, "ldlarb">;
2489 def LDLARH : LoadAcquire <0b01, 1, 1, 0, 0, GPR32, "ldlarh">;
2491 // v8.1a "Limited Order Region" extension store-release instructions
2492 def STLLRW : StoreRelease <0b10, 1, 0, 0, 0, GPR32, "stllr">;
2493 def STLLRX : StoreRelease <0b11, 1, 0, 0, 0, GPR64, "stllr">;
2494 def STLLRB : StoreRelease <0b00, 1, 0, 0, 0, GPR32, "stllrb">;
2495 def STLLRH : StoreRelease <0b01, 1, 0, 0, 0, GPR32, "stllrh">;
2498 //===----------------------------------------------------------------------===//
2499 // Scaled floating point to integer conversion instructions.
2500 //===----------------------------------------------------------------------===//
2502 defm FCVTAS : FPToIntegerUnscaled<0b00, 0b100, "fcvtas", int_aarch64_neon_fcvtas>;
2503 defm FCVTAU : FPToIntegerUnscaled<0b00, 0b101, "fcvtau", int_aarch64_neon_fcvtau>;
2504 defm FCVTMS : FPToIntegerUnscaled<0b10, 0b000, "fcvtms", int_aarch64_neon_fcvtms>;
2505 defm FCVTMU : FPToIntegerUnscaled<0b10, 0b001, "fcvtmu", int_aarch64_neon_fcvtmu>;
2506 defm FCVTNS : FPToIntegerUnscaled<0b00, 0b000, "fcvtns", int_aarch64_neon_fcvtns>;
2507 defm FCVTNU : FPToIntegerUnscaled<0b00, 0b001, "fcvtnu", int_aarch64_neon_fcvtnu>;
2508 defm FCVTPS : FPToIntegerUnscaled<0b01, 0b000, "fcvtps", int_aarch64_neon_fcvtps>;
2509 defm FCVTPU : FPToIntegerUnscaled<0b01, 0b001, "fcvtpu", int_aarch64_neon_fcvtpu>;
2510 defm FCVTZS : FPToIntegerUnscaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
2511 defm FCVTZU : FPToIntegerUnscaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
2512 defm FCVTZS : FPToIntegerScaled<0b11, 0b000, "fcvtzs", fp_to_sint>;
2513 defm FCVTZU : FPToIntegerScaled<0b11, 0b001, "fcvtzu", fp_to_uint>;
2515 multiclass FPToIntegerIntPats<Intrinsic round, string INST> {
2516 def : Pat<(i32 (round f16:$Rn)), (!cast<Instruction>(INST # UWHr) $Rn)>;
2517 def : Pat<(i64 (round f16:$Rn)), (!cast<Instruction>(INST # UXHr) $Rn)>;
2518 def : Pat<(i32 (round f32:$Rn)), (!cast<Instruction>(INST # UWSr) $Rn)>;
2519 def : Pat<(i64 (round f32:$Rn)), (!cast<Instruction>(INST # UXSr) $Rn)>;
2520 def : Pat<(i32 (round f64:$Rn)), (!cast<Instruction>(INST # UWDr) $Rn)>;
2521 def : Pat<(i64 (round f64:$Rn)), (!cast<Instruction>(INST # UXDr) $Rn)>;
2523 def : Pat<(i32 (round (fmul f16:$Rn, fixedpoint_f16_i32:$scale))),
2524 (!cast<Instruction>(INST # SWHri) $Rn, $scale)>;
2525 def : Pat<(i64 (round (fmul f16:$Rn, fixedpoint_f16_i64:$scale))),
2526 (!cast<Instruction>(INST # SXHri) $Rn, $scale)>;
2527 def : Pat<(i32 (round (fmul f32:$Rn, fixedpoint_f32_i32:$scale))),
2528 (!cast<Instruction>(INST # SWSri) $Rn, $scale)>;
2529 def : Pat<(i64 (round (fmul f32:$Rn, fixedpoint_f32_i64:$scale))),
2530 (!cast<Instruction>(INST # SXSri) $Rn, $scale)>;
2531 def : Pat<(i32 (round (fmul f64:$Rn, fixedpoint_f64_i32:$scale))),
2532 (!cast<Instruction>(INST # SWDri) $Rn, $scale)>;
2533 def : Pat<(i64 (round (fmul f64:$Rn, fixedpoint_f64_i64:$scale))),
2534 (!cast<Instruction>(INST # SXDri) $Rn, $scale)>;
2537 defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzs, "FCVTZS">;
2538 defm : FPToIntegerIntPats<int_aarch64_neon_fcvtzu, "FCVTZU">;
2540 multiclass FPToIntegerPats<SDNode to_int, SDNode round, string INST> {
2541 def : Pat<(i32 (to_int (round f32:$Rn))),
2542 (!cast<Instruction>(INST # UWSr) f32:$Rn)>;
2543 def : Pat<(i64 (to_int (round f32:$Rn))),
2544 (!cast<Instruction>(INST # UXSr) f32:$Rn)>;
2545 def : Pat<(i32 (to_int (round f64:$Rn))),
2546 (!cast<Instruction>(INST # UWDr) f64:$Rn)>;
2547 def : Pat<(i64 (to_int (round f64:$Rn))),
2548 (!cast<Instruction>(INST # UXDr) f64:$Rn)>;
2551 defm : FPToIntegerPats<fp_to_sint, fceil, "FCVTPS">;
2552 defm : FPToIntegerPats<fp_to_uint, fceil, "FCVTPU">;
2553 defm : FPToIntegerPats<fp_to_sint, ffloor, "FCVTMS">;
2554 defm : FPToIntegerPats<fp_to_uint, ffloor, "FCVTMU">;
2555 defm : FPToIntegerPats<fp_to_sint, ftrunc, "FCVTZS">;
2556 defm : FPToIntegerPats<fp_to_uint, ftrunc, "FCVTZU">;
2557 defm : FPToIntegerPats<fp_to_sint, fround, "FCVTAS">;
2558 defm : FPToIntegerPats<fp_to_uint, fround, "FCVTAU">;
2560 //===----------------------------------------------------------------------===//
2561 // Scaled integer to floating point conversion instructions.
2562 //===----------------------------------------------------------------------===//
2564 defm SCVTF : IntegerToFP<0, "scvtf", sint_to_fp>;
2565 defm UCVTF : IntegerToFP<1, "ucvtf", uint_to_fp>;
2567 //===----------------------------------------------------------------------===//
2568 // Unscaled integer to floating point conversion instruction.
2569 //===----------------------------------------------------------------------===//
2571 defm FMOV : UnscaledConversion<"fmov">;
2573 // Add pseudo ops for FMOV 0 so we can mark them as isReMaterializable
2574 let isReMaterializable = 1, isCodeGenOnly = 1, isAsCheapAsAMove = 1 in {
2575 def FMOVS0 : Pseudo<(outs FPR32:$Rd), (ins), [(set f32:$Rd, (fpimm0))]>,
2577 def FMOVD0 : Pseudo<(outs FPR64:$Rd), (ins), [(set f64:$Rd, (fpimm0))]>,
2581 //===----------------------------------------------------------------------===//
2582 // Floating point conversion instruction.
2583 //===----------------------------------------------------------------------===//
2585 defm FCVT : FPConversion<"fcvt">;
2587 //===----------------------------------------------------------------------===//
2588 // Floating point single operand instructions.
2589 //===----------------------------------------------------------------------===//
2591 defm FABS : SingleOperandFPData<0b0001, "fabs", fabs>;
2592 defm FMOV : SingleOperandFPData<0b0000, "fmov">;
2593 defm FNEG : SingleOperandFPData<0b0010, "fneg", fneg>;
2594 defm FRINTA : SingleOperandFPData<0b1100, "frinta", fround>;
2595 defm FRINTI : SingleOperandFPData<0b1111, "frinti", fnearbyint>;
2596 defm FRINTM : SingleOperandFPData<0b1010, "frintm", ffloor>;
2597 defm FRINTN : SingleOperandFPData<0b1000, "frintn", int_aarch64_neon_frintn>;
2598 defm FRINTP : SingleOperandFPData<0b1001, "frintp", fceil>;
2600 def : Pat<(v1f64 (int_aarch64_neon_frintn (v1f64 FPR64:$Rn))),
2601 (FRINTNDr FPR64:$Rn)>;
2603 defm FRINTX : SingleOperandFPData<0b1110, "frintx", frint>;
2604 defm FRINTZ : SingleOperandFPData<0b1011, "frintz", ftrunc>;
2606 let SchedRW = [WriteFDiv] in {
2607 defm FSQRT : SingleOperandFPData<0b0011, "fsqrt", fsqrt>;
2610 //===----------------------------------------------------------------------===//
2611 // Floating point two operand instructions.
2612 //===----------------------------------------------------------------------===//
2614 defm FADD : TwoOperandFPData<0b0010, "fadd", fadd>;
2615 let SchedRW = [WriteFDiv] in {
2616 defm FDIV : TwoOperandFPData<0b0001, "fdiv", fdiv>;
2618 defm FMAXNM : TwoOperandFPData<0b0110, "fmaxnm", fmaxnum>;
2619 defm FMAX : TwoOperandFPData<0b0100, "fmax", fmaxnan>;
2620 defm FMINNM : TwoOperandFPData<0b0111, "fminnm", fminnum>;
2621 defm FMIN : TwoOperandFPData<0b0101, "fmin", fminnan>;
2622 let SchedRW = [WriteFMul] in {
2623 defm FMUL : TwoOperandFPData<0b0000, "fmul", fmul>;
2624 defm FNMUL : TwoOperandFPDataNeg<0b1000, "fnmul", fmul>;
2626 defm FSUB : TwoOperandFPData<0b0011, "fsub", fsub>;
2628 def : Pat<(v1f64 (fmaxnan (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2629 (FMAXDrr FPR64:$Rn, FPR64:$Rm)>;
2630 def : Pat<(v1f64 (fminnan (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2631 (FMINDrr FPR64:$Rn, FPR64:$Rm)>;
2632 def : Pat<(v1f64 (fmaxnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2633 (FMAXNMDrr FPR64:$Rn, FPR64:$Rm)>;
2634 def : Pat<(v1f64 (fminnum (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
2635 (FMINNMDrr FPR64:$Rn, FPR64:$Rm)>;
2637 //===----------------------------------------------------------------------===//
2638 // Floating point three operand instructions.
2639 //===----------------------------------------------------------------------===//
2641 defm FMADD : ThreeOperandFPData<0, 0, "fmadd", fma>;
2642 defm FMSUB : ThreeOperandFPData<0, 1, "fmsub",
2643 TriOpFrag<(fma node:$LHS, (fneg node:$MHS), node:$RHS)> >;
2644 defm FNMADD : ThreeOperandFPData<1, 0, "fnmadd",
2645 TriOpFrag<(fneg (fma node:$LHS, node:$MHS, node:$RHS))> >;
2646 defm FNMSUB : ThreeOperandFPData<1, 1, "fnmsub",
2647 TriOpFrag<(fma node:$LHS, node:$MHS, (fneg node:$RHS))> >;
2649 // The following def pats catch the case where the LHS of an FMA is negated.
2650 // The TriOpFrag above catches the case where the middle operand is negated.
2652 // N.b. FMSUB etc have the accumulator at the *end* of (outs), unlike
2653 // the NEON variant.
2654 def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, FPR32:$Ra)),
2655 (FMSUBSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
2657 def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, FPR64:$Ra)),
2658 (FMSUBDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
2660 // We handled -(a + b*c) for FNMADD above, now it's time for "(-a) + (-b)*c" and
2662 def : Pat<(f32 (fma (fneg FPR32:$Rn), FPR32:$Rm, (fneg FPR32:$Ra))),
2663 (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
2665 def : Pat<(f64 (fma (fneg FPR64:$Rn), FPR64:$Rm, (fneg FPR64:$Ra))),
2666 (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
2668 def : Pat<(f32 (fma FPR32:$Rn, (fneg FPR32:$Rm), (fneg FPR32:$Ra))),
2669 (FNMADDSrrr FPR32:$Rn, FPR32:$Rm, FPR32:$Ra)>;
2671 def : Pat<(f64 (fma FPR64:$Rn, (fneg FPR64:$Rm), (fneg FPR64:$Ra))),
2672 (FNMADDDrrr FPR64:$Rn, FPR64:$Rm, FPR64:$Ra)>;
2674 //===----------------------------------------------------------------------===//
2675 // Floating point comparison instructions.
2676 //===----------------------------------------------------------------------===//
2678 defm FCMPE : FPComparison<1, "fcmpe">;
2679 defm FCMP : FPComparison<0, "fcmp", AArch64fcmp>;
2681 //===----------------------------------------------------------------------===//
2682 // Floating point conditional comparison instructions.
2683 //===----------------------------------------------------------------------===//
2685 defm FCCMPE : FPCondComparison<1, "fccmpe">;
2686 defm FCCMP : FPCondComparison<0, "fccmp", AArch64fccmp>;
2688 //===----------------------------------------------------------------------===//
2689 // Floating point conditional select instruction.
2690 //===----------------------------------------------------------------------===//
2692 defm FCSEL : FPCondSelect<"fcsel">;
2694 // CSEL instructions providing f128 types need to be handled by a
2695 // pseudo-instruction since the eventual code will need to introduce basic
2696 // blocks and control flow.
2697 def F128CSEL : Pseudo<(outs FPR128:$Rd),
2698 (ins FPR128:$Rn, FPR128:$Rm, ccode:$cond),
2699 [(set (f128 FPR128:$Rd),
2700 (AArch64csel FPR128:$Rn, FPR128:$Rm,
2701 (i32 imm:$cond), NZCV))]> {
2703 let usesCustomInserter = 1;
2704 let hasNoSchedulingInfo = 1;
2708 //===----------------------------------------------------------------------===//
2709 // Floating point immediate move.
2710 //===----------------------------------------------------------------------===//
2712 let isReMaterializable = 1 in {
2713 defm FMOV : FPMoveImmediate<"fmov">;
2716 //===----------------------------------------------------------------------===//
2717 // Advanced SIMD two vector instructions.
2718 //===----------------------------------------------------------------------===//
2720 defm UABDL : SIMDLongThreeVectorBHSabdl<1, 0b0111, "uabdl",
2721 int_aarch64_neon_uabd>;
2722 // Match UABDL in log2-shuffle patterns.
2723 def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
2724 (v8i16 (add (sub (zext (v8i8 V64:$opA)),
2725 (zext (v8i8 V64:$opB))),
2726 (AArch64vashr v8i16:$src, (i32 15))))),
2727 (UABDLv8i8_v8i16 V64:$opA, V64:$opB)>;
2728 def : Pat<(xor (v8i16 (AArch64vashr v8i16:$src, (i32 15))),
2729 (v8i16 (add (sub (zext (extract_high_v16i8 V128:$opA)),
2730 (zext (extract_high_v16i8 V128:$opB))),
2731 (AArch64vashr v8i16:$src, (i32 15))))),
2732 (UABDLv16i8_v8i16 V128:$opA, V128:$opB)>;
2733 def : Pat<(xor (v4i32 (AArch64vashr v4i32:$src, (i32 31))),
2734 (v4i32 (add (sub (zext (v4i16 V64:$opA)),
2735 (zext (v4i16 V64:$opB))),
2736 (AArch64vashr v4i32:$src, (i32 31))))),
2737 (UABDLv4i16_v4i32 V64:$opA, V64:$opB)>;
2738 def : Pat<(xor (v4i32 (AArch64vashr v4i32:$src, (i32 31))),
2739 (v4i32 (add (sub (zext (extract_high_v8i16 V128:$opA)),
2740 (zext (extract_high_v8i16 V128:$opB))),
2741 (AArch64vashr v4i32:$src, (i32 31))))),
2742 (UABDLv8i16_v4i32 V128:$opA, V128:$opB)>;
2743 def : Pat<(xor (v2i64 (AArch64vashr v2i64:$src, (i32 63))),
2744 (v2i64 (add (sub (zext (v2i32 V64:$opA)),
2745 (zext (v2i32 V64:$opB))),
2746 (AArch64vashr v2i64:$src, (i32 63))))),
2747 (UABDLv2i32_v2i64 V64:$opA, V64:$opB)>;
2748 def : Pat<(xor (v2i64 (AArch64vashr v2i64:$src, (i32 63))),
2749 (v2i64 (add (sub (zext (extract_high_v4i32 V128:$opA)),
2750 (zext (extract_high_v4i32 V128:$opB))),
2751 (AArch64vashr v2i64:$src, (i32 63))))),
2752 (UABDLv4i32_v2i64 V128:$opA, V128:$opB)>;
2754 defm ABS : SIMDTwoVectorBHSD<0, 0b01011, "abs", int_aarch64_neon_abs>;
2755 def : Pat<(xor (v8i8 (AArch64vashr V64:$src, (i32 7))),
2756 (v8i8 (add V64:$src, (AArch64vashr V64:$src, (i32 7))))),
2757 (ABSv8i8 V64:$src)>;
2758 def : Pat<(xor (v4i16 (AArch64vashr V64:$src, (i32 15))),
2759 (v4i16 (add V64:$src, (AArch64vashr V64:$src, (i32 15))))),
2760 (ABSv4i16 V64:$src)>;
2761 def : Pat<(xor (v2i32 (AArch64vashr V64:$src, (i32 31))),
2762 (v2i32 (add V64:$src, (AArch64vashr V64:$src, (i32 31))))),
2763 (ABSv2i32 V64:$src)>;
2764 def : Pat<(xor (v16i8 (AArch64vashr V128:$src, (i32 7))),
2765 (v16i8 (add V128:$src, (AArch64vashr V128:$src, (i32 7))))),
2766 (ABSv16i8 V128:$src)>;
2767 def : Pat<(xor (v8i16 (AArch64vashr V128:$src, (i32 15))),
2768 (v8i16 (add V128:$src, (AArch64vashr V128:$src, (i32 15))))),
2769 (ABSv8i16 V128:$src)>;
2770 def : Pat<(xor (v4i32 (AArch64vashr V128:$src, (i32 31))),
2771 (v4i32 (add V128:$src, (AArch64vashr V128:$src, (i32 31))))),
2772 (ABSv4i32 V128:$src)>;
2773 def : Pat<(xor (v2i64 (AArch64vashr V128:$src, (i32 63))),
2774 (v2i64 (add V128:$src, (AArch64vashr V128:$src, (i32 63))))),
2775 (ABSv2i64 V128:$src)>;
2777 defm CLS : SIMDTwoVectorBHS<0, 0b00100, "cls", int_aarch64_neon_cls>;
2778 defm CLZ : SIMDTwoVectorBHS<1, 0b00100, "clz", ctlz>;
2779 defm CMEQ : SIMDCmpTwoVector<0, 0b01001, "cmeq", AArch64cmeqz>;
2780 defm CMGE : SIMDCmpTwoVector<1, 0b01000, "cmge", AArch64cmgez>;
2781 defm CMGT : SIMDCmpTwoVector<0, 0b01000, "cmgt", AArch64cmgtz>;
2782 defm CMLE : SIMDCmpTwoVector<1, 0b01001, "cmle", AArch64cmlez>;
2783 defm CMLT : SIMDCmpTwoVector<0, 0b01010, "cmlt", AArch64cmltz>;
2784 defm CNT : SIMDTwoVectorB<0, 0b00, 0b00101, "cnt", ctpop>;
2785 defm FABS : SIMDTwoVectorFP<0, 1, 0b01111, "fabs", fabs>;
2787 defm FCMEQ : SIMDFPCmpTwoVector<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
2788 defm FCMGE : SIMDFPCmpTwoVector<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
2789 defm FCMGT : SIMDFPCmpTwoVector<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
2790 defm FCMLE : SIMDFPCmpTwoVector<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
2791 defm FCMLT : SIMDFPCmpTwoVector<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
2792 defm FCVTAS : SIMDTwoVectorFPToInt<0,0,0b11100, "fcvtas",int_aarch64_neon_fcvtas>;
2793 defm FCVTAU : SIMDTwoVectorFPToInt<1,0,0b11100, "fcvtau",int_aarch64_neon_fcvtau>;
2794 defm FCVTL : SIMDFPWidenTwoVector<0, 0, 0b10111, "fcvtl">;
2795 def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (v4i16 V64:$Rn))),
2796 (FCVTLv4i16 V64:$Rn)>;
2797 def : Pat<(v4f32 (int_aarch64_neon_vcvthf2fp (extract_subvector (v8i16 V128:$Rn),
2799 (FCVTLv8i16 V128:$Rn)>;
2800 def : Pat<(v2f64 (fpextend (v2f32 V64:$Rn))), (FCVTLv2i32 V64:$Rn)>;
2801 def : Pat<(v2f64 (fpextend (v2f32 (extract_subvector (v4f32 V128:$Rn),
2803 (FCVTLv4i32 V128:$Rn)>;
2805 def : Pat<(v4f32 (fpextend (v4f16 V64:$Rn))), (FCVTLv4i16 V64:$Rn)>;
2806 def : Pat<(v4f32 (fpextend (v4f16 (extract_subvector (v8f16 V128:$Rn),
2808 (FCVTLv8i16 V128:$Rn)>;
2810 defm FCVTMS : SIMDTwoVectorFPToInt<0,0,0b11011, "fcvtms",int_aarch64_neon_fcvtms>;
2811 defm FCVTMU : SIMDTwoVectorFPToInt<1,0,0b11011, "fcvtmu",int_aarch64_neon_fcvtmu>;
2812 defm FCVTNS : SIMDTwoVectorFPToInt<0,0,0b11010, "fcvtns",int_aarch64_neon_fcvtns>;
2813 defm FCVTNU : SIMDTwoVectorFPToInt<1,0,0b11010, "fcvtnu",int_aarch64_neon_fcvtnu>;
2814 defm FCVTN : SIMDFPNarrowTwoVector<0, 0, 0b10110, "fcvtn">;
2815 def : Pat<(v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn))),
2816 (FCVTNv4i16 V128:$Rn)>;
2817 def : Pat<(concat_vectors V64:$Rd,
2818 (v4i16 (int_aarch64_neon_vcvtfp2hf (v4f32 V128:$Rn)))),
2819 (FCVTNv8i16 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
2820 def : Pat<(v2f32 (fpround (v2f64 V128:$Rn))), (FCVTNv2i32 V128:$Rn)>;
2821 def : Pat<(v4f16 (fpround (v4f32 V128:$Rn))), (FCVTNv4i16 V128:$Rn)>;
2822 def : Pat<(concat_vectors V64:$Rd, (v2f32 (fpround (v2f64 V128:$Rn)))),
2823 (FCVTNv4i32 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), V128:$Rn)>;
2824 defm FCVTPS : SIMDTwoVectorFPToInt<0,1,0b11010, "fcvtps",int_aarch64_neon_fcvtps>;
2825 defm FCVTPU : SIMDTwoVectorFPToInt<1,1,0b11010, "fcvtpu",int_aarch64_neon_fcvtpu>;
2826 defm FCVTXN : SIMDFPInexactCvtTwoVector<1, 0, 0b10110, "fcvtxn",
2827 int_aarch64_neon_fcvtxn>;
2828 defm FCVTZS : SIMDTwoVectorFPToInt<0, 1, 0b11011, "fcvtzs", fp_to_sint>;
2829 defm FCVTZU : SIMDTwoVectorFPToInt<1, 1, 0b11011, "fcvtzu", fp_to_uint>;
2831 def : Pat<(v4i16 (int_aarch64_neon_fcvtzs v4f16:$Rn)), (FCVTZSv4f16 $Rn)>;
2832 def : Pat<(v8i16 (int_aarch64_neon_fcvtzs v8f16:$Rn)), (FCVTZSv8f16 $Rn)>;
2833 def : Pat<(v2i32 (int_aarch64_neon_fcvtzs v2f32:$Rn)), (FCVTZSv2f32 $Rn)>;
2834 def : Pat<(v4i32 (int_aarch64_neon_fcvtzs v4f32:$Rn)), (FCVTZSv4f32 $Rn)>;
2835 def : Pat<(v2i64 (int_aarch64_neon_fcvtzs v2f64:$Rn)), (FCVTZSv2f64 $Rn)>;
2837 def : Pat<(v4i16 (int_aarch64_neon_fcvtzu v4f16:$Rn)), (FCVTZUv4f16 $Rn)>;
2838 def : Pat<(v8i16 (int_aarch64_neon_fcvtzu v8f16:$Rn)), (FCVTZUv8f16 $Rn)>;
2839 def : Pat<(v2i32 (int_aarch64_neon_fcvtzu v2f32:$Rn)), (FCVTZUv2f32 $Rn)>;
2840 def : Pat<(v4i32 (int_aarch64_neon_fcvtzu v4f32:$Rn)), (FCVTZUv4f32 $Rn)>;
2841 def : Pat<(v2i64 (int_aarch64_neon_fcvtzu v2f64:$Rn)), (FCVTZUv2f64 $Rn)>;
2843 defm FNEG : SIMDTwoVectorFP<1, 1, 0b01111, "fneg", fneg>;
2844 defm FRECPE : SIMDTwoVectorFP<0, 1, 0b11101, "frecpe", int_aarch64_neon_frecpe>;
2845 defm FRINTA : SIMDTwoVectorFP<1, 0, 0b11000, "frinta", fround>;
2846 defm FRINTI : SIMDTwoVectorFP<1, 1, 0b11001, "frinti", fnearbyint>;
2847 defm FRINTM : SIMDTwoVectorFP<0, 0, 0b11001, "frintm", ffloor>;
2848 defm FRINTN : SIMDTwoVectorFP<0, 0, 0b11000, "frintn", int_aarch64_neon_frintn>;
2849 defm FRINTP : SIMDTwoVectorFP<0, 1, 0b11000, "frintp", fceil>;
2850 defm FRINTX : SIMDTwoVectorFP<1, 0, 0b11001, "frintx", frint>;
2851 defm FRINTZ : SIMDTwoVectorFP<0, 1, 0b11001, "frintz", ftrunc>;
2852 defm FRSQRTE: SIMDTwoVectorFP<1, 1, 0b11101, "frsqrte", int_aarch64_neon_frsqrte>;
2853 defm FSQRT : SIMDTwoVectorFP<1, 1, 0b11111, "fsqrt", fsqrt>;
2854 defm NEG : SIMDTwoVectorBHSD<1, 0b01011, "neg",
2855 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
2856 defm NOT : SIMDTwoVectorB<1, 0b00, 0b00101, "not", vnot>;
2857 // Aliases for MVN -> NOT.
2858 def : InstAlias<"mvn{ $Vd.8b, $Vn.8b|.8b $Vd, $Vn}",
2859 (NOTv8i8 V64:$Vd, V64:$Vn)>;
2860 def : InstAlias<"mvn{ $Vd.16b, $Vn.16b|.16b $Vd, $Vn}",
2861 (NOTv16i8 V128:$Vd, V128:$Vn)>;
2863 def : Pat<(AArch64neg (v8i8 V64:$Rn)), (NEGv8i8 V64:$Rn)>;
2864 def : Pat<(AArch64neg (v16i8 V128:$Rn)), (NEGv16i8 V128:$Rn)>;
2865 def : Pat<(AArch64neg (v4i16 V64:$Rn)), (NEGv4i16 V64:$Rn)>;
2866 def : Pat<(AArch64neg (v8i16 V128:$Rn)), (NEGv8i16 V128:$Rn)>;
2867 def : Pat<(AArch64neg (v2i32 V64:$Rn)), (NEGv2i32 V64:$Rn)>;
2868 def : Pat<(AArch64neg (v4i32 V128:$Rn)), (NEGv4i32 V128:$Rn)>;
2869 def : Pat<(AArch64neg (v2i64 V128:$Rn)), (NEGv2i64 V128:$Rn)>;
2871 def : Pat<(AArch64not (v8i8 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2872 def : Pat<(AArch64not (v16i8 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2873 def : Pat<(AArch64not (v4i16 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2874 def : Pat<(AArch64not (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2875 def : Pat<(AArch64not (v2i32 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2876 def : Pat<(AArch64not (v1i64 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2877 def : Pat<(AArch64not (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2878 def : Pat<(AArch64not (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2880 def : Pat<(vnot (v4i16 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2881 def : Pat<(vnot (v8i16 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2882 def : Pat<(vnot (v2i32 V64:$Rn)), (NOTv8i8 V64:$Rn)>;
2883 def : Pat<(vnot (v4i32 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2884 def : Pat<(vnot (v2i64 V128:$Rn)), (NOTv16i8 V128:$Rn)>;
2886 defm RBIT : SIMDTwoVectorB<1, 0b01, 0b00101, "rbit", int_aarch64_neon_rbit>;
2887 defm REV16 : SIMDTwoVectorB<0, 0b00, 0b00001, "rev16", AArch64rev16>;
2888 defm REV32 : SIMDTwoVectorBH<1, 0b00000, "rev32", AArch64rev32>;
2889 defm REV64 : SIMDTwoVectorBHS<0, 0b00000, "rev64", AArch64rev64>;
2890 defm SADALP : SIMDLongTwoVectorTied<0, 0b00110, "sadalp",
2891 BinOpFrag<(add node:$LHS, (int_aarch64_neon_saddlp node:$RHS))> >;
2892 defm SADDLP : SIMDLongTwoVector<0, 0b00010, "saddlp", int_aarch64_neon_saddlp>;
2893 defm SCVTF : SIMDTwoVectorIntToFP<0, 0, 0b11101, "scvtf", sint_to_fp>;
2894 defm SHLL : SIMDVectorLShiftLongBySizeBHS;
2895 defm SQABS : SIMDTwoVectorBHSD<0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
2896 defm SQNEG : SIMDTwoVectorBHSD<1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
2897 defm SQXTN : SIMDMixedTwoVector<0, 0b10100, "sqxtn", int_aarch64_neon_sqxtn>;
2898 defm SQXTUN : SIMDMixedTwoVector<1, 0b10010, "sqxtun", int_aarch64_neon_sqxtun>;
2899 defm SUQADD : SIMDTwoVectorBHSDTied<0, 0b00011, "suqadd",int_aarch64_neon_suqadd>;
2900 defm UADALP : SIMDLongTwoVectorTied<1, 0b00110, "uadalp",
2901 BinOpFrag<(add node:$LHS, (int_aarch64_neon_uaddlp node:$RHS))> >;
2902 defm UADDLP : SIMDLongTwoVector<1, 0b00010, "uaddlp",
2903 int_aarch64_neon_uaddlp>;
2904 defm UCVTF : SIMDTwoVectorIntToFP<1, 0, 0b11101, "ucvtf", uint_to_fp>;
2905 defm UQXTN : SIMDMixedTwoVector<1, 0b10100, "uqxtn", int_aarch64_neon_uqxtn>;
2906 defm URECPE : SIMDTwoVectorS<0, 1, 0b11100, "urecpe", int_aarch64_neon_urecpe>;
2907 defm URSQRTE: SIMDTwoVectorS<1, 1, 0b11100, "ursqrte", int_aarch64_neon_ursqrte>;
2908 defm USQADD : SIMDTwoVectorBHSDTied<1, 0b00011, "usqadd",int_aarch64_neon_usqadd>;
2909 defm XTN : SIMDMixedTwoVector<0, 0b10010, "xtn", trunc>;
2911 def : Pat<(v4f16 (AArch64rev32 V64:$Rn)), (REV32v4i16 V64:$Rn)>;
2912 def : Pat<(v4f16 (AArch64rev64 V64:$Rn)), (REV64v4i16 V64:$Rn)>;
2913 def : Pat<(v8f16 (AArch64rev32 V128:$Rn)), (REV32v8i16 V128:$Rn)>;
2914 def : Pat<(v8f16 (AArch64rev64 V128:$Rn)), (REV64v8i16 V128:$Rn)>;
2915 def : Pat<(v2f32 (AArch64rev64 V64:$Rn)), (REV64v2i32 V64:$Rn)>;
2916 def : Pat<(v4f32 (AArch64rev64 V128:$Rn)), (REV64v4i32 V128:$Rn)>;
2918 // Patterns for vector long shift (by element width). These need to match all
2919 // three of zext, sext and anyext so it's easier to pull the patterns out of the
2921 multiclass SIMDVectorLShiftLongBySizeBHSPats<SDPatternOperator ext> {
2922 def : Pat<(AArch64vshl (v8i16 (ext (v8i8 V64:$Rn))), (i32 8)),
2923 (SHLLv8i8 V64:$Rn)>;
2924 def : Pat<(AArch64vshl (v8i16 (ext (extract_high_v16i8 V128:$Rn))), (i32 8)),
2925 (SHLLv16i8 V128:$Rn)>;
2926 def : Pat<(AArch64vshl (v4i32 (ext (v4i16 V64:$Rn))), (i32 16)),
2927 (SHLLv4i16 V64:$Rn)>;
2928 def : Pat<(AArch64vshl (v4i32 (ext (extract_high_v8i16 V128:$Rn))), (i32 16)),
2929 (SHLLv8i16 V128:$Rn)>;
2930 def : Pat<(AArch64vshl (v2i64 (ext (v2i32 V64:$Rn))), (i32 32)),
2931 (SHLLv2i32 V64:$Rn)>;
2932 def : Pat<(AArch64vshl (v2i64 (ext (extract_high_v4i32 V128:$Rn))), (i32 32)),
2933 (SHLLv4i32 V128:$Rn)>;
2936 defm : SIMDVectorLShiftLongBySizeBHSPats<anyext>;
2937 defm : SIMDVectorLShiftLongBySizeBHSPats<zext>;
2938 defm : SIMDVectorLShiftLongBySizeBHSPats<sext>;
2940 //===----------------------------------------------------------------------===//
2941 // Advanced SIMD three vector instructions.
2942 //===----------------------------------------------------------------------===//
2944 defm ADD : SIMDThreeSameVector<0, 0b10000, "add", add>;
2945 defm ADDP : SIMDThreeSameVector<0, 0b10111, "addp", int_aarch64_neon_addp>;
2946 defm CMEQ : SIMDThreeSameVector<1, 0b10001, "cmeq", AArch64cmeq>;
2947 defm CMGE : SIMDThreeSameVector<0, 0b00111, "cmge", AArch64cmge>;
2948 defm CMGT : SIMDThreeSameVector<0, 0b00110, "cmgt", AArch64cmgt>;
2949 defm CMHI : SIMDThreeSameVector<1, 0b00110, "cmhi", AArch64cmhi>;
2950 defm CMHS : SIMDThreeSameVector<1, 0b00111, "cmhs", AArch64cmhs>;
2951 defm CMTST : SIMDThreeSameVector<0, 0b10001, "cmtst", AArch64cmtst>;
2952 defm FABD : SIMDThreeSameVectorFP<1,1,0b010,"fabd", int_aarch64_neon_fabd>;
2953 defm FACGE : SIMDThreeSameVectorFPCmp<1,0,0b101,"facge",int_aarch64_neon_facge>;
2954 defm FACGT : SIMDThreeSameVectorFPCmp<1,1,0b101,"facgt",int_aarch64_neon_facgt>;
2955 defm FADDP : SIMDThreeSameVectorFP<1,0,0b010,"faddp",int_aarch64_neon_addp>;
2956 defm FADD : SIMDThreeSameVectorFP<0,0,0b010,"fadd", fadd>;
2957 defm FCMEQ : SIMDThreeSameVectorFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
2958 defm FCMGE : SIMDThreeSameVectorFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
2959 defm FCMGT : SIMDThreeSameVectorFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
2960 defm FDIV : SIMDThreeSameVectorFP<1,0,0b111,"fdiv", fdiv>;
2961 defm FMAXNMP : SIMDThreeSameVectorFP<1,0,0b000,"fmaxnmp", int_aarch64_neon_fmaxnmp>;
2962 defm FMAXNM : SIMDThreeSameVectorFP<0,0,0b000,"fmaxnm", fmaxnum>;
2963 defm FMAXP : SIMDThreeSameVectorFP<1,0,0b110,"fmaxp", int_aarch64_neon_fmaxp>;
2964 defm FMAX : SIMDThreeSameVectorFP<0,0,0b110,"fmax", fmaxnan>;
2965 defm FMINNMP : SIMDThreeSameVectorFP<1,1,0b000,"fminnmp", int_aarch64_neon_fminnmp>;
2966 defm FMINNM : SIMDThreeSameVectorFP<0,1,0b000,"fminnm", fminnum>;
2967 defm FMINP : SIMDThreeSameVectorFP<1,1,0b110,"fminp", int_aarch64_neon_fminp>;
2968 defm FMIN : SIMDThreeSameVectorFP<0,1,0b110,"fmin", fminnan>;
2970 // NOTE: The operands of the PatFrag are reordered on FMLA/FMLS because the
2971 // instruction expects the addend first, while the fma intrinsic puts it last.
2972 defm FMLA : SIMDThreeSameVectorFPTied<0, 0, 0b001, "fmla",
2973 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
2974 defm FMLS : SIMDThreeSameVectorFPTied<0, 1, 0b001, "fmls",
2975 TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
2977 // The following def pats catch the case where the LHS of an FMA is negated.
2978 // The TriOpFrag above catches the case where the middle operand is negated.
2979 def : Pat<(v2f32 (fma (fneg V64:$Rn), V64:$Rm, V64:$Rd)),
2980 (FMLSv2f32 V64:$Rd, V64:$Rn, V64:$Rm)>;
2982 def : Pat<(v4f32 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
2983 (FMLSv4f32 V128:$Rd, V128:$Rn, V128:$Rm)>;
2985 def : Pat<(v2f64 (fma (fneg V128:$Rn), V128:$Rm, V128:$Rd)),
2986 (FMLSv2f64 V128:$Rd, V128:$Rn, V128:$Rm)>;
2988 defm FMULX : SIMDThreeSameVectorFP<0,0,0b011,"fmulx", int_aarch64_neon_fmulx>;
2989 defm FMUL : SIMDThreeSameVectorFP<1,0,0b011,"fmul", fmul>;
2990 defm FRECPS : SIMDThreeSameVectorFP<0,0,0b111,"frecps", int_aarch64_neon_frecps>;
2991 defm FRSQRTS : SIMDThreeSameVectorFP<0,1,0b111,"frsqrts", int_aarch64_neon_frsqrts>;
2992 defm FSUB : SIMDThreeSameVectorFP<0,1,0b010,"fsub", fsub>;
2993 defm MLA : SIMDThreeSameVectorBHSTied<0, 0b10010, "mla",
2994 TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))> >;
2995 defm MLS : SIMDThreeSameVectorBHSTied<1, 0b10010, "mls",
2996 TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))> >;
2997 defm MUL : SIMDThreeSameVectorBHS<0, 0b10011, "mul", mul>;
2998 defm PMUL : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
2999 defm SABA : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
3000 TriOpFrag<(add node:$LHS, (int_aarch64_neon_sabd node:$MHS, node:$RHS))> >;
3001 defm SABD : SIMDThreeSameVectorBHS<0,0b01110,"sabd", int_aarch64_neon_sabd>;
3002 defm SHADD : SIMDThreeSameVectorBHS<0,0b00000,"shadd", int_aarch64_neon_shadd>;
3003 defm SHSUB : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
3004 defm SMAXP : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
3005 defm SMAX : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
3006 defm SMINP : SIMDThreeSameVectorBHS<0,0b10101,"sminp", int_aarch64_neon_sminp>;
3007 defm SMIN : SIMDThreeSameVectorBHS<0,0b01101,"smin", smin>;
3008 defm SQADD : SIMDThreeSameVector<0,0b00001,"sqadd", int_aarch64_neon_sqadd>;
3009 defm SQDMULH : SIMDThreeSameVectorHS<0,0b10110,"sqdmulh",int_aarch64_neon_sqdmulh>;
3010 defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrdmulh>;
3011 defm SQRSHL : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
3012 defm SQSHL : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
3013 defm SQSUB : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
3014 defm SRHADD : SIMDThreeSameVectorBHS<0,0b00010,"srhadd",int_aarch64_neon_srhadd>;
3015 defm SRSHL : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
3016 defm SSHL : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
3017 defm SUB : SIMDThreeSameVector<1,0b10000,"sub", sub>;
3018 defm UABA : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
3019 TriOpFrag<(add node:$LHS, (int_aarch64_neon_uabd node:$MHS, node:$RHS))> >;
3020 defm UABD : SIMDThreeSameVectorBHS<1,0b01110,"uabd", int_aarch64_neon_uabd>;
3021 defm UHADD : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", int_aarch64_neon_uhadd>;
3022 defm UHSUB : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
3023 defm UMAXP : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
3024 defm UMAX : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
3025 defm UMINP : SIMDThreeSameVectorBHS<1,0b10101,"uminp", int_aarch64_neon_uminp>;
3026 defm UMIN : SIMDThreeSameVectorBHS<1,0b01101,"umin", umin>;
3027 defm UQADD : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
3028 defm UQRSHL : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
3029 defm UQSHL : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
3030 defm UQSUB : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
3031 defm URHADD : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", int_aarch64_neon_urhadd>;
3032 defm URSHL : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
3033 defm USHL : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
3034 defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
3035 int_aarch64_neon_sqadd>;
3036 defm SQRDMLSH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10001,"sqrdmlsh",
3037 int_aarch64_neon_sqsub>;
3039 defm AND : SIMDLogicalThreeVector<0, 0b00, "and", and>;
3040 defm BIC : SIMDLogicalThreeVector<0, 0b01, "bic",
3041 BinOpFrag<(and node:$LHS, (vnot node:$RHS))> >;
3042 defm BIF : SIMDLogicalThreeVector<1, 0b11, "bif">;
3043 defm BIT : SIMDLogicalThreeVectorTied<1, 0b10, "bit", AArch64bit>;
3044 defm BSL : SIMDLogicalThreeVectorTied<1, 0b01, "bsl",
3045 TriOpFrag<(or (and node:$LHS, node:$MHS), (and (vnot node:$LHS), node:$RHS))>>;
3046 defm EOR : SIMDLogicalThreeVector<1, 0b00, "eor", xor>;
3047 defm ORN : SIMDLogicalThreeVector<0, 0b11, "orn",
3048 BinOpFrag<(or node:$LHS, (vnot node:$RHS))> >;
3049 defm ORR : SIMDLogicalThreeVector<0, 0b10, "orr", or>;
3052 def : Pat<(AArch64bsl (v8i8 V64:$Rd), V64:$Rn, V64:$Rm),
3053 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
3054 def : Pat<(AArch64bsl (v4i16 V64:$Rd), V64:$Rn, V64:$Rm),
3055 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
3056 def : Pat<(AArch64bsl (v2i32 V64:$Rd), V64:$Rn, V64:$Rm),
3057 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
3058 def : Pat<(AArch64bsl (v1i64 V64:$Rd), V64:$Rn, V64:$Rm),
3059 (BSLv8i8 V64:$Rd, V64:$Rn, V64:$Rm)>;
3061 def : Pat<(AArch64bsl (v16i8 V128:$Rd), V128:$Rn, V128:$Rm),
3062 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
3063 def : Pat<(AArch64bsl (v8i16 V128:$Rd), V128:$Rn, V128:$Rm),
3064 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
3065 def : Pat<(AArch64bsl (v4i32 V128:$Rd), V128:$Rn, V128:$Rm),
3066 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
3067 def : Pat<(AArch64bsl (v2i64 V128:$Rd), V128:$Rn, V128:$Rm),
3068 (BSLv16i8 V128:$Rd, V128:$Rn, V128:$Rm)>;
3070 def : InstAlias<"mov{\t$dst.16b, $src.16b|.16b\t$dst, $src}",
3071 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 1>;
3072 def : InstAlias<"mov{\t$dst.8h, $src.8h|.8h\t$dst, $src}",
3073 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
3074 def : InstAlias<"mov{\t$dst.4s, $src.4s|.4s\t$dst, $src}",
3075 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
3076 def : InstAlias<"mov{\t$dst.2d, $src.2d|.2d\t$dst, $src}",
3077 (ORRv16i8 V128:$dst, V128:$src, V128:$src), 0>;
3079 def : InstAlias<"mov{\t$dst.8b, $src.8b|.8b\t$dst, $src}",
3080 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 1>;
3081 def : InstAlias<"mov{\t$dst.4h, $src.4h|.4h\t$dst, $src}",
3082 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
3083 def : InstAlias<"mov{\t$dst.2s, $src.2s|.2s\t$dst, $src}",
3084 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
3085 def : InstAlias<"mov{\t$dst.1d, $src.1d|.1d\t$dst, $src}",
3086 (ORRv8i8 V64:$dst, V64:$src, V64:$src), 0>;
3088 def : InstAlias<"{cmls\t$dst.8b, $src1.8b, $src2.8b" #
3089 "|cmls.8b\t$dst, $src1, $src2}",
3090 (CMHSv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
3091 def : InstAlias<"{cmls\t$dst.16b, $src1.16b, $src2.16b" #
3092 "|cmls.16b\t$dst, $src1, $src2}",
3093 (CMHSv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
3094 def : InstAlias<"{cmls\t$dst.4h, $src1.4h, $src2.4h" #
3095 "|cmls.4h\t$dst, $src1, $src2}",
3096 (CMHSv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
3097 def : InstAlias<"{cmls\t$dst.8h, $src1.8h, $src2.8h" #
3098 "|cmls.8h\t$dst, $src1, $src2}",
3099 (CMHSv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
3100 def : InstAlias<"{cmls\t$dst.2s, $src1.2s, $src2.2s" #
3101 "|cmls.2s\t$dst, $src1, $src2}",
3102 (CMHSv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
3103 def : InstAlias<"{cmls\t$dst.4s, $src1.4s, $src2.4s" #
3104 "|cmls.4s\t$dst, $src1, $src2}",
3105 (CMHSv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
3106 def : InstAlias<"{cmls\t$dst.2d, $src1.2d, $src2.2d" #
3107 "|cmls.2d\t$dst, $src1, $src2}",
3108 (CMHSv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
3110 def : InstAlias<"{cmlo\t$dst.8b, $src1.8b, $src2.8b" #
3111 "|cmlo.8b\t$dst, $src1, $src2}",
3112 (CMHIv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
3113 def : InstAlias<"{cmlo\t$dst.16b, $src1.16b, $src2.16b" #
3114 "|cmlo.16b\t$dst, $src1, $src2}",
3115 (CMHIv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
3116 def : InstAlias<"{cmlo\t$dst.4h, $src1.4h, $src2.4h" #
3117 "|cmlo.4h\t$dst, $src1, $src2}",
3118 (CMHIv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
3119 def : InstAlias<"{cmlo\t$dst.8h, $src1.8h, $src2.8h" #
3120 "|cmlo.8h\t$dst, $src1, $src2}",
3121 (CMHIv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
3122 def : InstAlias<"{cmlo\t$dst.2s, $src1.2s, $src2.2s" #
3123 "|cmlo.2s\t$dst, $src1, $src2}",
3124 (CMHIv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
3125 def : InstAlias<"{cmlo\t$dst.4s, $src1.4s, $src2.4s" #
3126 "|cmlo.4s\t$dst, $src1, $src2}",
3127 (CMHIv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
3128 def : InstAlias<"{cmlo\t$dst.2d, $src1.2d, $src2.2d" #
3129 "|cmlo.2d\t$dst, $src1, $src2}",
3130 (CMHIv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
3132 def : InstAlias<"{cmle\t$dst.8b, $src1.8b, $src2.8b" #
3133 "|cmle.8b\t$dst, $src1, $src2}",
3134 (CMGEv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
3135 def : InstAlias<"{cmle\t$dst.16b, $src1.16b, $src2.16b" #
3136 "|cmle.16b\t$dst, $src1, $src2}",
3137 (CMGEv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
3138 def : InstAlias<"{cmle\t$dst.4h, $src1.4h, $src2.4h" #
3139 "|cmle.4h\t$dst, $src1, $src2}",
3140 (CMGEv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
3141 def : InstAlias<"{cmle\t$dst.8h, $src1.8h, $src2.8h" #
3142 "|cmle.8h\t$dst, $src1, $src2}",
3143 (CMGEv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
3144 def : InstAlias<"{cmle\t$dst.2s, $src1.2s, $src2.2s" #
3145 "|cmle.2s\t$dst, $src1, $src2}",
3146 (CMGEv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
3147 def : InstAlias<"{cmle\t$dst.4s, $src1.4s, $src2.4s" #
3148 "|cmle.4s\t$dst, $src1, $src2}",
3149 (CMGEv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
3150 def : InstAlias<"{cmle\t$dst.2d, $src1.2d, $src2.2d" #
3151 "|cmle.2d\t$dst, $src1, $src2}",
3152 (CMGEv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
3154 def : InstAlias<"{cmlt\t$dst.8b, $src1.8b, $src2.8b" #
3155 "|cmlt.8b\t$dst, $src1, $src2}",
3156 (CMGTv8i8 V64:$dst, V64:$src2, V64:$src1), 0>;
3157 def : InstAlias<"{cmlt\t$dst.16b, $src1.16b, $src2.16b" #
3158 "|cmlt.16b\t$dst, $src1, $src2}",
3159 (CMGTv16i8 V128:$dst, V128:$src2, V128:$src1), 0>;
3160 def : InstAlias<"{cmlt\t$dst.4h, $src1.4h, $src2.4h" #
3161 "|cmlt.4h\t$dst, $src1, $src2}",
3162 (CMGTv4i16 V64:$dst, V64:$src2, V64:$src1), 0>;
3163 def : InstAlias<"{cmlt\t$dst.8h, $src1.8h, $src2.8h" #
3164 "|cmlt.8h\t$dst, $src1, $src2}",
3165 (CMGTv8i16 V128:$dst, V128:$src2, V128:$src1), 0>;
3166 def : InstAlias<"{cmlt\t$dst.2s, $src1.2s, $src2.2s" #
3167 "|cmlt.2s\t$dst, $src1, $src2}",
3168 (CMGTv2i32 V64:$dst, V64:$src2, V64:$src1), 0>;
3169 def : InstAlias<"{cmlt\t$dst.4s, $src1.4s, $src2.4s" #
3170 "|cmlt.4s\t$dst, $src1, $src2}",
3171 (CMGTv4i32 V128:$dst, V128:$src2, V128:$src1), 0>;
3172 def : InstAlias<"{cmlt\t$dst.2d, $src1.2d, $src2.2d" #
3173 "|cmlt.2d\t$dst, $src1, $src2}",
3174 (CMGTv2i64 V128:$dst, V128:$src2, V128:$src1), 0>;
3176 let Predicates = [HasNEON, HasFullFP16] in {
3177 def : InstAlias<"{fcmle\t$dst.4h, $src1.4h, $src2.4h" #
3178 "|fcmle.4h\t$dst, $src1, $src2}",
3179 (FCMGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
3180 def : InstAlias<"{fcmle\t$dst.8h, $src1.8h, $src2.8h" #
3181 "|fcmle.8h\t$dst, $src1, $src2}",
3182 (FCMGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
3184 def : InstAlias<"{fcmle\t$dst.2s, $src1.2s, $src2.2s" #
3185 "|fcmle.2s\t$dst, $src1, $src2}",
3186 (FCMGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
3187 def : InstAlias<"{fcmle\t$dst.4s, $src1.4s, $src2.4s" #
3188 "|fcmle.4s\t$dst, $src1, $src2}",
3189 (FCMGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
3190 def : InstAlias<"{fcmle\t$dst.2d, $src1.2d, $src2.2d" #
3191 "|fcmle.2d\t$dst, $src1, $src2}",
3192 (FCMGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
3194 let Predicates = [HasNEON, HasFullFP16] in {
3195 def : InstAlias<"{fcmlt\t$dst.4h, $src1.4h, $src2.4h" #
3196 "|fcmlt.4h\t$dst, $src1, $src2}",
3197 (FCMGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
3198 def : InstAlias<"{fcmlt\t$dst.8h, $src1.8h, $src2.8h" #
3199 "|fcmlt.8h\t$dst, $src1, $src2}",
3200 (FCMGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
3202 def : InstAlias<"{fcmlt\t$dst.2s, $src1.2s, $src2.2s" #
3203 "|fcmlt.2s\t$dst, $src1, $src2}",
3204 (FCMGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
3205 def : InstAlias<"{fcmlt\t$dst.4s, $src1.4s, $src2.4s" #
3206 "|fcmlt.4s\t$dst, $src1, $src2}",
3207 (FCMGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
3208 def : InstAlias<"{fcmlt\t$dst.2d, $src1.2d, $src2.2d" #
3209 "|fcmlt.2d\t$dst, $src1, $src2}",
3210 (FCMGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
3212 let Predicates = [HasNEON, HasFullFP16] in {
3213 def : InstAlias<"{facle\t$dst.4h, $src1.4h, $src2.4h" #
3214 "|facle.4h\t$dst, $src1, $src2}",
3215 (FACGEv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
3216 def : InstAlias<"{facle\t$dst.8h, $src1.8h, $src2.8h" #
3217 "|facle.8h\t$dst, $src1, $src2}",
3218 (FACGEv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
3220 def : InstAlias<"{facle\t$dst.2s, $src1.2s, $src2.2s" #
3221 "|facle.2s\t$dst, $src1, $src2}",
3222 (FACGEv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
3223 def : InstAlias<"{facle\t$dst.4s, $src1.4s, $src2.4s" #
3224 "|facle.4s\t$dst, $src1, $src2}",
3225 (FACGEv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
3226 def : InstAlias<"{facle\t$dst.2d, $src1.2d, $src2.2d" #
3227 "|facle.2d\t$dst, $src1, $src2}",
3228 (FACGEv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
3230 let Predicates = [HasNEON, HasFullFP16] in {
3231 def : InstAlias<"{faclt\t$dst.4h, $src1.4h, $src2.4h" #
3232 "|faclt.4h\t$dst, $src1, $src2}",
3233 (FACGTv4f16 V64:$dst, V64:$src2, V64:$src1), 0>;
3234 def : InstAlias<"{faclt\t$dst.8h, $src1.8h, $src2.8h" #
3235 "|faclt.8h\t$dst, $src1, $src2}",
3236 (FACGTv8f16 V128:$dst, V128:$src2, V128:$src1), 0>;
3238 def : InstAlias<"{faclt\t$dst.2s, $src1.2s, $src2.2s" #
3239 "|faclt.2s\t$dst, $src1, $src2}",
3240 (FACGTv2f32 V64:$dst, V64:$src2, V64:$src1), 0>;
3241 def : InstAlias<"{faclt\t$dst.4s, $src1.4s, $src2.4s" #
3242 "|faclt.4s\t$dst, $src1, $src2}",
3243 (FACGTv4f32 V128:$dst, V128:$src2, V128:$src1), 0>;
3244 def : InstAlias<"{faclt\t$dst.2d, $src1.2d, $src2.2d" #
3245 "|faclt.2d\t$dst, $src1, $src2}",
3246 (FACGTv2f64 V128:$dst, V128:$src2, V128:$src1), 0>;
3248 //===----------------------------------------------------------------------===//
3249 // Advanced SIMD three scalar instructions.
3250 //===----------------------------------------------------------------------===//
3252 defm ADD : SIMDThreeScalarD<0, 0b10000, "add", add>;
3253 defm CMEQ : SIMDThreeScalarD<1, 0b10001, "cmeq", AArch64cmeq>;
3254 defm CMGE : SIMDThreeScalarD<0, 0b00111, "cmge", AArch64cmge>;
3255 defm CMGT : SIMDThreeScalarD<0, 0b00110, "cmgt", AArch64cmgt>;
3256 defm CMHI : SIMDThreeScalarD<1, 0b00110, "cmhi", AArch64cmhi>;
3257 defm CMHS : SIMDThreeScalarD<1, 0b00111, "cmhs", AArch64cmhs>;
3258 defm CMTST : SIMDThreeScalarD<0, 0b10001, "cmtst", AArch64cmtst>;
3259 defm FABD : SIMDFPThreeScalar<1, 1, 0b010, "fabd", int_aarch64_sisd_fabd>;
3260 def : Pat<(v1f64 (int_aarch64_neon_fabd (v1f64 FPR64:$Rn), (v1f64 FPR64:$Rm))),
3261 (FABD64 FPR64:$Rn, FPR64:$Rm)>;
3262 defm FACGE : SIMDThreeScalarFPCmp<1, 0, 0b101, "facge",
3263 int_aarch64_neon_facge>;
3264 defm FACGT : SIMDThreeScalarFPCmp<1, 1, 0b101, "facgt",
3265 int_aarch64_neon_facgt>;
3266 defm FCMEQ : SIMDThreeScalarFPCmp<0, 0, 0b100, "fcmeq", AArch64fcmeq>;
3267 defm FCMGE : SIMDThreeScalarFPCmp<1, 0, 0b100, "fcmge", AArch64fcmge>;
3268 defm FCMGT : SIMDThreeScalarFPCmp<1, 1, 0b100, "fcmgt", AArch64fcmgt>;
3269 defm FMULX : SIMDFPThreeScalar<0, 0, 0b011, "fmulx", int_aarch64_neon_fmulx>;
3270 defm FRECPS : SIMDFPThreeScalar<0, 0, 0b111, "frecps", int_aarch64_neon_frecps>;
3271 defm FRSQRTS : SIMDFPThreeScalar<0, 1, 0b111, "frsqrts", int_aarch64_neon_frsqrts>;
3272 defm SQADD : SIMDThreeScalarBHSD<0, 0b00001, "sqadd", int_aarch64_neon_sqadd>;
3273 defm SQDMULH : SIMDThreeScalarHS< 0, 0b10110, "sqdmulh", int_aarch64_neon_sqdmulh>;
3274 defm SQRDMULH : SIMDThreeScalarHS< 1, 0b10110, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
3275 defm SQRSHL : SIMDThreeScalarBHSD<0, 0b01011, "sqrshl",int_aarch64_neon_sqrshl>;
3276 defm SQSHL : SIMDThreeScalarBHSD<0, 0b01001, "sqshl", int_aarch64_neon_sqshl>;
3277 defm SQSUB : SIMDThreeScalarBHSD<0, 0b00101, "sqsub", int_aarch64_neon_sqsub>;
3278 defm SRSHL : SIMDThreeScalarD< 0, 0b01010, "srshl", int_aarch64_neon_srshl>;
3279 defm SSHL : SIMDThreeScalarD< 0, 0b01000, "sshl", int_aarch64_neon_sshl>;
3280 defm SUB : SIMDThreeScalarD< 1, 0b10000, "sub", sub>;
3281 defm UQADD : SIMDThreeScalarBHSD<1, 0b00001, "uqadd", int_aarch64_neon_uqadd>;
3282 defm UQRSHL : SIMDThreeScalarBHSD<1, 0b01011, "uqrshl",int_aarch64_neon_uqrshl>;
3283 defm UQSHL : SIMDThreeScalarBHSD<1, 0b01001, "uqshl", int_aarch64_neon_uqshl>;
3284 defm UQSUB : SIMDThreeScalarBHSD<1, 0b00101, "uqsub", int_aarch64_neon_uqsub>;
3285 defm URSHL : SIMDThreeScalarD< 1, 0b01010, "urshl", int_aarch64_neon_urshl>;
3286 defm USHL : SIMDThreeScalarD< 1, 0b01000, "ushl", int_aarch64_neon_ushl>;
3287 let Predicates = [HasV8_1a] in {
3288 defm SQRDMLAH : SIMDThreeScalarHSTied<1, 0, 0b10000, "sqrdmlah">;
3289 defm SQRDMLSH : SIMDThreeScalarHSTied<1, 0, 0b10001, "sqrdmlsh">;
3290 def : Pat<(i32 (int_aarch64_neon_sqadd
3292 (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
3293 (i32 FPR32:$Rm))))),
3294 (SQRDMLAHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
3295 def : Pat<(i32 (int_aarch64_neon_sqsub
3297 (i32 (int_aarch64_neon_sqrdmulh (i32 FPR32:$Rn),
3298 (i32 FPR32:$Rm))))),
3299 (SQRDMLSHv1i32 FPR32:$Rd, FPR32:$Rn, FPR32:$Rm)>;
3302 def : InstAlias<"cmls $dst, $src1, $src2",
3303 (CMHSv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3304 def : InstAlias<"cmle $dst, $src1, $src2",
3305 (CMGEv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3306 def : InstAlias<"cmlo $dst, $src1, $src2",
3307 (CMHIv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3308 def : InstAlias<"cmlt $dst, $src1, $src2",
3309 (CMGTv1i64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3310 def : InstAlias<"fcmle $dst, $src1, $src2",
3311 (FCMGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
3312 def : InstAlias<"fcmle $dst, $src1, $src2",
3313 (FCMGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3314 def : InstAlias<"fcmlt $dst, $src1, $src2",
3315 (FCMGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
3316 def : InstAlias<"fcmlt $dst, $src1, $src2",
3317 (FCMGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3318 def : InstAlias<"facle $dst, $src1, $src2",
3319 (FACGE32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
3320 def : InstAlias<"facle $dst, $src1, $src2",
3321 (FACGE64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3322 def : InstAlias<"faclt $dst, $src1, $src2",
3323 (FACGT32 FPR32:$dst, FPR32:$src2, FPR32:$src1), 0>;
3324 def : InstAlias<"faclt $dst, $src1, $src2",
3325 (FACGT64 FPR64:$dst, FPR64:$src2, FPR64:$src1), 0>;
3327 //===----------------------------------------------------------------------===//
3328 // Advanced SIMD three scalar instructions (mixed operands).
3329 //===----------------------------------------------------------------------===//
3330 defm SQDMULL : SIMDThreeScalarMixedHS<0, 0b11010, "sqdmull",
3331 int_aarch64_neon_sqdmulls_scalar>;
3332 defm SQDMLAL : SIMDThreeScalarMixedTiedHS<0, 0b10010, "sqdmlal">;
3333 defm SQDMLSL : SIMDThreeScalarMixedTiedHS<0, 0b10110, "sqdmlsl">;
3335 def : Pat<(i64 (int_aarch64_neon_sqadd (i64 FPR64:$Rd),
3336 (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
3337 (i32 FPR32:$Rm))))),
3338 (SQDMLALi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
3339 def : Pat<(i64 (int_aarch64_neon_sqsub (i64 FPR64:$Rd),
3340 (i64 (int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
3341 (i32 FPR32:$Rm))))),
3342 (SQDMLSLi32 FPR64:$Rd, FPR32:$Rn, FPR32:$Rm)>;
3344 //===----------------------------------------------------------------------===//
3345 // Advanced SIMD two scalar instructions.
3346 //===----------------------------------------------------------------------===//
3348 defm ABS : SIMDTwoScalarD< 0, 0b01011, "abs", int_aarch64_neon_abs>;
3349 defm CMEQ : SIMDCmpTwoScalarD< 0, 0b01001, "cmeq", AArch64cmeqz>;
3350 defm CMGE : SIMDCmpTwoScalarD< 1, 0b01000, "cmge", AArch64cmgez>;
3351 defm CMGT : SIMDCmpTwoScalarD< 0, 0b01000, "cmgt", AArch64cmgtz>;
3352 defm CMLE : SIMDCmpTwoScalarD< 1, 0b01001, "cmle", AArch64cmlez>;
3353 defm CMLT : SIMDCmpTwoScalarD< 0, 0b01010, "cmlt", AArch64cmltz>;
3354 defm FCMEQ : SIMDFPCmpTwoScalar<0, 1, 0b01101, "fcmeq", AArch64fcmeqz>;
3355 defm FCMGE : SIMDFPCmpTwoScalar<1, 1, 0b01100, "fcmge", AArch64fcmgez>;
3356 defm FCMGT : SIMDFPCmpTwoScalar<0, 1, 0b01100, "fcmgt", AArch64fcmgtz>;
3357 defm FCMLE : SIMDFPCmpTwoScalar<1, 1, 0b01101, "fcmle", AArch64fcmlez>;
3358 defm FCMLT : SIMDFPCmpTwoScalar<0, 1, 0b01110, "fcmlt", AArch64fcmltz>;
3359 defm FCVTAS : SIMDFPTwoScalar< 0, 0, 0b11100, "fcvtas">;
3360 defm FCVTAU : SIMDFPTwoScalar< 1, 0, 0b11100, "fcvtau">;
3361 defm FCVTMS : SIMDFPTwoScalar< 0, 0, 0b11011, "fcvtms">;
3362 defm FCVTMU : SIMDFPTwoScalar< 1, 0, 0b11011, "fcvtmu">;
3363 defm FCVTNS : SIMDFPTwoScalar< 0, 0, 0b11010, "fcvtns">;
3364 defm FCVTNU : SIMDFPTwoScalar< 1, 0, 0b11010, "fcvtnu">;
3365 defm FCVTPS : SIMDFPTwoScalar< 0, 1, 0b11010, "fcvtps">;
3366 defm FCVTPU : SIMDFPTwoScalar< 1, 1, 0b11010, "fcvtpu">;
3367 def FCVTXNv1i64 : SIMDInexactCvtTwoScalar<0b10110, "fcvtxn">;
3368 defm FCVTZS : SIMDFPTwoScalar< 0, 1, 0b11011, "fcvtzs">;
3369 defm FCVTZU : SIMDFPTwoScalar< 1, 1, 0b11011, "fcvtzu">;
3370 defm FRECPE : SIMDFPTwoScalar< 0, 1, 0b11101, "frecpe">;
3371 defm FRECPX : SIMDFPTwoScalar< 0, 1, 0b11111, "frecpx">;
3372 defm FRSQRTE : SIMDFPTwoScalar< 1, 1, 0b11101, "frsqrte">;
3373 defm NEG : SIMDTwoScalarD< 1, 0b01011, "neg",
3374 UnOpFrag<(sub immAllZerosV, node:$LHS)> >;
3375 defm SCVTF : SIMDFPTwoScalarCVT< 0, 0, 0b11101, "scvtf", AArch64sitof>;
3376 defm SQABS : SIMDTwoScalarBHSD< 0, 0b00111, "sqabs", int_aarch64_neon_sqabs>;
3377 defm SQNEG : SIMDTwoScalarBHSD< 1, 0b00111, "sqneg", int_aarch64_neon_sqneg>;
3378 defm SQXTN : SIMDTwoScalarMixedBHS< 0, 0b10100, "sqxtn", int_aarch64_neon_scalar_sqxtn>;
3379 defm SQXTUN : SIMDTwoScalarMixedBHS< 1, 0b10010, "sqxtun", int_aarch64_neon_scalar_sqxtun>;
3380 defm SUQADD : SIMDTwoScalarBHSDTied< 0, 0b00011, "suqadd",
3381 int_aarch64_neon_suqadd>;
3382 defm UCVTF : SIMDFPTwoScalarCVT< 1, 0, 0b11101, "ucvtf", AArch64uitof>;
3383 defm UQXTN : SIMDTwoScalarMixedBHS<1, 0b10100, "uqxtn", int_aarch64_neon_scalar_uqxtn>;
3384 defm USQADD : SIMDTwoScalarBHSDTied< 1, 0b00011, "usqadd",
3385 int_aarch64_neon_usqadd>;
3387 def : Pat<(AArch64neg (v1i64 V64:$Rn)), (NEGv1i64 V64:$Rn)>;
3389 def : Pat<(v1i64 (int_aarch64_neon_fcvtas (v1f64 FPR64:$Rn))),
3390 (FCVTASv1i64 FPR64:$Rn)>;
3391 def : Pat<(v1i64 (int_aarch64_neon_fcvtau (v1f64 FPR64:$Rn))),
3392 (FCVTAUv1i64 FPR64:$Rn)>;
3393 def : Pat<(v1i64 (int_aarch64_neon_fcvtms (v1f64 FPR64:$Rn))),
3394 (FCVTMSv1i64 FPR64:$Rn)>;
3395 def : Pat<(v1i64 (int_aarch64_neon_fcvtmu (v1f64 FPR64:$Rn))),
3396 (FCVTMUv1i64 FPR64:$Rn)>;
3397 def : Pat<(v1i64 (int_aarch64_neon_fcvtns (v1f64 FPR64:$Rn))),
3398 (FCVTNSv1i64 FPR64:$Rn)>;
3399 def : Pat<(v1i64 (int_aarch64_neon_fcvtnu (v1f64 FPR64:$Rn))),
3400 (FCVTNUv1i64 FPR64:$Rn)>;
3401 def : Pat<(v1i64 (int_aarch64_neon_fcvtps (v1f64 FPR64:$Rn))),
3402 (FCVTPSv1i64 FPR64:$Rn)>;
3403 def : Pat<(v1i64 (int_aarch64_neon_fcvtpu (v1f64 FPR64:$Rn))),
3404 (FCVTPUv1i64 FPR64:$Rn)>;
3406 def : Pat<(f32 (int_aarch64_neon_frecpe (f32 FPR32:$Rn))),
3407 (FRECPEv1i32 FPR32:$Rn)>;
3408 def : Pat<(f64 (int_aarch64_neon_frecpe (f64 FPR64:$Rn))),
3409 (FRECPEv1i64 FPR64:$Rn)>;
3410 def : Pat<(v1f64 (int_aarch64_neon_frecpe (v1f64 FPR64:$Rn))),
3411 (FRECPEv1i64 FPR64:$Rn)>;
3413 def : Pat<(f32 (AArch64frecpe (f32 FPR32:$Rn))),
3414 (FRECPEv1i32 FPR32:$Rn)>;
3415 def : Pat<(v2f32 (AArch64frecpe (v2f32 V64:$Rn))),
3416 (FRECPEv2f32 V64:$Rn)>;
3417 def : Pat<(v4f32 (AArch64frecpe (v4f32 FPR128:$Rn))),
3418 (FRECPEv4f32 FPR128:$Rn)>;
3419 def : Pat<(f64 (AArch64frecpe (f64 FPR64:$Rn))),
3420 (FRECPEv1i64 FPR64:$Rn)>;
3421 def : Pat<(v1f64 (AArch64frecpe (v1f64 FPR64:$Rn))),
3422 (FRECPEv1i64 FPR64:$Rn)>;
3423 def : Pat<(v2f64 (AArch64frecpe (v2f64 FPR128:$Rn))),
3424 (FRECPEv2f64 FPR128:$Rn)>;
3426 def : Pat<(f32 (AArch64frecps (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
3427 (FRECPS32 FPR32:$Rn, FPR32:$Rm)>;
3428 def : Pat<(v2f32 (AArch64frecps (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
3429 (FRECPSv2f32 V64:$Rn, V64:$Rm)>;
3430 def : Pat<(v4f32 (AArch64frecps (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
3431 (FRECPSv4f32 FPR128:$Rn, FPR128:$Rm)>;
3432 def : Pat<(f64 (AArch64frecps (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
3433 (FRECPS64 FPR64:$Rn, FPR64:$Rm)>;
3434 def : Pat<(v2f64 (AArch64frecps (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
3435 (FRECPSv2f64 FPR128:$Rn, FPR128:$Rm)>;
3437 def : Pat<(f32 (int_aarch64_neon_frecpx (f32 FPR32:$Rn))),
3438 (FRECPXv1i32 FPR32:$Rn)>;
3439 def : Pat<(f64 (int_aarch64_neon_frecpx (f64 FPR64:$Rn))),
3440 (FRECPXv1i64 FPR64:$Rn)>;
3442 def : Pat<(f32 (int_aarch64_neon_frsqrte (f32 FPR32:$Rn))),
3443 (FRSQRTEv1i32 FPR32:$Rn)>;
3444 def : Pat<(f64 (int_aarch64_neon_frsqrte (f64 FPR64:$Rn))),
3445 (FRSQRTEv1i64 FPR64:$Rn)>;
3446 def : Pat<(v1f64 (int_aarch64_neon_frsqrte (v1f64 FPR64:$Rn))),
3447 (FRSQRTEv1i64 FPR64:$Rn)>;
3449 def : Pat<(f32 (AArch64frsqrte (f32 FPR32:$Rn))),
3450 (FRSQRTEv1i32 FPR32:$Rn)>;
3451 def : Pat<(v2f32 (AArch64frsqrte (v2f32 V64:$Rn))),
3452 (FRSQRTEv2f32 V64:$Rn)>;
3453 def : Pat<(v4f32 (AArch64frsqrte (v4f32 FPR128:$Rn))),
3454 (FRSQRTEv4f32 FPR128:$Rn)>;
3455 def : Pat<(f64 (AArch64frsqrte (f64 FPR64:$Rn))),
3456 (FRSQRTEv1i64 FPR64:$Rn)>;
3457 def : Pat<(v1f64 (AArch64frsqrte (v1f64 FPR64:$Rn))),
3458 (FRSQRTEv1i64 FPR64:$Rn)>;
3459 def : Pat<(v2f64 (AArch64frsqrte (v2f64 FPR128:$Rn))),
3460 (FRSQRTEv2f64 FPR128:$Rn)>;
3462 def : Pat<(f32 (AArch64frsqrts (f32 FPR32:$Rn), (f32 FPR32:$Rm))),
3463 (FRSQRTS32 FPR32:$Rn, FPR32:$Rm)>;
3464 def : Pat<(v2f32 (AArch64frsqrts (v2f32 V64:$Rn), (v2f32 V64:$Rm))),
3465 (FRSQRTSv2f32 V64:$Rn, V64:$Rm)>;
3466 def : Pat<(v4f32 (AArch64frsqrts (v4f32 FPR128:$Rn), (v4f32 FPR128:$Rm))),
3467 (FRSQRTSv4f32 FPR128:$Rn, FPR128:$Rm)>;
3468 def : Pat<(f64 (AArch64frsqrts (f64 FPR64:$Rn), (f64 FPR64:$Rm))),
3469 (FRSQRTS64 FPR64:$Rn, FPR64:$Rm)>;
3470 def : Pat<(v2f64 (AArch64frsqrts (v2f64 FPR128:$Rn), (v2f64 FPR128:$Rm))),
3471 (FRSQRTSv2f64 FPR128:$Rn, FPR128:$Rm)>;
3473 // If an integer is about to be converted to a floating point value,
3474 // just load it on the floating point unit.
3475 // Here are the patterns for 8 and 16-bits to float.
3477 multiclass UIntToFPROLoadPat<ValueType DstTy, ValueType SrcTy,
3478 SDPatternOperator loadop, Instruction UCVTF,
3479 ROAddrMode ro, Instruction LDRW, Instruction LDRX,
3481 def : Pat<(DstTy (uint_to_fp (SrcTy
3482 (loadop (ro.Wpat GPR64sp:$Rn, GPR32:$Rm,
3483 ro.Wext:$extend))))),
3484 (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
3485 (LDRW GPR64sp:$Rn, GPR32:$Rm, ro.Wext:$extend),
3488 def : Pat<(DstTy (uint_to_fp (SrcTy
3489 (loadop (ro.Xpat GPR64sp:$Rn, GPR64:$Rm,
3490 ro.Wext:$extend))))),
3491 (UCVTF (INSERT_SUBREG (DstTy (IMPLICIT_DEF)),
3492 (LDRX GPR64sp:$Rn, GPR64:$Rm, ro.Xext:$extend),
3496 defm : UIntToFPROLoadPat<f32, i32, zextloadi8,
3497 UCVTFv1i32, ro8, LDRBroW, LDRBroX, bsub>;
3498 def : Pat <(f32 (uint_to_fp (i32
3499 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
3500 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
3501 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
3502 def : Pat <(f32 (uint_to_fp (i32
3503 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
3504 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
3505 (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
3506 // 16-bits -> float.
3507 defm : UIntToFPROLoadPat<f32, i32, zextloadi16,
3508 UCVTFv1i32, ro16, LDRHroW, LDRHroX, hsub>;
3509 def : Pat <(f32 (uint_to_fp (i32
3510 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
3511 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
3512 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
3513 def : Pat <(f32 (uint_to_fp (i32
3514 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
3515 (UCVTFv1i32 (INSERT_SUBREG (f32 (IMPLICIT_DEF)),
3516 (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
3517 // 32-bits are handled in target specific dag combine:
3518 // performIntToFpCombine.
3519 // 64-bits integer to 32-bits floating point, not possible with
3520 // UCVTF on floating point registers (both source and destination
3521 // must have the same size).
3523 // Here are the patterns for 8, 16, 32, and 64-bits to double.
3524 // 8-bits -> double.
3525 defm : UIntToFPROLoadPat<f64, i32, zextloadi8,
3526 UCVTFv1i64, ro8, LDRBroW, LDRBroX, bsub>;
3527 def : Pat <(f64 (uint_to_fp (i32
3528 (zextloadi8 (am_indexed8 GPR64sp:$Rn, uimm12s1:$offset))))),
3529 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
3530 (LDRBui GPR64sp:$Rn, uimm12s1:$offset), bsub))>;
3531 def : Pat <(f64 (uint_to_fp (i32
3532 (zextloadi8 (am_unscaled8 GPR64sp:$Rn, simm9:$offset))))),
3533 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
3534 (LDURBi GPR64sp:$Rn, simm9:$offset), bsub))>;
3535 // 16-bits -> double.
3536 defm : UIntToFPROLoadPat<f64, i32, zextloadi16,
3537 UCVTFv1i64, ro16, LDRHroW, LDRHroX, hsub>;
3538 def : Pat <(f64 (uint_to_fp (i32
3539 (zextloadi16 (am_indexed16 GPR64sp:$Rn, uimm12s2:$offset))))),
3540 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
3541 (LDRHui GPR64sp:$Rn, uimm12s2:$offset), hsub))>;
3542 def : Pat <(f64 (uint_to_fp (i32
3543 (zextloadi16 (am_unscaled16 GPR64sp:$Rn, simm9:$offset))))),
3544 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
3545 (LDURHi GPR64sp:$Rn, simm9:$offset), hsub))>;
3546 // 32-bits -> double.
3547 defm : UIntToFPROLoadPat<f64, i32, load,
3548 UCVTFv1i64, ro32, LDRSroW, LDRSroX, ssub>;
3549 def : Pat <(f64 (uint_to_fp (i32
3550 (load (am_indexed32 GPR64sp:$Rn, uimm12s4:$offset))))),
3551 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
3552 (LDRSui GPR64sp:$Rn, uimm12s4:$offset), ssub))>;
3553 def : Pat <(f64 (uint_to_fp (i32
3554 (load (am_unscaled32 GPR64sp:$Rn, simm9:$offset))))),
3555 (UCVTFv1i64 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
3556 (LDURSi GPR64sp:$Rn, simm9:$offset), ssub))>;
3557 // 64-bits -> double are handled in target specific dag combine:
3558 // performIntToFpCombine.
3560 //===----------------------------------------------------------------------===//
3561 // Advanced SIMD three different-sized vector instructions.
3562 //===----------------------------------------------------------------------===//
3564 defm ADDHN : SIMDNarrowThreeVectorBHS<0,0b0100,"addhn", int_aarch64_neon_addhn>;
3565 defm SUBHN : SIMDNarrowThreeVectorBHS<0,0b0110,"subhn", int_aarch64_neon_subhn>;
3566 defm RADDHN : SIMDNarrowThreeVectorBHS<1,0b0100,"raddhn",int_aarch64_neon_raddhn>;
3567 defm RSUBHN : SIMDNarrowThreeVectorBHS<1,0b0110,"rsubhn",int_aarch64_neon_rsubhn>;
3568 defm PMULL : SIMDDifferentThreeVectorBD<0,0b1110,"pmull",int_aarch64_neon_pmull>;
3569 defm SABAL : SIMDLongThreeVectorTiedBHSabal<0,0b0101,"sabal",
3570 int_aarch64_neon_sabd>;
3571 defm SABDL : SIMDLongThreeVectorBHSabdl<0, 0b0111, "sabdl",
3572 int_aarch64_neon_sabd>;
3573 defm SADDL : SIMDLongThreeVectorBHS< 0, 0b0000, "saddl",
3574 BinOpFrag<(add (sext node:$LHS), (sext node:$RHS))>>;
3575 defm SADDW : SIMDWideThreeVectorBHS< 0, 0b0001, "saddw",
3576 BinOpFrag<(add node:$LHS, (sext node:$RHS))>>;
3577 defm SMLAL : SIMDLongThreeVectorTiedBHS<0, 0b1000, "smlal",
3578 TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
3579 defm SMLSL : SIMDLongThreeVectorTiedBHS<0, 0b1010, "smlsl",
3580 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
3581 defm SMULL : SIMDLongThreeVectorBHS<0, 0b1100, "smull", int_aarch64_neon_smull>;
3582 defm SQDMLAL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1001, "sqdmlal",
3583 int_aarch64_neon_sqadd>;
3584 defm SQDMLSL : SIMDLongThreeVectorSQDMLXTiedHS<0, 0b1011, "sqdmlsl",
3585 int_aarch64_neon_sqsub>;
3586 defm SQDMULL : SIMDLongThreeVectorHS<0, 0b1101, "sqdmull",
3587 int_aarch64_neon_sqdmull>;
3588 defm SSUBL : SIMDLongThreeVectorBHS<0, 0b0010, "ssubl",
3589 BinOpFrag<(sub (sext node:$LHS), (sext node:$RHS))>>;
3590 defm SSUBW : SIMDWideThreeVectorBHS<0, 0b0011, "ssubw",
3591 BinOpFrag<(sub node:$LHS, (sext node:$RHS))>>;
3592 defm UABAL : SIMDLongThreeVectorTiedBHSabal<1, 0b0101, "uabal",
3593 int_aarch64_neon_uabd>;
3594 defm UADDL : SIMDLongThreeVectorBHS<1, 0b0000, "uaddl",
3595 BinOpFrag<(add (zext node:$LHS), (zext node:$RHS))>>;
3596 defm UADDW : SIMDWideThreeVectorBHS<1, 0b0001, "uaddw",
3597 BinOpFrag<(add node:$LHS, (zext node:$RHS))>>;
3598 defm UMLAL : SIMDLongThreeVectorTiedBHS<1, 0b1000, "umlal",
3599 TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
3600 defm UMLSL : SIMDLongThreeVectorTiedBHS<1, 0b1010, "umlsl",
3601 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
3602 defm UMULL : SIMDLongThreeVectorBHS<1, 0b1100, "umull", int_aarch64_neon_umull>;
3603 defm USUBL : SIMDLongThreeVectorBHS<1, 0b0010, "usubl",
3604 BinOpFrag<(sub (zext node:$LHS), (zext node:$RHS))>>;
3605 defm USUBW : SIMDWideThreeVectorBHS< 1, 0b0011, "usubw",
3606 BinOpFrag<(sub node:$LHS, (zext node:$RHS))>>;
3608 // Additional patterns for SMULL and UMULL
3609 multiclass Neon_mul_widen_patterns<SDPatternOperator opnode,
3610 Instruction INST8B, Instruction INST4H, Instruction INST2S> {
3611 def : Pat<(v8i16 (opnode (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
3612 (INST8B V64:$Rn, V64:$Rm)>;
3613 def : Pat<(v4i32 (opnode (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
3614 (INST4H V64:$Rn, V64:$Rm)>;
3615 def : Pat<(v2i64 (opnode (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
3616 (INST2S V64:$Rn, V64:$Rm)>;
3619 defm : Neon_mul_widen_patterns<AArch64smull, SMULLv8i8_v8i16,
3620 SMULLv4i16_v4i32, SMULLv2i32_v2i64>;
3621 defm : Neon_mul_widen_patterns<AArch64umull, UMULLv8i8_v8i16,
3622 UMULLv4i16_v4i32, UMULLv2i32_v2i64>;
3624 // Additional patterns for SMLAL/SMLSL and UMLAL/UMLSL
3625 multiclass Neon_mulacc_widen_patterns<SDPatternOperator opnode,
3626 Instruction INST8B, Instruction INST4H, Instruction INST2S> {
3627 def : Pat<(v8i16 (opnode (v8i16 V128:$Rd), (v8i8 V64:$Rn), (v8i8 V64:$Rm))),
3628 (INST8B V128:$Rd, V64:$Rn, V64:$Rm)>;
3629 def : Pat<(v4i32 (opnode (v4i32 V128:$Rd), (v4i16 V64:$Rn), (v4i16 V64:$Rm))),
3630 (INST4H V128:$Rd, V64:$Rn, V64:$Rm)>;
3631 def : Pat<(v2i64 (opnode (v2i64 V128:$Rd), (v2i32 V64:$Rn), (v2i32 V64:$Rm))),
3632 (INST2S V128:$Rd, V64:$Rn, V64:$Rm)>;
3635 defm : Neon_mulacc_widen_patterns<
3636 TriOpFrag<(add node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
3637 SMLALv8i8_v8i16, SMLALv4i16_v4i32, SMLALv2i32_v2i64>;
3638 defm : Neon_mulacc_widen_patterns<
3639 TriOpFrag<(add node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
3640 UMLALv8i8_v8i16, UMLALv4i16_v4i32, UMLALv2i32_v2i64>;
3641 defm : Neon_mulacc_widen_patterns<
3642 TriOpFrag<(sub node:$LHS, (AArch64smull node:$MHS, node:$RHS))>,
3643 SMLSLv8i8_v8i16, SMLSLv4i16_v4i32, SMLSLv2i32_v2i64>;
3644 defm : Neon_mulacc_widen_patterns<
3645 TriOpFrag<(sub node:$LHS, (AArch64umull node:$MHS, node:$RHS))>,
3646 UMLSLv8i8_v8i16, UMLSLv4i16_v4i32, UMLSLv2i32_v2i64>;
3648 // Patterns for 64-bit pmull
3649 def : Pat<(int_aarch64_neon_pmull64 V64:$Rn, V64:$Rm),
3650 (PMULLv1i64 V64:$Rn, V64:$Rm)>;
3651 def : Pat<(int_aarch64_neon_pmull64 (extractelt (v2i64 V128:$Rn), (i64 1)),
3652 (extractelt (v2i64 V128:$Rm), (i64 1))),
3653 (PMULLv2i64 V128:$Rn, V128:$Rm)>;
3655 // CodeGen patterns for addhn and subhn instructions, which can actually be
3656 // written in LLVM IR without too much difficulty.
3659 def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm), (i32 8))))),
3660 (ADDHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
3661 def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
3663 (ADDHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
3664 def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
3666 (ADDHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
3667 def : Pat<(concat_vectors (v8i8 V64:$Rd),
3668 (trunc (v8i16 (AArch64vlshr (add V128:$Rn, V128:$Rm),
3670 (ADDHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
3671 V128:$Rn, V128:$Rm)>;
3672 def : Pat<(concat_vectors (v4i16 V64:$Rd),
3673 (trunc (v4i32 (AArch64vlshr (add V128:$Rn, V128:$Rm),
3675 (ADDHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
3676 V128:$Rn, V128:$Rm)>;
3677 def : Pat<(concat_vectors (v2i32 V64:$Rd),
3678 (trunc (v2i64 (AArch64vlshr (add V128:$Rn, V128:$Rm),
3680 (ADDHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
3681 V128:$Rn, V128:$Rm)>;
3684 def : Pat<(v8i8 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm), (i32 8))))),
3685 (SUBHNv8i16_v8i8 V128:$Rn, V128:$Rm)>;
3686 def : Pat<(v4i16 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
3688 (SUBHNv4i32_v4i16 V128:$Rn, V128:$Rm)>;
3689 def : Pat<(v2i32 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
3691 (SUBHNv2i64_v2i32 V128:$Rn, V128:$Rm)>;
3692 def : Pat<(concat_vectors (v8i8 V64:$Rd),
3693 (trunc (v8i16 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
3695 (SUBHNv8i16_v16i8 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
3696 V128:$Rn, V128:$Rm)>;
3697 def : Pat<(concat_vectors (v4i16 V64:$Rd),
3698 (trunc (v4i32 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
3700 (SUBHNv4i32_v8i16 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
3701 V128:$Rn, V128:$Rm)>;
3702 def : Pat<(concat_vectors (v2i32 V64:$Rd),
3703 (trunc (v2i64 (AArch64vlshr (sub V128:$Rn, V128:$Rm),
3705 (SUBHNv2i64_v4i32 (SUBREG_TO_REG (i32 0), V64:$Rd, dsub),
3706 V128:$Rn, V128:$Rm)>;
3708 //----------------------------------------------------------------------------
3709 // AdvSIMD bitwise extract from vector instruction.
3710 //----------------------------------------------------------------------------
3712 defm EXT : SIMDBitwiseExtract<"ext">;
3714 def : Pat<(v4i16 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
3715 (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
3716 def : Pat<(v8i16 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3717 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3718 def : Pat<(v2i32 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
3719 (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
3720 def : Pat<(v2f32 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
3721 (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
3722 def : Pat<(v4i32 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3723 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3724 def : Pat<(v4f32 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3725 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3726 def : Pat<(v2i64 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3727 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3728 def : Pat<(v2f64 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3729 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3730 def : Pat<(v4f16 (AArch64ext V64:$Rn, V64:$Rm, (i32 imm:$imm))),
3731 (EXTv8i8 V64:$Rn, V64:$Rm, imm:$imm)>;
3732 def : Pat<(v8f16 (AArch64ext V128:$Rn, V128:$Rm, (i32 imm:$imm))),
3733 (EXTv16i8 V128:$Rn, V128:$Rm, imm:$imm)>;
3735 // We use EXT to handle extract_subvector to copy the upper 64-bits of a
3737 def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 8))),
3738 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3739 def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 4))),
3740 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3741 def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 2))),
3742 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3743 def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 1))),
3744 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3745 def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 4))),
3746 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3747 def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 2))),
3748 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3749 def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 1))),
3750 (EXTRACT_SUBREG (EXTv16i8 V128:$Rn, V128:$Rn, 8), dsub)>;
3753 //----------------------------------------------------------------------------
3754 // AdvSIMD zip vector
3755 //----------------------------------------------------------------------------
3757 defm TRN1 : SIMDZipVector<0b010, "trn1", AArch64trn1>;
3758 defm TRN2 : SIMDZipVector<0b110, "trn2", AArch64trn2>;
3759 defm UZP1 : SIMDZipVector<0b001, "uzp1", AArch64uzp1>;
3760 defm UZP2 : SIMDZipVector<0b101, "uzp2", AArch64uzp2>;
3761 defm ZIP1 : SIMDZipVector<0b011, "zip1", AArch64zip1>;
3762 defm ZIP2 : SIMDZipVector<0b111, "zip2", AArch64zip2>;
3764 //----------------------------------------------------------------------------
3765 // AdvSIMD TBL/TBX instructions
3766 //----------------------------------------------------------------------------
3768 defm TBL : SIMDTableLookup< 0, "tbl">;
3769 defm TBX : SIMDTableLookupTied<1, "tbx">;
3771 def : Pat<(v8i8 (int_aarch64_neon_tbl1 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
3772 (TBLv8i8One VecListOne128:$Rn, V64:$Ri)>;
3773 def : Pat<(v16i8 (int_aarch64_neon_tbl1 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
3774 (TBLv16i8One V128:$Ri, V128:$Rn)>;
3776 def : Pat<(v8i8 (int_aarch64_neon_tbx1 (v8i8 V64:$Rd),
3777 (v16i8 VecListOne128:$Rn), (v8i8 V64:$Ri))),
3778 (TBXv8i8One V64:$Rd, VecListOne128:$Rn, V64:$Ri)>;
3779 def : Pat<(v16i8 (int_aarch64_neon_tbx1 (v16i8 V128:$Rd),
3780 (v16i8 V128:$Ri), (v16i8 V128:$Rn))),
3781 (TBXv16i8One V128:$Rd, V128:$Ri, V128:$Rn)>;
3784 //----------------------------------------------------------------------------
3785 // AdvSIMD scalar CPY instruction
3786 //----------------------------------------------------------------------------
3788 defm CPY : SIMDScalarCPY<"cpy">;
3790 //----------------------------------------------------------------------------
3791 // AdvSIMD scalar pairwise instructions
3792 //----------------------------------------------------------------------------
3794 defm ADDP : SIMDPairwiseScalarD<0, 0b11011, "addp">;
3795 defm FADDP : SIMDFPPairwiseScalar<0, 0b01101, "faddp">;
3796 defm FMAXNMP : SIMDFPPairwiseScalar<0, 0b01100, "fmaxnmp">;
3797 defm FMAXP : SIMDFPPairwiseScalar<0, 0b01111, "fmaxp">;
3798 defm FMINNMP : SIMDFPPairwiseScalar<1, 0b01100, "fminnmp">;
3799 defm FMINP : SIMDFPPairwiseScalar<1, 0b01111, "fminp">;
3800 def : Pat<(v2i64 (AArch64saddv V128:$Rn)),
3801 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
3802 def : Pat<(v2i64 (AArch64uaddv V128:$Rn)),
3803 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), (ADDPv2i64p V128:$Rn), dsub)>;
3804 def : Pat<(f32 (int_aarch64_neon_faddv (v2f32 V64:$Rn))),
3805 (FADDPv2i32p V64:$Rn)>;
3806 def : Pat<(f32 (int_aarch64_neon_faddv (v4f32 V128:$Rn))),
3807 (FADDPv2i32p (EXTRACT_SUBREG (FADDPv4f32 V128:$Rn, V128:$Rn), dsub))>;
3808 def : Pat<(f64 (int_aarch64_neon_faddv (v2f64 V128:$Rn))),
3809 (FADDPv2i64p V128:$Rn)>;
3810 def : Pat<(f32 (int_aarch64_neon_fmaxnmv (v2f32 V64:$Rn))),
3811 (FMAXNMPv2i32p V64:$Rn)>;
3812 def : Pat<(f64 (int_aarch64_neon_fmaxnmv (v2f64 V128:$Rn))),
3813 (FMAXNMPv2i64p V128:$Rn)>;
3814 def : Pat<(f32 (int_aarch64_neon_fmaxv (v2f32 V64:$Rn))),
3815 (FMAXPv2i32p V64:$Rn)>;
3816 def : Pat<(f64 (int_aarch64_neon_fmaxv (v2f64 V128:$Rn))),
3817 (FMAXPv2i64p V128:$Rn)>;
3818 def : Pat<(f32 (int_aarch64_neon_fminnmv (v2f32 V64:$Rn))),
3819 (FMINNMPv2i32p V64:$Rn)>;
3820 def : Pat<(f64 (int_aarch64_neon_fminnmv (v2f64 V128:$Rn))),
3821 (FMINNMPv2i64p V128:$Rn)>;
3822 def : Pat<(f32 (int_aarch64_neon_fminv (v2f32 V64:$Rn))),
3823 (FMINPv2i32p V64:$Rn)>;
3824 def : Pat<(f64 (int_aarch64_neon_fminv (v2f64 V128:$Rn))),
3825 (FMINPv2i64p V128:$Rn)>;
3827 //----------------------------------------------------------------------------
3828 // AdvSIMD INS/DUP instructions
3829 //----------------------------------------------------------------------------
3831 def DUPv8i8gpr : SIMDDupFromMain<0, {?,?,?,?,1}, ".8b", v8i8, V64, GPR32>;
3832 def DUPv16i8gpr : SIMDDupFromMain<1, {?,?,?,?,1}, ".16b", v16i8, V128, GPR32>;
3833 def DUPv4i16gpr : SIMDDupFromMain<0, {?,?,?,1,0}, ".4h", v4i16, V64, GPR32>;
3834 def DUPv8i16gpr : SIMDDupFromMain<1, {?,?,?,1,0}, ".8h", v8i16, V128, GPR32>;
3835 def DUPv2i32gpr : SIMDDupFromMain<0, {?,?,1,0,0}, ".2s", v2i32, V64, GPR32>;
3836 def DUPv4i32gpr : SIMDDupFromMain<1, {?,?,1,0,0}, ".4s", v4i32, V128, GPR32>;
3837 def DUPv2i64gpr : SIMDDupFromMain<1, {?,1,0,0,0}, ".2d", v2i64, V128, GPR64>;
3839 def DUPv2i64lane : SIMDDup64FromElement;
3840 def DUPv2i32lane : SIMDDup32FromElement<0, ".2s", v2i32, V64>;
3841 def DUPv4i32lane : SIMDDup32FromElement<1, ".4s", v4i32, V128>;
3842 def DUPv4i16lane : SIMDDup16FromElement<0, ".4h", v4i16, V64>;
3843 def DUPv8i16lane : SIMDDup16FromElement<1, ".8h", v8i16, V128>;
3844 def DUPv8i8lane : SIMDDup8FromElement <0, ".8b", v8i8, V64>;
3845 def DUPv16i8lane : SIMDDup8FromElement <1, ".16b", v16i8, V128>;
3847 def : Pat<(v2f32 (AArch64dup (f32 FPR32:$Rn))),
3848 (v2f32 (DUPv2i32lane
3849 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
3851 def : Pat<(v4f32 (AArch64dup (f32 FPR32:$Rn))),
3852 (v4f32 (DUPv4i32lane
3853 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rn, ssub),
3855 def : Pat<(v2f64 (AArch64dup (f64 FPR64:$Rn))),
3856 (v2f64 (DUPv2i64lane
3857 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rn, dsub),
3859 def : Pat<(v4f16 (AArch64dup (f16 FPR16:$Rn))),
3860 (v4f16 (DUPv4i16lane
3861 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
3863 def : Pat<(v8f16 (AArch64dup (f16 FPR16:$Rn))),
3864 (v8f16 (DUPv8i16lane
3865 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR16:$Rn, hsub),
3868 def : Pat<(v4f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
3869 (DUPv4i16lane V128:$Rn, VectorIndexH:$imm)>;
3870 def : Pat<(v8f16 (AArch64duplane16 (v8f16 V128:$Rn), VectorIndexH:$imm)),
3871 (DUPv8i16lane V128:$Rn, VectorIndexH:$imm)>;
3873 def : Pat<(v2f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
3874 (DUPv2i32lane V128:$Rn, VectorIndexS:$imm)>;
3875 def : Pat<(v4f32 (AArch64duplane32 (v4f32 V128:$Rn), VectorIndexS:$imm)),
3876 (DUPv4i32lane V128:$Rn, VectorIndexS:$imm)>;
3877 def : Pat<(v2f64 (AArch64duplane64 (v2f64 V128:$Rn), VectorIndexD:$imm)),
3878 (DUPv2i64lane V128:$Rn, VectorIndexD:$imm)>;
3880 // If there's an (AArch64dup (vector_extract ...) ...), we can use a duplane
3881 // instruction even if the types don't match: we just have to remap the lane
3882 // carefully. N.b. this trick only applies to truncations.
3883 def VecIndex_x2 : SDNodeXForm<imm, [{
3884 return CurDAG->getTargetConstant(2 * N->getZExtValue(), SDLoc(N), MVT::i64);
3886 def VecIndex_x4 : SDNodeXForm<imm, [{
3887 return CurDAG->getTargetConstant(4 * N->getZExtValue(), SDLoc(N), MVT::i64);
3889 def VecIndex_x8 : SDNodeXForm<imm, [{
3890 return CurDAG->getTargetConstant(8 * N->getZExtValue(), SDLoc(N), MVT::i64);
3893 multiclass DUPWithTruncPats<ValueType ResVT, ValueType Src64VT,
3894 ValueType Src128VT, ValueType ScalVT,
3895 Instruction DUP, SDNodeXForm IdxXFORM> {
3896 def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src128VT V128:$Rn),
3898 (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
3900 def : Pat<(ResVT (AArch64dup (ScalVT (vector_extract (Src64VT V64:$Rn),
3902 (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
3905 defm : DUPWithTruncPats<v8i8, v4i16, v8i16, i32, DUPv8i8lane, VecIndex_x2>;
3906 defm : DUPWithTruncPats<v8i8, v2i32, v4i32, i32, DUPv8i8lane, VecIndex_x4>;
3907 defm : DUPWithTruncPats<v4i16, v2i32, v4i32, i32, DUPv4i16lane, VecIndex_x2>;
3909 defm : DUPWithTruncPats<v16i8, v4i16, v8i16, i32, DUPv16i8lane, VecIndex_x2>;
3910 defm : DUPWithTruncPats<v16i8, v2i32, v4i32, i32, DUPv16i8lane, VecIndex_x4>;
3911 defm : DUPWithTruncPats<v8i16, v2i32, v4i32, i32, DUPv8i16lane, VecIndex_x2>;
3913 multiclass DUPWithTrunci64Pats<ValueType ResVT, Instruction DUP,
3914 SDNodeXForm IdxXFORM> {
3915 def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v2i64 V128:$Rn),
3917 (DUP V128:$Rn, (IdxXFORM imm:$idx))>;
3919 def : Pat<(ResVT (AArch64dup (i32 (trunc (extractelt (v1i64 V64:$Rn),
3921 (DUP (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), (IdxXFORM imm:$idx))>;
3924 defm : DUPWithTrunci64Pats<v8i8, DUPv8i8lane, VecIndex_x8>;
3925 defm : DUPWithTrunci64Pats<v4i16, DUPv4i16lane, VecIndex_x4>;
3926 defm : DUPWithTrunci64Pats<v2i32, DUPv2i32lane, VecIndex_x2>;
3928 defm : DUPWithTrunci64Pats<v16i8, DUPv16i8lane, VecIndex_x8>;
3929 defm : DUPWithTrunci64Pats<v8i16, DUPv8i16lane, VecIndex_x4>;
3930 defm : DUPWithTrunci64Pats<v4i32, DUPv4i32lane, VecIndex_x2>;
3932 // SMOV and UMOV definitions, with some extra patterns for convenience
3936 def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
3937 (i32 (SMOVvi8to32 V128:$Rn, VectorIndexB:$idx))>;
3938 def : Pat<(sext_inreg (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx), i8),
3939 (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
3940 def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
3941 (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
3942 def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
3943 (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
3944 def : Pat<(sext_inreg (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),i16),
3945 (i32 (SMOVvi16to32 V128:$Rn, VectorIndexH:$idx))>;
3946 def : Pat<(sext (i32 (vector_extract (v4i32 V128:$Rn), VectorIndexS:$idx))),
3947 (i64 (SMOVvi32to64 V128:$Rn, VectorIndexS:$idx))>;
3949 def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v16i8 V128:$Rn),
3950 VectorIndexB:$idx)))), i8),
3951 (i64 (SMOVvi8to64 V128:$Rn, VectorIndexB:$idx))>;
3952 def : Pat<(sext_inreg (i64 (anyext (i32 (vector_extract (v8i16 V128:$Rn),
3953 VectorIndexH:$idx)))), i16),
3954 (i64 (SMOVvi16to64 V128:$Rn, VectorIndexH:$idx))>;
3956 // Extracting i8 or i16 elements will have the zero-extend transformed to
3957 // an 'and' mask by type legalization since neither i8 nor i16 are legal types
3958 // for AArch64. Match these patterns here since UMOV already zeroes out the high
3959 // bits of the destination register.
3960 def : Pat<(and (vector_extract (v16i8 V128:$Rn), VectorIndexB:$idx),
3962 (i32 (UMOVvi8 V128:$Rn, VectorIndexB:$idx))>;
3963 def : Pat<(and (vector_extract (v8i16 V128:$Rn), VectorIndexH:$idx),
3965 (i32 (UMOVvi16 V128:$Rn, VectorIndexH:$idx))>;
3969 def : Pat<(v16i8 (scalar_to_vector GPR32:$Rn)),
3970 (SUBREG_TO_REG (i32 0),
3971 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
3972 def : Pat<(v8i8 (scalar_to_vector GPR32:$Rn)),
3973 (SUBREG_TO_REG (i32 0),
3974 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
3976 def : Pat<(v8i16 (scalar_to_vector GPR32:$Rn)),
3977 (SUBREG_TO_REG (i32 0),
3978 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
3979 def : Pat<(v4i16 (scalar_to_vector GPR32:$Rn)),
3980 (SUBREG_TO_REG (i32 0),
3981 (f32 (COPY_TO_REGCLASS GPR32:$Rn, FPR32)), ssub)>;
3983 def : Pat<(v2i32 (scalar_to_vector (i32 FPR32:$Rn))),
3984 (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
3985 (i32 FPR32:$Rn), ssub))>;
3986 def : Pat<(v4i32 (scalar_to_vector (i32 FPR32:$Rn))),
3987 (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
3988 (i32 FPR32:$Rn), ssub))>;
3989 def : Pat<(v2i64 (scalar_to_vector (i64 FPR64:$Rn))),
3990 (v2i64 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)),
3991 (i64 FPR64:$Rn), dsub))>;
3993 def : Pat<(v4f16 (scalar_to_vector (f16 FPR16:$Rn))),
3994 (INSERT_SUBREG (v4f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
3995 def : Pat<(v8f16 (scalar_to_vector (f16 FPR16:$Rn))),
3996 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rn, hsub)>;
3998 def : Pat<(v4f32 (scalar_to_vector (f32 FPR32:$Rn))),
3999 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
4000 def : Pat<(v2f32 (scalar_to_vector (f32 FPR32:$Rn))),
4001 (INSERT_SUBREG (v2f32 (IMPLICIT_DEF)), FPR32:$Rn, ssub)>;
4002 def : Pat<(v2f64 (scalar_to_vector (f64 FPR64:$Rn))),
4003 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rn, dsub)>;
4005 def : Pat<(v4f16 (vector_insert (v4f16 V64:$Rn),
4006 (f16 FPR16:$Rm), (i64 VectorIndexS:$imm))),
4009 (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), V64:$Rn, dsub)),
4011 (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
4015 def : Pat<(v8f16 (vector_insert (v8f16 V128:$Rn),
4016 (f16 FPR16:$Rm), (i64 VectorIndexH:$imm))),
4018 V128:$Rn, VectorIndexH:$imm,
4019 (v8f16 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR16:$Rm, hsub)),
4022 def : Pat<(v2f32 (vector_insert (v2f32 V64:$Rn),
4023 (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
4026 (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), V64:$Rn, dsub)),
4028 (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
4031 def : Pat<(v4f32 (vector_insert (v4f32 V128:$Rn),
4032 (f32 FPR32:$Rm), (i64 VectorIndexS:$imm))),
4034 V128:$Rn, VectorIndexS:$imm,
4035 (v4f32 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR32:$Rm, ssub)),
4037 def : Pat<(v2f64 (vector_insert (v2f64 V128:$Rn),
4038 (f64 FPR64:$Rm), (i64 VectorIndexD:$imm))),
4040 V128:$Rn, VectorIndexD:$imm,
4041 (v2f64 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$Rm, dsub)),
4044 // Copy an element at a constant index in one vector into a constant indexed
4045 // element of another.
4046 // FIXME refactor to a shared class/dev parameterized on vector type, vector
4047 // index type and INS extension
4048 def : Pat<(v16i8 (int_aarch64_neon_vcopy_lane
4049 (v16i8 V128:$Vd), VectorIndexB:$idx, (v16i8 V128:$Vs),
4050 VectorIndexB:$idx2)),
4052 V128:$Vd, VectorIndexB:$idx, V128:$Vs, VectorIndexB:$idx2)
4054 def : Pat<(v8i16 (int_aarch64_neon_vcopy_lane
4055 (v8i16 V128:$Vd), VectorIndexH:$idx, (v8i16 V128:$Vs),
4056 VectorIndexH:$idx2)),
4058 V128:$Vd, VectorIndexH:$idx, V128:$Vs, VectorIndexH:$idx2)
4060 def : Pat<(v4i32 (int_aarch64_neon_vcopy_lane
4061 (v4i32 V128:$Vd), VectorIndexS:$idx, (v4i32 V128:$Vs),
4062 VectorIndexS:$idx2)),
4064 V128:$Vd, VectorIndexS:$idx, V128:$Vs, VectorIndexS:$idx2)
4066 def : Pat<(v2i64 (int_aarch64_neon_vcopy_lane
4067 (v2i64 V128:$Vd), VectorIndexD:$idx, (v2i64 V128:$Vs),
4068 VectorIndexD:$idx2)),
4070 V128:$Vd, VectorIndexD:$idx, V128:$Vs, VectorIndexD:$idx2)
4073 multiclass Neon_INS_elt_pattern<ValueType VT128, ValueType VT64,
4074 ValueType VTScal, Instruction INS> {
4075 def : Pat<(VT128 (vector_insert V128:$src,
4076 (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
4078 (INS V128:$src, imm:$Immd, V128:$Rn, imm:$Immn)>;
4080 def : Pat<(VT128 (vector_insert V128:$src,
4081 (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
4083 (INS V128:$src, imm:$Immd,
4084 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn)>;
4086 def : Pat<(VT64 (vector_insert V64:$src,
4087 (VTScal (vector_extract (VT128 V128:$Rn), imm:$Immn)),
4089 (EXTRACT_SUBREG (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub),
4090 imm:$Immd, V128:$Rn, imm:$Immn),
4093 def : Pat<(VT64 (vector_insert V64:$src,
4094 (VTScal (vector_extract (VT64 V64:$Rn), imm:$Immn)),
4097 (INS (SUBREG_TO_REG (i64 0), V64:$src, dsub), imm:$Immd,
4098 (SUBREG_TO_REG (i64 0), V64:$Rn, dsub), imm:$Immn),
4102 defm : Neon_INS_elt_pattern<v8f16, v4f16, f16, INSvi16lane>;
4103 defm : Neon_INS_elt_pattern<v4f32, v2f32, f32, INSvi32lane>;
4104 defm : Neon_INS_elt_pattern<v2f64, v1f64, f64, INSvi64lane>;
4107 // Floating point vector extractions are codegen'd as either a sequence of
4108 // subregister extractions, or a MOV (aka CPY here, alias for DUP) if
4109 // the lane number is anything other than zero.
4110 def : Pat<(vector_extract (v2f64 V128:$Rn), 0),
4111 (f64 (EXTRACT_SUBREG V128:$Rn, dsub))>;
4112 def : Pat<(vector_extract (v4f32 V128:$Rn), 0),
4113 (f32 (EXTRACT_SUBREG V128:$Rn, ssub))>;
4114 def : Pat<(vector_extract (v8f16 V128:$Rn), 0),
4115 (f16 (EXTRACT_SUBREG V128:$Rn, hsub))>;
4117 def : Pat<(vector_extract (v2f64 V128:$Rn), VectorIndexD:$idx),
4118 (f64 (CPYi64 V128:$Rn, VectorIndexD:$idx))>;
4119 def : Pat<(vector_extract (v4f32 V128:$Rn), VectorIndexS:$idx),
4120 (f32 (CPYi32 V128:$Rn, VectorIndexS:$idx))>;
4121 def : Pat<(vector_extract (v8f16 V128:$Rn), VectorIndexH:$idx),
4122 (f16 (CPYi16 V128:$Rn, VectorIndexH:$idx))>;
4124 // All concat_vectors operations are canonicalised to act on i64 vectors for
4125 // AArch64. In the general case we need an instruction, which had just as well be
4127 class ConcatPat<ValueType DstTy, ValueType SrcTy>
4128 : Pat<(DstTy (concat_vectors (SrcTy V64:$Rd), V64:$Rn)),
4129 (INSvi64lane (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub), 1,
4130 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub), 0)>;
4132 def : ConcatPat<v2i64, v1i64>;
4133 def : ConcatPat<v2f64, v1f64>;
4134 def : ConcatPat<v4i32, v2i32>;
4135 def : ConcatPat<v4f32, v2f32>;
4136 def : ConcatPat<v8i16, v4i16>;
4137 def : ConcatPat<v8f16, v4f16>;
4138 def : ConcatPat<v16i8, v8i8>;
4140 // If the high lanes are undef, though, we can just ignore them:
4141 class ConcatUndefPat<ValueType DstTy, ValueType SrcTy>
4142 : Pat<(DstTy (concat_vectors (SrcTy V64:$Rn), undef)),
4143 (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rn, dsub)>;
4145 def : ConcatUndefPat<v2i64, v1i64>;
4146 def : ConcatUndefPat<v2f64, v1f64>;
4147 def : ConcatUndefPat<v4i32, v2i32>;
4148 def : ConcatUndefPat<v4f32, v2f32>;
4149 def : ConcatUndefPat<v8i16, v4i16>;
4150 def : ConcatUndefPat<v16i8, v8i8>;
4152 //----------------------------------------------------------------------------
4153 // AdvSIMD across lanes instructions
4154 //----------------------------------------------------------------------------
4156 defm ADDV : SIMDAcrossLanesBHS<0, 0b11011, "addv">;
4157 defm SMAXV : SIMDAcrossLanesBHS<0, 0b01010, "smaxv">;
4158 defm SMINV : SIMDAcrossLanesBHS<0, 0b11010, "sminv">;
4159 defm UMAXV : SIMDAcrossLanesBHS<1, 0b01010, "umaxv">;
4160 defm UMINV : SIMDAcrossLanesBHS<1, 0b11010, "uminv">;
4161 defm SADDLV : SIMDAcrossLanesHSD<0, 0b00011, "saddlv">;
4162 defm UADDLV : SIMDAcrossLanesHSD<1, 0b00011, "uaddlv">;
4163 defm FMAXNMV : SIMDFPAcrossLanes<0b01100, 0, "fmaxnmv", int_aarch64_neon_fmaxnmv>;
4164 defm FMAXV : SIMDFPAcrossLanes<0b01111, 0, "fmaxv", int_aarch64_neon_fmaxv>;
4165 defm FMINNMV : SIMDFPAcrossLanes<0b01100, 1, "fminnmv", int_aarch64_neon_fminnmv>;
4166 defm FMINV : SIMDFPAcrossLanes<0b01111, 1, "fminv", int_aarch64_neon_fminv>;
4168 // Patterns for across-vector intrinsics, that have a node equivalent, that
4169 // returns a vector (with only the low lane defined) instead of a scalar.
4170 // In effect, opNode is the same as (scalar_to_vector (IntNode)).
4171 multiclass SIMDAcrossLanesIntrinsic<string baseOpc,
4172 SDPatternOperator opNode> {
4173 // If a lane instruction caught the vector_extract around opNode, we can
4174 // directly match the latter to the instruction.
4175 def : Pat<(v8i8 (opNode V64:$Rn)),
4176 (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
4177 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub)>;
4178 def : Pat<(v16i8 (opNode V128:$Rn)),
4179 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4180 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub)>;
4181 def : Pat<(v4i16 (opNode V64:$Rn)),
4182 (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
4183 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub)>;
4184 def : Pat<(v8i16 (opNode V128:$Rn)),
4185 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
4186 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub)>;
4187 def : Pat<(v4i32 (opNode V128:$Rn)),
4188 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
4189 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), ssub)>;
4192 // If none did, fallback to the explicit patterns, consuming the vector_extract.
4193 def : Pat<(i32 (vector_extract (insert_subvector undef, (v8i8 (opNode V64:$Rn)),
4194 (i32 0)), (i64 0))),
4195 (EXTRACT_SUBREG (INSERT_SUBREG (v8i8 (IMPLICIT_DEF)),
4196 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn),
4198 def : Pat<(i32 (vector_extract (v16i8 (opNode V128:$Rn)), (i64 0))),
4199 (EXTRACT_SUBREG (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4200 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn),
4202 def : Pat<(i32 (vector_extract (insert_subvector undef,
4203 (v4i16 (opNode V64:$Rn)), (i32 0)), (i64 0))),
4204 (EXTRACT_SUBREG (INSERT_SUBREG (v4i16 (IMPLICIT_DEF)),
4205 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn),
4207 def : Pat<(i32 (vector_extract (v8i16 (opNode V128:$Rn)), (i64 0))),
4208 (EXTRACT_SUBREG (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)),
4209 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn),
4211 def : Pat<(i32 (vector_extract (v4i32 (opNode V128:$Rn)), (i64 0))),
4212 (EXTRACT_SUBREG (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
4213 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn),
4218 multiclass SIMDAcrossLanesSignedIntrinsic<string baseOpc,
4219 SDPatternOperator opNode>
4220 : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
4221 // If there is a sign extension after this intrinsic, consume it as smov already
4223 def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
4224 (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), i8)),
4226 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4227 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
4229 def : Pat<(i32 (sext_inreg (i32 (vector_extract
4230 (opNode (v16i8 V128:$Rn)), (i64 0))), i8)),
4232 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4233 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
4235 def : Pat<(i32 (sext_inreg (i32 (vector_extract (insert_subvector undef,
4236 (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), i16)),
4238 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4239 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
4241 def : Pat<(i32 (sext_inreg (i32 (vector_extract
4242 (opNode (v8i16 V128:$Rn)), (i64 0))), i16)),
4244 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4245 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
4249 multiclass SIMDAcrossLanesUnsignedIntrinsic<string baseOpc,
4250 SDPatternOperator opNode>
4251 : SIMDAcrossLanesIntrinsic<baseOpc, opNode> {
4252 // If there is a masking operation keeping only what has been actually
4253 // generated, consume it.
4254 def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
4255 (opNode (v8i8 V64:$Rn)), (i32 0)), (i64 0))), maski8_or_more)),
4256 (i32 (EXTRACT_SUBREG
4257 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4258 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), bsub),
4260 def : Pat<(i32 (and (i32 (vector_extract (opNode (v16i8 V128:$Rn)), (i64 0))),
4262 (i32 (EXTRACT_SUBREG
4263 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4264 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), bsub),
4266 def : Pat<(i32 (and (i32 (vector_extract (insert_subvector undef,
4267 (opNode (v4i16 V64:$Rn)), (i32 0)), (i64 0))), maski16_or_more)),
4268 (i32 (EXTRACT_SUBREG
4269 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4270 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), hsub),
4272 def : Pat<(i32 (and (i32 (vector_extract (opNode (v8i16 V128:$Rn)), (i64 0))),
4274 (i32 (EXTRACT_SUBREG
4275 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4276 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), hsub),
4280 defm : SIMDAcrossLanesSignedIntrinsic<"ADDV", AArch64saddv>;
4281 // vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
4282 def : Pat<(v2i32 (AArch64saddv (v2i32 V64:$Rn))),
4283 (ADDPv2i32 V64:$Rn, V64:$Rn)>;
4285 defm : SIMDAcrossLanesUnsignedIntrinsic<"ADDV", AArch64uaddv>;
4286 // vaddv_[su]32 is special; -> ADDP Vd.2S,Vn.2S,Vm.2S; return Vd.s[0];Vn==Vm
4287 def : Pat<(v2i32 (AArch64uaddv (v2i32 V64:$Rn))),
4288 (ADDPv2i32 V64:$Rn, V64:$Rn)>;
4290 defm : SIMDAcrossLanesSignedIntrinsic<"SMAXV", AArch64smaxv>;
4291 def : Pat<(v2i32 (AArch64smaxv (v2i32 V64:$Rn))),
4292 (SMAXPv2i32 V64:$Rn, V64:$Rn)>;
4294 defm : SIMDAcrossLanesSignedIntrinsic<"SMINV", AArch64sminv>;
4295 def : Pat<(v2i32 (AArch64sminv (v2i32 V64:$Rn))),
4296 (SMINPv2i32 V64:$Rn, V64:$Rn)>;
4298 defm : SIMDAcrossLanesUnsignedIntrinsic<"UMAXV", AArch64umaxv>;
4299 def : Pat<(v2i32 (AArch64umaxv (v2i32 V64:$Rn))),
4300 (UMAXPv2i32 V64:$Rn, V64:$Rn)>;
4302 defm : SIMDAcrossLanesUnsignedIntrinsic<"UMINV", AArch64uminv>;
4303 def : Pat<(v2i32 (AArch64uminv (v2i32 V64:$Rn))),
4304 (UMINPv2i32 V64:$Rn, V64:$Rn)>;
4306 multiclass SIMDAcrossLanesSignedLongIntrinsic<string baseOpc, Intrinsic intOp> {
4307 def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
4309 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4310 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
4312 def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
4314 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4315 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
4318 def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
4319 (i32 (EXTRACT_SUBREG
4320 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4321 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
4323 def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
4324 (i32 (EXTRACT_SUBREG
4325 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4326 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
4329 def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
4330 (i64 (EXTRACT_SUBREG
4331 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4332 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
4336 multiclass SIMDAcrossLanesUnsignedLongIntrinsic<string baseOpc,
4338 def : Pat<(i32 (intOp (v8i8 V64:$Rn))),
4339 (i32 (EXTRACT_SUBREG
4340 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4341 (!cast<Instruction>(!strconcat(baseOpc, "v8i8v")) V64:$Rn), hsub),
4343 def : Pat<(i32 (intOp (v16i8 V128:$Rn))),
4344 (i32 (EXTRACT_SUBREG
4345 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4346 (!cast<Instruction>(!strconcat(baseOpc, "v16i8v")) V128:$Rn), hsub),
4349 def : Pat<(i32 (intOp (v4i16 V64:$Rn))),
4350 (i32 (EXTRACT_SUBREG
4351 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4352 (!cast<Instruction>(!strconcat(baseOpc, "v4i16v")) V64:$Rn), ssub),
4354 def : Pat<(i32 (intOp (v8i16 V128:$Rn))),
4355 (i32 (EXTRACT_SUBREG
4356 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4357 (!cast<Instruction>(!strconcat(baseOpc, "v8i16v")) V128:$Rn), ssub),
4360 def : Pat<(i64 (intOp (v4i32 V128:$Rn))),
4361 (i64 (EXTRACT_SUBREG
4362 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4363 (!cast<Instruction>(!strconcat(baseOpc, "v4i32v")) V128:$Rn), dsub),
4367 defm : SIMDAcrossLanesSignedLongIntrinsic<"SADDLV", int_aarch64_neon_saddlv>;
4368 defm : SIMDAcrossLanesUnsignedLongIntrinsic<"UADDLV", int_aarch64_neon_uaddlv>;
4370 // The vaddlv_s32 intrinsic gets mapped to SADDLP.
4371 def : Pat<(i64 (int_aarch64_neon_saddlv (v2i32 V64:$Rn))),
4372 (i64 (EXTRACT_SUBREG
4373 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4374 (SADDLPv2i32_v1i64 V64:$Rn), dsub),
4376 // The vaddlv_u32 intrinsic gets mapped to UADDLP.
4377 def : Pat<(i64 (int_aarch64_neon_uaddlv (v2i32 V64:$Rn))),
4378 (i64 (EXTRACT_SUBREG
4379 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)),
4380 (UADDLPv2i32_v1i64 V64:$Rn), dsub),
4383 //------------------------------------------------------------------------------
4384 // AdvSIMD modified immediate instructions
4385 //------------------------------------------------------------------------------
4388 defm BIC : SIMDModifiedImmVectorShiftTied<1, 0b11, 0b01, "bic", AArch64bici>;
4390 defm ORR : SIMDModifiedImmVectorShiftTied<0, 0b11, 0b01, "orr", AArch64orri>;
4392 def : InstAlias<"bic $Vd.4h, $imm", (BICv4i16 V64:$Vd, imm0_255:$imm, 0)>;
4393 def : InstAlias<"bic $Vd.8h, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0)>;
4394 def : InstAlias<"bic $Vd.2s, $imm", (BICv2i32 V64:$Vd, imm0_255:$imm, 0)>;
4395 def : InstAlias<"bic $Vd.4s, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0)>;
4397 def : InstAlias<"bic.4h $Vd, $imm", (BICv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
4398 def : InstAlias<"bic.8h $Vd, $imm", (BICv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
4399 def : InstAlias<"bic.2s $Vd, $imm", (BICv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
4400 def : InstAlias<"bic.4s $Vd, $imm", (BICv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
4402 def : InstAlias<"orr $Vd.4h, $imm", (ORRv4i16 V64:$Vd, imm0_255:$imm, 0)>;
4403 def : InstAlias<"orr $Vd.8h, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0)>;
4404 def : InstAlias<"orr $Vd.2s, $imm", (ORRv2i32 V64:$Vd, imm0_255:$imm, 0)>;
4405 def : InstAlias<"orr $Vd.4s, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0)>;
4407 def : InstAlias<"orr.4h $Vd, $imm", (ORRv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
4408 def : InstAlias<"orr.8h $Vd, $imm", (ORRv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
4409 def : InstAlias<"orr.2s $Vd, $imm", (ORRv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
4410 def : InstAlias<"orr.4s $Vd, $imm", (ORRv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
4413 def FMOVv2f64_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1111, V128, fpimm8,
4415 [(set (v2f64 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4416 def FMOVv2f32_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1111, V64, fpimm8,
4418 [(set (v2f32 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4419 def FMOVv4f32_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1111, V128, fpimm8,
4421 [(set (v4f32 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4422 let Predicates = [HasNEON, HasFullFP16] in {
4423 def FMOVv4f16_ns : SIMDModifiedImmVectorNoShift<0, 0, 1, 0b1111, V64, fpimm8,
4425 [(set (v4f16 V64:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4426 def FMOVv8f16_ns : SIMDModifiedImmVectorNoShift<1, 0, 1, 0b1111, V128, fpimm8,
4428 [(set (v8f16 V128:$Rd), (AArch64fmov imm0_255:$imm8))]>;
4429 } // Predicates = [HasNEON, HasFullFP16]
4433 // EDIT byte mask: scalar
4434 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
4435 def MOVID : SIMDModifiedImmScalarNoShift<0, 1, 0b1110, "movi",
4436 [(set FPR64:$Rd, simdimmtype10:$imm8)]>;
4437 // The movi_edit node has the immediate value already encoded, so we use
4438 // a plain imm0_255 here.
4439 def : Pat<(f64 (AArch64movi_edit imm0_255:$shift)),
4440 (MOVID imm0_255:$shift)>;
4442 def : Pat<(v1i64 immAllZerosV), (MOVID (i32 0))>;
4443 def : Pat<(v2i32 immAllZerosV), (MOVID (i32 0))>;
4444 def : Pat<(v4i16 immAllZerosV), (MOVID (i32 0))>;
4445 def : Pat<(v8i8 immAllZerosV), (MOVID (i32 0))>;
4447 def : Pat<(v1i64 immAllOnesV), (MOVID (i32 255))>;
4448 def : Pat<(v2i32 immAllOnesV), (MOVID (i32 255))>;
4449 def : Pat<(v4i16 immAllOnesV), (MOVID (i32 255))>;
4450 def : Pat<(v8i8 immAllOnesV), (MOVID (i32 255))>;
4452 // EDIT byte mask: 2d
4454 // The movi_edit node has the immediate value already encoded, so we use
4455 // a plain imm0_255 in the pattern
4456 let isReMaterializable = 1, isAsCheapAsAMove = 1 in
4457 def MOVIv2d_ns : SIMDModifiedImmVectorNoShift<1, 1, 0, 0b1110, V128,
4460 [(set (v2i64 V128:$Rd), (AArch64movi_edit imm0_255:$imm8))]>;
4462 def : Pat<(v2i64 immAllZerosV), (MOVIv2d_ns (i32 0))>;
4463 def : Pat<(v4i32 immAllZerosV), (MOVIv2d_ns (i32 0))>;
4464 def : Pat<(v8i16 immAllZerosV), (MOVIv2d_ns (i32 0))>;
4465 def : Pat<(v16i8 immAllZerosV), (MOVIv2d_ns (i32 0))>;
4467 def : Pat<(v2i64 immAllOnesV), (MOVIv2d_ns (i32 255))>;
4468 def : Pat<(v4i32 immAllOnesV), (MOVIv2d_ns (i32 255))>;
4469 def : Pat<(v8i16 immAllOnesV), (MOVIv2d_ns (i32 255))>;
4470 def : Pat<(v16i8 immAllOnesV), (MOVIv2d_ns (i32 255))>;
4472 def : Pat<(v2f64 (AArch64dup (f64 fpimm0))), (MOVIv2d_ns (i32 0))>;
4473 def : Pat<(v4f32 (AArch64dup (f32 fpimm0))), (MOVIv2d_ns (i32 0))>;
4475 // EDIT per word & halfword: 2s, 4h, 4s, & 8h
4476 defm MOVI : SIMDModifiedImmVectorShift<0, 0b10, 0b00, "movi">;
4478 def : InstAlias<"movi $Vd.4h, $imm", (MOVIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
4479 def : InstAlias<"movi $Vd.8h, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
4480 def : InstAlias<"movi $Vd.2s, $imm", (MOVIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
4481 def : InstAlias<"movi $Vd.4s, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
4483 def : InstAlias<"movi.4h $Vd, $imm", (MOVIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
4484 def : InstAlias<"movi.8h $Vd, $imm", (MOVIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
4485 def : InstAlias<"movi.2s $Vd, $imm", (MOVIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
4486 def : InstAlias<"movi.4s $Vd, $imm", (MOVIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
4488 def : Pat<(v2i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
4489 (MOVIv2i32 imm0_255:$imm8, imm:$shift)>;
4490 def : Pat<(v4i32 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
4491 (MOVIv4i32 imm0_255:$imm8, imm:$shift)>;
4492 def : Pat<(v4i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
4493 (MOVIv4i16 imm0_255:$imm8, imm:$shift)>;
4494 def : Pat<(v8i16 (AArch64movi_shift imm0_255:$imm8, (i32 imm:$shift))),
4495 (MOVIv8i16 imm0_255:$imm8, imm:$shift)>;
4497 // EDIT per word: 2s & 4s with MSL shifter
4498 def MOVIv2s_msl : SIMDModifiedImmMoveMSL<0, 0, {1,1,0,?}, V64, "movi", ".2s",
4499 [(set (v2i32 V64:$Rd),
4500 (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
4501 def MOVIv4s_msl : SIMDModifiedImmMoveMSL<1, 0, {1,1,0,?}, V128, "movi", ".4s",
4502 [(set (v4i32 V128:$Rd),
4503 (AArch64movi_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
4505 // Per byte: 8b & 16b
4506 def MOVIv8b_ns : SIMDModifiedImmVectorNoShift<0, 0, 0, 0b1110, V64, imm0_255,
4508 [(set (v8i8 V64:$Rd), (AArch64movi imm0_255:$imm8))]>;
4509 def MOVIv16b_ns : SIMDModifiedImmVectorNoShift<1, 0, 0, 0b1110, V128, imm0_255,
4511 [(set (v16i8 V128:$Rd), (AArch64movi imm0_255:$imm8))]>;
4515 // EDIT per word & halfword: 2s, 4h, 4s, & 8h
4516 defm MVNI : SIMDModifiedImmVectorShift<1, 0b10, 0b00, "mvni">;
4518 def : InstAlias<"mvni $Vd.4h, $imm", (MVNIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
4519 def : InstAlias<"mvni $Vd.8h, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
4520 def : InstAlias<"mvni $Vd.2s, $imm", (MVNIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
4521 def : InstAlias<"mvni $Vd.4s, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
4523 def : InstAlias<"mvni.4h $Vd, $imm", (MVNIv4i16 V64:$Vd, imm0_255:$imm, 0), 0>;
4524 def : InstAlias<"mvni.8h $Vd, $imm", (MVNIv8i16 V128:$Vd, imm0_255:$imm, 0), 0>;
4525 def : InstAlias<"mvni.2s $Vd, $imm", (MVNIv2i32 V64:$Vd, imm0_255:$imm, 0), 0>;
4526 def : InstAlias<"mvni.4s $Vd, $imm", (MVNIv4i32 V128:$Vd, imm0_255:$imm, 0), 0>;
4528 def : Pat<(v2i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
4529 (MVNIv2i32 imm0_255:$imm8, imm:$shift)>;
4530 def : Pat<(v4i32 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
4531 (MVNIv4i32 imm0_255:$imm8, imm:$shift)>;
4532 def : Pat<(v4i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
4533 (MVNIv4i16 imm0_255:$imm8, imm:$shift)>;
4534 def : Pat<(v8i16 (AArch64mvni_shift imm0_255:$imm8, (i32 imm:$shift))),
4535 (MVNIv8i16 imm0_255:$imm8, imm:$shift)>;
4537 // EDIT per word: 2s & 4s with MSL shifter
4538 def MVNIv2s_msl : SIMDModifiedImmMoveMSL<0, 1, {1,1,0,?}, V64, "mvni", ".2s",
4539 [(set (v2i32 V64:$Rd),
4540 (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
4541 def MVNIv4s_msl : SIMDModifiedImmMoveMSL<1, 1, {1,1,0,?}, V128, "mvni", ".4s",
4542 [(set (v4i32 V128:$Rd),
4543 (AArch64mvni_msl imm0_255:$imm8, (i32 imm:$shift)))]>;
4545 //----------------------------------------------------------------------------
4546 // AdvSIMD indexed element
4547 //----------------------------------------------------------------------------
4549 let hasSideEffects = 0 in {
4550 defm FMLA : SIMDFPIndexedTied<0, 0b0001, "fmla">;
4551 defm FMLS : SIMDFPIndexedTied<0, 0b0101, "fmls">;
4554 // NOTE: Operands are reordered in the FMLA/FMLS PatFrags because the
4555 // instruction expects the addend first, while the intrinsic expects it last.
4557 // On the other hand, there are quite a few valid combinatorial options due to
4558 // the commutativity of multiplication and the fact that (-x) * y = x * (-y).
4559 defm : SIMDFPIndexedTiedPatterns<"FMLA",
4560 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)>>;
4561 defm : SIMDFPIndexedTiedPatterns<"FMLA",
4562 TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)>>;
4564 defm : SIMDFPIndexedTiedPatterns<"FMLS",
4565 TriOpFrag<(fma node:$MHS, (fneg node:$RHS), node:$LHS)> >;
4566 defm : SIMDFPIndexedTiedPatterns<"FMLS",
4567 TriOpFrag<(fma node:$RHS, (fneg node:$MHS), node:$LHS)> >;
4568 defm : SIMDFPIndexedTiedPatterns<"FMLS",
4569 TriOpFrag<(fma (fneg node:$RHS), node:$MHS, node:$LHS)> >;
4570 defm : SIMDFPIndexedTiedPatterns<"FMLS",
4571 TriOpFrag<(fma (fneg node:$MHS), node:$RHS, node:$LHS)> >;
4573 multiclass FMLSIndexedAfterNegPatterns<SDPatternOperator OpNode> {
4574 // 3 variants for the .2s version: DUPLANE from 128-bit, DUPLANE from 64-bit
4576 def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
4577 (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
4578 VectorIndexS:$idx))),
4579 (FMLSv2i32_indexed V64:$Rd, V64:$Rn, V128:$Rm, VectorIndexS:$idx)>;
4580 def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
4581 (v2f32 (AArch64duplane32
4582 (v4f32 (insert_subvector undef,
4583 (v2f32 (fneg V64:$Rm)),
4585 VectorIndexS:$idx)))),
4586 (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
4587 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
4588 VectorIndexS:$idx)>;
4589 def : Pat<(v2f32 (OpNode (v2f32 V64:$Rd), (v2f32 V64:$Rn),
4590 (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
4591 (FMLSv2i32_indexed V64:$Rd, V64:$Rn,
4592 (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
4594 // 3 variants for the .4s version: DUPLANE from 128-bit, DUPLANE from 64-bit
4596 def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
4597 (AArch64duplane32 (v4f32 (fneg V128:$Rm)),
4598 VectorIndexS:$idx))),
4599 (FMLSv4i32_indexed V128:$Rd, V128:$Rn, V128:$Rm,
4600 VectorIndexS:$idx)>;
4601 def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
4602 (v4f32 (AArch64duplane32
4603 (v4f32 (insert_subvector undef,
4604 (v2f32 (fneg V64:$Rm)),
4606 VectorIndexS:$idx)))),
4607 (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
4608 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub),
4609 VectorIndexS:$idx)>;
4610 def : Pat<(v4f32 (OpNode (v4f32 V128:$Rd), (v4f32 V128:$Rn),
4611 (AArch64dup (f32 (fneg FPR32Op:$Rm))))),
4612 (FMLSv4i32_indexed V128:$Rd, V128:$Rn,
4613 (SUBREG_TO_REG (i32 0), FPR32Op:$Rm, ssub), (i64 0))>;
4615 // 2 variants for the .2d version: DUPLANE from 128-bit, and DUP scalar
4616 // (DUPLANE from 64-bit would be trivial).
4617 def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
4618 (AArch64duplane64 (v2f64 (fneg V128:$Rm)),
4619 VectorIndexD:$idx))),
4621 V128:$Rd, V128:$Rn, V128:$Rm, VectorIndexS:$idx)>;
4622 def : Pat<(v2f64 (OpNode (v2f64 V128:$Rd), (v2f64 V128:$Rn),
4623 (AArch64dup (f64 (fneg FPR64Op:$Rm))))),
4624 (FMLSv2i64_indexed V128:$Rd, V128:$Rn,
4625 (SUBREG_TO_REG (i32 0), FPR64Op:$Rm, dsub), (i64 0))>;
4627 // 2 variants for 32-bit scalar version: extract from .2s or from .4s
4628 def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
4629 (vector_extract (v4f32 (fneg V128:$Rm)),
4630 VectorIndexS:$idx))),
4631 (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
4632 V128:$Rm, VectorIndexS:$idx)>;
4633 def : Pat<(f32 (OpNode (f32 FPR32:$Rd), (f32 FPR32:$Rn),
4634 (vector_extract (v4f32 (insert_subvector undef,
4635 (v2f32 (fneg V64:$Rm)),
4637 VectorIndexS:$idx))),
4638 (FMLSv1i32_indexed FPR32:$Rd, FPR32:$Rn,
4639 (SUBREG_TO_REG (i32 0), V64:$Rm, dsub), VectorIndexS:$idx)>;
4641 // 1 variant for 64-bit scalar version: extract from .1d or from .2d
4642 def : Pat<(f64 (OpNode (f64 FPR64:$Rd), (f64 FPR64:$Rn),
4643 (vector_extract (v2f64 (fneg V128:$Rm)),
4644 VectorIndexS:$idx))),
4645 (FMLSv1i64_indexed FPR64:$Rd, FPR64:$Rn,
4646 V128:$Rm, VectorIndexS:$idx)>;
4649 defm : FMLSIndexedAfterNegPatterns<
4650 TriOpFrag<(fma node:$RHS, node:$MHS, node:$LHS)> >;
4651 defm : FMLSIndexedAfterNegPatterns<
4652 TriOpFrag<(fma node:$MHS, node:$RHS, node:$LHS)> >;
4654 defm FMULX : SIMDFPIndexed<1, 0b1001, "fmulx", int_aarch64_neon_fmulx>;
4655 defm FMUL : SIMDFPIndexed<0, 0b1001, "fmul", fmul>;
4657 def : Pat<(v2f32 (fmul V64:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
4658 (FMULv2i32_indexed V64:$Rn,
4659 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
4661 def : Pat<(v4f32 (fmul V128:$Rn, (AArch64dup (f32 FPR32:$Rm)))),
4662 (FMULv4i32_indexed V128:$Rn,
4663 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR32:$Rm, ssub),
4665 def : Pat<(v2f64 (fmul V128:$Rn, (AArch64dup (f64 FPR64:$Rm)))),
4666 (FMULv2i64_indexed V128:$Rn,
4667 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$Rm, dsub),
4670 defm SQDMULH : SIMDIndexedHS<0, 0b1100, "sqdmulh", int_aarch64_neon_sqdmulh>;
4671 defm SQRDMULH : SIMDIndexedHS<0, 0b1101, "sqrdmulh", int_aarch64_neon_sqrdmulh>;
4672 defm MLA : SIMDVectorIndexedHSTied<1, 0b0000, "mla",
4673 TriOpFrag<(add node:$LHS, (mul node:$MHS, node:$RHS))>>;
4674 defm MLS : SIMDVectorIndexedHSTied<1, 0b0100, "mls",
4675 TriOpFrag<(sub node:$LHS, (mul node:$MHS, node:$RHS))>>;
4676 defm MUL : SIMDVectorIndexedHS<0, 0b1000, "mul", mul>;
4677 defm SMLAL : SIMDVectorIndexedLongSDTied<0, 0b0010, "smlal",
4678 TriOpFrag<(add node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
4679 defm SMLSL : SIMDVectorIndexedLongSDTied<0, 0b0110, "smlsl",
4680 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_smull node:$MHS, node:$RHS))>>;
4681 defm SMULL : SIMDVectorIndexedLongSD<0, 0b1010, "smull",
4682 int_aarch64_neon_smull>;
4683 defm SQDMLAL : SIMDIndexedLongSQDMLXSDTied<0, 0b0011, "sqdmlal",
4684 int_aarch64_neon_sqadd>;
4685 defm SQDMLSL : SIMDIndexedLongSQDMLXSDTied<0, 0b0111, "sqdmlsl",
4686 int_aarch64_neon_sqsub>;
4687 defm SQRDMLAH : SIMDIndexedSQRDMLxHSDTied<1, 0b1101, "sqrdmlah",
4688 int_aarch64_neon_sqadd>;
4689 defm SQRDMLSH : SIMDIndexedSQRDMLxHSDTied<1, 0b1111, "sqrdmlsh",
4690 int_aarch64_neon_sqsub>;
4691 defm SQDMULL : SIMDIndexedLongSD<0, 0b1011, "sqdmull", int_aarch64_neon_sqdmull>;
4692 defm UMLAL : SIMDVectorIndexedLongSDTied<1, 0b0010, "umlal",
4693 TriOpFrag<(add node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
4694 defm UMLSL : SIMDVectorIndexedLongSDTied<1, 0b0110, "umlsl",
4695 TriOpFrag<(sub node:$LHS, (int_aarch64_neon_umull node:$MHS, node:$RHS))>>;
4696 defm UMULL : SIMDVectorIndexedLongSD<1, 0b1010, "umull",
4697 int_aarch64_neon_umull>;
4699 // A scalar sqdmull with the second operand being a vector lane can be
4700 // handled directly with the indexed instruction encoding.
4701 def : Pat<(int_aarch64_neon_sqdmulls_scalar (i32 FPR32:$Rn),
4702 (vector_extract (v4i32 V128:$Vm),
4703 VectorIndexS:$idx)),
4704 (SQDMULLv1i64_indexed FPR32:$Rn, V128:$Vm, VectorIndexS:$idx)>;
4706 //----------------------------------------------------------------------------
4707 // AdvSIMD scalar shift instructions
4708 //----------------------------------------------------------------------------
4709 defm FCVTZS : SIMDFPScalarRShift<0, 0b11111, "fcvtzs">;
4710 defm FCVTZU : SIMDFPScalarRShift<1, 0b11111, "fcvtzu">;
4711 defm SCVTF : SIMDFPScalarRShift<0, 0b11100, "scvtf">;
4712 defm UCVTF : SIMDFPScalarRShift<1, 0b11100, "ucvtf">;
4713 // Codegen patterns for the above. We don't put these directly on the
4714 // instructions because TableGen's type inference can't handle the truth.
4715 // Having the same base pattern for fp <--> int totally freaks it out.
4716 def : Pat<(int_aarch64_neon_vcvtfp2fxs FPR32:$Rn, vecshiftR32:$imm),
4717 (FCVTZSs FPR32:$Rn, vecshiftR32:$imm)>;
4718 def : Pat<(int_aarch64_neon_vcvtfp2fxu FPR32:$Rn, vecshiftR32:$imm),
4719 (FCVTZUs FPR32:$Rn, vecshiftR32:$imm)>;
4720 def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxs (f64 FPR64:$Rn), vecshiftR64:$imm)),
4721 (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
4722 def : Pat<(i64 (int_aarch64_neon_vcvtfp2fxu (f64 FPR64:$Rn), vecshiftR64:$imm)),
4723 (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
4724 def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxs (v1f64 FPR64:$Rn),
4726 (FCVTZSd FPR64:$Rn, vecshiftR64:$imm)>;
4727 def : Pat<(v1i64 (int_aarch64_neon_vcvtfp2fxu (v1f64 FPR64:$Rn),
4729 (FCVTZUd FPR64:$Rn, vecshiftR64:$imm)>;
4730 def : Pat<(int_aarch64_neon_vcvtfxs2fp FPR32:$Rn, vecshiftR32:$imm),
4731 (SCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
4732 def : Pat<(int_aarch64_neon_vcvtfxu2fp FPR32:$Rn, vecshiftR32:$imm),
4733 (UCVTFs FPR32:$Rn, vecshiftR32:$imm)>;
4734 def : Pat<(f64 (int_aarch64_neon_vcvtfxs2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
4735 (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
4736 def : Pat<(f64 (int_aarch64_neon_vcvtfxu2fp (i64 FPR64:$Rn), vecshiftR64:$imm)),
4737 (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
4738 def : Pat<(v1f64 (int_aarch64_neon_vcvtfxs2fp (v1i64 FPR64:$Rn),
4740 (SCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
4741 def : Pat<(v1f64 (int_aarch64_neon_vcvtfxu2fp (v1i64 FPR64:$Rn),
4743 (UCVTFd FPR64:$Rn, vecshiftR64:$imm)>;
4745 defm SHL : SIMDScalarLShiftD< 0, 0b01010, "shl", AArch64vshl>;
4746 defm SLI : SIMDScalarLShiftDTied<1, 0b01010, "sli">;
4747 defm SQRSHRN : SIMDScalarRShiftBHS< 0, 0b10011, "sqrshrn",
4748 int_aarch64_neon_sqrshrn>;
4749 defm SQRSHRUN : SIMDScalarRShiftBHS< 1, 0b10001, "sqrshrun",
4750 int_aarch64_neon_sqrshrun>;
4751 defm SQSHLU : SIMDScalarLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
4752 defm SQSHL : SIMDScalarLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
4753 defm SQSHRN : SIMDScalarRShiftBHS< 0, 0b10010, "sqshrn",
4754 int_aarch64_neon_sqshrn>;
4755 defm SQSHRUN : SIMDScalarRShiftBHS< 1, 0b10000, "sqshrun",
4756 int_aarch64_neon_sqshrun>;
4757 defm SRI : SIMDScalarRShiftDTied< 1, 0b01000, "sri">;
4758 defm SRSHR : SIMDScalarRShiftD< 0, 0b00100, "srshr", AArch64srshri>;
4759 defm SRSRA : SIMDScalarRShiftDTied< 0, 0b00110, "srsra",
4760 TriOpFrag<(add node:$LHS,
4761 (AArch64srshri node:$MHS, node:$RHS))>>;
4762 defm SSHR : SIMDScalarRShiftD< 0, 0b00000, "sshr", AArch64vashr>;
4763 defm SSRA : SIMDScalarRShiftDTied< 0, 0b00010, "ssra",
4764 TriOpFrag<(add node:$LHS,
4765 (AArch64vashr node:$MHS, node:$RHS))>>;
4766 defm UQRSHRN : SIMDScalarRShiftBHS< 1, 0b10011, "uqrshrn",
4767 int_aarch64_neon_uqrshrn>;
4768 defm UQSHL : SIMDScalarLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
4769 defm UQSHRN : SIMDScalarRShiftBHS< 1, 0b10010, "uqshrn",
4770 int_aarch64_neon_uqshrn>;
4771 defm URSHR : SIMDScalarRShiftD< 1, 0b00100, "urshr", AArch64urshri>;
4772 defm URSRA : SIMDScalarRShiftDTied< 1, 0b00110, "ursra",
4773 TriOpFrag<(add node:$LHS,
4774 (AArch64urshri node:$MHS, node:$RHS))>>;
4775 defm USHR : SIMDScalarRShiftD< 1, 0b00000, "ushr", AArch64vlshr>;
4776 defm USRA : SIMDScalarRShiftDTied< 1, 0b00010, "usra",
4777 TriOpFrag<(add node:$LHS,
4778 (AArch64vlshr node:$MHS, node:$RHS))>>;
4780 //----------------------------------------------------------------------------
4781 // AdvSIMD vector shift instructions
4782 //----------------------------------------------------------------------------
4783 defm FCVTZS:SIMDVectorRShiftSD<0, 0b11111, "fcvtzs", int_aarch64_neon_vcvtfp2fxs>;
4784 defm FCVTZU:SIMDVectorRShiftSD<1, 0b11111, "fcvtzu", int_aarch64_neon_vcvtfp2fxu>;
4785 defm SCVTF: SIMDVectorRShiftToFP<0, 0b11100, "scvtf",
4786 int_aarch64_neon_vcvtfxs2fp>;
4787 defm RSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10001, "rshrn",
4788 int_aarch64_neon_rshrn>;
4789 defm SHL : SIMDVectorLShiftBHSD<0, 0b01010, "shl", AArch64vshl>;
4790 defm SHRN : SIMDVectorRShiftNarrowBHS<0, 0b10000, "shrn",
4791 BinOpFrag<(trunc (AArch64vashr node:$LHS, node:$RHS))>>;
4792 defm SLI : SIMDVectorLShiftBHSDTied<1, 0b01010, "sli", int_aarch64_neon_vsli>;
4793 def : Pat<(v1i64 (int_aarch64_neon_vsli (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
4794 (i32 vecshiftL64:$imm))),
4795 (SLId FPR64:$Rd, FPR64:$Rn, vecshiftL64:$imm)>;
4796 defm SQRSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10011, "sqrshrn",
4797 int_aarch64_neon_sqrshrn>;
4798 defm SQRSHRUN: SIMDVectorRShiftNarrowBHS<1, 0b10001, "sqrshrun",
4799 int_aarch64_neon_sqrshrun>;
4800 defm SQSHLU : SIMDVectorLShiftBHSD<1, 0b01100, "sqshlu", AArch64sqshlui>;
4801 defm SQSHL : SIMDVectorLShiftBHSD<0, 0b01110, "sqshl", AArch64sqshli>;
4802 defm SQSHRN : SIMDVectorRShiftNarrowBHS<0, 0b10010, "sqshrn",
4803 int_aarch64_neon_sqshrn>;
4804 defm SQSHRUN : SIMDVectorRShiftNarrowBHS<1, 0b10000, "sqshrun",
4805 int_aarch64_neon_sqshrun>;
4806 defm SRI : SIMDVectorRShiftBHSDTied<1, 0b01000, "sri", int_aarch64_neon_vsri>;
4807 def : Pat<(v1i64 (int_aarch64_neon_vsri (v1i64 FPR64:$Rd), (v1i64 FPR64:$Rn),
4808 (i32 vecshiftR64:$imm))),
4809 (SRId FPR64:$Rd, FPR64:$Rn, vecshiftR64:$imm)>;
4810 defm SRSHR : SIMDVectorRShiftBHSD<0, 0b00100, "srshr", AArch64srshri>;
4811 defm SRSRA : SIMDVectorRShiftBHSDTied<0, 0b00110, "srsra",
4812 TriOpFrag<(add node:$LHS,
4813 (AArch64srshri node:$MHS, node:$RHS))> >;
4814 defm SSHLL : SIMDVectorLShiftLongBHSD<0, 0b10100, "sshll",
4815 BinOpFrag<(AArch64vshl (sext node:$LHS), node:$RHS)>>;
4817 defm SSHR : SIMDVectorRShiftBHSD<0, 0b00000, "sshr", AArch64vashr>;
4818 defm SSRA : SIMDVectorRShiftBHSDTied<0, 0b00010, "ssra",
4819 TriOpFrag<(add node:$LHS, (AArch64vashr node:$MHS, node:$RHS))>>;
4820 defm UCVTF : SIMDVectorRShiftToFP<1, 0b11100, "ucvtf",
4821 int_aarch64_neon_vcvtfxu2fp>;
4822 defm UQRSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10011, "uqrshrn",
4823 int_aarch64_neon_uqrshrn>;
4824 defm UQSHL : SIMDVectorLShiftBHSD<1, 0b01110, "uqshl", AArch64uqshli>;
4825 defm UQSHRN : SIMDVectorRShiftNarrowBHS<1, 0b10010, "uqshrn",
4826 int_aarch64_neon_uqshrn>;
4827 defm URSHR : SIMDVectorRShiftBHSD<1, 0b00100, "urshr", AArch64urshri>;
4828 defm URSRA : SIMDVectorRShiftBHSDTied<1, 0b00110, "ursra",
4829 TriOpFrag<(add node:$LHS,
4830 (AArch64urshri node:$MHS, node:$RHS))> >;
4831 defm USHLL : SIMDVectorLShiftLongBHSD<1, 0b10100, "ushll",
4832 BinOpFrag<(AArch64vshl (zext node:$LHS), node:$RHS)>>;
4833 defm USHR : SIMDVectorRShiftBHSD<1, 0b00000, "ushr", AArch64vlshr>;
4834 defm USRA : SIMDVectorRShiftBHSDTied<1, 0b00010, "usra",
4835 TriOpFrag<(add node:$LHS, (AArch64vlshr node:$MHS, node:$RHS))> >;
4837 // SHRN patterns for when a logical right shift was used instead of arithmetic
4838 // (the immediate guarantees no sign bits actually end up in the result so it
4840 def : Pat<(v8i8 (trunc (AArch64vlshr (v8i16 V128:$Rn), vecshiftR16Narrow:$imm))),
4841 (SHRNv8i8_shift V128:$Rn, vecshiftR16Narrow:$imm)>;
4842 def : Pat<(v4i16 (trunc (AArch64vlshr (v4i32 V128:$Rn), vecshiftR32Narrow:$imm))),
4843 (SHRNv4i16_shift V128:$Rn, vecshiftR32Narrow:$imm)>;
4844 def : Pat<(v2i32 (trunc (AArch64vlshr (v2i64 V128:$Rn), vecshiftR64Narrow:$imm))),
4845 (SHRNv2i32_shift V128:$Rn, vecshiftR64Narrow:$imm)>;
4847 def : Pat<(v16i8 (concat_vectors (v8i8 V64:$Rd),
4848 (trunc (AArch64vlshr (v8i16 V128:$Rn),
4849 vecshiftR16Narrow:$imm)))),
4850 (SHRNv16i8_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
4851 V128:$Rn, vecshiftR16Narrow:$imm)>;
4852 def : Pat<(v8i16 (concat_vectors (v4i16 V64:$Rd),
4853 (trunc (AArch64vlshr (v4i32 V128:$Rn),
4854 vecshiftR32Narrow:$imm)))),
4855 (SHRNv8i16_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
4856 V128:$Rn, vecshiftR32Narrow:$imm)>;
4857 def : Pat<(v4i32 (concat_vectors (v2i32 V64:$Rd),
4858 (trunc (AArch64vlshr (v2i64 V128:$Rn),
4859 vecshiftR64Narrow:$imm)))),
4860 (SHRNv4i32_shift (INSERT_SUBREG (IMPLICIT_DEF), V64:$Rd, dsub),
4861 V128:$Rn, vecshiftR32Narrow:$imm)>;
4863 // Vector sign and zero extensions are implemented with SSHLL and USSHLL.
4864 // Anyexts are implemented as zexts.
4865 def : Pat<(v8i16 (sext (v8i8 V64:$Rn))), (SSHLLv8i8_shift V64:$Rn, (i32 0))>;
4866 def : Pat<(v8i16 (zext (v8i8 V64:$Rn))), (USHLLv8i8_shift V64:$Rn, (i32 0))>;
4867 def : Pat<(v8i16 (anyext (v8i8 V64:$Rn))), (USHLLv8i8_shift V64:$Rn, (i32 0))>;
4868 def : Pat<(v4i32 (sext (v4i16 V64:$Rn))), (SSHLLv4i16_shift V64:$Rn, (i32 0))>;
4869 def : Pat<(v4i32 (zext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
4870 def : Pat<(v4i32 (anyext (v4i16 V64:$Rn))), (USHLLv4i16_shift V64:$Rn, (i32 0))>;
4871 def : Pat<(v2i64 (sext (v2i32 V64:$Rn))), (SSHLLv2i32_shift V64:$Rn, (i32 0))>;
4872 def : Pat<(v2i64 (zext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
4873 def : Pat<(v2i64 (anyext (v2i32 V64:$Rn))), (USHLLv2i32_shift V64:$Rn, (i32 0))>;
4874 // Also match an extend from the upper half of a 128 bit source register.
4875 def : Pat<(v8i16 (anyext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
4876 (USHLLv16i8_shift V128:$Rn, (i32 0))>;
4877 def : Pat<(v8i16 (zext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
4878 (USHLLv16i8_shift V128:$Rn, (i32 0))>;
4879 def : Pat<(v8i16 (sext (v8i8 (extract_subvector V128:$Rn, (i64 8)) ))),
4880 (SSHLLv16i8_shift V128:$Rn, (i32 0))>;
4881 def : Pat<(v4i32 (anyext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
4882 (USHLLv8i16_shift V128:$Rn, (i32 0))>;
4883 def : Pat<(v4i32 (zext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
4884 (USHLLv8i16_shift V128:$Rn, (i32 0))>;
4885 def : Pat<(v4i32 (sext (v4i16 (extract_subvector V128:$Rn, (i64 4)) ))),
4886 (SSHLLv8i16_shift V128:$Rn, (i32 0))>;
4887 def : Pat<(v2i64 (anyext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
4888 (USHLLv4i32_shift V128:$Rn, (i32 0))>;
4889 def : Pat<(v2i64 (zext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
4890 (USHLLv4i32_shift V128:$Rn, (i32 0))>;
4891 def : Pat<(v2i64 (sext (v2i32 (extract_subvector V128:$Rn, (i64 2)) ))),
4892 (SSHLLv4i32_shift V128:$Rn, (i32 0))>;
4894 // Vector shift sxtl aliases
4895 def : InstAlias<"sxtl.8h $dst, $src1",
4896 (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
4897 def : InstAlias<"sxtl $dst.8h, $src1.8b",
4898 (SSHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
4899 def : InstAlias<"sxtl.4s $dst, $src1",
4900 (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
4901 def : InstAlias<"sxtl $dst.4s, $src1.4h",
4902 (SSHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
4903 def : InstAlias<"sxtl.2d $dst, $src1",
4904 (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
4905 def : InstAlias<"sxtl $dst.2d, $src1.2s",
4906 (SSHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
4908 // Vector shift sxtl2 aliases
4909 def : InstAlias<"sxtl2.8h $dst, $src1",
4910 (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
4911 def : InstAlias<"sxtl2 $dst.8h, $src1.16b",
4912 (SSHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
4913 def : InstAlias<"sxtl2.4s $dst, $src1",
4914 (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
4915 def : InstAlias<"sxtl2 $dst.4s, $src1.8h",
4916 (SSHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
4917 def : InstAlias<"sxtl2.2d $dst, $src1",
4918 (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
4919 def : InstAlias<"sxtl2 $dst.2d, $src1.4s",
4920 (SSHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
4922 // Vector shift uxtl aliases
4923 def : InstAlias<"uxtl.8h $dst, $src1",
4924 (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
4925 def : InstAlias<"uxtl $dst.8h, $src1.8b",
4926 (USHLLv8i8_shift V128:$dst, V64:$src1, 0)>;
4927 def : InstAlias<"uxtl.4s $dst, $src1",
4928 (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
4929 def : InstAlias<"uxtl $dst.4s, $src1.4h",
4930 (USHLLv4i16_shift V128:$dst, V64:$src1, 0)>;
4931 def : InstAlias<"uxtl.2d $dst, $src1",
4932 (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
4933 def : InstAlias<"uxtl $dst.2d, $src1.2s",
4934 (USHLLv2i32_shift V128:$dst, V64:$src1, 0)>;
4936 // Vector shift uxtl2 aliases
4937 def : InstAlias<"uxtl2.8h $dst, $src1",
4938 (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
4939 def : InstAlias<"uxtl2 $dst.8h, $src1.16b",
4940 (USHLLv16i8_shift V128:$dst, V128:$src1, 0)>;
4941 def : InstAlias<"uxtl2.4s $dst, $src1",
4942 (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
4943 def : InstAlias<"uxtl2 $dst.4s, $src1.8h",
4944 (USHLLv8i16_shift V128:$dst, V128:$src1, 0)>;
4945 def : InstAlias<"uxtl2.2d $dst, $src1",
4946 (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
4947 def : InstAlias<"uxtl2 $dst.2d, $src1.4s",
4948 (USHLLv4i32_shift V128:$dst, V128:$src1, 0)>;
4950 // If an integer is about to be converted to a floating point value,
4951 // just load it on the floating point unit.
4952 // These patterns are more complex because floating point loads do not
4953 // support sign extension.
4954 // The sign extension has to be explicitly added and is only supported for
4955 // one step: byte-to-half, half-to-word, word-to-doubleword.
4956 // SCVTF GPR -> FPR is 9 cycles.
4957 // SCVTF FPR -> FPR is 4 cyclces.
4958 // (sign extension with lengthen) SXTL FPR -> FPR is 2 cycles.
4959 // Therefore, we can do 2 sign extensions and one SCVTF FPR -> FPR
4960 // and still being faster.
4961 // However, this is not good for code size.
4962 // 8-bits -> float. 2 sizes step-up.
4963 class SExtLoadi8CVTf32Pat<dag addrmode, dag INST>
4964 : Pat<(f32 (sint_to_fp (i32 (sextloadi8 addrmode)))),
4965 (SCVTFv1i32 (f32 (EXTRACT_SUBREG
4970 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4977 Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
4979 def : SExtLoadi8CVTf32Pat<(ro8.Wpat GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext),
4980 (LDRBroW GPR64sp:$Rn, GPR32:$Rm, ro8.Wext:$ext)>;
4981 def : SExtLoadi8CVTf32Pat<(ro8.Xpat GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext),
4982 (LDRBroX GPR64sp:$Rn, GPR64:$Rm, ro8.Xext:$ext)>;
4983 def : SExtLoadi8CVTf32Pat<(am_indexed8 GPR64sp:$Rn, uimm12s1:$offset),
4984 (LDRBui GPR64sp:$Rn, uimm12s1:$offset)>;
4985 def : SExtLoadi8CVTf32Pat<(am_unscaled8 GPR64sp:$Rn, simm9:$offset),
4986 (LDURBi GPR64sp:$Rn, simm9:$offset)>;
4988 // 16-bits -> float. 1 size step-up.
4989 class SExtLoadi16CVTf32Pat<dag addrmode, dag INST>
4990 : Pat<(f32 (sint_to_fp (i32 (sextloadi16 addrmode)))),
4991 (SCVTFv1i32 (f32 (EXTRACT_SUBREG
4993 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
4997 ssub)))>, Requires<[NotForCodeSize]>;
4999 def : SExtLoadi16CVTf32Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
5000 (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
5001 def : SExtLoadi16CVTf32Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
5002 (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
5003 def : SExtLoadi16CVTf32Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
5004 (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
5005 def : SExtLoadi16CVTf32Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
5006 (LDURHi GPR64sp:$Rn, simm9:$offset)>;
5008 // 32-bits to 32-bits are handled in target specific dag combine:
5009 // performIntToFpCombine.
5010 // 64-bits integer to 32-bits floating point, not possible with
5011 // SCVTF on floating point registers (both source and destination
5012 // must have the same size).
5014 // Here are the patterns for 8, 16, 32, and 64-bits to double.
5015 // 8-bits -> double. 3 size step-up: give up.
5016 // 16-bits -> double. 2 size step.
5017 class SExtLoadi16CVTf64Pat<dag addrmode, dag INST>
5018 : Pat <(f64 (sint_to_fp (i32 (sextloadi16 addrmode)))),
5019 (SCVTFv1i64 (f64 (EXTRACT_SUBREG
5024 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5031 Requires<[NotForCodeSize, UseAlternateSExtLoadCVTF32]>;
5033 def : SExtLoadi16CVTf64Pat<(ro16.Wpat GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext),
5034 (LDRHroW GPR64sp:$Rn, GPR32:$Rm, ro16.Wext:$ext)>;
5035 def : SExtLoadi16CVTf64Pat<(ro16.Xpat GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext),
5036 (LDRHroX GPR64sp:$Rn, GPR64:$Rm, ro16.Xext:$ext)>;
5037 def : SExtLoadi16CVTf64Pat<(am_indexed16 GPR64sp:$Rn, uimm12s2:$offset),
5038 (LDRHui GPR64sp:$Rn, uimm12s2:$offset)>;
5039 def : SExtLoadi16CVTf64Pat<(am_unscaled16 GPR64sp:$Rn, simm9:$offset),
5040 (LDURHi GPR64sp:$Rn, simm9:$offset)>;
5041 // 32-bits -> double. 1 size step-up.
5042 class SExtLoadi32CVTf64Pat<dag addrmode, dag INST>
5043 : Pat <(f64 (sint_to_fp (i32 (load addrmode)))),
5044 (SCVTFv1i64 (f64 (EXTRACT_SUBREG
5046 (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
5050 dsub)))>, Requires<[NotForCodeSize]>;
5052 def : SExtLoadi32CVTf64Pat<(ro32.Wpat GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext),
5053 (LDRSroW GPR64sp:$Rn, GPR32:$Rm, ro32.Wext:$ext)>;
5054 def : SExtLoadi32CVTf64Pat<(ro32.Xpat GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext),
5055 (LDRSroX GPR64sp:$Rn, GPR64:$Rm, ro32.Xext:$ext)>;
5056 def : SExtLoadi32CVTf64Pat<(am_indexed32 GPR64sp:$Rn, uimm12s4:$offset),
5057 (LDRSui GPR64sp:$Rn, uimm12s4:$offset)>;
5058 def : SExtLoadi32CVTf64Pat<(am_unscaled32 GPR64sp:$Rn, simm9:$offset),
5059 (LDURSi GPR64sp:$Rn, simm9:$offset)>;
5061 // 64-bits -> double are handled in target specific dag combine:
5062 // performIntToFpCombine.
5065 //----------------------------------------------------------------------------
5066 // AdvSIMD Load-Store Structure
5067 //----------------------------------------------------------------------------
5068 defm LD1 : SIMDLd1Multiple<"ld1">;
5069 defm LD2 : SIMDLd2Multiple<"ld2">;
5070 defm LD3 : SIMDLd3Multiple<"ld3">;
5071 defm LD4 : SIMDLd4Multiple<"ld4">;
5073 defm ST1 : SIMDSt1Multiple<"st1">;
5074 defm ST2 : SIMDSt2Multiple<"st2">;
5075 defm ST3 : SIMDSt3Multiple<"st3">;
5076 defm ST4 : SIMDSt4Multiple<"st4">;
5078 class Ld1Pat<ValueType ty, Instruction INST>
5079 : Pat<(ty (load GPR64sp:$Rn)), (INST GPR64sp:$Rn)>;
5081 def : Ld1Pat<v16i8, LD1Onev16b>;
5082 def : Ld1Pat<v8i16, LD1Onev8h>;
5083 def : Ld1Pat<v4i32, LD1Onev4s>;
5084 def : Ld1Pat<v2i64, LD1Onev2d>;
5085 def : Ld1Pat<v8i8, LD1Onev8b>;
5086 def : Ld1Pat<v4i16, LD1Onev4h>;
5087 def : Ld1Pat<v2i32, LD1Onev2s>;
5088 def : Ld1Pat<v1i64, LD1Onev1d>;
5090 class St1Pat<ValueType ty, Instruction INST>
5091 : Pat<(store ty:$Vt, GPR64sp:$Rn),
5092 (INST ty:$Vt, GPR64sp:$Rn)>;
5094 def : St1Pat<v16i8, ST1Onev16b>;
5095 def : St1Pat<v8i16, ST1Onev8h>;
5096 def : St1Pat<v4i32, ST1Onev4s>;
5097 def : St1Pat<v2i64, ST1Onev2d>;
5098 def : St1Pat<v8i8, ST1Onev8b>;
5099 def : St1Pat<v4i16, ST1Onev4h>;
5100 def : St1Pat<v2i32, ST1Onev2s>;
5101 def : St1Pat<v1i64, ST1Onev1d>;
5107 defm LD1R : SIMDLdR<0, 0b110, 0, "ld1r", "One", 1, 2, 4, 8>;
5108 defm LD2R : SIMDLdR<1, 0b110, 0, "ld2r", "Two", 2, 4, 8, 16>;
5109 defm LD3R : SIMDLdR<0, 0b111, 0, "ld3r", "Three", 3, 6, 12, 24>;
5110 defm LD4R : SIMDLdR<1, 0b111, 0, "ld4r", "Four", 4, 8, 16, 32>;
5111 let mayLoad = 1, hasSideEffects = 0 in {
5112 defm LD1 : SIMDLdSingleBTied<0, 0b000, "ld1", VecListOneb, GPR64pi1>;
5113 defm LD1 : SIMDLdSingleHTied<0, 0b010, 0, "ld1", VecListOneh, GPR64pi2>;
5114 defm LD1 : SIMDLdSingleSTied<0, 0b100, 0b00, "ld1", VecListOnes, GPR64pi4>;
5115 defm LD1 : SIMDLdSingleDTied<0, 0b100, 0b01, "ld1", VecListOned, GPR64pi8>;
5116 defm LD2 : SIMDLdSingleBTied<1, 0b000, "ld2", VecListTwob, GPR64pi2>;
5117 defm LD2 : SIMDLdSingleHTied<1, 0b010, 0, "ld2", VecListTwoh, GPR64pi4>;
5118 defm LD2 : SIMDLdSingleSTied<1, 0b100, 0b00, "ld2", VecListTwos, GPR64pi8>;
5119 defm LD2 : SIMDLdSingleDTied<1, 0b100, 0b01, "ld2", VecListTwod, GPR64pi16>;
5120 defm LD3 : SIMDLdSingleBTied<0, 0b001, "ld3", VecListThreeb, GPR64pi3>;
5121 defm LD3 : SIMDLdSingleHTied<0, 0b011, 0, "ld3", VecListThreeh, GPR64pi6>;
5122 defm LD3 : SIMDLdSingleSTied<0, 0b101, 0b00, "ld3", VecListThrees, GPR64pi12>;
5123 defm LD3 : SIMDLdSingleDTied<0, 0b101, 0b01, "ld3", VecListThreed, GPR64pi24>;
5124 defm LD4 : SIMDLdSingleBTied<1, 0b001, "ld4", VecListFourb, GPR64pi4>;
5125 defm LD4 : SIMDLdSingleHTied<1, 0b011, 0, "ld4", VecListFourh, GPR64pi8>;
5126 defm LD4 : SIMDLdSingleSTied<1, 0b101, 0b00, "ld4", VecListFours, GPR64pi16>;
5127 defm LD4 : SIMDLdSingleDTied<1, 0b101, 0b01, "ld4", VecListFourd, GPR64pi32>;
5130 def : Pat<(v8i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
5131 (LD1Rv8b GPR64sp:$Rn)>;
5132 def : Pat<(v16i8 (AArch64dup (i32 (extloadi8 GPR64sp:$Rn)))),
5133 (LD1Rv16b GPR64sp:$Rn)>;
5134 def : Pat<(v4i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
5135 (LD1Rv4h GPR64sp:$Rn)>;
5136 def : Pat<(v8i16 (AArch64dup (i32 (extloadi16 GPR64sp:$Rn)))),
5137 (LD1Rv8h GPR64sp:$Rn)>;
5138 def : Pat<(v2i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
5139 (LD1Rv2s GPR64sp:$Rn)>;
5140 def : Pat<(v4i32 (AArch64dup (i32 (load GPR64sp:$Rn)))),
5141 (LD1Rv4s GPR64sp:$Rn)>;
5142 def : Pat<(v2i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
5143 (LD1Rv2d GPR64sp:$Rn)>;
5144 def : Pat<(v1i64 (AArch64dup (i64 (load GPR64sp:$Rn)))),
5145 (LD1Rv1d GPR64sp:$Rn)>;
5146 // Grab the floating point version too
5147 def : Pat<(v2f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
5148 (LD1Rv2s GPR64sp:$Rn)>;
5149 def : Pat<(v4f32 (AArch64dup (f32 (load GPR64sp:$Rn)))),
5150 (LD1Rv4s GPR64sp:$Rn)>;
5151 def : Pat<(v2f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
5152 (LD1Rv2d GPR64sp:$Rn)>;
5153 def : Pat<(v1f64 (AArch64dup (f64 (load GPR64sp:$Rn)))),
5154 (LD1Rv1d GPR64sp:$Rn)>;
5155 def : Pat<(v4f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
5156 (LD1Rv4h GPR64sp:$Rn)>;
5157 def : Pat<(v8f16 (AArch64dup (f16 (load GPR64sp:$Rn)))),
5158 (LD1Rv8h GPR64sp:$Rn)>;
5160 class Ld1Lane128Pat<SDPatternOperator scalar_load, Operand VecIndex,
5161 ValueType VTy, ValueType STy, Instruction LD1>
5162 : Pat<(vector_insert (VTy VecListOne128:$Rd),
5163 (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
5164 (LD1 VecListOne128:$Rd, VecIndex:$idx, GPR64sp:$Rn)>;
5166 def : Ld1Lane128Pat<extloadi8, VectorIndexB, v16i8, i32, LD1i8>;
5167 def : Ld1Lane128Pat<extloadi16, VectorIndexH, v8i16, i32, LD1i16>;
5168 def : Ld1Lane128Pat<load, VectorIndexS, v4i32, i32, LD1i32>;
5169 def : Ld1Lane128Pat<load, VectorIndexS, v4f32, f32, LD1i32>;
5170 def : Ld1Lane128Pat<load, VectorIndexD, v2i64, i64, LD1i64>;
5171 def : Ld1Lane128Pat<load, VectorIndexD, v2f64, f64, LD1i64>;
5172 def : Ld1Lane128Pat<load, VectorIndexH, v8f16, f16, LD1i16>;
5174 class Ld1Lane64Pat<SDPatternOperator scalar_load, Operand VecIndex,
5175 ValueType VTy, ValueType STy, Instruction LD1>
5176 : Pat<(vector_insert (VTy VecListOne64:$Rd),
5177 (STy (scalar_load GPR64sp:$Rn)), VecIndex:$idx),
5179 (LD1 (SUBREG_TO_REG (i32 0), VecListOne64:$Rd, dsub),
5180 VecIndex:$idx, GPR64sp:$Rn),
5183 def : Ld1Lane64Pat<extloadi8, VectorIndexB, v8i8, i32, LD1i8>;
5184 def : Ld1Lane64Pat<extloadi16, VectorIndexH, v4i16, i32, LD1i16>;
5185 def : Ld1Lane64Pat<load, VectorIndexS, v2i32, i32, LD1i32>;
5186 def : Ld1Lane64Pat<load, VectorIndexS, v2f32, f32, LD1i32>;
5187 def : Ld1Lane64Pat<load, VectorIndexH, v4f16, f16, LD1i16>;
5190 defm LD1 : SIMDLdSt1SingleAliases<"ld1">;
5191 defm LD2 : SIMDLdSt2SingleAliases<"ld2">;
5192 defm LD3 : SIMDLdSt3SingleAliases<"ld3">;
5193 defm LD4 : SIMDLdSt4SingleAliases<"ld4">;
5196 defm ST1 : SIMDStSingleB<0, 0b000, "st1", VecListOneb, GPR64pi1>;
5197 defm ST1 : SIMDStSingleH<0, 0b010, 0, "st1", VecListOneh, GPR64pi2>;
5198 defm ST1 : SIMDStSingleS<0, 0b100, 0b00, "st1", VecListOnes, GPR64pi4>;
5199 defm ST1 : SIMDStSingleD<0, 0b100, 0b01, "st1", VecListOned, GPR64pi8>;
5201 let AddedComplexity = 19 in
5202 class St1Lane128Pat<SDPatternOperator scalar_store, Operand VecIndex,
5203 ValueType VTy, ValueType STy, Instruction ST1>
5205 (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
5207 (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn)>;
5209 def : St1Lane128Pat<truncstorei8, VectorIndexB, v16i8, i32, ST1i8>;
5210 def : St1Lane128Pat<truncstorei16, VectorIndexH, v8i16, i32, ST1i16>;
5211 def : St1Lane128Pat<store, VectorIndexS, v4i32, i32, ST1i32>;
5212 def : St1Lane128Pat<store, VectorIndexS, v4f32, f32, ST1i32>;
5213 def : St1Lane128Pat<store, VectorIndexD, v2i64, i64, ST1i64>;
5214 def : St1Lane128Pat<store, VectorIndexD, v2f64, f64, ST1i64>;
5215 def : St1Lane128Pat<store, VectorIndexH, v8f16, f16, ST1i16>;
5217 let AddedComplexity = 19 in
5218 class St1Lane64Pat<SDPatternOperator scalar_store, Operand VecIndex,
5219 ValueType VTy, ValueType STy, Instruction ST1>
5221 (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
5223 (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
5224 VecIndex:$idx, GPR64sp:$Rn)>;
5226 def : St1Lane64Pat<truncstorei8, VectorIndexB, v8i8, i32, ST1i8>;
5227 def : St1Lane64Pat<truncstorei16, VectorIndexH, v4i16, i32, ST1i16>;
5228 def : St1Lane64Pat<store, VectorIndexS, v2i32, i32, ST1i32>;
5229 def : St1Lane64Pat<store, VectorIndexS, v2f32, f32, ST1i32>;
5230 def : St1Lane64Pat<store, VectorIndexH, v4f16, f16, ST1i16>;
5232 multiclass St1LanePost64Pat<SDPatternOperator scalar_store, Operand VecIndex,
5233 ValueType VTy, ValueType STy, Instruction ST1,
5235 def : Pat<(scalar_store
5236 (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
5237 GPR64sp:$Rn, offset),
5238 (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
5239 VecIndex:$idx, GPR64sp:$Rn, XZR)>;
5241 def : Pat<(scalar_store
5242 (STy (vector_extract (VTy VecListOne64:$Vt), VecIndex:$idx)),
5243 GPR64sp:$Rn, GPR64:$Rm),
5244 (ST1 (SUBREG_TO_REG (i32 0), VecListOne64:$Vt, dsub),
5245 VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
5248 defm : St1LanePost64Pat<post_truncsti8, VectorIndexB, v8i8, i32, ST1i8_POST, 1>;
5249 defm : St1LanePost64Pat<post_truncsti16, VectorIndexH, v4i16, i32, ST1i16_POST,
5251 defm : St1LanePost64Pat<post_store, VectorIndexS, v2i32, i32, ST1i32_POST, 4>;
5252 defm : St1LanePost64Pat<post_store, VectorIndexS, v2f32, f32, ST1i32_POST, 4>;
5253 defm : St1LanePost64Pat<post_store, VectorIndexD, v1i64, i64, ST1i64_POST, 8>;
5254 defm : St1LanePost64Pat<post_store, VectorIndexD, v1f64, f64, ST1i64_POST, 8>;
5255 defm : St1LanePost64Pat<post_store, VectorIndexH, v4f16, f16, ST1i16_POST, 2>;
5257 multiclass St1LanePost128Pat<SDPatternOperator scalar_store, Operand VecIndex,
5258 ValueType VTy, ValueType STy, Instruction ST1,
5260 def : Pat<(scalar_store
5261 (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
5262 GPR64sp:$Rn, offset),
5263 (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, XZR)>;
5265 def : Pat<(scalar_store
5266 (STy (vector_extract (VTy VecListOne128:$Vt), VecIndex:$idx)),
5267 GPR64sp:$Rn, GPR64:$Rm),
5268 (ST1 VecListOne128:$Vt, VecIndex:$idx, GPR64sp:$Rn, $Rm)>;
5271 defm : St1LanePost128Pat<post_truncsti8, VectorIndexB, v16i8, i32, ST1i8_POST,
5273 defm : St1LanePost128Pat<post_truncsti16, VectorIndexH, v8i16, i32, ST1i16_POST,
5275 defm : St1LanePost128Pat<post_store, VectorIndexS, v4i32, i32, ST1i32_POST, 4>;
5276 defm : St1LanePost128Pat<post_store, VectorIndexS, v4f32, f32, ST1i32_POST, 4>;
5277 defm : St1LanePost128Pat<post_store, VectorIndexD, v2i64, i64, ST1i64_POST, 8>;
5278 defm : St1LanePost128Pat<post_store, VectorIndexD, v2f64, f64, ST1i64_POST, 8>;
5279 defm : St1LanePost128Pat<post_store, VectorIndexH, v8f16, f16, ST1i16_POST, 2>;
5281 let mayStore = 1, hasSideEffects = 0 in {
5282 defm ST2 : SIMDStSingleB<1, 0b000, "st2", VecListTwob, GPR64pi2>;
5283 defm ST2 : SIMDStSingleH<1, 0b010, 0, "st2", VecListTwoh, GPR64pi4>;
5284 defm ST2 : SIMDStSingleS<1, 0b100, 0b00, "st2", VecListTwos, GPR64pi8>;
5285 defm ST2 : SIMDStSingleD<1, 0b100, 0b01, "st2", VecListTwod, GPR64pi16>;
5286 defm ST3 : SIMDStSingleB<0, 0b001, "st3", VecListThreeb, GPR64pi3>;
5287 defm ST3 : SIMDStSingleH<0, 0b011, 0, "st3", VecListThreeh, GPR64pi6>;
5288 defm ST3 : SIMDStSingleS<0, 0b101, 0b00, "st3", VecListThrees, GPR64pi12>;
5289 defm ST3 : SIMDStSingleD<0, 0b101, 0b01, "st3", VecListThreed, GPR64pi24>;
5290 defm ST4 : SIMDStSingleB<1, 0b001, "st4", VecListFourb, GPR64pi4>;
5291 defm ST4 : SIMDStSingleH<1, 0b011, 0, "st4", VecListFourh, GPR64pi8>;
5292 defm ST4 : SIMDStSingleS<1, 0b101, 0b00, "st4", VecListFours, GPR64pi16>;
5293 defm ST4 : SIMDStSingleD<1, 0b101, 0b01, "st4", VecListFourd, GPR64pi32>;
5296 defm ST1 : SIMDLdSt1SingleAliases<"st1">;
5297 defm ST2 : SIMDLdSt2SingleAliases<"st2">;
5298 defm ST3 : SIMDLdSt3SingleAliases<"st3">;
5299 defm ST4 : SIMDLdSt4SingleAliases<"st4">;
5301 //----------------------------------------------------------------------------
5302 // Crypto extensions
5303 //----------------------------------------------------------------------------
5305 def AESErr : AESTiedInst<0b0100, "aese", int_aarch64_crypto_aese>;
5306 def AESDrr : AESTiedInst<0b0101, "aesd", int_aarch64_crypto_aesd>;
5307 def AESMCrr : AESInst< 0b0110, "aesmc", int_aarch64_crypto_aesmc>;
5308 def AESIMCrr : AESInst< 0b0111, "aesimc", int_aarch64_crypto_aesimc>;
5310 def SHA1Crrr : SHATiedInstQSV<0b000, "sha1c", int_aarch64_crypto_sha1c>;
5311 def SHA1Prrr : SHATiedInstQSV<0b001, "sha1p", int_aarch64_crypto_sha1p>;
5312 def SHA1Mrrr : SHATiedInstQSV<0b010, "sha1m", int_aarch64_crypto_sha1m>;
5313 def SHA1SU0rrr : SHATiedInstVVV<0b011, "sha1su0", int_aarch64_crypto_sha1su0>;
5314 def SHA256Hrrr : SHATiedInstQQV<0b100, "sha256h", int_aarch64_crypto_sha256h>;
5315 def SHA256H2rrr : SHATiedInstQQV<0b101, "sha256h2",int_aarch64_crypto_sha256h2>;
5316 def SHA256SU1rrr :SHATiedInstVVV<0b110, "sha256su1",int_aarch64_crypto_sha256su1>;
5318 def SHA1Hrr : SHAInstSS< 0b0000, "sha1h", int_aarch64_crypto_sha1h>;
5319 def SHA1SU1rr : SHATiedInstVV<0b0001, "sha1su1", int_aarch64_crypto_sha1su1>;
5320 def SHA256SU0rr : SHATiedInstVV<0b0010, "sha256su0",int_aarch64_crypto_sha256su0>;
5322 //----------------------------------------------------------------------------
5324 //----------------------------------------------------------------------------
5325 // FIXME: Like for X86, these should go in their own separate .td file.
5327 def def32 : PatLeaf<(i32 GPR32:$src), [{
5331 // In the case of a 32-bit def that is known to implicitly zero-extend,
5332 // we can use a SUBREG_TO_REG.
5333 def : Pat<(i64 (zext def32:$src)), (SUBREG_TO_REG (i64 0), GPR32:$src, sub_32)>;
5335 // For an anyext, we don't care what the high bits are, so we can perform an
5336 // INSERT_SUBREF into an IMPLICIT_DEF.
5337 def : Pat<(i64 (anyext GPR32:$src)),
5338 (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32)>;
5340 // When we need to explicitly zero-extend, we use a 32-bit MOV instruction and
5341 // then assert the extension has happened.
5342 def : Pat<(i64 (zext GPR32:$src)),
5343 (SUBREG_TO_REG (i32 0), (ORRWrs WZR, GPR32:$src, 0), sub_32)>;
5345 // To sign extend, we use a signed bitfield move instruction (SBFM) on the
5346 // containing super-reg.
5347 def : Pat<(i64 (sext GPR32:$src)),
5348 (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$src, sub_32), 0, 31)>;
5349 def : Pat<(i64 (sext_inreg GPR64:$src, i32)), (SBFMXri GPR64:$src, 0, 31)>;
5350 def : Pat<(i64 (sext_inreg GPR64:$src, i16)), (SBFMXri GPR64:$src, 0, 15)>;
5351 def : Pat<(i64 (sext_inreg GPR64:$src, i8)), (SBFMXri GPR64:$src, 0, 7)>;
5352 def : Pat<(i64 (sext_inreg GPR64:$src, i1)), (SBFMXri GPR64:$src, 0, 0)>;
5353 def : Pat<(i32 (sext_inreg GPR32:$src, i16)), (SBFMWri GPR32:$src, 0, 15)>;
5354 def : Pat<(i32 (sext_inreg GPR32:$src, i8)), (SBFMWri GPR32:$src, 0, 7)>;
5355 def : Pat<(i32 (sext_inreg GPR32:$src, i1)), (SBFMWri GPR32:$src, 0, 0)>;
5357 def : Pat<(shl (sext_inreg GPR32:$Rn, i8), (i64 imm0_31:$imm)),
5358 (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
5359 (i64 (i32shift_sext_i8 imm0_31:$imm)))>;
5360 def : Pat<(shl (sext_inreg GPR64:$Rn, i8), (i64 imm0_63:$imm)),
5361 (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
5362 (i64 (i64shift_sext_i8 imm0_63:$imm)))>;
5364 def : Pat<(shl (sext_inreg GPR32:$Rn, i16), (i64 imm0_31:$imm)),
5365 (SBFMWri GPR32:$Rn, (i64 (i32shift_a imm0_31:$imm)),
5366 (i64 (i32shift_sext_i16 imm0_31:$imm)))>;
5367 def : Pat<(shl (sext_inreg GPR64:$Rn, i16), (i64 imm0_63:$imm)),
5368 (SBFMXri GPR64:$Rn, (i64 (i64shift_a imm0_63:$imm)),
5369 (i64 (i64shift_sext_i16 imm0_63:$imm)))>;
5371 def : Pat<(shl (i64 (sext GPR32:$Rn)), (i64 imm0_63:$imm)),
5372 (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
5373 (i64 (i64shift_a imm0_63:$imm)),
5374 (i64 (i64shift_sext_i32 imm0_63:$imm)))>;
5376 // sra patterns have an AddedComplexity of 10, so make sure we have a higher
5377 // AddedComplexity for the following patterns since we want to match sext + sra
5378 // patterns before we attempt to match a single sra node.
5379 let AddedComplexity = 20 in {
5380 // We support all sext + sra combinations which preserve at least one bit of the
5381 // original value which is to be sign extended. E.g. we support shifts up to
5383 def : Pat<(sra (sext_inreg GPR32:$Rn, i8), (i64 imm0_7:$imm)),
5384 (SBFMWri GPR32:$Rn, (i64 imm0_7:$imm), 7)>;
5385 def : Pat<(sra (sext_inreg GPR64:$Rn, i8), (i64 imm0_7:$imm)),
5386 (SBFMXri GPR64:$Rn, (i64 imm0_7:$imm), 7)>;
5388 def : Pat<(sra (sext_inreg GPR32:$Rn, i16), (i64 imm0_15:$imm)),
5389 (SBFMWri GPR32:$Rn, (i64 imm0_15:$imm), 15)>;
5390 def : Pat<(sra (sext_inreg GPR64:$Rn, i16), (i64 imm0_15:$imm)),
5391 (SBFMXri GPR64:$Rn, (i64 imm0_15:$imm), 15)>;
5393 def : Pat<(sra (i64 (sext GPR32:$Rn)), (i64 imm0_31:$imm)),
5394 (SBFMXri (INSERT_SUBREG (i64 (IMPLICIT_DEF)), GPR32:$Rn, sub_32),
5395 (i64 imm0_31:$imm), 31)>;
5396 } // AddedComplexity = 20
5398 // To truncate, we can simply extract from a subregister.
5399 def : Pat<(i32 (trunc GPR64sp:$src)),
5400 (i32 (EXTRACT_SUBREG GPR64sp:$src, sub_32))>;
5402 // __builtin_trap() uses the BRK instruction on AArch64.
5403 def : Pat<(trap), (BRK 1)>;
5405 // Conversions within AdvSIMD types in the same register size are free.
5406 // But because we need a consistent lane ordering, in big endian many
5407 // conversions require one or more REV instructions.
5409 // Consider a simple memory load followed by a bitconvert then a store.
5411 // v1 = BITCAST v2i32 v0 to v4i16
5414 // In big endian mode every memory access has an implicit byte swap. LDR and
5415 // STR do a 64-bit byte swap, whereas LD1/ST1 do a byte swap per lane - that
5416 // is, they treat the vector as a sequence of elements to be byte-swapped.
5417 // The two pairs of instructions are fundamentally incompatible. We've decided
5418 // to use LD1/ST1 only to simplify compiler implementation.
5420 // LD1/ST1 perform the equivalent of a sequence of LDR/STR + REV. This makes
5421 // the original code sequence:
5423 // v1 = REV v2i32 (implicit)
5424 // v2 = BITCAST v2i32 v1 to v4i16
5425 // v3 = REV v4i16 v2 (implicit)
5428 // But this is now broken - the value stored is different to the value loaded
5429 // due to lane reordering. To fix this, on every BITCAST we must perform two
5432 // v1 = REV v2i32 (implicit)
5434 // v3 = BITCAST v2i32 v2 to v4i16
5436 // v5 = REV v4i16 v4 (implicit)
5439 // This means an extra two instructions, but actually in most cases the two REV
5440 // instructions can be combined into one. For example:
5441 // (REV64_2s (REV64_4h X)) === (REV32_4h X)
5443 // There is also no 128-bit REV instruction. This must be synthesized with an
5446 // Most bitconverts require some sort of conversion. The only exceptions are:
5447 // a) Identity conversions - vNfX <-> vNiX
5448 // b) Single-lane-to-scalar - v1fX <-> fX or v1iX <-> iX
5451 // Natural vector casts (64 bit)
5452 def : Pat<(v8i8 (AArch64NvCast (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
5453 def : Pat<(v4i16 (AArch64NvCast (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
5454 def : Pat<(v4f16 (AArch64NvCast (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
5455 def : Pat<(v2i32 (AArch64NvCast (v2i32 FPR64:$src))), (v2i32 FPR64:$src)>;
5456 def : Pat<(v2f32 (AArch64NvCast (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
5457 def : Pat<(v1i64 (AArch64NvCast (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
5459 def : Pat<(v8i8 (AArch64NvCast (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
5460 def : Pat<(v4i16 (AArch64NvCast (v4i16 FPR64:$src))), (v4i16 FPR64:$src)>;
5461 def : Pat<(v4f16 (AArch64NvCast (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
5462 def : Pat<(v2i32 (AArch64NvCast (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
5463 def : Pat<(v1i64 (AArch64NvCast (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
5465 def : Pat<(v8i8 (AArch64NvCast (v8i8 FPR64:$src))), (v8i8 FPR64:$src)>;
5466 def : Pat<(v4i16 (AArch64NvCast (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
5467 def : Pat<(v4f16 (AArch64NvCast (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
5468 def : Pat<(v2i32 (AArch64NvCast (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
5469 def : Pat<(v1i64 (AArch64NvCast (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
5471 def : Pat<(v8i8 (AArch64NvCast (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
5472 def : Pat<(v4i16 (AArch64NvCast (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
5473 def : Pat<(v4f16 (AArch64NvCast (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
5474 def : Pat<(v2i32 (AArch64NvCast (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
5475 def : Pat<(v2f32 (AArch64NvCast (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
5476 def : Pat<(v1i64 (AArch64NvCast (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
5477 def : Pat<(v1f64 (AArch64NvCast (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
5479 def : Pat<(v8i8 (AArch64NvCast (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
5480 def : Pat<(v4i16 (AArch64NvCast (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
5481 def : Pat<(v2i32 (AArch64NvCast (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
5482 def : Pat<(v2f32 (AArch64NvCast (v2f32 FPR64:$src))), (v2f32 FPR64:$src)>;
5483 def : Pat<(v1i64 (AArch64NvCast (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
5485 // Natural vector casts (128 bit)
5486 def : Pat<(v16i8 (AArch64NvCast (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
5487 def : Pat<(v8i16 (AArch64NvCast (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
5488 def : Pat<(v8f16 (AArch64NvCast (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
5489 def : Pat<(v4i32 (AArch64NvCast (v4i32 FPR128:$src))), (v4i32 FPR128:$src)>;
5490 def : Pat<(v4f32 (AArch64NvCast (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
5491 def : Pat<(v2i64 (AArch64NvCast (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
5492 def : Pat<(v2f64 (AArch64NvCast (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
5494 def : Pat<(v16i8 (AArch64NvCast (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
5495 def : Pat<(v8i16 (AArch64NvCast (v8i16 FPR128:$src))), (v8i16 FPR128:$src)>;
5496 def : Pat<(v8f16 (AArch64NvCast (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
5497 def : Pat<(v4i32 (AArch64NvCast (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
5498 def : Pat<(v2i64 (AArch64NvCast (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
5499 def : Pat<(v4f32 (AArch64NvCast (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
5500 def : Pat<(v2f64 (AArch64NvCast (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
5502 def : Pat<(v16i8 (AArch64NvCast (v16i8 FPR128:$src))), (v16i8 FPR128:$src)>;
5503 def : Pat<(v8i16 (AArch64NvCast (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
5504 def : Pat<(v8f16 (AArch64NvCast (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
5505 def : Pat<(v4i32 (AArch64NvCast (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
5506 def : Pat<(v2i64 (AArch64NvCast (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
5507 def : Pat<(v4f32 (AArch64NvCast (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
5508 def : Pat<(v2f64 (AArch64NvCast (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
5510 def : Pat<(v16i8 (AArch64NvCast (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
5511 def : Pat<(v8i16 (AArch64NvCast (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
5512 def : Pat<(v8f16 (AArch64NvCast (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
5513 def : Pat<(v4i32 (AArch64NvCast (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
5514 def : Pat<(v2i64 (AArch64NvCast (v2i64 FPR128:$src))), (v2i64 FPR128:$src)>;
5515 def : Pat<(v4f32 (AArch64NvCast (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
5516 def : Pat<(v2f64 (AArch64NvCast (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
5518 def : Pat<(v16i8 (AArch64NvCast (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
5519 def : Pat<(v8i16 (AArch64NvCast (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
5520 def : Pat<(v4i32 (AArch64NvCast (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
5521 def : Pat<(v4f32 (AArch64NvCast (v4f32 FPR128:$src))), (v4f32 FPR128:$src)>;
5522 def : Pat<(v2i64 (AArch64NvCast (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
5523 def : Pat<(v8f16 (AArch64NvCast (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
5524 def : Pat<(v2f64 (AArch64NvCast (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
5526 def : Pat<(v16i8 (AArch64NvCast (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
5527 def : Pat<(v8i16 (AArch64NvCast (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
5528 def : Pat<(v4i32 (AArch64NvCast (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
5529 def : Pat<(v2i64 (AArch64NvCast (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
5530 def : Pat<(v2f64 (AArch64NvCast (v2f64 FPR128:$src))), (v2f64 FPR128:$src)>;
5531 def : Pat<(v8f16 (AArch64NvCast (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
5532 def : Pat<(v4f32 (AArch64NvCast (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
5534 let Predicates = [IsLE] in {
5535 def : Pat<(v8i8 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5536 def : Pat<(v4i16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5537 def : Pat<(v2i32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5538 def : Pat<(v4f16 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5539 def : Pat<(v2f32 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5541 def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
5542 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5543 def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
5544 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5545 def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
5546 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5547 def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
5548 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5549 def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
5550 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5551 def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
5552 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5554 let Predicates = [IsBE] in {
5555 def : Pat<(v8i8 (bitconvert GPR64:$Xn)),
5556 (REV64v8i8 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
5557 def : Pat<(v4i16 (bitconvert GPR64:$Xn)),
5558 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
5559 def : Pat<(v2i32 (bitconvert GPR64:$Xn)),
5560 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
5561 def : Pat<(v4f16 (bitconvert GPR64:$Xn)),
5562 (REV64v4i16 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
5563 def : Pat<(v2f32 (bitconvert GPR64:$Xn)),
5564 (REV64v2i32 (COPY_TO_REGCLASS GPR64:$Xn, FPR64))>;
5566 def : Pat<(i64 (bitconvert (v8i8 V64:$Vn))),
5567 (REV64v8i8 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
5568 def : Pat<(i64 (bitconvert (v4i16 V64:$Vn))),
5569 (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
5570 def : Pat<(i64 (bitconvert (v2i32 V64:$Vn))),
5571 (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
5572 def : Pat<(i64 (bitconvert (v4f16 V64:$Vn))),
5573 (REV64v4i16 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
5574 def : Pat<(i64 (bitconvert (v2f32 V64:$Vn))),
5575 (REV64v2i32 (COPY_TO_REGCLASS V64:$Vn, GPR64))>;
5577 def : Pat<(v1i64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5578 def : Pat<(v1f64 (bitconvert GPR64:$Xn)), (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5579 def : Pat<(i64 (bitconvert (v1i64 V64:$Vn))),
5580 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5581 def : Pat<(v1i64 (scalar_to_vector GPR64:$Xn)),
5582 (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5583 def : Pat<(v1f64 (scalar_to_vector GPR64:$Xn)),
5584 (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5585 def : Pat<(v1f64 (scalar_to_vector (f64 FPR64:$Xn))), (v1f64 FPR64:$Xn)>;
5587 def : Pat<(f32 (bitconvert (i32 GPR32:$Xn))),
5588 (COPY_TO_REGCLASS GPR32:$Xn, FPR32)>;
5589 def : Pat<(i32 (bitconvert (f32 FPR32:$Xn))),
5590 (COPY_TO_REGCLASS FPR32:$Xn, GPR32)>;
5591 def : Pat<(f64 (bitconvert (i64 GPR64:$Xn))),
5592 (COPY_TO_REGCLASS GPR64:$Xn, FPR64)>;
5593 def : Pat<(i64 (bitconvert (f64 FPR64:$Xn))),
5594 (COPY_TO_REGCLASS FPR64:$Xn, GPR64)>;
5595 def : Pat<(i64 (bitconvert (v1f64 V64:$Vn))),
5596 (COPY_TO_REGCLASS V64:$Vn, GPR64)>;
5598 let Predicates = [IsLE] in {
5599 def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))), (v1i64 FPR64:$src)>;
5600 def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))), (v1i64 FPR64:$src)>;
5601 def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))), (v1i64 FPR64:$src)>;
5602 def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))), (v1i64 FPR64:$src)>;
5603 def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))), (v1i64 FPR64:$src)>;
5605 let Predicates = [IsBE] in {
5606 def : Pat<(v1i64 (bitconvert (v2i32 FPR64:$src))),
5607 (v1i64 (REV64v2i32 FPR64:$src))>;
5608 def : Pat<(v1i64 (bitconvert (v4i16 FPR64:$src))),
5609 (v1i64 (REV64v4i16 FPR64:$src))>;
5610 def : Pat<(v1i64 (bitconvert (v8i8 FPR64:$src))),
5611 (v1i64 (REV64v8i8 FPR64:$src))>;
5612 def : Pat<(v1i64 (bitconvert (v4f16 FPR64:$src))),
5613 (v1i64 (REV64v4i16 FPR64:$src))>;
5614 def : Pat<(v1i64 (bitconvert (v2f32 FPR64:$src))),
5615 (v1i64 (REV64v2i32 FPR64:$src))>;
5617 def : Pat<(v1i64 (bitconvert (v1f64 FPR64:$src))), (v1i64 FPR64:$src)>;
5618 def : Pat<(v1i64 (bitconvert (f64 FPR64:$src))), (v1i64 FPR64:$src)>;
5620 let Predicates = [IsLE] in {
5621 def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))), (v2i32 FPR64:$src)>;
5622 def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))), (v2i32 FPR64:$src)>;
5623 def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))), (v2i32 FPR64:$src)>;
5624 def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))), (v2i32 FPR64:$src)>;
5625 def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))), (v2i32 FPR64:$src)>;
5626 def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))), (v2i32 FPR64:$src)>;
5628 let Predicates = [IsBE] in {
5629 def : Pat<(v2i32 (bitconvert (v1i64 FPR64:$src))),
5630 (v2i32 (REV64v2i32 FPR64:$src))>;
5631 def : Pat<(v2i32 (bitconvert (v4i16 FPR64:$src))),
5632 (v2i32 (REV32v4i16 FPR64:$src))>;
5633 def : Pat<(v2i32 (bitconvert (v8i8 FPR64:$src))),
5634 (v2i32 (REV32v8i8 FPR64:$src))>;
5635 def : Pat<(v2i32 (bitconvert (f64 FPR64:$src))),
5636 (v2i32 (REV64v2i32 FPR64:$src))>;
5637 def : Pat<(v2i32 (bitconvert (v1f64 FPR64:$src))),
5638 (v2i32 (REV64v2i32 FPR64:$src))>;
5639 def : Pat<(v2i32 (bitconvert (v4f16 FPR64:$src))),
5640 (v2i32 (REV64v4i16 FPR64:$src))>;
5642 def : Pat<(v2i32 (bitconvert (v2f32 FPR64:$src))), (v2i32 FPR64:$src)>;
5644 let Predicates = [IsLE] in {
5645 def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))), (v4i16 FPR64:$src)>;
5646 def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))), (v4i16 FPR64:$src)>;
5647 def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))), (v4i16 FPR64:$src)>;
5648 def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))), (v4i16 FPR64:$src)>;
5649 def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))), (v4i16 FPR64:$src)>;
5650 def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))), (v4i16 FPR64:$src)>;
5651 def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))), (v4i16 FPR64:$src)>;
5653 let Predicates = [IsBE] in {
5654 def : Pat<(v4i16 (bitconvert (v1i64 FPR64:$src))),
5655 (v4i16 (REV64v4i16 FPR64:$src))>;
5656 def : Pat<(v4i16 (bitconvert (v2i32 FPR64:$src))),
5657 (v4i16 (REV32v4i16 FPR64:$src))>;
5658 def : Pat<(v4i16 (bitconvert (v8i8 FPR64:$src))),
5659 (v4i16 (REV16v8i8 FPR64:$src))>;
5660 def : Pat<(v4i16 (bitconvert (f64 FPR64:$src))),
5661 (v4i16 (REV64v4i16 FPR64:$src))>;
5662 def : Pat<(v4i16 (bitconvert (v4f16 FPR64:$src))),
5663 (v4i16 (REV32v4i16 FPR64:$src))>;
5664 def : Pat<(v4i16 (bitconvert (v2f32 FPR64:$src))),
5665 (v4i16 (REV32v4i16 FPR64:$src))>;
5666 def : Pat<(v4i16 (bitconvert (v1f64 FPR64:$src))),
5667 (v4i16 (REV64v4i16 FPR64:$src))>;
5670 let Predicates = [IsLE] in {
5671 def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))), (v4f16 FPR64:$src)>;
5672 def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))), (v4f16 FPR64:$src)>;
5673 def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))), (v4f16 FPR64:$src)>;
5674 def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))), (v4f16 FPR64:$src)>;
5675 def : Pat<(v4f16 (bitconvert (f64 FPR64:$src))), (v4f16 FPR64:$src)>;
5676 def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))), (v4f16 FPR64:$src)>;
5677 def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))), (v4f16 FPR64:$src)>;
5679 let Predicates = [IsBE] in {
5680 def : Pat<(v4f16 (bitconvert (v1i64 FPR64:$src))),
5681 (v4f16 (REV64v4i16 FPR64:$src))>;
5682 def : Pat<(v4f16 (bitconvert (v2i32 FPR64:$src))),
5683 (v4f16 (REV64v4i16 FPR64:$src))>;
5684 def : Pat<(v4f16 (bitconvert (v4i16 FPR64:$src))),
5685 (v4f16 (REV64v4i16 FPR64:$src))>;
5686 def : Pat<(v4f16 (bitconvert (v8i8 FPR64:$src))),
5687 (v4f16 (REV16v8i8 FPR64:$src))>;
5688 def : Pat<(v4f16 (bitconvert (f64 FPR64:$src))),
5689 (v4f16 (REV64v4i16 FPR64:$src))>;
5690 def : Pat<(v4f16 (bitconvert (v2f32 FPR64:$src))),
5691 (v4f16 (REV64v4i16 FPR64:$src))>;
5692 def : Pat<(v4f16 (bitconvert (v1f64 FPR64:$src))),
5693 (v4f16 (REV64v4i16 FPR64:$src))>;
5698 let Predicates = [IsLE] in {
5699 def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))), (v8i8 FPR64:$src)>;
5700 def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))), (v8i8 FPR64:$src)>;
5701 def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))), (v8i8 FPR64:$src)>;
5702 def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))), (v8i8 FPR64:$src)>;
5703 def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))), (v8i8 FPR64:$src)>;
5704 def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))), (v8i8 FPR64:$src)>;
5705 def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))), (v8i8 FPR64:$src)>;
5707 let Predicates = [IsBE] in {
5708 def : Pat<(v8i8 (bitconvert (v1i64 FPR64:$src))),
5709 (v8i8 (REV64v8i8 FPR64:$src))>;
5710 def : Pat<(v8i8 (bitconvert (v2i32 FPR64:$src))),
5711 (v8i8 (REV32v8i8 FPR64:$src))>;
5712 def : Pat<(v8i8 (bitconvert (v4i16 FPR64:$src))),
5713 (v8i8 (REV16v8i8 FPR64:$src))>;
5714 def : Pat<(v8i8 (bitconvert (f64 FPR64:$src))),
5715 (v8i8 (REV64v8i8 FPR64:$src))>;
5716 def : Pat<(v8i8 (bitconvert (v2f32 FPR64:$src))),
5717 (v8i8 (REV32v8i8 FPR64:$src))>;
5718 def : Pat<(v8i8 (bitconvert (v1f64 FPR64:$src))),
5719 (v8i8 (REV64v8i8 FPR64:$src))>;
5720 def : Pat<(v8i8 (bitconvert (v4f16 FPR64:$src))),
5721 (v8i8 (REV16v8i8 FPR64:$src))>;
5724 let Predicates = [IsLE] in {
5725 def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))), (f64 FPR64:$src)>;
5726 def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))), (f64 FPR64:$src)>;
5727 def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))), (f64 FPR64:$src)>;
5728 def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))), (f64 FPR64:$src)>;
5729 def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))), (f64 FPR64:$src)>;
5731 let Predicates = [IsBE] in {
5732 def : Pat<(f64 (bitconvert (v2i32 FPR64:$src))),
5733 (f64 (REV64v2i32 FPR64:$src))>;
5734 def : Pat<(f64 (bitconvert (v4i16 FPR64:$src))),
5735 (f64 (REV64v4i16 FPR64:$src))>;
5736 def : Pat<(f64 (bitconvert (v2f32 FPR64:$src))),
5737 (f64 (REV64v2i32 FPR64:$src))>;
5738 def : Pat<(f64 (bitconvert (v8i8 FPR64:$src))),
5739 (f64 (REV64v8i8 FPR64:$src))>;
5740 def : Pat<(f64 (bitconvert (v4f16 FPR64:$src))),
5741 (f64 (REV64v4i16 FPR64:$src))>;
5743 def : Pat<(f64 (bitconvert (v1i64 FPR64:$src))), (f64 FPR64:$src)>;
5744 def : Pat<(f64 (bitconvert (v1f64 FPR64:$src))), (f64 FPR64:$src)>;
5746 let Predicates = [IsLE] in {
5747 def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))), (v1f64 FPR64:$src)>;
5748 def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))), (v1f64 FPR64:$src)>;
5749 def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))), (v1f64 FPR64:$src)>;
5750 def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))), (v1f64 FPR64:$src)>;
5751 def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))), (v1f64 FPR64:$src)>;
5753 let Predicates = [IsBE] in {
5754 def : Pat<(v1f64 (bitconvert (v2i32 FPR64:$src))),
5755 (v1f64 (REV64v2i32 FPR64:$src))>;
5756 def : Pat<(v1f64 (bitconvert (v4i16 FPR64:$src))),
5757 (v1f64 (REV64v4i16 FPR64:$src))>;
5758 def : Pat<(v1f64 (bitconvert (v8i8 FPR64:$src))),
5759 (v1f64 (REV64v8i8 FPR64:$src))>;
5760 def : Pat<(v1f64 (bitconvert (v2f32 FPR64:$src))),
5761 (v1f64 (REV64v2i32 FPR64:$src))>;
5762 def : Pat<(v1f64 (bitconvert (v4f16 FPR64:$src))),
5763 (v1f64 (REV64v4i16 FPR64:$src))>;
5765 def : Pat<(v1f64 (bitconvert (v1i64 FPR64:$src))), (v1f64 FPR64:$src)>;
5766 def : Pat<(v1f64 (bitconvert (f64 FPR64:$src))), (v1f64 FPR64:$src)>;
5768 let Predicates = [IsLE] in {
5769 def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))), (v2f32 FPR64:$src)>;
5770 def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))), (v2f32 FPR64:$src)>;
5771 def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))), (v2f32 FPR64:$src)>;
5772 def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))), (v2f32 FPR64:$src)>;
5773 def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))), (v2f32 FPR64:$src)>;
5774 def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))), (v2f32 FPR64:$src)>;
5776 let Predicates = [IsBE] in {
5777 def : Pat<(v2f32 (bitconvert (v1i64 FPR64:$src))),
5778 (v2f32 (REV64v2i32 FPR64:$src))>;
5779 def : Pat<(v2f32 (bitconvert (v4i16 FPR64:$src))),
5780 (v2f32 (REV32v4i16 FPR64:$src))>;
5781 def : Pat<(v2f32 (bitconvert (v8i8 FPR64:$src))),
5782 (v2f32 (REV32v8i8 FPR64:$src))>;
5783 def : Pat<(v2f32 (bitconvert (v1f64 FPR64:$src))),
5784 (v2f32 (REV64v2i32 FPR64:$src))>;
5785 def : Pat<(v2f32 (bitconvert (f64 FPR64:$src))),
5786 (v2f32 (REV64v2i32 FPR64:$src))>;
5787 def : Pat<(v2f32 (bitconvert (v4f16 FPR64:$src))),
5788 (v2f32 (REV64v4i16 FPR64:$src))>;
5790 def : Pat<(v2f32 (bitconvert (v2i32 FPR64:$src))), (v2f32 FPR64:$src)>;
5792 let Predicates = [IsLE] in {
5793 def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))), (f128 FPR128:$src)>;
5794 def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))), (f128 FPR128:$src)>;
5795 def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))), (f128 FPR128:$src)>;
5796 def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))), (f128 FPR128:$src)>;
5797 def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))), (f128 FPR128:$src)>;
5798 def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))), (f128 FPR128:$src)>;
5799 def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))), (f128 FPR128:$src)>;
5801 let Predicates = [IsBE] in {
5802 def : Pat<(f128 (bitconvert (v2i64 FPR128:$src))),
5803 (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
5804 def : Pat<(f128 (bitconvert (v4i32 FPR128:$src))),
5805 (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
5806 (REV64v4i32 FPR128:$src), (i32 8)))>;
5807 def : Pat<(f128 (bitconvert (v8i16 FPR128:$src))),
5808 (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
5809 (REV64v8i16 FPR128:$src), (i32 8)))>;
5810 def : Pat<(f128 (bitconvert (v8f16 FPR128:$src))),
5811 (f128 (EXTv16i8 (REV64v8i16 FPR128:$src),
5812 (REV64v8i16 FPR128:$src), (i32 8)))>;
5813 def : Pat<(f128 (bitconvert (v2f64 FPR128:$src))),
5814 (f128 (EXTv16i8 FPR128:$src, FPR128:$src, (i32 8)))>;
5815 def : Pat<(f128 (bitconvert (v4f32 FPR128:$src))),
5816 (f128 (EXTv16i8 (REV64v4i32 FPR128:$src),
5817 (REV64v4i32 FPR128:$src), (i32 8)))>;
5818 def : Pat<(f128 (bitconvert (v16i8 FPR128:$src))),
5819 (f128 (EXTv16i8 (REV64v16i8 FPR128:$src),
5820 (REV64v16i8 FPR128:$src), (i32 8)))>;
5823 let Predicates = [IsLE] in {
5824 def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))), (v2f64 FPR128:$src)>;
5825 def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))), (v2f64 FPR128:$src)>;
5826 def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))), (v2f64 FPR128:$src)>;
5827 def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))), (v2f64 FPR128:$src)>;
5828 def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))), (v2f64 FPR128:$src)>;
5829 def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))), (v2f64 FPR128:$src)>;
5831 let Predicates = [IsBE] in {
5832 def : Pat<(v2f64 (bitconvert (f128 FPR128:$src))),
5833 (v2f64 (EXTv16i8 FPR128:$src,
5834 FPR128:$src, (i32 8)))>;
5835 def : Pat<(v2f64 (bitconvert (v4i32 FPR128:$src))),
5836 (v2f64 (REV64v4i32 FPR128:$src))>;
5837 def : Pat<(v2f64 (bitconvert (v8i16 FPR128:$src))),
5838 (v2f64 (REV64v8i16 FPR128:$src))>;
5839 def : Pat<(v2f64 (bitconvert (v8f16 FPR128:$src))),
5840 (v2f64 (REV64v8i16 FPR128:$src))>;
5841 def : Pat<(v2f64 (bitconvert (v16i8 FPR128:$src))),
5842 (v2f64 (REV64v16i8 FPR128:$src))>;
5843 def : Pat<(v2f64 (bitconvert (v4f32 FPR128:$src))),
5844 (v2f64 (REV64v4i32 FPR128:$src))>;
5846 def : Pat<(v2f64 (bitconvert (v2i64 FPR128:$src))), (v2f64 FPR128:$src)>;
5848 let Predicates = [IsLE] in {
5849 def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))), (v4f32 FPR128:$src)>;
5850 def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))), (v4f32 FPR128:$src)>;
5851 def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))), (v4f32 FPR128:$src)>;
5852 def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))), (v4f32 FPR128:$src)>;
5853 def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))), (v4f32 FPR128:$src)>;
5854 def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))), (v4f32 FPR128:$src)>;
5856 let Predicates = [IsBE] in {
5857 def : Pat<(v4f32 (bitconvert (f128 FPR128:$src))),
5858 (v4f32 (EXTv16i8 (REV64v4i32 FPR128:$src),
5859 (REV64v4i32 FPR128:$src), (i32 8)))>;
5860 def : Pat<(v4f32 (bitconvert (v8i16 FPR128:$src))),
5861 (v4f32 (REV32v8i16 FPR128:$src))>;
5862 def : Pat<(v4f32 (bitconvert (v8f16 FPR128:$src))),
5863 (v4f32 (REV32v8i16 FPR128:$src))>;
5864 def : Pat<(v4f32 (bitconvert (v16i8 FPR128:$src))),
5865 (v4f32 (REV32v16i8 FPR128:$src))>;
5866 def : Pat<(v4f32 (bitconvert (v2i64 FPR128:$src))),
5867 (v4f32 (REV64v4i32 FPR128:$src))>;
5868 def : Pat<(v4f32 (bitconvert (v2f64 FPR128:$src))),
5869 (v4f32 (REV64v4i32 FPR128:$src))>;
5871 def : Pat<(v4f32 (bitconvert (v4i32 FPR128:$src))), (v4f32 FPR128:$src)>;
5873 let Predicates = [IsLE] in {
5874 def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))), (v2i64 FPR128:$src)>;
5875 def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))), (v2i64 FPR128:$src)>;
5876 def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))), (v2i64 FPR128:$src)>;
5877 def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))), (v2i64 FPR128:$src)>;
5878 def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))), (v2i64 FPR128:$src)>;
5879 def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))), (v2i64 FPR128:$src)>;
5881 let Predicates = [IsBE] in {
5882 def : Pat<(v2i64 (bitconvert (f128 FPR128:$src))),
5883 (v2i64 (EXTv16i8 FPR128:$src,
5884 FPR128:$src, (i32 8)))>;
5885 def : Pat<(v2i64 (bitconvert (v4i32 FPR128:$src))),
5886 (v2i64 (REV64v4i32 FPR128:$src))>;
5887 def : Pat<(v2i64 (bitconvert (v8i16 FPR128:$src))),
5888 (v2i64 (REV64v8i16 FPR128:$src))>;
5889 def : Pat<(v2i64 (bitconvert (v16i8 FPR128:$src))),
5890 (v2i64 (REV64v16i8 FPR128:$src))>;
5891 def : Pat<(v2i64 (bitconvert (v4f32 FPR128:$src))),
5892 (v2i64 (REV64v4i32 FPR128:$src))>;
5893 def : Pat<(v2i64 (bitconvert (v8f16 FPR128:$src))),
5894 (v2i64 (REV64v8i16 FPR128:$src))>;
5896 def : Pat<(v2i64 (bitconvert (v2f64 FPR128:$src))), (v2i64 FPR128:$src)>;
5898 let Predicates = [IsLE] in {
5899 def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))), (v4i32 FPR128:$src)>;
5900 def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))), (v4i32 FPR128:$src)>;
5901 def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))), (v4i32 FPR128:$src)>;
5902 def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))), (v4i32 FPR128:$src)>;
5903 def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))), (v4i32 FPR128:$src)>;
5904 def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))), (v4i32 FPR128:$src)>;
5906 let Predicates = [IsBE] in {
5907 def : Pat<(v4i32 (bitconvert (f128 FPR128:$src))),
5908 (v4i32 (EXTv16i8 (REV64v4i32 FPR128:$src),
5909 (REV64v4i32 FPR128:$src),
5911 def : Pat<(v4i32 (bitconvert (v2i64 FPR128:$src))),
5912 (v4i32 (REV64v4i32 FPR128:$src))>;
5913 def : Pat<(v4i32 (bitconvert (v8i16 FPR128:$src))),
5914 (v4i32 (REV32v8i16 FPR128:$src))>;
5915 def : Pat<(v4i32 (bitconvert (v16i8 FPR128:$src))),
5916 (v4i32 (REV32v16i8 FPR128:$src))>;
5917 def : Pat<(v4i32 (bitconvert (v2f64 FPR128:$src))),
5918 (v4i32 (REV64v4i32 FPR128:$src))>;
5919 def : Pat<(v4i32 (bitconvert (v8f16 FPR128:$src))),
5920 (v4i32 (REV32v8i16 FPR128:$src))>;
5922 def : Pat<(v4i32 (bitconvert (v4f32 FPR128:$src))), (v4i32 FPR128:$src)>;
5924 let Predicates = [IsLE] in {
5925 def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))), (v8i16 FPR128:$src)>;
5926 def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))), (v8i16 FPR128:$src)>;
5927 def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))), (v8i16 FPR128:$src)>;
5928 def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))), (v8i16 FPR128:$src)>;
5929 def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))), (v8i16 FPR128:$src)>;
5930 def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))), (v8i16 FPR128:$src)>;
5931 def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))), (v8i16 FPR128:$src)>;
5933 let Predicates = [IsBE] in {
5934 def : Pat<(v8i16 (bitconvert (f128 FPR128:$src))),
5935 (v8i16 (EXTv16i8 (REV64v8i16 FPR128:$src),
5936 (REV64v8i16 FPR128:$src),
5938 def : Pat<(v8i16 (bitconvert (v2i64 FPR128:$src))),
5939 (v8i16 (REV64v8i16 FPR128:$src))>;
5940 def : Pat<(v8i16 (bitconvert (v4i32 FPR128:$src))),
5941 (v8i16 (REV32v8i16 FPR128:$src))>;
5942 def : Pat<(v8i16 (bitconvert (v16i8 FPR128:$src))),
5943 (v8i16 (REV16v16i8 FPR128:$src))>;
5944 def : Pat<(v8i16 (bitconvert (v2f64 FPR128:$src))),
5945 (v8i16 (REV64v8i16 FPR128:$src))>;
5946 def : Pat<(v8i16 (bitconvert (v4f32 FPR128:$src))),
5947 (v8i16 (REV32v8i16 FPR128:$src))>;
5948 def : Pat<(v8i16 (bitconvert (v8f16 FPR128:$src))),
5949 (v8i16 (REV32v8i16 FPR128:$src))>;
5952 let Predicates = [IsLE] in {
5953 def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))), (v8f16 FPR128:$src)>;
5954 def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))), (v8f16 FPR128:$src)>;
5955 def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))), (v8f16 FPR128:$src)>;
5956 def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))), (v8f16 FPR128:$src)>;
5957 def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))), (v8f16 FPR128:$src)>;
5958 def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))), (v8f16 FPR128:$src)>;
5959 def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))), (v8f16 FPR128:$src)>;
5961 let Predicates = [IsBE] in {
5962 def : Pat<(v8f16 (bitconvert (f128 FPR128:$src))),
5963 (v8f16 (EXTv16i8 (REV64v8i16 FPR128:$src),
5964 (REV64v8i16 FPR128:$src),
5966 def : Pat<(v8f16 (bitconvert (v2i64 FPR128:$src))),
5967 (v8f16 (REV64v8i16 FPR128:$src))>;
5968 def : Pat<(v8f16 (bitconvert (v4i32 FPR128:$src))),
5969 (v8f16 (REV32v8i16 FPR128:$src))>;
5970 def : Pat<(v8f16 (bitconvert (v8i16 FPR128:$src))),
5971 (v8f16 (REV64v8i16 FPR128:$src))>;
5972 def : Pat<(v8f16 (bitconvert (v16i8 FPR128:$src))),
5973 (v8f16 (REV16v16i8 FPR128:$src))>;
5974 def : Pat<(v8f16 (bitconvert (v2f64 FPR128:$src))),
5975 (v8f16 (REV64v8i16 FPR128:$src))>;
5976 def : Pat<(v8f16 (bitconvert (v4f32 FPR128:$src))),
5977 (v8f16 (REV32v8i16 FPR128:$src))>;
5980 let Predicates = [IsLE] in {
5981 def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))), (v16i8 FPR128:$src)>;
5982 def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))), (v16i8 FPR128:$src)>;
5983 def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))), (v16i8 FPR128:$src)>;
5984 def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))), (v16i8 FPR128:$src)>;
5985 def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))), (v16i8 FPR128:$src)>;
5986 def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))), (v16i8 FPR128:$src)>;
5987 def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))), (v16i8 FPR128:$src)>;
5989 let Predicates = [IsBE] in {
5990 def : Pat<(v16i8 (bitconvert (f128 FPR128:$src))),
5991 (v16i8 (EXTv16i8 (REV64v16i8 FPR128:$src),
5992 (REV64v16i8 FPR128:$src),
5994 def : Pat<(v16i8 (bitconvert (v2i64 FPR128:$src))),
5995 (v16i8 (REV64v16i8 FPR128:$src))>;
5996 def : Pat<(v16i8 (bitconvert (v4i32 FPR128:$src))),
5997 (v16i8 (REV32v16i8 FPR128:$src))>;
5998 def : Pat<(v16i8 (bitconvert (v8i16 FPR128:$src))),
5999 (v16i8 (REV16v16i8 FPR128:$src))>;
6000 def : Pat<(v16i8 (bitconvert (v2f64 FPR128:$src))),
6001 (v16i8 (REV64v16i8 FPR128:$src))>;
6002 def : Pat<(v16i8 (bitconvert (v4f32 FPR128:$src))),
6003 (v16i8 (REV32v16i8 FPR128:$src))>;
6004 def : Pat<(v16i8 (bitconvert (v8f16 FPR128:$src))),
6005 (v16i8 (REV16v16i8 FPR128:$src))>;
6008 def : Pat<(v4i16 (extract_subvector V128:$Rn, (i64 0))),
6009 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6010 def : Pat<(v8i8 (extract_subvector V128:$Rn, (i64 0))),
6011 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6012 def : Pat<(v2f32 (extract_subvector V128:$Rn, (i64 0))),
6013 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6014 def : Pat<(v4f16 (extract_subvector V128:$Rn, (i64 0))),
6015 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6016 def : Pat<(v2i32 (extract_subvector V128:$Rn, (i64 0))),
6017 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6018 def : Pat<(v1i64 (extract_subvector V128:$Rn, (i64 0))),
6019 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6020 def : Pat<(v1f64 (extract_subvector V128:$Rn, (i64 0))),
6021 (EXTRACT_SUBREG V128:$Rn, dsub)>;
6023 def : Pat<(v8i8 (extract_subvector (v16i8 FPR128:$Rn), (i64 1))),
6024 (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
6025 def : Pat<(v4i16 (extract_subvector (v8i16 FPR128:$Rn), (i64 1))),
6026 (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
6027 def : Pat<(v2i32 (extract_subvector (v4i32 FPR128:$Rn), (i64 1))),
6028 (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
6029 def : Pat<(v1i64 (extract_subvector (v2i64 FPR128:$Rn), (i64 1))),
6030 (EXTRACT_SUBREG (DUPv2i64lane FPR128:$Rn, 1), dsub)>;
6032 // A 64-bit subvector insert to the first 128-bit vector position
6033 // is a subregister copy that needs no instruction.
6034 def : Pat<(insert_subvector undef, (v1i64 FPR64:$src), (i32 0)),
6035 (INSERT_SUBREG (v2i64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6036 def : Pat<(insert_subvector undef, (v1f64 FPR64:$src), (i32 0)),
6037 (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6038 def : Pat<(insert_subvector undef, (v2i32 FPR64:$src), (i32 0)),
6039 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6040 def : Pat<(insert_subvector undef, (v2f32 FPR64:$src), (i32 0)),
6041 (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6042 def : Pat<(insert_subvector undef, (v4i16 FPR64:$src), (i32 0)),
6043 (INSERT_SUBREG (v8i16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6044 def : Pat<(insert_subvector undef, (v4f16 FPR64:$src), (i32 0)),
6045 (INSERT_SUBREG (v8f16 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6046 def : Pat<(insert_subvector undef, (v8i8 FPR64:$src), (i32 0)),
6047 (INSERT_SUBREG (v16i8 (IMPLICIT_DEF)), FPR64:$src, dsub)>;
6049 // Use pair-wise add instructions when summing up the lanes for v2f64, v2i64
6051 def : Pat<(i64 (add (vector_extract (v2i64 FPR128:$Rn), (i64 0)),
6052 (vector_extract (v2i64 FPR128:$Rn), (i64 1)))),
6053 (i64 (ADDPv2i64p (v2i64 FPR128:$Rn)))>;
6054 def : Pat<(f64 (fadd (vector_extract (v2f64 FPR128:$Rn), (i64 0)),
6055 (vector_extract (v2f64 FPR128:$Rn), (i64 1)))),
6056 (f64 (FADDPv2i64p (v2f64 FPR128:$Rn)))>;
6057 // vector_extract on 64-bit vectors gets promoted to a 128 bit vector,
6058 // so we match on v4f32 here, not v2f32. This will also catch adding
6059 // the low two lanes of a true v4f32 vector.
6060 def : Pat<(fadd (vector_extract (v4f32 FPR128:$Rn), (i64 0)),
6061 (vector_extract (v4f32 FPR128:$Rn), (i64 1))),
6062 (f32 (FADDPv2i32p (EXTRACT_SUBREG FPR128:$Rn, dsub)))>;
6064 // Scalar 64-bit shifts in FPR64 registers.
6065 def : Pat<(i64 (int_aarch64_neon_sshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
6066 (SSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
6067 def : Pat<(i64 (int_aarch64_neon_ushl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
6068 (USHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
6069 def : Pat<(i64 (int_aarch64_neon_srshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
6070 (SRSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
6071 def : Pat<(i64 (int_aarch64_neon_urshl (i64 FPR64:$Rn), (i64 FPR64:$Rm))),
6072 (URSHLv1i64 FPR64:$Rn, FPR64:$Rm)>;
6074 // Patterns for nontemporal/no-allocate stores.
6075 // We have to resort to tricks to turn a single-input store into a store pair,
6076 // because there is no single-input nontemporal store, only STNP.
6077 let Predicates = [IsLE] in {
6078 let AddedComplexity = 15 in {
6079 class NTStore128Pat<ValueType VT> :
6080 Pat<(nontemporalstore (VT FPR128:$Rt),
6081 (am_indexed7s64 GPR64sp:$Rn, simm7s8:$offset)),
6082 (STNPDi (EXTRACT_SUBREG FPR128:$Rt, dsub),
6083 (CPYi64 FPR128:$Rt, (i64 1)),
6084 GPR64sp:$Rn, simm7s8:$offset)>;
6086 def : NTStore128Pat<v2i64>;
6087 def : NTStore128Pat<v4i32>;
6088 def : NTStore128Pat<v8i16>;
6089 def : NTStore128Pat<v16i8>;
6091 class NTStore64Pat<ValueType VT> :
6092 Pat<(nontemporalstore (VT FPR64:$Rt),
6093 (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
6094 (STNPSi (EXTRACT_SUBREG FPR64:$Rt, ssub),
6095 (CPYi32 (SUBREG_TO_REG (i64 0), FPR64:$Rt, dsub), (i64 1)),
6096 GPR64sp:$Rn, simm7s4:$offset)>;
6098 // FIXME: Shouldn't v1f64 loads/stores be promoted to v1i64?
6099 def : NTStore64Pat<v1f64>;
6100 def : NTStore64Pat<v1i64>;
6101 def : NTStore64Pat<v2i32>;
6102 def : NTStore64Pat<v4i16>;
6103 def : NTStore64Pat<v8i8>;
6105 def : Pat<(nontemporalstore GPR64:$Rt,
6106 (am_indexed7s32 GPR64sp:$Rn, simm7s4:$offset)),
6107 (STNPWi (EXTRACT_SUBREG GPR64:$Rt, sub_32),
6108 (EXTRACT_SUBREG (UBFMXri GPR64:$Rt, 32, 63), sub_32),
6109 GPR64sp:$Rn, simm7s4:$offset)>;
6110 } // AddedComplexity=10
6111 } // Predicates = [IsLE]
6113 // Tail call return handling. These are all compiler pseudo-instructions,
6114 // so no encoding information or anything like that.
6115 let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [SP] in {
6116 def TCRETURNdi : Pseudo<(outs), (ins i64imm:$dst, i32imm:$FPDiff), []>,
6117 Sched<[WriteBrReg]>;
6118 def TCRETURNri : Pseudo<(outs), (ins tcGPR64:$dst, i32imm:$FPDiff), []>,
6119 Sched<[WriteBrReg]>;
6122 def : Pat<(AArch64tcret tcGPR64:$dst, (i32 timm:$FPDiff)),
6123 (TCRETURNri tcGPR64:$dst, imm:$FPDiff)>;
6124 def : Pat<(AArch64tcret tglobaladdr:$dst, (i32 timm:$FPDiff)),
6125 (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
6126 def : Pat<(AArch64tcret texternalsym:$dst, (i32 timm:$FPDiff)),
6127 (TCRETURNdi texternalsym:$dst, imm:$FPDiff)>;
6129 include "AArch64InstrAtomics.td"