1 //===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation -----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
13 //===----------------------------------------------------------------------===//
15 #define DEBUG_TYPE "aarch64-isel"
17 #include "AArch64ISelLowering.h"
18 #include "AArch64MachineFunctionInfo.h"
19 #include "AArch64TargetMachine.h"
20 #include "AArch64TargetObjectFile.h"
21 #include "Utils/AArch64BaseInfo.h"
22 #include "llvm/CodeGen/Analysis.h"
23 #include "llvm/CodeGen/CallingConvLower.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
28 #include "llvm/IR/CallingConv.h"
32 static TargetLoweringObjectFile *createTLOF(AArch64TargetMachine &TM) {
33 const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();
34 assert (Subtarget->isTargetELF() && "unknown subtarget type");
35 return new AArch64ElfTargetObjectFile();
38 AArch64TargetLowering::AArch64TargetLowering(AArch64TargetMachine &TM)
39 : TargetLowering(TM, createTLOF(TM)), Itins(TM.getInstrItineraryData()) {
41 const AArch64Subtarget *Subtarget = &TM.getSubtarget<AArch64Subtarget>();
43 // SIMD compares set the entire lane's bits to 1
44 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
46 // Scalar register <-> type mapping
47 addRegisterClass(MVT::i32, &AArch64::GPR32RegClass);
48 addRegisterClass(MVT::i64, &AArch64::GPR64RegClass);
50 if (Subtarget->hasFPARMv8()) {
51 addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
52 addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
53 addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
54 addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
57 if (Subtarget->hasNEON()) {
59 addRegisterClass(MVT::v1i8, &AArch64::FPR8RegClass);
60 addRegisterClass(MVT::v1i16, &AArch64::FPR16RegClass);
61 addRegisterClass(MVT::v1i32, &AArch64::FPR32RegClass);
62 addRegisterClass(MVT::v1i64, &AArch64::FPR64RegClass);
63 addRegisterClass(MVT::v1f32, &AArch64::FPR32RegClass);
64 addRegisterClass(MVT::v1f64, &AArch64::FPR64RegClass);
65 addRegisterClass(MVT::v8i8, &AArch64::FPR64RegClass);
66 addRegisterClass(MVT::v4i16, &AArch64::FPR64RegClass);
67 addRegisterClass(MVT::v2i32, &AArch64::FPR64RegClass);
69 addRegisterClass(MVT::v2f32, &AArch64::FPR64RegClass);
70 addRegisterClass(MVT::v16i8, &AArch64::FPR128RegClass);
71 addRegisterClass(MVT::v8i16, &AArch64::FPR128RegClass);
72 addRegisterClass(MVT::v4i32, &AArch64::FPR128RegClass);
73 addRegisterClass(MVT::v2i64, &AArch64::FPR128RegClass);
74 addRegisterClass(MVT::v4f32, &AArch64::FPR128RegClass);
75 addRegisterClass(MVT::v2f64, &AArch64::FPR128RegClass);
78 computeRegisterProperties();
80 // We combine OR nodes for bitfield and NEON BSL operations.
81 setTargetDAGCombine(ISD::OR);
83 setTargetDAGCombine(ISD::AND);
84 setTargetDAGCombine(ISD::SRA);
85 setTargetDAGCombine(ISD::SRL);
86 setTargetDAGCombine(ISD::SHL);
88 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
89 setTargetDAGCombine(ISD::INTRINSIC_VOID);
90 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
92 // AArch64 does not have i1 loads, or much of anything for i1 really.
93 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);
94 setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
95 setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
97 setStackPointerRegisterToSaveRestore(AArch64::XSP);
98 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);
99 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
100 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
102 // We'll lower globals to wrappers for selection.
103 setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
104 setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
106 // A64 instructions have the comparison predicate attached to the user of the
107 // result, but having a separate comparison is valuable for matching.
108 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
109 setOperationAction(ISD::BR_CC, MVT::i64, Custom);
110 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
111 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
113 setOperationAction(ISD::SELECT, MVT::i32, Custom);
114 setOperationAction(ISD::SELECT, MVT::i64, Custom);
115 setOperationAction(ISD::SELECT, MVT::f32, Custom);
116 setOperationAction(ISD::SELECT, MVT::f64, Custom);
118 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
119 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
120 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
121 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
123 setOperationAction(ISD::BRCOND, MVT::Other, Custom);
125 setOperationAction(ISD::SETCC, MVT::i32, Custom);
126 setOperationAction(ISD::SETCC, MVT::i64, Custom);
127 setOperationAction(ISD::SETCC, MVT::f32, Custom);
128 setOperationAction(ISD::SETCC, MVT::f64, Custom);
130 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
131 setOperationAction(ISD::JumpTable, MVT::i32, Custom);
132 setOperationAction(ISD::JumpTable, MVT::i64, Custom);
134 setOperationAction(ISD::VASTART, MVT::Other, Custom);
135 setOperationAction(ISD::VACOPY, MVT::Other, Custom);
136 setOperationAction(ISD::VAEND, MVT::Other, Expand);
137 setOperationAction(ISD::VAARG, MVT::Other, Expand);
139 setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
141 setOperationAction(ISD::ROTL, MVT::i32, Expand);
142 setOperationAction(ISD::ROTL, MVT::i64, Expand);
144 setOperationAction(ISD::UREM, MVT::i32, Expand);
145 setOperationAction(ISD::UREM, MVT::i64, Expand);
146 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
147 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
149 setOperationAction(ISD::SREM, MVT::i32, Expand);
150 setOperationAction(ISD::SREM, MVT::i64, Expand);
151 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
152 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
154 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
155 setOperationAction(ISD::CTPOP, MVT::i64, Expand);
157 // Legal floating-point operations.
158 setOperationAction(ISD::FABS, MVT::f32, Legal);
159 setOperationAction(ISD::FABS, MVT::f64, Legal);
161 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
162 setOperationAction(ISD::FCEIL, MVT::f64, Legal);
164 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
165 setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
167 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
168 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
170 setOperationAction(ISD::FNEG, MVT::f32, Legal);
171 setOperationAction(ISD::FNEG, MVT::f64, Legal);
173 setOperationAction(ISD::FRINT, MVT::f32, Legal);
174 setOperationAction(ISD::FRINT, MVT::f64, Legal);
176 setOperationAction(ISD::FSQRT, MVT::f32, Legal);
177 setOperationAction(ISD::FSQRT, MVT::f64, Legal);
179 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
180 setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
182 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
183 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
184 setOperationAction(ISD::ConstantFP, MVT::f128, Legal);
186 // Illegal floating-point operations.
187 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
188 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
190 setOperationAction(ISD::FCOS, MVT::f32, Expand);
191 setOperationAction(ISD::FCOS, MVT::f64, Expand);
193 setOperationAction(ISD::FEXP, MVT::f32, Expand);
194 setOperationAction(ISD::FEXP, MVT::f64, Expand);
196 setOperationAction(ISD::FEXP2, MVT::f32, Expand);
197 setOperationAction(ISD::FEXP2, MVT::f64, Expand);
199 setOperationAction(ISD::FLOG, MVT::f32, Expand);
200 setOperationAction(ISD::FLOG, MVT::f64, Expand);
202 setOperationAction(ISD::FLOG2, MVT::f32, Expand);
203 setOperationAction(ISD::FLOG2, MVT::f64, Expand);
205 setOperationAction(ISD::FLOG10, MVT::f32, Expand);
206 setOperationAction(ISD::FLOG10, MVT::f64, Expand);
208 setOperationAction(ISD::FPOW, MVT::f32, Expand);
209 setOperationAction(ISD::FPOW, MVT::f64, Expand);
211 setOperationAction(ISD::FPOWI, MVT::f32, Expand);
212 setOperationAction(ISD::FPOWI, MVT::f64, Expand);
214 setOperationAction(ISD::FREM, MVT::f32, Expand);
215 setOperationAction(ISD::FREM, MVT::f64, Expand);
217 setOperationAction(ISD::FSIN, MVT::f32, Expand);
218 setOperationAction(ISD::FSIN, MVT::f64, Expand);
220 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
221 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
223 // Virtually no operation on f128 is legal, but LLVM can't expand them when
224 // there's a valid register class, so we need custom operations in most cases.
225 setOperationAction(ISD::FABS, MVT::f128, Expand);
226 setOperationAction(ISD::FADD, MVT::f128, Custom);
227 setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
228 setOperationAction(ISD::FCOS, MVT::f128, Expand);
229 setOperationAction(ISD::FDIV, MVT::f128, Custom);
230 setOperationAction(ISD::FMA, MVT::f128, Expand);
231 setOperationAction(ISD::FMUL, MVT::f128, Custom);
232 setOperationAction(ISD::FNEG, MVT::f128, Expand);
233 setOperationAction(ISD::FP_EXTEND, MVT::f128, Expand);
234 setOperationAction(ISD::FP_ROUND, MVT::f128, Expand);
235 setOperationAction(ISD::FPOW, MVT::f128, Expand);
236 setOperationAction(ISD::FREM, MVT::f128, Expand);
237 setOperationAction(ISD::FRINT, MVT::f128, Expand);
238 setOperationAction(ISD::FSIN, MVT::f128, Expand);
239 setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
240 setOperationAction(ISD::FSQRT, MVT::f128, Expand);
241 setOperationAction(ISD::FSUB, MVT::f128, Custom);
242 setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
243 setOperationAction(ISD::SETCC, MVT::f128, Custom);
244 setOperationAction(ISD::BR_CC, MVT::f128, Custom);
245 setOperationAction(ISD::SELECT, MVT::f128, Expand);
246 setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
247 setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
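  // Illustrative note (not from the original source): the Custom-lowered f128
  // arithmetic above ends up as soft-float library calls, since A64 has no
  // native f128 arithmetic instructions. For example, an IR "fadd fp128" is
  // expected to be selected roughly as:
  //     bl __addtf3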
249 // Lowering for many of the conversions is actually specified by the non-f128
250 // type. The LowerXXX function will be trivial when f128 isn't involved.
251 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
252 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
253 setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
254 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
255 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
256 setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
257 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
258 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
259 setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
260 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
261 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
262 setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
263 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
264 setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
266 // This prevents LLVM trying to compress double constants into a floating
267 // constant-pool entry and trying to load from there. It's of doubtful benefit
268 // for A64: we'd need LDR followed by FCVT, I believe.
269 setLoadExtAction(ISD::EXTLOAD, MVT::f64, Expand);
270 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
271 setLoadExtAction(ISD::EXTLOAD, MVT::f16, Expand);
273 setTruncStoreAction(MVT::f128, MVT::f64, Expand);
274 setTruncStoreAction(MVT::f128, MVT::f32, Expand);
275 setTruncStoreAction(MVT::f128, MVT::f16, Expand);
276 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
277 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
278 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
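  // Illustration (assumed example, not in the original source): because the
  // f64 -> f32 truncating store is Expanded, IR like
  //     %s = fptrunc double %d to float
  //     store float %s, float* %p
  // is kept as an FP_ROUND followed by a plain f32 store rather than being
  // combined into a single truncating store node.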
280 setExceptionPointerRegister(AArch64::X0);
281 setExceptionSelectorRegister(AArch64::X1);
283 if (Subtarget->hasNEON()) {
284 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i8, Custom);
285 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i8, Custom);
286 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i8, Custom);
287 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i16, Custom);
288 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i16, Custom);
289 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i16, Custom);
290 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i32, Custom);
291 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Custom);
292 setOperationAction(ISD::BUILD_VECTOR, MVT::v4i32, Custom);
293 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i64, Custom);
294 setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
295 setOperationAction(ISD::BUILD_VECTOR, MVT::v1f32, Custom);
296 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f32, Custom);
297 setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
298 setOperationAction(ISD::BUILD_VECTOR, MVT::v1f64, Custom);
299 setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
301 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i8, Custom);
302 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i8, Custom);
303 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i16, Custom);
304 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i16, Custom);
305 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i32, Custom);
306 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i32, Custom);
307 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1i64, Custom);
308 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
309 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Custom);
310 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
311 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v1f64, Custom);
312 setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
314 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i8, Legal);
315 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i16, Legal);
316 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4i32, Legal);
317 setOperationAction(ISD::CONCAT_VECTORS, MVT::v2i64, Legal);
321 setOperationAction(ISD::CONCAT_VECTORS, MVT::v4f32, Legal);
322 setOperationAction(ISD::CONCAT_VECTORS, MVT::v2f64, Legal);
324 setOperationAction(ISD::SETCC, MVT::v8i8, Custom);
325 setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
326 setOperationAction(ISD::SETCC, MVT::v4i16, Custom);
327 setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
328 setOperationAction(ISD::SETCC, MVT::v2i32, Custom);
329 setOperationAction(ISD::SETCC, MVT::v4i32, Custom);
330 setOperationAction(ISD::SETCC, MVT::v1i64, Custom);
331 setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
332 setOperationAction(ISD::SETCC, MVT::v1f32, Custom);
333 setOperationAction(ISD::SETCC, MVT::v2f32, Custom);
334 setOperationAction(ISD::SETCC, MVT::v4f32, Custom);
335 setOperationAction(ISD::SETCC, MVT::v1f64, Custom);
336 setOperationAction(ISD::SETCC, MVT::v2f64, Custom);
338 setOperationAction(ISD::FFLOOR, MVT::v2f32, Legal);
339 setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
340 setOperationAction(ISD::FFLOOR, MVT::v1f64, Legal);
341 setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
343 setOperationAction(ISD::FCEIL, MVT::v2f32, Legal);
344 setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
345 setOperationAction(ISD::FCEIL, MVT::v1f64, Legal);
346 setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
348 setOperationAction(ISD::FTRUNC, MVT::v2f32, Legal);
349 setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
350 setOperationAction(ISD::FTRUNC, MVT::v1f64, Legal);
351 setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
353 setOperationAction(ISD::FRINT, MVT::v2f32, Legal);
354 setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
355 setOperationAction(ISD::FRINT, MVT::v1f64, Legal);
356 setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
358 setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Legal);
359 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
360 setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Legal);
361 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);
363 setOperationAction(ISD::FROUND, MVT::v2f32, Legal);
364 setOperationAction(ISD::FROUND, MVT::v4f32, Legal);
365 setOperationAction(ISD::FROUND, MVT::v1f64, Legal);
366 setOperationAction(ISD::FROUND, MVT::v2f64, Legal);
370 EVT AArch64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
371 // It's reasonably important that this value matches the "natural" legal
372 // promotion from i1 for scalar types. Otherwise LegalizeTypes can get itself
373 // in a twist (e.g. inserting an any_extend which then becomes i64 -> i64).
374 if (!VT.isVector()) return MVT::i32;
375 return VT.changeVectorElementTypeToInteger();
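  // Illustrative examples (not from the original source): a scalar compare
  // such as a setcc on two i64 values yields an i32 result here, while a
  // vector compare on v4f32 operands yields a v4i32 lane mask, matching
  // setBooleanVectorContents(ZeroOrNegativeOneBooleanContent) in the
  // constructor.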
static void getExclusiveOperation(unsigned Size, AtomicOrdering Ord,
                                  unsigned &LdrOpc, unsigned &StrOpc) {
381 static const unsigned LoadBares[] = {AArch64::LDXR_byte, AArch64::LDXR_hword,
382 AArch64::LDXR_word, AArch64::LDXR_dword};
383 static const unsigned LoadAcqs[] = {AArch64::LDAXR_byte, AArch64::LDAXR_hword,
384 AArch64::LDAXR_word, AArch64::LDAXR_dword};
385 static const unsigned StoreBares[] = {AArch64::STXR_byte, AArch64::STXR_hword,
386 AArch64::STXR_word, AArch64::STXR_dword};
387 static const unsigned StoreRels[] = {AArch64::STLXR_byte,AArch64::STLXR_hword,
388 AArch64::STLXR_word, AArch64::STLXR_dword};
390 const unsigned *LoadOps, *StoreOps;
  if (Ord == Acquire || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    LoadOps = LoadAcqs;
  else
    LoadOps = LoadBares;
  if (Ord == Release || Ord == AcquireRelease || Ord == SequentiallyConsistent)
    StoreOps = StoreRels;
  else
    StoreOps = StoreBares;
401 assert(isPowerOf2_32(Size) && Size <= 8 &&
402 "unsupported size for atomic binary op!");
404 LdrOpc = LoadOps[Log2_32(Size)];
405 StrOpc = StoreOps[Log2_32(Size)];
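  // Illustrative usage (hypothetical caller, not from the original source):
  //   unsigned LdOp, StOp;
  //   getExclusiveOperation(4, SequentiallyConsistent, LdOp, StOp);
  // A 4-byte seq_cst access takes the acquire-class load and release-class
  // store, and Log2_32(4) == 2 picks the "_word" entries, so this yields
  // AArch64::LDAXR_word and AArch64::STLXR_word.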
// FIXME: AArch64::DTripleRegClass and AArch64::QTripleRegClass don't really
// have a value type mapped; both are defined as MVT::untyped.
410 // Without knowing the MVT type, MachineLICM::getRegisterClassIDAndCost
411 // would fail to figure out the register pressure correctly.
412 std::pair<const TargetRegisterClass*, uint8_t>
413 AArch64TargetLowering::findRepresentativeClass(MVT VT) const{
414 const TargetRegisterClass *RRC = 0;
416 switch (VT.SimpleTy) {
418 return TargetLowering::findRepresentativeClass(VT);
420 RRC = &AArch64::QPairRegClass;
424 RRC = &AArch64::QQuadRegClass;
428 return std::make_pair(RRC, Cost);
432 AArch64TargetLowering::emitAtomicBinary(MachineInstr *MI, MachineBasicBlock *BB,
434 unsigned BinOpcode) const {
435 // This also handles ATOMIC_SWAP, indicated by BinOpcode==0.
436 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
438 const BasicBlock *LLVM_BB = BB->getBasicBlock();
439 MachineFunction *MF = BB->getParent();
440 MachineFunction::iterator It = BB;
443 unsigned dest = MI->getOperand(0).getReg();
444 unsigned ptr = MI->getOperand(1).getReg();
445 unsigned incr = MI->getOperand(2).getReg();
446 AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
447 DebugLoc dl = MI->getDebugLoc();
449 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
451 unsigned ldrOpc, strOpc;
452 getExclusiveOperation(Size, Ord, ldrOpc, strOpc);
454 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
455 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
456 MF->insert(It, loopMBB);
457 MF->insert(It, exitMBB);
459 // Transfer the remainder of BB and its successor edges to exitMBB.
460 exitMBB->splice(exitMBB->begin(), BB,
461 llvm::next(MachineBasicBlock::iterator(MI)),
463 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
465 const TargetRegisterClass *TRC
466 = Size == 8 ? &AArch64::GPR64RegClass : &AArch64::GPR32RegClass;
467 unsigned scratch = (!BinOpcode) ? incr : MRI.createVirtualRegister(TRC);
471 // fallthrough --> loopMBB
472 BB->addSuccessor(loopMBB);
476 // <binop> scratch, dest, incr
477 // stxr stxr_status, scratch, ptr
478 // cbnz stxr_status, loopMBB
479 // fallthrough --> exitMBB
481 BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
483 // All arithmetic operations we'll be creating are designed to take an extra
484 // shift or extend operand, which we can conveniently set to zero.
486 // Operand order needs to go the other way for NAND.
487 if (BinOpcode == AArch64::BICwww_lsl || BinOpcode == AArch64::BICxxx_lsl)
488 BuildMI(BB, dl, TII->get(BinOpcode), scratch)
489 .addReg(incr).addReg(dest).addImm(0);
  else
    BuildMI(BB, dl, TII->get(BinOpcode), scratch)
492 .addReg(dest).addReg(incr).addImm(0);
495 // From the stxr, the register is GPR32; from the cmp it's GPR32wsp
496 unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
497 MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
499 BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(scratch).addReg(ptr);
500 BuildMI(BB, dl, TII->get(AArch64::CBNZw))
501 .addReg(stxr_status).addMBB(loopMBB);
503 BB->addSuccessor(loopMBB);
504 BB->addSuccessor(exitMBB);
510 MI->eraseFromParent(); // The instruction is gone now.
516 AArch64TargetLowering::emitAtomicBinaryMinMax(MachineInstr *MI,
517 MachineBasicBlock *BB,
520 A64CC::CondCodes Cond) const {
521 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
523 const BasicBlock *LLVM_BB = BB->getBasicBlock();
524 MachineFunction *MF = BB->getParent();
525 MachineFunction::iterator It = BB;
528 unsigned dest = MI->getOperand(0).getReg();
529 unsigned ptr = MI->getOperand(1).getReg();
530 unsigned incr = MI->getOperand(2).getReg();
531 AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(3).getImm());
533 unsigned oldval = dest;
534 DebugLoc dl = MI->getDebugLoc();
536 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
537 const TargetRegisterClass *TRC, *TRCsp;
  if (Size == 8) {
    TRC = &AArch64::GPR64RegClass;
    TRCsp = &AArch64::GPR64xspRegClass;
  } else {
    TRC = &AArch64::GPR32RegClass;
    TRCsp = &AArch64::GPR32wspRegClass;
  }
546 unsigned ldrOpc, strOpc;
547 getExclusiveOperation(Size, Ord, ldrOpc, strOpc);
549 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
550 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
551 MF->insert(It, loopMBB);
552 MF->insert(It, exitMBB);
554 // Transfer the remainder of BB and its successor edges to exitMBB.
555 exitMBB->splice(exitMBB->begin(), BB,
556 llvm::next(MachineBasicBlock::iterator(MI)),
558 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
560 unsigned scratch = MRI.createVirtualRegister(TRC);
561 MRI.constrainRegClass(scratch, TRCsp);
565 // fallthrough --> loopMBB
566 BB->addSuccessor(loopMBB);
570 // cmp incr, dest (, sign extend if necessary)
571 // csel scratch, dest, incr, cond
572 // stxr stxr_status, scratch, ptr
573 // cbnz stxr_status, loopMBB
574 // fallthrough --> exitMBB
576 BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
578 // Build compare and cmov instructions.
579 MRI.constrainRegClass(incr, TRCsp);
580 BuildMI(BB, dl, TII->get(CmpOp))
581 .addReg(incr).addReg(oldval).addImm(0);
  BuildMI(BB, dl, TII->get(Size == 8 ? AArch64::CSELxxxc : AArch64::CSELwwwc),
          scratch)
    .addReg(oldval).addReg(incr).addImm(Cond);
587 unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
588 MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
590 BuildMI(BB, dl, TII->get(strOpc), stxr_status)
591 .addReg(scratch).addReg(ptr);
592 BuildMI(BB, dl, TII->get(AArch64::CBNZw))
593 .addReg(stxr_status).addMBB(loopMBB);
595 BB->addSuccessor(loopMBB);
596 BB->addSuccessor(exitMBB);
602 MI->eraseFromParent(); // The instruction is gone now.
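  // Worked example (illustrative, not from the original source): for
  // ATOMIC_LOAD_MIN_I32 the caller passes CmpOp == CMPww_lsl and Cond ==
  // A64CC::GT. Each iteration compares incr against the loaded dest; if
  // incr > dest the CSEL keeps dest (already the minimum), otherwise it picks
  // incr, so the value stored back is min(dest, incr). The unsigned variants
  // differ only in using HI/LO conditions and zero-extending compares.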
608 AArch64TargetLowering::emitAtomicCmpSwap(MachineInstr *MI,
609 MachineBasicBlock *BB,
610 unsigned Size) const {
611 unsigned dest = MI->getOperand(0).getReg();
612 unsigned ptr = MI->getOperand(1).getReg();
613 unsigned oldval = MI->getOperand(2).getReg();
614 unsigned newval = MI->getOperand(3).getReg();
615 AtomicOrdering Ord = static_cast<AtomicOrdering>(MI->getOperand(4).getImm());
616 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
617 DebugLoc dl = MI->getDebugLoc();
619 MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
620 const TargetRegisterClass *TRCsp;
621 TRCsp = Size == 8 ? &AArch64::GPR64xspRegClass : &AArch64::GPR32wspRegClass;
623 unsigned ldrOpc, strOpc;
624 getExclusiveOperation(Size, Ord, ldrOpc, strOpc);
626 MachineFunction *MF = BB->getParent();
627 const BasicBlock *LLVM_BB = BB->getBasicBlock();
628 MachineFunction::iterator It = BB;
629 ++It; // insert the new blocks after the current block
631 MachineBasicBlock *loop1MBB = MF->CreateMachineBasicBlock(LLVM_BB);
632 MachineBasicBlock *loop2MBB = MF->CreateMachineBasicBlock(LLVM_BB);
633 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
634 MF->insert(It, loop1MBB);
635 MF->insert(It, loop2MBB);
636 MF->insert(It, exitMBB);
638 // Transfer the remainder of BB and its successor edges to exitMBB.
639 exitMBB->splice(exitMBB->begin(), BB,
640 llvm::next(MachineBasicBlock::iterator(MI)),
642 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
646 // fallthrough --> loop1MBB
647 BB->addSuccessor(loop1MBB);
654 BuildMI(BB, dl, TII->get(ldrOpc), dest).addReg(ptr);
656 unsigned CmpOp = Size == 8 ? AArch64::CMPxx_lsl : AArch64::CMPww_lsl;
657 MRI.constrainRegClass(dest, TRCsp);
658 BuildMI(BB, dl, TII->get(CmpOp))
659 .addReg(dest).addReg(oldval).addImm(0);
660 BuildMI(BB, dl, TII->get(AArch64::Bcc))
661 .addImm(A64CC::NE).addMBB(exitMBB);
662 BB->addSuccessor(loop2MBB);
663 BB->addSuccessor(exitMBB);
  // stxr stxr_status, newval, [ptr]
667 // cbnz stxr_status, loop1MBB
669 unsigned stxr_status = MRI.createVirtualRegister(&AArch64::GPR32RegClass);
670 MRI.constrainRegClass(stxr_status, &AArch64::GPR32wspRegClass);
672 BuildMI(BB, dl, TII->get(strOpc), stxr_status).addReg(newval).addReg(ptr);
673 BuildMI(BB, dl, TII->get(AArch64::CBNZw))
674 .addReg(stxr_status).addMBB(loop1MBB);
675 BB->addSuccessor(loop1MBB);
676 BB->addSuccessor(exitMBB);
682 MI->eraseFromParent(); // The instruction is gone now.
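  // Overall shape of the expansion above (illustrative summary):
  //   loop1:
  //     ldxr   dest, [ptr]
  //     cmp    dest, oldval
  //     b.ne   exit
  //   loop2:
  //     stxr   status, newval, [ptr]
  //     cbnz   status, loop1
  //   exit:
  // The store-exclusive only runs when the loaded value matched oldval, and
  // the sequence retries if the exclusive monitor was lost in between.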
688 AArch64TargetLowering::EmitF128CSEL(MachineInstr *MI,
689 MachineBasicBlock *MBB) const {
  // We materialise the F128CSEL pseudo-instruction using conditional branches
  // and loads, giving an instruction sequence like:
  //     str qIFFALSE, [sp]
  //     b.cond IfTrue
  //     b End
  //   IfTrue:
  //     str qIFTRUE, [sp]
  //   End:
  //     ldr qDEST, [sp]
  // Using virtual registers would probably not be beneficial since COPY
  // instructions are expensive for f128 (there's no actual instruction to
  // implement the copy).
  // An alternative would be to do an integer-CSEL on some address
  // (e.g. csel x0, x0, x1, ne).
  // It's unclear which approach is actually optimal.
713 const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();
714 MachineFunction *MF = MBB->getParent();
715 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
716 DebugLoc DL = MI->getDebugLoc();
717 MachineFunction::iterator It = MBB;
720 unsigned DestReg = MI->getOperand(0).getReg();
721 unsigned IfTrueReg = MI->getOperand(1).getReg();
722 unsigned IfFalseReg = MI->getOperand(2).getReg();
723 unsigned CondCode = MI->getOperand(3).getImm();
724 bool NZCVKilled = MI->getOperand(4).isKill();
726 MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
727 MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
728 MF->insert(It, TrueBB);
729 MF->insert(It, EndBB);
731 // Transfer rest of current basic-block to EndBB
732 EndBB->splice(EndBB->begin(), MBB,
733 llvm::next(MachineBasicBlock::iterator(MI)),
735 EndBB->transferSuccessorsAndUpdatePHIs(MBB);
737 // We need somewhere to store the f128 value needed.
738 int ScratchFI = MF->getFrameInfo()->CreateSpillStackObject(16, 16);
740 // [... start of incoming MBB ...]
741 // str qIFFALSE, [sp]
744 BuildMI(MBB, DL, TII->get(AArch64::LSFP128_STR))
746 .addFrameIndex(ScratchFI)
748 BuildMI(MBB, DL, TII->get(AArch64::Bcc))
751 BuildMI(MBB, DL, TII->get(AArch64::Bimm))
753 MBB->addSuccessor(TrueBB);
754 MBB->addSuccessor(EndBB);
757 // NZCV is live-through TrueBB.
758 TrueBB->addLiveIn(AArch64::NZCV);
759 EndBB->addLiveIn(AArch64::NZCV);
764 BuildMI(TrueBB, DL, TII->get(AArch64::LSFP128_STR))
766 .addFrameIndex(ScratchFI)
  // Note: fallthrough. We can rely on LLVM adding a branch if it reorders the
  // blocks.
771 TrueBB->addSuccessor(EndBB);
775 // [... rest of incoming MBB ...]
776 MachineInstr *StartOfEnd = EndBB->begin();
777 BuildMI(*EndBB, StartOfEnd, DL, TII->get(AArch64::LSFP128_LDR), DestReg)
778 .addFrameIndex(ScratchFI)
781 MI->eraseFromParent();
786 AArch64TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
787 MachineBasicBlock *MBB) const {
788 switch (MI->getOpcode()) {
789 default: llvm_unreachable("Unhandled instruction with custom inserter");
790 case AArch64::F128CSEL:
791 return EmitF128CSEL(MI, MBB);
792 case AArch64::ATOMIC_LOAD_ADD_I8:
793 return emitAtomicBinary(MI, MBB, 1, AArch64::ADDwww_lsl);
794 case AArch64::ATOMIC_LOAD_ADD_I16:
795 return emitAtomicBinary(MI, MBB, 2, AArch64::ADDwww_lsl);
796 case AArch64::ATOMIC_LOAD_ADD_I32:
797 return emitAtomicBinary(MI, MBB, 4, AArch64::ADDwww_lsl);
798 case AArch64::ATOMIC_LOAD_ADD_I64:
799 return emitAtomicBinary(MI, MBB, 8, AArch64::ADDxxx_lsl);
801 case AArch64::ATOMIC_LOAD_SUB_I8:
802 return emitAtomicBinary(MI, MBB, 1, AArch64::SUBwww_lsl);
803 case AArch64::ATOMIC_LOAD_SUB_I16:
804 return emitAtomicBinary(MI, MBB, 2, AArch64::SUBwww_lsl);
805 case AArch64::ATOMIC_LOAD_SUB_I32:
806 return emitAtomicBinary(MI, MBB, 4, AArch64::SUBwww_lsl);
807 case AArch64::ATOMIC_LOAD_SUB_I64:
808 return emitAtomicBinary(MI, MBB, 8, AArch64::SUBxxx_lsl);
810 case AArch64::ATOMIC_LOAD_AND_I8:
811 return emitAtomicBinary(MI, MBB, 1, AArch64::ANDwww_lsl);
812 case AArch64::ATOMIC_LOAD_AND_I16:
813 return emitAtomicBinary(MI, MBB, 2, AArch64::ANDwww_lsl);
814 case AArch64::ATOMIC_LOAD_AND_I32:
815 return emitAtomicBinary(MI, MBB, 4, AArch64::ANDwww_lsl);
816 case AArch64::ATOMIC_LOAD_AND_I64:
817 return emitAtomicBinary(MI, MBB, 8, AArch64::ANDxxx_lsl);
819 case AArch64::ATOMIC_LOAD_OR_I8:
820 return emitAtomicBinary(MI, MBB, 1, AArch64::ORRwww_lsl);
821 case AArch64::ATOMIC_LOAD_OR_I16:
822 return emitAtomicBinary(MI, MBB, 2, AArch64::ORRwww_lsl);
823 case AArch64::ATOMIC_LOAD_OR_I32:
824 return emitAtomicBinary(MI, MBB, 4, AArch64::ORRwww_lsl);
825 case AArch64::ATOMIC_LOAD_OR_I64:
826 return emitAtomicBinary(MI, MBB, 8, AArch64::ORRxxx_lsl);
828 case AArch64::ATOMIC_LOAD_XOR_I8:
829 return emitAtomicBinary(MI, MBB, 1, AArch64::EORwww_lsl);
830 case AArch64::ATOMIC_LOAD_XOR_I16:
831 return emitAtomicBinary(MI, MBB, 2, AArch64::EORwww_lsl);
832 case AArch64::ATOMIC_LOAD_XOR_I32:
833 return emitAtomicBinary(MI, MBB, 4, AArch64::EORwww_lsl);
834 case AArch64::ATOMIC_LOAD_XOR_I64:
835 return emitAtomicBinary(MI, MBB, 8, AArch64::EORxxx_lsl);
837 case AArch64::ATOMIC_LOAD_NAND_I8:
838 return emitAtomicBinary(MI, MBB, 1, AArch64::BICwww_lsl);
839 case AArch64::ATOMIC_LOAD_NAND_I16:
840 return emitAtomicBinary(MI, MBB, 2, AArch64::BICwww_lsl);
841 case AArch64::ATOMIC_LOAD_NAND_I32:
842 return emitAtomicBinary(MI, MBB, 4, AArch64::BICwww_lsl);
843 case AArch64::ATOMIC_LOAD_NAND_I64:
844 return emitAtomicBinary(MI, MBB, 8, AArch64::BICxxx_lsl);
846 case AArch64::ATOMIC_LOAD_MIN_I8:
847 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::GT);
848 case AArch64::ATOMIC_LOAD_MIN_I16:
849 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::GT);
850 case AArch64::ATOMIC_LOAD_MIN_I32:
851 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::GT);
852 case AArch64::ATOMIC_LOAD_MIN_I64:
853 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::GT);
855 case AArch64::ATOMIC_LOAD_MAX_I8:
856 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_sxtb, A64CC::LT);
857 case AArch64::ATOMIC_LOAD_MAX_I16:
858 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_sxth, A64CC::LT);
859 case AArch64::ATOMIC_LOAD_MAX_I32:
860 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LT);
861 case AArch64::ATOMIC_LOAD_MAX_I64:
862 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LT);
864 case AArch64::ATOMIC_LOAD_UMIN_I8:
865 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::HI);
866 case AArch64::ATOMIC_LOAD_UMIN_I16:
867 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::HI);
868 case AArch64::ATOMIC_LOAD_UMIN_I32:
869 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::HI);
870 case AArch64::ATOMIC_LOAD_UMIN_I64:
871 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::HI);
873 case AArch64::ATOMIC_LOAD_UMAX_I8:
874 return emitAtomicBinaryMinMax(MI, MBB, 1, AArch64::CMPww_uxtb, A64CC::LO);
875 case AArch64::ATOMIC_LOAD_UMAX_I16:
876 return emitAtomicBinaryMinMax(MI, MBB, 2, AArch64::CMPww_uxth, A64CC::LO);
877 case AArch64::ATOMIC_LOAD_UMAX_I32:
878 return emitAtomicBinaryMinMax(MI, MBB, 4, AArch64::CMPww_lsl, A64CC::LO);
879 case AArch64::ATOMIC_LOAD_UMAX_I64:
880 return emitAtomicBinaryMinMax(MI, MBB, 8, AArch64::CMPxx_lsl, A64CC::LO);
882 case AArch64::ATOMIC_SWAP_I8:
883 return emitAtomicBinary(MI, MBB, 1, 0);
884 case AArch64::ATOMIC_SWAP_I16:
885 return emitAtomicBinary(MI, MBB, 2, 0);
886 case AArch64::ATOMIC_SWAP_I32:
887 return emitAtomicBinary(MI, MBB, 4, 0);
888 case AArch64::ATOMIC_SWAP_I64:
889 return emitAtomicBinary(MI, MBB, 8, 0);
891 case AArch64::ATOMIC_CMP_SWAP_I8:
892 return emitAtomicCmpSwap(MI, MBB, 1);
893 case AArch64::ATOMIC_CMP_SWAP_I16:
894 return emitAtomicCmpSwap(MI, MBB, 2);
895 case AArch64::ATOMIC_CMP_SWAP_I32:
896 return emitAtomicCmpSwap(MI, MBB, 4);
897 case AArch64::ATOMIC_CMP_SWAP_I64:
898 return emitAtomicCmpSwap(MI, MBB, 8);
903 const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
905 case AArch64ISD::BR_CC: return "AArch64ISD::BR_CC";
906 case AArch64ISD::Call: return "AArch64ISD::Call";
907 case AArch64ISD::FPMOV: return "AArch64ISD::FPMOV";
908 case AArch64ISD::GOTLoad: return "AArch64ISD::GOTLoad";
909 case AArch64ISD::BFI: return "AArch64ISD::BFI";
910 case AArch64ISD::EXTR: return "AArch64ISD::EXTR";
911 case AArch64ISD::Ret: return "AArch64ISD::Ret";
912 case AArch64ISD::SBFX: return "AArch64ISD::SBFX";
913 case AArch64ISD::SELECT_CC: return "AArch64ISD::SELECT_CC";
914 case AArch64ISD::SETCC: return "AArch64ISD::SETCC";
915 case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN";
916 case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
917 case AArch64ISD::TLSDESCCALL: return "AArch64ISD::TLSDESCCALL";
918 case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge";
919 case AArch64ISD::WrapperSmall: return "AArch64ISD::WrapperSmall";
921 case AArch64ISD::NEON_BSL:
922 return "AArch64ISD::NEON_BSL";
923 case AArch64ISD::NEON_MOVIMM:
924 return "AArch64ISD::NEON_MOVIMM";
925 case AArch64ISD::NEON_MVNIMM:
926 return "AArch64ISD::NEON_MVNIMM";
927 case AArch64ISD::NEON_FMOVIMM:
928 return "AArch64ISD::NEON_FMOVIMM";
929 case AArch64ISD::NEON_CMP:
930 return "AArch64ISD::NEON_CMP";
931 case AArch64ISD::NEON_CMPZ:
932 return "AArch64ISD::NEON_CMPZ";
933 case AArch64ISD::NEON_TST:
934 return "AArch64ISD::NEON_TST";
935 case AArch64ISD::NEON_QSHLs:
936 return "AArch64ISD::NEON_QSHLs";
937 case AArch64ISD::NEON_QSHLu:
938 return "AArch64ISD::NEON_QSHLu";
939 case AArch64ISD::NEON_VDUP:
940 return "AArch64ISD::NEON_VDUP";
941 case AArch64ISD::NEON_VDUPLANE:
942 return "AArch64ISD::NEON_VDUPLANE";
943 case AArch64ISD::NEON_REV16:
944 return "AArch64ISD::NEON_REV16";
945 case AArch64ISD::NEON_REV32:
946 return "AArch64ISD::NEON_REV32";
947 case AArch64ISD::NEON_REV64:
948 return "AArch64ISD::NEON_REV64";
949 case AArch64ISD::NEON_UZP1:
950 return "AArch64ISD::NEON_UZP1";
951 case AArch64ISD::NEON_UZP2:
952 return "AArch64ISD::NEON_UZP2";
953 case AArch64ISD::NEON_ZIP1:
954 return "AArch64ISD::NEON_ZIP1";
955 case AArch64ISD::NEON_ZIP2:
956 return "AArch64ISD::NEON_ZIP2";
957 case AArch64ISD::NEON_TRN1:
958 return "AArch64ISD::NEON_TRN1";
959 case AArch64ISD::NEON_TRN2:
960 return "AArch64ISD::NEON_TRN2";
961 case AArch64ISD::NEON_LD1_UPD:
962 return "AArch64ISD::NEON_LD1_UPD";
963 case AArch64ISD::NEON_LD2_UPD:
964 return "AArch64ISD::NEON_LD2_UPD";
965 case AArch64ISD::NEON_LD3_UPD:
966 return "AArch64ISD::NEON_LD3_UPD";
967 case AArch64ISD::NEON_LD4_UPD:
968 return "AArch64ISD::NEON_LD4_UPD";
969 case AArch64ISD::NEON_ST1_UPD:
970 return "AArch64ISD::NEON_ST1_UPD";
971 case AArch64ISD::NEON_ST2_UPD:
972 return "AArch64ISD::NEON_ST2_UPD";
973 case AArch64ISD::NEON_ST3_UPD:
974 return "AArch64ISD::NEON_ST3_UPD";
975 case AArch64ISD::NEON_ST4_UPD:
976 return "AArch64ISD::NEON_ST4_UPD";
977 case AArch64ISD::NEON_LD1x2_UPD:
978 return "AArch64ISD::NEON_LD1x2_UPD";
979 case AArch64ISD::NEON_LD1x3_UPD:
980 return "AArch64ISD::NEON_LD1x3_UPD";
981 case AArch64ISD::NEON_LD1x4_UPD:
982 return "AArch64ISD::NEON_LD1x4_UPD";
983 case AArch64ISD::NEON_ST1x2_UPD:
984 return "AArch64ISD::NEON_ST1x2_UPD";
985 case AArch64ISD::NEON_ST1x3_UPD:
986 return "AArch64ISD::NEON_ST1x3_UPD";
987 case AArch64ISD::NEON_ST1x4_UPD:
988 return "AArch64ISD::NEON_ST1x4_UPD";
989 case AArch64ISD::NEON_LD2DUP:
990 return "AArch64ISD::NEON_LD2DUP";
991 case AArch64ISD::NEON_LD3DUP:
992 return "AArch64ISD::NEON_LD3DUP";
993 case AArch64ISD::NEON_LD4DUP:
994 return "AArch64ISD::NEON_LD4DUP";
995 case AArch64ISD::NEON_LD2DUP_UPD:
996 return "AArch64ISD::NEON_LD2DUP_UPD";
997 case AArch64ISD::NEON_LD3DUP_UPD:
998 return "AArch64ISD::NEON_LD3DUP_UPD";
999 case AArch64ISD::NEON_LD4DUP_UPD:
1000 return "AArch64ISD::NEON_LD4DUP_UPD";
1001 case AArch64ISD::NEON_LD2LN_UPD:
1002 return "AArch64ISD::NEON_LD2LN_UPD";
1003 case AArch64ISD::NEON_LD3LN_UPD:
1004 return "AArch64ISD::NEON_LD3LN_UPD";
1005 case AArch64ISD::NEON_LD4LN_UPD:
1006 return "AArch64ISD::NEON_LD4LN_UPD";
1007 case AArch64ISD::NEON_ST2LN_UPD:
1008 return "AArch64ISD::NEON_ST2LN_UPD";
1009 case AArch64ISD::NEON_ST3LN_UPD:
1010 return "AArch64ISD::NEON_ST3LN_UPD";
1011 case AArch64ISD::NEON_ST4LN_UPD:
1012 return "AArch64ISD::NEON_ST4LN_UPD";
1013 case AArch64ISD::NEON_VEXTRACT:
1014 return "AArch64ISD::NEON_VEXTRACT";
1020 static const uint16_t AArch64FPRArgRegs[] = {
1021 AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
1022 AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7
1024 static const unsigned NumFPRArgRegs = llvm::array_lengthof(AArch64FPRArgRegs);
1026 static const uint16_t AArch64ArgRegs[] = {
1027 AArch64::X0, AArch64::X1, AArch64::X2, AArch64::X3,
1028 AArch64::X4, AArch64::X5, AArch64::X6, AArch64::X7
1030 static const unsigned NumArgRegs = llvm::array_lengthof(AArch64ArgRegs);
1032 static bool CC_AArch64NoMoreRegs(unsigned ValNo, MVT ValVT, MVT LocVT,
1033 CCValAssign::LocInfo LocInfo,
1034 ISD::ArgFlagsTy ArgFlags, CCState &State) {
1035 // Mark all remaining general purpose registers as allocated. We don't
1036 // backtrack: if (for example) an i128 gets put on the stack, no subsequent
1037 // i64 will go in registers (C.11).
1038 for (unsigned i = 0; i < NumArgRegs; ++i)
1039 State.AllocateReg(AArch64ArgRegs[i]);
1044 #include "AArch64GenCallingConv.inc"
1046 CCAssignFn *AArch64TargetLowering::CCAssignFnForNode(CallingConv::ID CC) const {
1049 default: llvm_unreachable("Unsupported calling convention");
1050 case CallingConv::Fast:
1051 case CallingConv::C:
1057 AArch64TargetLowering::SaveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG,
1058 SDLoc DL, SDValue &Chain) const {
1059 MachineFunction &MF = DAG.getMachineFunction();
1060 MachineFrameInfo *MFI = MF.getFrameInfo();
1061 AArch64MachineFunctionInfo *FuncInfo
1062 = MF.getInfo<AArch64MachineFunctionInfo>();
1064 SmallVector<SDValue, 8> MemOps;
1066 unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(AArch64ArgRegs,
1068 unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(AArch64FPRArgRegs,
1071 unsigned GPRSaveSize = 8 * (NumArgRegs - FirstVariadicGPR);
1073 if (GPRSaveSize != 0) {
1074 GPRIdx = MFI->CreateStackObject(GPRSaveSize, 8, false);
1076 SDValue FIN = DAG.getFrameIndex(GPRIdx, getPointerTy());
1078 for (unsigned i = FirstVariadicGPR; i < NumArgRegs; ++i) {
1079 unsigned VReg = MF.addLiveIn(AArch64ArgRegs[i], &AArch64::GPR64RegClass);
1080 SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
1081 SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
1082 MachinePointerInfo::getStack(i * 8),
1084 MemOps.push_back(Store);
1085 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
1086 DAG.getConstant(8, getPointerTy()));
1090 if (getSubtarget()->hasFPARMv8()) {
1091 unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
1093 // According to the AArch64 Procedure Call Standard, section B.1/B.3, we
1094 // can omit a register save area if we know we'll never use registers of
1096 if (FPRSaveSize != 0) {
1097 FPRIdx = MFI->CreateStackObject(FPRSaveSize, 16, false);
1099 SDValue FIN = DAG.getFrameIndex(FPRIdx, getPointerTy());
1101 for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
1102 unsigned VReg = MF.addLiveIn(AArch64FPRArgRegs[i],
1103 &AArch64::FPR128RegClass);
1104 SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);
1105 SDValue Store = DAG.getStore(Val.getValue(1), DL, Val, FIN,
1106 MachinePointerInfo::getStack(i * 16),
1108 MemOps.push_back(Store);
1109 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(), FIN,
1110 DAG.getConstant(16, getPointerTy()));
1113 FuncInfo->setVariadicFPRIdx(FPRIdx);
1114 FuncInfo->setVariadicFPRSize(FPRSaveSize);
1117 int StackIdx = MFI->CreateFixedObject(8, CCInfo.getNextStackOffset(), true);
1119 FuncInfo->setVariadicStackIdx(StackIdx);
1120 FuncInfo->setVariadicGPRIdx(GPRIdx);
1121 FuncInfo->setVariadicGPRSize(GPRSaveSize);
1123 if (!MemOps.empty()) {
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
                        MemOps.size());
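  // Worked example (illustrative, not from the original source): for a C
  // function "int f(int n, ...)", only w0/x0 is consumed by the named
  // argument, so FirstVariadicGPR == 1 and GPRSaveSize == 8 * (8 - 1) == 56
  // bytes; no FP registers are used, so FPRSaveSize == 16 * 8 == 128 bytes
  // when FP is available. va_arg later reads register-passed variadic values
  // back out of these save areas.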
1131 AArch64TargetLowering::LowerFormalArguments(SDValue Chain,
1132 CallingConv::ID CallConv, bool isVarArg,
1133 const SmallVectorImpl<ISD::InputArg> &Ins,
1134 SDLoc dl, SelectionDAG &DAG,
1135 SmallVectorImpl<SDValue> &InVals) const {
1136 MachineFunction &MF = DAG.getMachineFunction();
1137 AArch64MachineFunctionInfo *FuncInfo
1138 = MF.getInfo<AArch64MachineFunctionInfo>();
1139 MachineFrameInfo *MFI = MF.getFrameInfo();
1140 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
1142 SmallVector<CCValAssign, 16> ArgLocs;
1143 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1144 getTargetMachine(), ArgLocs, *DAG.getContext());
1145 CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForNode(CallConv));
1147 SmallVector<SDValue, 16> ArgValues;
1150 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1151 CCValAssign &VA = ArgLocs[i];
1152 ISD::ArgFlagsTy Flags = Ins[i].Flags;
1154 if (Flags.isByVal()) {
1155 // Byval is used for small structs and HFAs in the PCS, but the system
1156 // should work in a non-compliant manner for larger structs.
1157 EVT PtrTy = getPointerTy();
1158 int Size = Flags.getByValSize();
1159 unsigned NumRegs = (Size + 7) / 8;
1161 unsigned FrameIdx = MFI->CreateFixedObject(8 * NumRegs,
1162 VA.getLocMemOffset(),
1164 SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrTy);
1165 InVals.push_back(FrameIdxN);
1168 } else if (VA.isRegLoc()) {
1169 MVT RegVT = VA.getLocVT();
1170 const TargetRegisterClass *RC = getRegClassFor(RegVT);
1171 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
1173 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
1174 } else { // VA.isRegLoc()
1175 assert(VA.isMemLoc());
1177 int FI = MFI->CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
1178 VA.getLocMemOffset(), true);
1180 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
1181 ArgValue = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
1182 MachinePointerInfo::getFixedStack(FI),
1183 false, false, false, 0);
1188 switch (VA.getLocInfo()) {
1189 default: llvm_unreachable("Unknown loc info!");
1190 case CCValAssign::Full: break;
1191 case CCValAssign::BCvt:
1192 ArgValue = DAG.getNode(ISD::BITCAST,dl, VA.getValVT(), ArgValue);
1194 case CCValAssign::SExt:
1195 case CCValAssign::ZExt:
1196 case CCValAssign::AExt: {
1197 unsigned DestSize = VA.getValVT().getSizeInBits();
1198 unsigned DestSubReg;
1201 case 8: DestSubReg = AArch64::sub_8; break;
1202 case 16: DestSubReg = AArch64::sub_16; break;
1203 case 32: DestSubReg = AArch64::sub_32; break;
1204 case 64: DestSubReg = AArch64::sub_64; break;
1205 default: llvm_unreachable("Unexpected argument promotion");
1208 ArgValue = SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl,
1209 VA.getValVT(), ArgValue,
1210 DAG.getTargetConstant(DestSubReg, MVT::i32)),
1216 InVals.push_back(ArgValue);
1220 SaveVarArgRegisters(CCInfo, DAG, dl, Chain);
1222 unsigned StackArgSize = CCInfo.getNextStackOffset();
1223 if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
1224 // This is a non-standard ABI so by fiat I say we're allowed to make full
    // use of the stack area to be popped, which must be aligned to 16 bytes in
    // any case:
1227 StackArgSize = RoundUpToAlignment(StackArgSize, 16);
1229 // If we're expected to restore the stack (e.g. fastcc) then we'll be adding
1230 // a multiple of 16.
1231 FuncInfo->setArgumentStackToRestore(StackArgSize);
1233 // This realignment carries over to the available bytes below. Our own
    // callers will guarantee the space is free by giving an aligned value to
    // CALLSEQ_START.
1237 // Even if we're not expected to free up the space, it's useful to know how
1238 // much is there while considering tail calls (because we can reuse it).
1239 FuncInfo->setBytesInStackArgArea(StackArgSize);
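  // Worked example (illustrative, not from the original source): a fastcc
  // function taking nine i64 arguments under GuaranteedTailCallOpt gets the
  // first eight in x0-x7 and the ninth on the stack, so getNextStackOffset()
  // is 8; the rounding above then records 16 bytes as the callee-popped area,
  // keeping SP 16-byte aligned.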
1245 AArch64TargetLowering::LowerReturn(SDValue Chain,
1246 CallingConv::ID CallConv, bool isVarArg,
1247 const SmallVectorImpl<ISD::OutputArg> &Outs,
1248 const SmallVectorImpl<SDValue> &OutVals,
1249 SDLoc dl, SelectionDAG &DAG) const {
1250 // CCValAssign - represent the assignment of the return value to a location.
1251 SmallVector<CCValAssign, 16> RVLocs;
1253 // CCState - Info about the registers and stack slots.
1254 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
1255 getTargetMachine(), RVLocs, *DAG.getContext());
1257 // Analyze outgoing return values.
1258 CCInfo.AnalyzeReturn(Outs, CCAssignFnForNode(CallConv));
1261 SmallVector<SDValue, 4> RetOps(1, Chain);
1263 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
1264 // PCS: "If the type, T, of the result of a function is such that
1265 // void func(T arg) would require that arg be passed as a value in a
1266 // register (or set of registers) according to the rules in 5.4, then the
1267 // result is returned in the same registers as would be used for such an
1270 // Otherwise, the caller shall reserve a block of memory of sufficient
1271 // size and alignment to hold the result. The address of the memory block
1272 // shall be passed as an additional argument to the function in x8."
1274 // This is implemented in two places. The register-return values are dealt
1275 // with here, more complex returns are passed as an sret parameter, which
1276 // means we don't have to worry about it during actual return.
1277 CCValAssign &VA = RVLocs[i];
1278 assert(VA.isRegLoc() && "Only register-returns should be created by PCS");
1281 SDValue Arg = OutVals[i];
1283 // There's no convenient note in the ABI about this as there is for normal
1284 // arguments, but it says return values are passed in the same registers as
1285 // an argument would be. I believe that includes the comments about
1286 // unspecified higher bits, putting the burden of widening on the *caller*
1287 // for return values.
1288 switch (VA.getLocInfo()) {
1289 default: llvm_unreachable("Unknown loc info");
1290 case CCValAssign::Full: break;
1291 case CCValAssign::SExt:
1292 case CCValAssign::ZExt:
1293 case CCValAssign::AExt:
1294 // Floating-point values should only be extended when they're going into
1295 // memory, which can't happen here so an integer extend is acceptable.
1296 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1298 case CCValAssign::BCvt:
1299 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1303 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
1304 Flag = Chain.getValue(1);
1305 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
1308 RetOps[0] = Chain; // Update chain.
1310 // Add the flag if we have it.
1312 RetOps.push_back(Flag);
1314 return DAG.getNode(AArch64ISD::Ret, dl, MVT::Other,
1315 &RetOps[0], RetOps.size());
1319 AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
1320 SmallVectorImpl<SDValue> &InVals) const {
1321 SelectionDAG &DAG = CLI.DAG;
1323 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1324 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1325 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1326 SDValue Chain = CLI.Chain;
1327 SDValue Callee = CLI.Callee;
1328 bool &IsTailCall = CLI.IsTailCall;
1329 CallingConv::ID CallConv = CLI.CallConv;
1330 bool IsVarArg = CLI.IsVarArg;
1332 MachineFunction &MF = DAG.getMachineFunction();
1333 AArch64MachineFunctionInfo *FuncInfo
1334 = MF.getInfo<AArch64MachineFunctionInfo>();
1335 bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
1336 bool IsStructRet = !Outs.empty() && Outs[0].Flags.isSRet();
1337 bool IsSibCall = false;
1340 IsTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1341 IsVarArg, IsStructRet, MF.getFunction()->hasStructRetAttr(),
1342 Outs, OutVals, Ins, DAG);
1344 // A sibling call is one where we're under the usual C ABI and not planning
1345 // to change that but can still do a tail call:
1346 if (!TailCallOpt && IsTailCall)
1350 SmallVector<CCValAssign, 16> ArgLocs;
1351 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
1352 getTargetMachine(), ArgLocs, *DAG.getContext());
1353 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CallConv));
1355 // On AArch64 (and all other architectures I'm aware of) the most this has to
1356 // do is adjust the stack pointer.
1357 unsigned NumBytes = RoundUpToAlignment(CCInfo.getNextStackOffset(), 16);
1359 // Since we're not changing the ABI to make this a tail call, the memory
1360 // operands are already available in the caller's incoming argument space.
1364 // FPDiff is the byte offset of the call's argument area from the callee's.
1365 // Stores to callee stack arguments will be placed in FixedStackSlots offset
1366 // by this amount for a tail call. In a sibling call it must be 0 because the
1367 // caller will deallocate the entire stack and the callee still expects its
1368 // arguments to begin at SP+0. Completely unused for non-tail calls.
1371 if (IsTailCall && !IsSibCall) {
1372 unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
1374 // FPDiff will be negative if this tail call requires more space than we
1375 // would automatically have in our incoming argument space. Positive if we
1376 // can actually shrink the stack.
1377 FPDiff = NumReusableBytes - NumBytes;
1379 // The stack pointer must be 16-byte aligned at all times it's used for a
1380 // memory operation, which in practice means at *all* times and in
1381 // particular across call boundaries. Therefore our own arguments started at
1382 // a 16-byte aligned SP and the delta applied for the tail call should
1383 // satisfy the same constraint.
1384 assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
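    // Worked numeric example (illustrative, not from the original source): if
    // this function was entered with 32 bytes of stack-passed arguments
    // (NumReusableBytes == 32) and the tail call needs NumBytes == 16, then
    // FPDiff == +16 and the outgoing arguments fit in the existing area; if
    // the tail call instead needs 48 bytes, FPDiff == -16 and the tail-call
    // sequence has to grow the argument area by 16 bytes.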
1388 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true),
1391 SDValue StackPtr = DAG.getCopyFromReg(Chain, dl, AArch64::XSP,
1394 SmallVector<SDValue, 8> MemOpChains;
1395 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
1397 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
1398 CCValAssign &VA = ArgLocs[i];
1399 ISD::ArgFlagsTy Flags = Outs[i].Flags;
1400 SDValue Arg = OutVals[i];
1402 // Callee does the actual widening, so all extensions just use an implicit
1403 // definition of the rest of the Loc. Aesthetically, this would be nicer as
1404 // an ANY_EXTEND, but that isn't valid for floating-point types and this
1405 // alternative works on integer types too.
1406 switch (VA.getLocInfo()) {
1407 default: llvm_unreachable("Unknown loc info!");
1408 case CCValAssign::Full: break;
1409 case CCValAssign::SExt:
1410 case CCValAssign::ZExt:
1411 case CCValAssign::AExt: {
1412 unsigned SrcSize = VA.getValVT().getSizeInBits();
1416 case 8: SrcSubReg = AArch64::sub_8; break;
1417 case 16: SrcSubReg = AArch64::sub_16; break;
1418 case 32: SrcSubReg = AArch64::sub_32; break;
1419 case 64: SrcSubReg = AArch64::sub_64; break;
1420 default: llvm_unreachable("Unexpected argument promotion");
1423 Arg = SDValue(DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
1425 DAG.getUNDEF(VA.getLocVT()),
1427 DAG.getTargetConstant(SrcSubReg, MVT::i32)),
1432 case CCValAssign::BCvt:
1433 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1437 if (VA.isRegLoc()) {
1438 // A normal register (sub-) argument. For now we just note it down because
1439 // we want to copy things into registers as late as possible to avoid
1440 // register-pressure (and possibly worse).
1441 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1445 assert(VA.isMemLoc() && "unexpected argument location");
1448 MachinePointerInfo DstInfo;
1450 uint32_t OpSize = Flags.isByVal() ? Flags.getByValSize() :
1451 VA.getLocVT().getSizeInBits();
1452 OpSize = (OpSize + 7) / 8;
1453 int32_t Offset = VA.getLocMemOffset() + FPDiff;
1454 int FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
1456 DstAddr = DAG.getFrameIndex(FI, getPointerTy());
1457 DstInfo = MachinePointerInfo::getFixedStack(FI);
1459 // Make sure any stack arguments overlapping with where we're storing are
1460 // loaded before this eventual operation. Otherwise they'll be clobbered.
1461 Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
1463 SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset());
1465 DstAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
1466 DstInfo = MachinePointerInfo::getStack(VA.getLocMemOffset());
1469 if (Flags.isByVal()) {
1470 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i64);
1471 SDValue Cpy = DAG.getMemcpy(Chain, dl, DstAddr, Arg, SizeNode,
1472 Flags.getByValAlign(),
1473 /*isVolatile = */ false,
1474 /*alwaysInline = */ false,
1475 DstInfo, MachinePointerInfo(0));
1476 MemOpChains.push_back(Cpy);
1478 // Normal stack argument, put it where it's needed.
1479 SDValue Store = DAG.getStore(Chain, dl, Arg, DstAddr, DstInfo,
1481 MemOpChains.push_back(Store);
1485 // The loads and stores generated above shouldn't clash with each
  // other. Combining them with this TokenFactor notes that fact for the rest of
  // codegen.
1488 if (!MemOpChains.empty())
1489 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
1490 &MemOpChains[0], MemOpChains.size());
1492 // Most of the rest of the instructions need to be glued together; we don't
1493 // want assignments to actual registers used by a call to be rearranged by a
1494 // well-meaning scheduler.
1497 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1498 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1499 RegsToPass[i].second, InFlag);
1500 InFlag = Chain.getValue(1);
1503 // The linker is responsible for inserting veneers when necessary to put a
  // function call destination in range, so we don't need to bother with a
  // wrapper here.
1506 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
1507 const GlobalValue *GV = G->getGlobal();
1508 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy());
1509 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
1510 const char *Sym = S->getSymbol();
1511 Callee = DAG.getTargetExternalSymbol(Sym, getPointerTy());
1514 // We don't usually want to end the call-sequence here because we would tidy
1515 // the frame up *after* the call, however in the ABI-changing tail-call case
1516 // we've carefully laid out the parameters so that when sp is reset they'll be
1517 // in the correct location.
1518 if (IsTailCall && !IsSibCall) {
1519 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
1520 DAG.getIntPtrConstant(0, true), InFlag, dl);
1521 InFlag = Chain.getValue(1);
1524 // We produce the following DAG scheme for the actual call instruction:
  //   (AArch64Call Chain, Callee, reg1, ..., regn, preserveMask, inflag?)
1527 // Most arguments aren't going to be used and just keep the values live as
1528 // far as LLVM is concerned. It's expected to be selected as simply "bl
1529 // callee" (for a direct, non-tail call).
1530 std::vector<SDValue> Ops;
1531 Ops.push_back(Chain);
1532 Ops.push_back(Callee);
1535 // Each tail call may have to adjust the stack by a different amount, so
1536 // this information must travel along with the operation for eventual
1537 // consumption by emitEpilogue.
1538 Ops.push_back(DAG.getTargetConstant(FPDiff, MVT::i32));
1541 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
1542 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
1543 RegsToPass[i].second.getValueType()));
1546 // Add a register mask operand representing the call-preserved registers. This
1547 // is used later in codegen to constrain register-allocation.
1548 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
1549 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
1550 assert(Mask && "Missing call preserved mask for calling convention");
1551 Ops.push_back(DAG.getRegisterMask(Mask));
1553 // If we needed glue, put it in as the last argument.
1554 if (InFlag.getNode())
1555 Ops.push_back(InFlag);
1557 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  if (IsTailCall)
    return DAG.getNode(AArch64ISD::TC_RETURN, dl, NodeTys, &Ops[0], Ops.size());

  Chain = DAG.getNode(AArch64ISD::Call, dl, NodeTys, &Ops[0], Ops.size());
1564 InFlag = Chain.getValue(1);
1566 // Now we can reclaim the stack, just as well do it before working out where
1567 // our return value is.
1569 uint64_t CalleePopBytes
1570 = DoesCalleeRestoreStack(CallConv, TailCallOpt) ? NumBytes : 0;
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(CalleePopBytes, true),
                             InFlag, dl);
  InFlag = Chain.getValue(1);
1578 return LowerCallResult(Chain, InFlag, CallConv,
1579 IsVarArg, Ins, dl, DAG, InVals);
}

SDValue
AArch64TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
1584 CallingConv::ID CallConv, bool IsVarArg,
1585 const SmallVectorImpl<ISD::InputArg> &Ins,
1586 SDLoc dl, SelectionDAG &DAG,
1587 SmallVectorImpl<SDValue> &InVals) const {
1588 // Assign locations to each value returned by this call.
1589 SmallVector<CCValAssign, 16> RVLocs;
1590 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(),
1591 getTargetMachine(), RVLocs, *DAG.getContext());
1592 CCInfo.AnalyzeCallResult(Ins, CCAssignFnForNode(CallConv));
1594 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1595 CCValAssign VA = RVLocs[i];
1597 // Return values that are too big to fit into registers should use an sret
1598 // pointer, so this can be a lot simpler than the main argument code.
1599 assert(VA.isRegLoc() && "Memory locations not expected for call return");
    SDValue Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
                                     InFlag);
1603 Chain = Val.getValue(1);
1604 InFlag = Val.getValue(2);
1606 switch (VA.getLocInfo()) {
1607 default: llvm_unreachable("Unknown loc info!");
1608 case CCValAssign::Full: break;
1609 case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
      break;
1612 case CCValAssign::ZExt:
1613 case CCValAssign::SExt:
1614 case CCValAssign::AExt:
1615 // Floating-point arguments only get extended/truncated if they're going
1616 // in memory, so using the integer operation is acceptable here.
      Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

bool
AArch64TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
                                                  CallingConv::ID CalleeCC,
                                                  bool IsVarArg,
1631 bool IsCalleeStructRet,
1632 bool IsCallerStructRet,
1633 const SmallVectorImpl<ISD::OutputArg> &Outs,
1634 const SmallVectorImpl<SDValue> &OutVals,
1635 const SmallVectorImpl<ISD::InputArg> &Ins,
1636 SelectionDAG& DAG) const {
1638 // For CallingConv::C this function knows whether the ABI needs
  // changing. That's not true for other conventions so they will have to opt
  // in manually.
  if (!IsTailCallConvention(CalleeCC) && CalleeCC != CallingConv::C)
    return false;
1644 const MachineFunction &MF = DAG.getMachineFunction();
1645 const Function *CallerF = MF.getFunction();
1646 CallingConv::ID CallerCC = CallerF->getCallingConv();
1647 bool CCMatch = CallerCC == CalleeCC;
1649 // Byval parameters hand the function a pointer directly into the stack area
1650 // we want to reuse during a tail call. Working around this *is* possible (see
1651 // X86) but less efficient and uglier in LowerCall.
1652 for (Function::const_arg_iterator i = CallerF->arg_begin(),
1653 e = CallerF->arg_end(); i != e; ++i)
    if (i->hasByValAttr())
      return false;

  if (getTargetMachine().Options.GuaranteedTailCallOpt) {
    if (IsTailCallConvention(CalleeCC) && CCMatch)
      return true;
    return false;
  }
1663 // Now we search for cases where we can use a tail call without changing the
  // ABI. Sibcall is used in some places (particularly gcc) to refer to this
  // situation.
1667 // I want anyone implementing a new calling convention to think long and hard
1668 // about this assert.
1669 assert((!IsVarArg || CalleeCC == CallingConv::C)
1670 && "Unexpected variadic calling convention");
1672 if (IsVarArg && !Outs.empty()) {
1673 // At least two cases here: if caller is fastcc then we can't have any
1674 // memory arguments (we'd be expected to clean up the stack afterwards). If
1675 // caller is C then we could potentially use its argument area.
1677 // FIXME: for now we take the most conservative of these in both cases:
1678 // disallow all variadic memory operands.
1679 SmallVector<CCValAssign, 16> ArgLocs;
1680 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
1681 getTargetMachine(), ArgLocs, *DAG.getContext());
1683 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
1684 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
      if (!ArgLocs[i].isRegLoc())
        return false;
  }
1689 // If the calling conventions do not match, then we'd better make sure the
1690 // results are returned in the same way as what the caller expects.
  if (!CCMatch) {
    SmallVector<CCValAssign, 16> RVLocs1;
    CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs1, *DAG.getContext());
    CCInfo1.AnalyzeCallResult(Ins, CCAssignFnForNode(CalleeCC));

    SmallVector<CCValAssign, 16> RVLocs2;
    CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(),
                    getTargetMachine(), RVLocs2, *DAG.getContext());
    CCInfo2.AnalyzeCallResult(Ins, CCAssignFnForNode(CallerCC));

    if (RVLocs1.size() != RVLocs2.size())
      return false;
    for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
      if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
        return false;
      if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
        return false;
      if (RVLocs1[i].isRegLoc()) {
        if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
          return false;
      } else {
        if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
          return false;
      }
    }
  }

  // Nothing more to check if the callee is taking no arguments.
  if (Outs.empty())
    return true;
1723 SmallVector<CCValAssign, 16> ArgLocs;
1724 CCState CCInfo(CalleeCC, IsVarArg, DAG.getMachineFunction(),
1725 getTargetMachine(), ArgLocs, *DAG.getContext());
1727 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForNode(CalleeCC));
1729 const AArch64MachineFunctionInfo *FuncInfo
1730 = MF.getInfo<AArch64MachineFunctionInfo>();
1732 // If the stack arguments for this call would fit into our own save area then
1733 // the call can be made tail.
1734 return CCInfo.getNextStackOffset() <= FuncInfo->getBytesInStackArgArea();
1737 bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
1738 bool TailCallOpt) const {
1739 return CallCC == CallingConv::Fast && TailCallOpt;
1742 bool AArch64TargetLowering::IsTailCallConvention(CallingConv::ID CallCC) const {
  return CallCC == CallingConv::Fast;
}
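/// When a tail call reuses the caller's stack-argument area, stores into that
/// area can clobber slots which incoming argument loads still need. This
/// helper gathers the chains of any such overlapping loads so the clobbering
/// store can be sequenced after them.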
1746 SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
                                                    SelectionDAG &DAG,
                                                    MachineFrameInfo *MFI,
1749 int ClobberedFI) const {
1750 SmallVector<SDValue, 8> ArgChains;
1751 int64_t FirstByte = MFI->getObjectOffset(ClobberedFI);
1752 int64_t LastByte = FirstByte + MFI->getObjectSize(ClobberedFI) - 1;
1754 // Include the original chain at the beginning of the list. When this is
1755 // used by target LowerCall hooks, this helps legalize find the
1756 // CALLSEQ_BEGIN node.
1757 ArgChains.push_back(Chain);
1759 // Add a chain value for each stack argument corresponding
1760 for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
1761 UE = DAG.getEntryNode().getNode()->use_end(); U != UE; ++U)
1762 if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
1763 if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
1764 if (FI->getIndex() < 0) {
1765 int64_t InFirstByte = MFI->getObjectOffset(FI->getIndex());
1766 int64_t InLastByte = InFirstByte;
1767 InLastByte += MFI->getObjectSize(FI->getIndex()) - 1;
1769 if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
1770 (FirstByte <= InFirstByte && InFirstByte <= LastByte))
1771 ArgChains.push_back(SDValue(L, 1));
1774 // Build a tokenfactor for all the chains.
1775 return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other,
1776 &ArgChains[0], ArgChains.size());
static A64CC::CondCodes IntCCToA64CC(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETEQ:  return A64CC::EQ;
  case ISD::SETGT:  return A64CC::GT;
  case ISD::SETGE:  return A64CC::GE;
  case ISD::SETLT:  return A64CC::LT;
  case ISD::SETLE:  return A64CC::LE;
  case ISD::SETNE:  return A64CC::NE;
  case ISD::SETUGT: return A64CC::HI;
  case ISD::SETUGE: return A64CC::HS;
  case ISD::SETULT: return A64CC::LO;
  case ISD::SETULE: return A64CC::LS;
  default: llvm_unreachable("Unexpected condition code");
  }
}
1795 bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Val) const {
1796 // icmp is implemented using adds/subs immediate, which take an unsigned
1797 // 12-bit immediate, optionally shifted left by 12 bits.
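  //
  // For example, 0xfff and 0xabc000 are encodable, while 0x1001 is not: it
  // needs set bits both inside and outside a single shifted 12-bit field.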
  // Negative values are symmetric: they can be handled by using ADDS instead
  // of SUBS.
  if (Val < 0)
    Val = -Val;

  return (Val & ~0xfff) == 0 || (Val & ~0xfff000) == 0;
}
1806 SDValue AArch64TargetLowering::getSelectableIntSetCC(SDValue LHS, SDValue RHS,
1807 ISD::CondCode CC, SDValue &A64cc,
1808 SelectionDAG &DAG, SDLoc &dl) const {
  if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
    int64_t C = 0;
    EVT VT = RHSC->getValueType(0);
    bool knownInvalid = false;

    // I'm not convinced the rest of LLVM handles these edge cases properly,
    // but we can at least get it right.
    if (isSignedIntSetCC(CC)) {
      C = RHSC->getSExtValue();
    } else if (RHSC->getZExtValue() > INT64_MAX) {
      // A 64-bit constant not representable by a signed 64-bit integer is far
      // too big to fit into a SUBS immediate anyway.
      knownInvalid = true;
    } else {
      C = RHSC->getZExtValue();
    }
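    // For example, "slt x, 0x1001" has an unencodable immediate but is
    // equivalent to "sle x, 0x1000", which fits; the switch below adjusts the
    // constant and condition together in exactly this way.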
    if (!knownInvalid && !isLegalICmpImmediate(C)) {
      // Constant does not fit, try adjusting it by one?
      switch (CC) {
      default: break;

      case ISD::SETLT:
      case ISD::SETGE:
        if (isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
          RHS = DAG.getConstant(C-1, VT);
        }
        break;

      case ISD::SETULT:
      case ISD::SETUGE:
        if (isLegalICmpImmediate(C-1)) {
          CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
          RHS = DAG.getConstant(C-1, VT);
        }
        break;

      case ISD::SETLE:
      case ISD::SETGT:
        if (isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
          RHS = DAG.getConstant(C+1, VT);
        }
        break;

      case ISD::SETULE:
      case ISD::SETUGT:
        if (isLegalICmpImmediate(C+1)) {
          CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
          RHS = DAG.getConstant(C+1, VT);
        }
        break;
      }
    }
  }
1862 A64CC::CondCodes CondCode = IntCCToA64CC(CC);
1863 A64cc = DAG.getConstant(CondCode, MVT::i32);
1864 return DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
1865 DAG.getCondCode(CC));
1868 static A64CC::CondCodes FPCCToA64CC(ISD::CondCode CC,
1869 A64CC::CondCodes &Alternative) {
1870 A64CC::CondCodes CondCode = A64CC::Invalid;
  Alternative = A64CC::Invalid;

  switch (CC) {
  default: llvm_unreachable("Unknown FP condition!");
1876 case ISD::SETOEQ: CondCode = A64CC::EQ; break;
1878 case ISD::SETOGT: CondCode = A64CC::GT; break;
1880 case ISD::SETOGE: CondCode = A64CC::GE; break;
1881 case ISD::SETOLT: CondCode = A64CC::MI; break;
1882 case ISD::SETOLE: CondCode = A64CC::LS; break;
1883 case ISD::SETONE: CondCode = A64CC::MI; Alternative = A64CC::GT; break;
1884 case ISD::SETO: CondCode = A64CC::VC; break;
1885 case ISD::SETUO: CondCode = A64CC::VS; break;
1886 case ISD::SETUEQ: CondCode = A64CC::EQ; Alternative = A64CC::VS; break;
1887 case ISD::SETUGT: CondCode = A64CC::HI; break;
1888 case ISD::SETUGE: CondCode = A64CC::PL; break;
1890 case ISD::SETULT: CondCode = A64CC::LT; break;
1892 case ISD::SETULE: CondCode = A64CC::LE; break;
  case ISD::SETUNE: CondCode = A64CC::NE; break;
  }
  return CondCode;
}

SDValue
AArch64TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
1902 EVT PtrVT = getPointerTy();
1903 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1905 switch(getTargetMachine().getCodeModel()) {
1906 case CodeModel::Small:
1907 // The most efficient code is PC-relative anyway for the small memory model,
1908 // so we don't need to worry about relocation model.
1909 return DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
1910 DAG.getTargetBlockAddress(BA, PtrVT, 0,
1911 AArch64II::MO_NO_FLAG),
1912 DAG.getTargetBlockAddress(BA, PtrVT, 0,
1913 AArch64II::MO_LO12),
1914 DAG.getConstant(/*Alignment=*/ 4, MVT::i32));
1915 case CodeModel::Large:
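    // In the large memory model the address is materialised with a MOVZ/MOVK
    // sequence, roughly:
    //   movz x0, #:abs_g3:addr
    //   movk x0, #:abs_g2_nc:addr
    //   movk x0, #:abs_g1_nc:addr
    //   movk x0, #:abs_g0_nc:addr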
    return DAG.getNode(
        AArch64ISD::WrapperLarge, DL, PtrVT,
1918 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G3),
1919 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
1920 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
1921 DAG.getTargetBlockAddress(BA, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
1923 llvm_unreachable("Only small and large code models supported now");
1928 // (BRCOND chain, val, dest)
1930 AArch64TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
1932 SDValue Chain = Op.getOperand(0);
1933 SDValue TheBit = Op.getOperand(1);
1934 SDValue DestBB = Op.getOperand(2);
  // AArch64 BooleanContents is the default UndefinedBooleanContent, which
  // means that as the consumer we are responsible for ignoring rubbish in
  // higher bits.
  TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
1940 DAG.getConstant(1, MVT::i32));
1942 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
1943 DAG.getConstant(0, TheBit.getValueType()),
1944 DAG.getCondCode(ISD::SETNE));
1946 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other, Chain,
                     A64CMP, DAG.getConstant(A64CC::NE, MVT::i32),
                     DestBB);
}
1951 // (BR_CC chain, condcode, lhs, rhs, dest)
1953 AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
1955 SDValue Chain = Op.getOperand(0);
1956 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
1957 SDValue LHS = Op.getOperand(2);
1958 SDValue RHS = Op.getOperand(3);
1959 SDValue DestBB = Op.getOperand(4);
1961 if (LHS.getValueType() == MVT::f128) {
1962 // f128 comparisons are lowered to runtime calls by a routine which sets
1963 // LHS, RHS and CC appropriately for the rest of this function to continue.
1964 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
1966 // If softenSetCCOperands returned a scalar, we need to compare the result
1967 // against zero to select between true and false values.
1968 if (RHS.getNode() == 0) {
      RHS = DAG.getConstant(0, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }
  if (LHS.getValueType().isInteger()) {
    SDValue A64cc;

    // Integers are handled in a separate function because the combinations of
1978 // immediates and tests can get hairy and we may want to fiddle things.
1979 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
1981 return DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
1982 Chain, CmpOp, A64cc, DestBB);
1985 // Note that some LLVM floating-point CondCodes can't be lowered to a single
1986 // conditional branch, hence FPCCToA64CC can set a second test, where either
1987 // passing is sufficient.
1988 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
1989 CondCode = FPCCToA64CC(CC, Alternative);
1990 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
1991 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
1992 DAG.getCondCode(CC));
1993 SDValue A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
1994 Chain, SetCC, A64cc, DestBB);
1996 if (Alternative != A64CC::Invalid) {
1997 A64cc = DAG.getConstant(Alternative, MVT::i32);
1998 A64BR_CC = DAG.getNode(AArch64ISD::BR_CC, dl, MVT::Other,
                           A64BR_CC, SetCC, A64cc, DestBB);
  }

  return A64BR_CC;
}

SDValue
AArch64TargetLowering::LowerF128ToCall(SDValue Op, SelectionDAG &DAG,
2008 RTLIB::Libcall Call) const {
  ArgListTy Args;
  ArgListEntry Entry;
  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
2012 EVT ArgVT = Op.getOperand(i).getValueType();
2013 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
2014 Entry.Node = Op.getOperand(i); Entry.Ty = ArgTy;
2015 Entry.isSExt = false;
2016 Entry.isZExt = false;
2017 Args.push_back(Entry);
2019 SDValue Callee = DAG.getExternalSymbol(getLibcallName(Call), getPointerTy());
2021 Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
2023 // By default, the input chain to this libcall is the entry node of the
2024 // function. If the libcall is going to be emitted as a tail call then
2025 // isUsedByReturnOnly will change it to the right chain if the return
2026 // node which is being folded has a non-entry input chain.
2027 SDValue InChain = DAG.getEntryNode();
2029 // isTailCall may be true since the callee does not reference caller stack
2030 // frame. Check if it's in the right position.
2031 SDValue TCChain = InChain;
  bool isTailCall = isInTailCallPosition(DAG, Op.getNode(), TCChain);
  if (isTailCall)
    InChain = TCChain;
2037 CallLoweringInfo CLI(InChain, RetTy, false, false, false, false,
2038 0, getLibcallCallingConv(Call), isTailCall,
2039 /*doesNotReturn=*/false, /*isReturnValueUsed=*/true,
2040 Callee, Args, DAG, SDLoc(Op));
2041 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
2043 if (!CallInfo.second.getNode())
2044 // It's a tailcall, return the chain (which is the DAG root).
2045 return DAG.getRoot();
2047 return CallInfo.first;
2051 AArch64TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
2052 if (Op.getOperand(0).getValueType() != MVT::f128) {
    // It's legal except when f128 is involved.
    return Op;
  }

  RTLIB::Libcall LC;
  LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());
2060 SDValue SrcVal = Op.getOperand(0);
2061 return makeLibCall(DAG, LC, Op.getValueType(), &SrcVal, 1,
2062 /*isSigned*/ false, SDLoc(Op)).first;
2066 AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
2067 assert(Op.getValueType() == MVT::f128 && "Unexpected lowering");
  RTLIB::Libcall LC;
  LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());
2072 return LowerF128ToCall(Op, DAG, LC);
2076 AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
2077 bool IsSigned) const {
2078 if (Op.getOperand(0).getValueType() != MVT::f128) {
    // It's legal except when f128 is involved.
    return Op;
  }

  RTLIB::Libcall LC;
  if (IsSigned)
    LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType());
  else
    LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType());
2089 return LowerF128ToCall(Op, DAG, LC);
2092 SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
2093 MachineFunction &MF = DAG.getMachineFunction();
2094 MachineFrameInfo *MFI = MF.getFrameInfo();
2095 MFI->setReturnAddressIsTaken(true);
2097 EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
2102 SDValue Offset = DAG.getConstant(8, MVT::i64);
2103 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
2104 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
2105 MachinePointerInfo(), false, false, false, 0);
  }

  // Return X30, which contains the return address. Mark it an implicit live-in.
2109 unsigned Reg = MF.addLiveIn(AArch64::X30, getRegClassFor(MVT::i64));
2110 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, MVT::i64);
SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
2117 MFI->setFrameAddressIsTaken(true);
2119 EVT VT = Op.getValueType();
2121 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2122 unsigned FrameReg = AArch64::X29;
2123 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
2126 MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}

SDValue
AArch64TargetLowering::LowerGlobalAddressELFLarge(SDValue Op,
2133 SelectionDAG &DAG) const {
2134 assert(getTargetMachine().getCodeModel() == CodeModel::Large);
2135 assert(getTargetMachine().getRelocationModel() == Reloc::Static);
2137 EVT PtrVT = getPointerTy();
2139 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
2140 const GlobalValue *GV = GN->getGlobal();
2142 SDValue GlobalAddr = DAG.getNode(
2143 AArch64ISD::WrapperLarge, dl, PtrVT,
2144 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G3),
2145 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G2_NC),
2146 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G1_NC),
2147 DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, AArch64II::MO_ABS_G0_NC));
2149 if (GN->getOffset() != 0)
2150 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
                       DAG.getConstant(GN->getOffset(), PtrVT));

  return GlobalAddr;
}

SDValue
AArch64TargetLowering::LowerGlobalAddressELFSmall(SDValue Op,
2158 SelectionDAG &DAG) const {
2159 assert(getTargetMachine().getCodeModel() == CodeModel::Small);
2161 EVT PtrVT = getPointerTy();
2163 const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
2164 const GlobalValue *GV = GN->getGlobal();
2165 unsigned Alignment = GV->getAlignment();
2166 Reloc::Model RelocM = getTargetMachine().getRelocationModel();
2167 if (GV->isWeakForLinker() && GV->isDeclaration() && RelocM == Reloc::Static) {
2168 // Weak undefined symbols can't use ADRP/ADD pair since they should evaluate
2169 // to zero when they remain undefined. In PIC mode the GOT can take care of
2170 // this, but in absolute mode we use a constant pool load.
    SDValue PoolAddr;
    PoolAddr = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
2173 DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
2174 AArch64II::MO_NO_FLAG),
2175 DAG.getTargetConstantPool(GV, PtrVT, 0, 0,
2176 AArch64II::MO_LO12),
2177 DAG.getConstant(8, MVT::i32));
2178 SDValue GlobalAddr = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), PoolAddr,
2179 MachinePointerInfo::getConstantPool(),
2180 /*isVolatile=*/ false,
2181 /*isNonTemporal=*/ true,
2182 /*isInvariant=*/ true, 8);
2183 if (GN->getOffset() != 0)
2184 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalAddr,
                         DAG.getConstant(GN->getOffset(), PtrVT));

    return GlobalAddr;
  }

  if (Alignment == 0) {
2191 const PointerType *GVPtrTy = cast<PointerType>(GV->getType());
    if (GVPtrTy->getElementType()->isSized()) {
      Alignment
        = getDataLayout()->getABITypeAlignment(GVPtrTy->getElementType());
    } else {
      // Be conservative if we can't guess, not that it really matters:
      // functions and labels aren't valid for loads, and the methods used to
      // actually calculate an address work with any alignment.
      Alignment = 1;
    }
  }

  unsigned char HiFixup, LoFixup;
2204 bool UseGOT = getSubtarget()->GVIsIndirectSymbol(GV, RelocM);
  if (UseGOT) {
    HiFixup = AArch64II::MO_GOT;
    LoFixup = AArch64II::MO_GOT_LO12;
    Alignment = 8;
  } else {
    HiFixup = AArch64II::MO_NO_FLAG;
    LoFixup = AArch64II::MO_LO12;
  }
2215 // AArch64's small model demands the following sequence:
2216 // ADRP x0, somewhere
2217 // ADD x0, x0, #:lo12:somewhere ; (or LDR directly).
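  // When the symbol is reached through the GOT (UseGOT below) the shape is
  // the same but with GOT relocations, roughly:
  //   ADRP x0, :got:somewhere
  //   LDR  x0, [x0, #:got_lo12:somewhere]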
  SDValue GlobalRef = DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
                                  DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                             HiFixup),
                                  DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
                                                             LoFixup),
                                  DAG.getConstant(Alignment, MVT::i32));

  if (UseGOT) {
    GlobalRef = DAG.getNode(AArch64ISD::GOTLoad, dl, PtrVT, DAG.getEntryNode(),
                            GlobalRef);
  }

  if (GN->getOffset() != 0)
2231 return DAG.getNode(ISD::ADD, dl, PtrVT, GlobalRef,
                       DAG.getConstant(GN->getOffset(), PtrVT));

  return GlobalRef;
}

SDValue
AArch64TargetLowering::LowerGlobalAddressELF(SDValue Op,
2239 SelectionDAG &DAG) const {
2240 // TableGen doesn't have easy access to the CodeModel or RelocationModel, so
2241 // we make those distinctions here.
2243 switch (getTargetMachine().getCodeModel()) {
2244 case CodeModel::Small:
2245 return LowerGlobalAddressELFSmall(Op, DAG);
2246 case CodeModel::Large:
2247 return LowerGlobalAddressELFLarge(Op, DAG);
2249 llvm_unreachable("Only small and large code models supported now");
2253 SDValue AArch64TargetLowering::LowerTLSDescCall(SDValue SymAddr,
                                                SDValue DescAddr,
                                                SDLoc DL,
                                                SelectionDAG &DAG) const {
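  // The overall effect is the standard TLS-descriptor call sequence, roughly:
  //   adrp x0, :tlsdesc:var
  //   ldr  x1, [x0, #:tlsdesc_lo12:var]
  //   add  x0, x0, #:tlsdesc_lo12:var
  //   .tlsdesccall var
  //   blr  x1
  // after which x0 holds the symbol's offset from TPIDR_EL0.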
2257 EVT PtrVT = getPointerTy();
2259 // The function we need to call is simply the first entry in the GOT for this
2260 // descriptor, load it in preparation.
2261 SDValue Func, Chain;
  Func = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
                     DescAddr);
  // The function takes only one argument: the address of the descriptor
  // itself, in X0.
  SDValue Glue;
  Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, AArch64::X0, DescAddr, Glue);
2269 Glue = Chain.getValue(1);
2271 // Finally, there's a special calling-convention which means that the lookup
2272 // must preserve all registers (except X0, obviously).
2273 const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
2274 const AArch64RegisterInfo *A64RI
2275 = static_cast<const AArch64RegisterInfo *>(TRI);
2276 const uint32_t *Mask = A64RI->getTLSDescCallPreservedMask();
2278 // We're now ready to populate the argument list, as with a normal call:
2279 std::vector<SDValue> Ops;
2280 Ops.push_back(Chain);
2281 Ops.push_back(Func);
2282 Ops.push_back(SymAddr);
2283 Ops.push_back(DAG.getRegister(AArch64::X0, PtrVT));
2284 Ops.push_back(DAG.getRegisterMask(Mask));
2285 Ops.push_back(Glue);
2287 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  Chain = DAG.getNode(AArch64ISD::TLSDESCCALL, DL, NodeTys, &Ops[0],
                      Ops.size());
2290 Glue = Chain.getValue(1);
2292 // After the call, the offset from TPIDR_EL0 is in X0, copy it out and pass it
2293 // back to the generic handling code.
2294 return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
2298 AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
2299 SelectionDAG &DAG) const {
2300 assert(getSubtarget()->isTargetELF() &&
2301 "TLS not implemented for non-ELF targets");
2302 assert(getTargetMachine().getCodeModel() == CodeModel::Small
2303 && "TLS only supported in small memory model");
2304 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2306 TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());
  SDValue TPOff;
  EVT PtrVT = getPointerTy();
  SDLoc DL(Op);
  const GlobalValue *GV = GA->getGlobal();
2313 SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);
2315 if (Model == TLSModel::InitialExec) {
2316 TPOff = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
2317 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2318 AArch64II::MO_GOTTPREL),
2319 DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2320 AArch64II::MO_GOTTPREL_LO12),
2321 DAG.getConstant(8, MVT::i32));
    TPOff = DAG.getNode(AArch64ISD::GOTLoad, DL, PtrVT, DAG.getEntryNode(),
                        TPOff);
2324 } else if (Model == TLSModel::LocalExec) {
2325 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2326 AArch64II::MO_TPREL_G1);
2327 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2328 AArch64II::MO_TPREL_G0_NC);
2330 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
2331 DAG.getTargetConstant(1, MVT::i32)), 0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
                                       TPOff, LoVar,
                                       DAG.getTargetConstant(0, MVT::i32)), 0);
2335 } else if (Model == TLSModel::GeneralDynamic) {
2336 // Accesses used in this sequence go via the TLS descriptor which lives in
2337 // the GOT. Prepare an address we can use to handle this.
2338 SDValue HiDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2339 AArch64II::MO_TLSDESC);
2340 SDValue LoDesc = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0,
2341 AArch64II::MO_TLSDESC_LO12);
    SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                                   HiDesc, LoDesc,
                                   DAG.getConstant(8, MVT::i32));
2345 SDValue SymAddr = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0);
2347 TPOff = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
2348 } else if (Model == TLSModel::LocalDynamic) {
2349 // Local-dynamic accesses proceed in two phases. A general-dynamic TLS
2350 // descriptor call against the special symbol _TLS_MODULE_BASE_ to calculate
2351 // the beginning of the module's TLS region, followed by a DTPREL offset
2354 // These accesses will need deduplicating if there's more than one.
2355 AArch64MachineFunctionInfo* MFI = DAG.getMachineFunction()
2356 .getInfo<AArch64MachineFunctionInfo>();
2357 MFI->incNumLocalDynamicTLSAccesses();
2360 // Get the location of _TLS_MODULE_BASE_:
2361 SDValue HiDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
2362 AArch64II::MO_TLSDESC);
2363 SDValue LoDesc = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
2364 AArch64II::MO_TLSDESC_LO12);
    SDValue DescAddr = DAG.getNode(AArch64ISD::WrapperSmall, DL, PtrVT,
                                   HiDesc, LoDesc,
                                   DAG.getConstant(8, MVT::i32));
2368 SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT);
2370 ThreadBase = LowerTLSDescCall(SymAddr, DescAddr, DL, DAG);
2372 // Get the variable's offset from _TLS_MODULE_BASE_
2373 SDValue HiVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2374 AArch64II::MO_DTPREL_G1);
2375 SDValue LoVar = DAG.getTargetGlobalAddress(GV, DL, MVT::i64, 0,
2376 AArch64II::MO_DTPREL_G0_NC);
2378 TPOff = SDValue(DAG.getMachineNode(AArch64::MOVZxii, DL, PtrVT, HiVar,
2379 DAG.getTargetConstant(0, MVT::i32)), 0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::MOVKxii, DL, PtrVT,
                                       TPOff, LoVar,
                                       DAG.getTargetConstant(0, MVT::i32)), 0);
2384 llvm_unreachable("Unsupported TLS access model");
2387 return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
2391 AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG,
2392 bool IsSigned) const {
2393 if (Op.getValueType() != MVT::f128) {
    // Legal for everything except f128.
    return Op;
  }

  RTLIB::Libcall LC;
  if (IsSigned)
    LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
  else
    LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType());
2404 return LowerF128ToCall(Op, DAG, LC);
2409 AArch64TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
2410 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
2412 EVT PtrVT = getPointerTy();
2414 // When compiling PIC, jump tables get put in the code section so a static
2415 // relocation-style is acceptable for both cases.
2416 switch (getTargetMachine().getCodeModel()) {
2417 case CodeModel::Small:
2418 return DAG.getNode(AArch64ISD::WrapperSmall, dl, PtrVT,
2419 DAG.getTargetJumpTable(JT->getIndex(), PtrVT),
2420 DAG.getTargetJumpTable(JT->getIndex(), PtrVT,
2421 AArch64II::MO_LO12),
2422 DAG.getConstant(1, MVT::i32));
  case CodeModel::Large:
    return DAG.getNode(
        AArch64ISD::WrapperLarge, dl, PtrVT,
2426 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G3),
2427 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G2_NC),
2428 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G1_NC),
2429 DAG.getTargetJumpTable(JT->getIndex(), PtrVT, AArch64II::MO_ABS_G0_NC));
2431 llvm_unreachable("Only small and large code models supported now");
2435 // (SELECT_CC lhs, rhs, iftrue, iffalse, condcode)
2437 AArch64TargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
2439 SDValue LHS = Op.getOperand(0);
2440 SDValue RHS = Op.getOperand(1);
2441 SDValue IfTrue = Op.getOperand(2);
2442 SDValue IfFalse = Op.getOperand(3);
2443 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
2445 if (LHS.getValueType() == MVT::f128) {
2446 // f128 comparisons are lowered to libcalls, but slot in nicely here
2448 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
2450 // If softenSetCCOperands returned a scalar, we need to compare the result
2451 // against zero to select between true and false values.
2452 if (RHS.getNode() == 0) {
      RHS = DAG.getConstant(0, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }
  if (LHS.getValueType().isInteger()) {
    SDValue A64cc;

    // Integers are handled in a separate function because the combinations of
2462 // immediates and tests can get hairy and we may want to fiddle things.
2463 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
2465 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2466 CmpOp, IfTrue, IfFalse, A64cc);
2469 // Note that some LLVM floating-point CondCodes can't be lowered to a single
2470 // conditional branch, hence FPCCToA64CC can set a second test, where either
2471 // passing is sufficient.
2472 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
2473 CondCode = FPCCToA64CC(CC, Alternative);
2474 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
2475 SDValue SetCC = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2476 DAG.getCondCode(CC));
  SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl,
                                     Op.getValueType(),
                                     SetCC, IfTrue, IfFalse, A64cc);
2481 if (Alternative != A64CC::Invalid) {
2482 A64cc = DAG.getConstant(Alternative, MVT::i32);
2483 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2484 SetCC, IfTrue, A64SELECT_CC, A64cc);
2488 return A64SELECT_CC;
2491 // (SELECT testbit, iftrue, iffalse)
2493 AArch64TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
2495 SDValue TheBit = Op.getOperand(0);
2496 SDValue IfTrue = Op.getOperand(1);
2497 SDValue IfFalse = Op.getOperand(2);
  // AArch64 BooleanContents is the default UndefinedBooleanContent, which
  // means that as the consumer we are responsible for ignoring rubbish in
  // higher bits.
  TheBit = DAG.getNode(ISD::AND, dl, MVT::i32, TheBit,
2503 DAG.getConstant(1, MVT::i32));
2504 SDValue A64CMP = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, TheBit,
2505 DAG.getConstant(0, TheBit.getValueType()),
2506 DAG.getCondCode(ISD::SETNE));
2508 return DAG.getNode(AArch64ISD::SELECT_CC, dl, Op.getValueType(),
2509 A64CMP, IfTrue, IfFalse,
2510 DAG.getConstant(A64CC::NE, MVT::i32));
2513 static SDValue LowerVectorSETCC(SDValue Op, SelectionDAG &DAG) {
2515 SDValue LHS = Op.getOperand(0);
2516 SDValue RHS = Op.getOperand(1);
2517 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2518 EVT VT = Op.getValueType();
2519 bool Invert = false;
2523 if (LHS.getValueType().isInteger()) {
2525 // Attempt to use Vector Integer Compare Mask Test instruction.
2526 // TST = icmp ne (and (op0, op1), zero).
2527 if (CC == ISD::SETNE) {
2528 if (((LHS.getOpcode() == ISD::AND) &&
2529 ISD::isBuildVectorAllZeros(RHS.getNode())) ||
2530 ((RHS.getOpcode() == ISD::AND) &&
2531 ISD::isBuildVectorAllZeros(LHS.getNode()))) {
2533 SDValue AndOp = (LHS.getOpcode() == ISD::AND) ? LHS : RHS;
2534 SDValue NewLHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(0));
2535 SDValue NewRHS = DAG.getNode(ISD::BITCAST, DL, VT, AndOp.getOperand(1));
2536 return DAG.getNode(AArch64ISD::NEON_TST, DL, VT, NewLHS, NewRHS);
2540 // Attempt to use Vector Integer Compare Mask against Zero instr (Signed).
2541 // Note: Compare against Zero does not support unsigned predicates.
2542 if ((ISD::isBuildVectorAllZeros(RHS.getNode()) ||
2543 ISD::isBuildVectorAllZeros(LHS.getNode())) &&
2544 !isUnsignedIntSetCC(CC)) {
2546 // If LHS is the zero value, swap operands and CondCode.
2547 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
2548 CC = getSetCCSwappedOperands(CC);
2553 // Ensure valid CondCode for Compare Mask against Zero instruction:
2554 // EQ, GE, GT, LE, LT.
      if (ISD::SETNE == CC) {
        Invert = true;
        CC = ISD::SETEQ;
      }
2560 // Using constant type to differentiate integer and FP compares with zero.
2561 Op1 = DAG.getConstant(0, MVT::i32);
2562 Opcode = AArch64ISD::NEON_CMPZ;
2565 // Attempt to use Vector Integer Compare Mask instr (Signed/Unsigned).
2566 // Ensure valid CondCode for Compare Mask instr: EQ, GE, GT, UGE, UGT.
2570 llvm_unreachable("Illegal integer comparison.");
2586 CC = getSetCCSwappedOperands(CC);
2590 std::swap(LHS, RHS);
2592 Opcode = AArch64ISD::NEON_CMP;
2597 // Generate Compare Mask instr or Compare Mask against Zero instr.
    SDValue NeonCmp =
        DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
    if (Invert)
      NeonCmp = DAG.getNOT(DL, NeonCmp, VT);

    return NeonCmp;
  }

  // Now handle Floating Point cases.
2608 // Attempt to use Vector Floating Point Compare Mask against Zero instruction.
2609 if (ISD::isBuildVectorAllZeros(RHS.getNode()) ||
2610 ISD::isBuildVectorAllZeros(LHS.getNode())) {
2612 // If LHS is the zero value, swap operands and CondCode.
2613 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
2614 CC = getSetCCSwappedOperands(CC);
2619 // Using constant type to differentiate integer and FP compares with zero.
2620 Op1 = DAG.getConstantFP(0, MVT::f32);
2621 Opcode = AArch64ISD::NEON_CMPZ;
2623 // Attempt to use Vector Floating Point Compare Mask instruction.
2626 Opcode = AArch64ISD::NEON_CMP;
2630 // Some register compares have to be implemented with swapped CC and operands,
2631 // e.g.: OLT implemented as OGT with swapped operands.
2632 bool SwapIfRegArgs = false;
2634 // Ensure valid CondCode for FP Compare Mask against Zero instruction:
2635 // EQ, GE, GT, LE, LT.
2636 // And ensure valid CondCode for FP Compare Mask instruction: EQ, GE, GT.
2639 llvm_unreachable("Illegal FP comparison");
2642 Invert = true; // Fallthrough
2650 SwapIfRegArgs = true;
2659 SwapIfRegArgs = true;
2668 SwapIfRegArgs = true;
2677 SwapIfRegArgs = true;
2684 Invert = true; // Fallthrough
2686 // Expand this to (OGT |OLT).
2688 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGT));
2690 SwapIfRegArgs = true;
2693 Invert = true; // Fallthrough
2695 // Expand this to (OGE | OLT).
2697 DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(ISD::SETGE));
2699 SwapIfRegArgs = true;
2703 if (Opcode == AArch64ISD::NEON_CMP && SwapIfRegArgs) {
2704 CC = getSetCCSwappedOperands(CC);
2705 std::swap(Op0, Op1);
2708 // Generate FP Compare Mask instr or FP Compare Mask against Zero instr
2709 SDValue NeonCmp = DAG.getNode(Opcode, DL, VT, Op0, Op1, DAG.getCondCode(CC));
2711 if (NeonCmpAlt.getNode())
2712 NeonCmp = DAG.getNode(ISD::OR, DL, VT, NeonCmp, NeonCmpAlt);
  if (Invert)
    NeonCmp = DAG.getNOT(DL, NeonCmp, VT);

  return NeonCmp;
}

// (SETCC lhs, rhs, condcode)
2722 AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
2724 SDValue LHS = Op.getOperand(0);
2725 SDValue RHS = Op.getOperand(1);
2726 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
2727 EVT VT = Op.getValueType();
  if (VT.isVector())
    return LowerVectorSETCC(Op, DAG);
2732 if (LHS.getValueType() == MVT::f128) {
2733 // f128 comparisons will be lowered to libcalls giving a valid LHS and RHS
2734 // for the rest of the function (some i32 or i64 values).
2735 softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);
2737 // If softenSetCCOperands returned a scalar, use it.
2738 if (RHS.getNode() == 0) {
      assert(LHS.getValueType() == Op.getValueType() &&
             "Unexpected setcc expansion!");
      return LHS;
    }
  }
2745 if (LHS.getValueType().isInteger()) {
    SDValue A64cc;

    // Integers are handled in a separate function because the combinations of
2749 // immediates and tests can get hairy and we may want to fiddle things.
2750 SDValue CmpOp = getSelectableIntSetCC(LHS, RHS, CC, A64cc, DAG, dl);
    return DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
                       CmpOp, DAG.getConstant(1, VT), DAG.getConstant(0, VT),
                       A64cc);
  }

  // Note that some LLVM floating-point CondCodes can't be lowered to a single
2758 // conditional branch, hence FPCCToA64CC can set a second test, where either
2759 // passing is sufficient.
2760 A64CC::CondCodes CondCode, Alternative = A64CC::Invalid;
2761 CondCode = FPCCToA64CC(CC, Alternative);
2762 SDValue A64cc = DAG.getConstant(CondCode, MVT::i32);
2763 SDValue CmpOp = DAG.getNode(AArch64ISD::SETCC, dl, MVT::i32, LHS, RHS,
2764 DAG.getCondCode(CC));
2765 SDValue A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT,
2766 CmpOp, DAG.getConstant(1, VT),
2767 DAG.getConstant(0, VT), A64cc);
2769 if (Alternative != A64CC::Invalid) {
2770 A64cc = DAG.getConstant(Alternative, MVT::i32);
2771 A64SELECT_CC = DAG.getNode(AArch64ISD::SELECT_CC, dl, VT, CmpOp,
2772 DAG.getConstant(1, VT), A64SELECT_CC, A64cc);
2775 return A64SELECT_CC;
2779 AArch64TargetLowering::LowerVACOPY(SDValue Op, SelectionDAG &DAG) const {
2780 const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
2781 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
2783 // We have to make sure we copy the entire structure: 8+8+8+4+4 = 32 bytes
2784 // rather than just 8.
2785 return DAG.getMemcpy(Op.getOperand(0), SDLoc(Op),
2786 Op.getOperand(1), Op.getOperand(2),
2787 DAG.getConstant(32, MVT::i32), 8, false, false,
2788 MachinePointerInfo(DestSV), MachinePointerInfo(SrcSV));
2792 AArch64TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
2793 // The layout of the va_list struct is specified in the AArch64 Procedure Call
2794 // Standard, section B.3.
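  // That layout is, in effect:
  //   struct va_list {
  //     void *__stack;   // offset 0:  next stacked argument
  //     void *__gr_top;  // offset 8:  end of the saved GP register area
  //     void *__vr_top;  // offset 16: end of the saved FP/SIMD register area
  //     int   __gr_offs; // offset 24: negative offset to next saved GP reg
  //     int   __vr_offs; // offset 28: negative offset to next saved FP reg
  //   };
  // and each field is stored individually below.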
2795 MachineFunction &MF = DAG.getMachineFunction();
2796 AArch64MachineFunctionInfo *FuncInfo
2797 = MF.getInfo<AArch64MachineFunctionInfo>();
2800 SDValue Chain = Op.getOperand(0);
2801 SDValue VAList = Op.getOperand(1);
2802 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
2803 SmallVector<SDValue, 4> MemOps;
2805 // void *__stack at offset 0
  SDValue Stack = DAG.getFrameIndex(FuncInfo->getVariadicStackIdx(),
                                    getPointerTy());
  MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
2809 MachinePointerInfo(SV), false, false, 0));
2811 // void *__gr_top at offset 8
2812 int GPRSize = FuncInfo->getVariadicGPRSize();
  if (GPRSize > 0) {
    SDValue GRTop, GRTopAddr;
2816 GRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2817 DAG.getConstant(8, getPointerTy()));
2819 GRTop = DAG.getFrameIndex(FuncInfo->getVariadicGPRIdx(), getPointerTy());
2820 GRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), GRTop,
2821 DAG.getConstant(GPRSize, getPointerTy()));
2823 MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
                                  MachinePointerInfo(SV, 8),
                                  false, false, 0));
  }

  // void *__vr_top at offset 16
2829 int FPRSize = FuncInfo->getVariadicFPRSize();
  if (FPRSize > 0) {
    SDValue VRTop, VRTopAddr;
2832 VRTopAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2833 DAG.getConstant(16, getPointerTy()));
2835 VRTop = DAG.getFrameIndex(FuncInfo->getVariadicFPRIdx(), getPointerTy());
2836 VRTop = DAG.getNode(ISD::ADD, DL, getPointerTy(), VRTop,
2837 DAG.getConstant(FPRSize, getPointerTy()));
2839 MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
                                  MachinePointerInfo(SV, 16),
                                  false, false, 0));
  }

  // int __gr_offs at offset 24
2845 SDValue GROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2846 DAG.getConstant(24, getPointerTy()));
2847 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-GPRSize, MVT::i32),
                                GROffsAddr, MachinePointerInfo(SV, 24),
                                false, false, 0));

  // int __vr_offs at offset 28
2852 SDValue VROffsAddr = DAG.getNode(ISD::ADD, DL, getPointerTy(), VAList,
2853 DAG.getConstant(28, getPointerTy()));
2854 MemOps.push_back(DAG.getStore(Chain, DL, DAG.getConstant(-FPRSize, MVT::i32),
                                VROffsAddr, MachinePointerInfo(SV, 28),
                                false, false, 0));

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, &MemOps[0],
                     MemOps.size());
}
2863 AArch64TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
2864 switch (Op.getOpcode()) {
2865 default: llvm_unreachable("Don't know how to custom lower this!");
2866 case ISD::FADD: return LowerF128ToCall(Op, DAG, RTLIB::ADD_F128);
2867 case ISD::FSUB: return LowerF128ToCall(Op, DAG, RTLIB::SUB_F128);
2868 case ISD::FMUL: return LowerF128ToCall(Op, DAG, RTLIB::MUL_F128);
2869 case ISD::FDIV: return LowerF128ToCall(Op, DAG, RTLIB::DIV_F128);
2870 case ISD::FP_TO_SINT: return LowerFP_TO_INT(Op, DAG, true);
2871 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG, false);
2872 case ISD::SINT_TO_FP: return LowerINT_TO_FP(Op, DAG, true);
2873 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG, false);
2874 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
2875 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
2876 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
2877 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
2879 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
2880 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
2881 case ISD::BR_CC: return LowerBR_CC(Op, DAG);
2882 case ISD::GlobalAddress: return LowerGlobalAddressELF(Op, DAG);
2883 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
2884 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
2885 case ISD::SELECT: return LowerSELECT(Op, DAG);
2886 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
2887 case ISD::SETCC: return LowerSETCC(Op, DAG);
2888 case ISD::VACOPY: return LowerVACOPY(Op, DAG);
2889 case ISD::VASTART: return LowerVASTART(Op, DAG);
2890 case ISD::BUILD_VECTOR:
2891 return LowerBUILD_VECTOR(Op, DAG, getSubtarget());
2892 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
2898 /// Check if the specified splat value corresponds to a valid vector constant
2899 /// for a Neon instruction with a "modified immediate" operand (e.g., MOVI). If
/// so, return the encoded 8-bit immediate and the OpCmode instruction fields
/// needed to re-create it.
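/// For example, a v4i32 splat of 0x0000AB00 can be encoded as a MOVI with
/// Imm=0xAB and the "LSL #8" OpCmode (0010), per the cases handled below.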
2902 static bool isNeonModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
2903 unsigned SplatBitSize, SelectionDAG &DAG,
2904 bool is128Bits, NeonModImmType type, EVT &VT,
2905 unsigned &Imm, unsigned &OpCmode) {
2906 switch (SplatBitSize) {
2908 llvm_unreachable("unexpected size for isNeonModifiedImm");
2910 if (type != Neon_Mov_Imm)
2912 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
2913 // Neon movi per byte: Op=0, Cmode=1110.
2916 VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
2920 // Neon move inst per halfword
2921 VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
2922 if ((SplatBits & ~0xff) == 0) {
2923 // Value = 0x00nn is 0x00nn LSL 0
2924 // movi: Op=0, Cmode=1000; mvni: Op=1, Cmode=1000
2925 // bic: Op=1, Cmode=1001; orr: Op=0, Cmode=1001
2931 if ((SplatBits & ~0xff00) == 0) {
2932 // Value = 0xnn00 is 0x00nn LSL 8
2933 // movi: Op=0, Cmode=1010; mvni: Op=1, Cmode=1010
2934 // bic: Op=1, Cmode=1011; orr: Op=0, Cmode=1011
2936 Imm = SplatBits >> 8;
2940 // can't handle any other
2945 // First the LSL variants (MSL is unusable by some interested instructions).
2947 // Neon move instr per word, shift zeros
2948 VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
2949 if ((SplatBits & ~0xff) == 0) {
2950 // Value = 0x000000nn is 0x000000nn LSL 0
2951 // movi: Op=0, Cmode= 0000; mvni: Op=1, Cmode= 0000
2952 // bic: Op=1, Cmode= 0001; orr: Op=0, Cmode= 0001
2958 if ((SplatBits & ~0xff00) == 0) {
2959 // Value = 0x0000nn00 is 0x000000nn LSL 8
2960 // movi: Op=0, Cmode= 0010; mvni: Op=1, Cmode= 0010
2961 // bic: Op=1, Cmode= 0011; orr : Op=0, Cmode= 0011
2963 Imm = SplatBits >> 8;
2967 if ((SplatBits & ~0xff0000) == 0) {
2968 // Value = 0x00nn0000 is 0x000000nn LSL 16
2969 // movi: Op=0, Cmode= 0100; mvni: Op=1, Cmode= 0100
2970 // bic: Op=1, Cmode= 0101; orr: Op=0, Cmode= 0101
2972 Imm = SplatBits >> 16;
2976 if ((SplatBits & ~0xff000000) == 0) {
2977 // Value = 0xnn000000 is 0x000000nn LSL 24
2978 // movi: Op=0, Cmode= 0110; mvni: Op=1, Cmode= 0110
2979 // bic: Op=1, Cmode= 0111; orr: Op=0, Cmode= 0111
2981 Imm = SplatBits >> 24;
2986 // Now the MSL immediates.
2988 // Neon move instr per word, shift ones
2989 if ((SplatBits & ~0xffff) == 0 &&
2990 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
2991 // Value = 0x0000nnff is 0x000000nn MSL 8
2992 // movi: Op=0, Cmode= 1100; mvni: Op=1, Cmode= 1100
2994 Imm = SplatBits >> 8;
2998 if ((SplatBits & ~0xffffff) == 0 &&
2999 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
3000 // Value = 0x00nnffff is 0x000000nn MSL 16
3001 // movi: Op=1, Cmode= 1101; mvni: Op=1, Cmode= 1101
3003 Imm = SplatBits >> 16;
3007 // can't handle any other
3012 if (type != Neon_Mov_Imm)
3014 // Neon move instr bytemask, where each byte is either 0x00 or 0xff.
3015 // movi Op=1, Cmode=1110.
3017 uint64_t BitMask = 0xff;
3019 unsigned ImmMask = 1;
3021 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
3022 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
3025 } else if ((SplatBits & BitMask) != 0) {
3032 VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
3040 static SDValue PerformANDCombine(SDNode *N,
3041 TargetLowering::DAGCombinerInfo &DCI) {
3043 SelectionDAG &DAG = DCI.DAG;
3045 EVT VT = N->getValueType(0);
  // We're looking for an AND of a right shift, which together form a UBFX.
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  if (!isa<ConstantSDNode>(N->getOperand(1)))
    return SDValue();
  uint64_t TruncMask = N->getConstantOperandVal(1);
  if (!isMask_64(TruncMask))
    return SDValue();

  uint64_t Width = CountPopulation_64(TruncMask);
  SDValue Shift = N->getOperand(0);

  if (Shift.getOpcode() != ISD::SRL)
    return SDValue();

  if (!isa<ConstantSDNode>(Shift->getOperand(1)))
    return SDValue();
  uint64_t LSB = Shift->getConstantOperandVal(1);

  if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
    return SDValue();
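  // For example (i32): (and (srl X, 4), 0xff) becomes (UBFX X, 4, 11), an
  // 8-bit field starting at bit 4.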
3072 return DAG.getNode(AArch64ISD::UBFX, DL, VT, Shift.getOperand(0),
3073 DAG.getConstant(LSB, MVT::i64),
3074 DAG.getConstant(LSB + Width - 1, MVT::i64));
3077 /// For a true bitfield insert, the bits getting into that contiguous mask
3078 /// should come from the low part of an existing value: they must be formed from
3079 /// a compatible SHL operation (unless they're already low). This function
3080 /// checks that condition and returns the least-significant bit that's
/// intended. If the operation is not a field preparation, -1 is returned.
3082 static int32_t getLSBForBFI(SelectionDAG &DAG, SDLoc DL, EVT VT,
3083 SDValue &MaskedVal, uint64_t Mask) {
  if (!isShiftedMask_64(Mask))
    return -1;
3087 // Now we need to alter MaskedVal so that it is an appropriate input for a BFI
3088 // instruction. BFI will do a left-shift by LSB before applying the mask we've
3089 // spotted, so in general we should pre-emptively "undo" that by making sure
3090 // the incoming bits have had a right-shift applied to them.
3092 // This right shift, however, will combine with existing left/right shifts. In
3093 // the simplest case of a completely straight bitfield operation, it will be
3094 // expected to completely cancel out with an existing SHL. More complicated
  // cases (e.g. bitfield to bitfield copy) may still need a real shift before
  // the BFI.
  uint64_t LSB = countTrailingZeros(Mask);
3099 int64_t ShiftRightRequired = LSB;
3100 if (MaskedVal.getOpcode() == ISD::SHL &&
3101 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
3102 ShiftRightRequired -= MaskedVal.getConstantOperandVal(1);
3103 MaskedVal = MaskedVal.getOperand(0);
3104 } else if (MaskedVal.getOpcode() == ISD::SRL &&
3105 isa<ConstantSDNode>(MaskedVal.getOperand(1))) {
3106 ShiftRightRequired += MaskedVal.getConstantOperandVal(1);
3107 MaskedVal = MaskedVal.getOperand(0);
3110 if (ShiftRightRequired > 0)
3111 MaskedVal = DAG.getNode(ISD::SRL, DL, VT, MaskedVal,
3112 DAG.getConstant(ShiftRightRequired, MVT::i64));
3113 else if (ShiftRightRequired < 0) {
3114 // We could actually end up with a residual left shift, for example with
3115 // "struc.bitfield = val << 1".
3116 MaskedVal = DAG.getNode(ISD::SHL, DL, VT, MaskedVal,
                            DAG.getConstant(-ShiftRightRequired, MVT::i64));
  }

  return LSB;
}
3123 /// Searches from N for an existing AArch64ISD::BFI node, possibly surrounded by
3124 /// a mask and an extension. Returns true if a BFI was found and provides
3125 /// information on its surroundings.
3126 static bool findMaskedBFI(SDValue N, SDValue &BFI, uint64_t &Mask,
                          bool &Extended) {
  if (N.getOpcode() == ISD::ZERO_EXTEND) {
    Extended = true;
    N = N.getOperand(0);
  }

  if (N.getOpcode() == ISD::AND && isa<ConstantSDNode>(N.getOperand(1))) {
3135 Mask = N->getConstantOperandVal(1);
    N = N.getOperand(0);
  } else {
    // Mask is the whole width.
    Mask = -1ULL >> (64 - N.getValueType().getSizeInBits());
  }
  if (N.getOpcode() == AArch64ISD::BFI) {
    BFI = N;
    return true;
  }

  return false;
}
3150 /// Try to combine a subtree (rooted at an OR) into a "masked BFI" node, which
3151 /// is roughly equivalent to (and (BFI ...), mask). This form is used because it
3152 /// can often be further combined with a larger mask. Ultimately, we want mask
3153 /// to be 2^32-1 or 2^64-1 so the AND can be skipped.
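///
/// For example (i32, illustrative):
///   (or (and X, 0xffff0fff), (and (shl Y, 12), 0xf000))
/// becomes (BFI X, Y, 12, 4); since the two masks together cover all 32 bits,
/// no trailing AND is needed.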
3154 static SDValue tryCombineToBFI(SDNode *N,
3155 TargetLowering::DAGCombinerInfo &DCI,
3156 const AArch64Subtarget *Subtarget) {
3157 SelectionDAG &DAG = DCI.DAG;
3159 EVT VT = N->getValueType(0);
3161 assert(N->getOpcode() == ISD::OR && "Unexpected root");
3163 // We need the LHS to be (and SOMETHING, MASK). Find out what that mask is or
3164 // abandon the effort.
3165 SDValue LHS = N->getOperand(0);
  if (LHS.getOpcode() != ISD::AND)
    return SDValue();

  uint64_t LHSMask;
  if (isa<ConstantSDNode>(LHS.getOperand(1)))
    LHSMask = LHS->getConstantOperandVal(1);
  else
    return SDValue();

  // We also need the RHS to be (and SOMETHING, MASK). Find out what that mask
  // is or abandon the effort.
  SDValue RHS = N->getOperand(1);
  if (RHS.getOpcode() != ISD::AND)
    return SDValue();

  uint64_t RHSMask;
  if (isa<ConstantSDNode>(RHS.getOperand(1)))
    RHSMask = RHS->getConstantOperandVal(1);
  else
    return SDValue();
3187 // Can't do anything if the masks are incompatible.
  if (LHSMask & RHSMask)
    return SDValue();
3191 // Now we need one of the masks to be a contiguous field. Without loss of
3192 // generality that should be the RHS one.
3193 SDValue Bitfield = LHS.getOperand(0);
3194 if (getLSBForBFI(DAG, DL, VT, Bitfield, LHSMask) != -1) {
    // We know that LHS is a candidate new value, and RHS isn't already a
    // better one.
    std::swap(LHS, RHS);
    std::swap(LHSMask, RHSMask);
  }
3201 // We've done our best to put the right operands in the right places, all we
3202 // can do now is check whether a BFI exists.
3203 Bitfield = RHS.getOperand(0);
  int32_t LSB = getLSBForBFI(DAG, DL, VT, Bitfield, RHSMask);
  if (LSB == -1)
    return SDValue();

  uint32_t Width = CountPopulation_64(RHSMask);
3209 assert(Width && "Expected non-zero bitfield width");
3211 SDValue BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
3212 LHS.getOperand(0), Bitfield,
3213 DAG.getConstant(LSB, MVT::i64),
                            DAG.getConstant(Width, MVT::i64));

  // If the masking is trivial, we don't need to create it.
  if ((LHSMask | RHSMask) == (-1ULL >> (64 - VT.getSizeInBits())))
    return BFI;
3220 return DAG.getNode(ISD::AND, DL, VT, BFI,
3221 DAG.getConstant(LHSMask | RHSMask, VT));
3224 /// Search for the bitwise combining (with careful masks) of a MaskedBFI and its
3225 /// original input. This is surprisingly common because SROA splits things up
3226 /// into i8 chunks, so the originally detected MaskedBFI may actually only act
3227 /// on the low (say) byte of a word. This is then orred into the rest of the
3228 /// word afterwards.
3230 /// Basic input: (or (and OLDFIELD, MASK1), (MaskedBFI MASK2, OLDFIELD, ...)).
3232 /// If MASK1 and MASK2 are compatible, we can fold the whole thing into the
3233 /// MaskedBFI. We can also deal with a certain amount of extend/truncate being
3235 static SDValue tryCombineToLargerBFI(SDNode *N,
3236 TargetLowering::DAGCombinerInfo &DCI,
3237 const AArch64Subtarget *Subtarget) {
3238 SelectionDAG &DAG = DCI.DAG;
3240 EVT VT = N->getValueType(0);
3242 // First job is to hunt for a MaskedBFI on either the left or right. Swap
3243 // operands if it's actually on the right.
3245 SDValue PossExtraMask;
3246 uint64_t ExistingMask = 0;
3247 bool Extended = false;
3248 if (findMaskedBFI(N->getOperand(0), BFI, ExistingMask, Extended))
3249 PossExtraMask = N->getOperand(1);
3250 else if (findMaskedBFI(N->getOperand(1), BFI, ExistingMask, Extended))
    PossExtraMask = N->getOperand(0);
  else
    return SDValue();

  // We can only combine a BFI with another compatible mask.
3256 if (PossExtraMask.getOpcode() != ISD::AND ||
      !isa<ConstantSDNode>(PossExtraMask.getOperand(1)))
    return SDValue();

  uint64_t ExtraMask = PossExtraMask->getConstantOperandVal(1);
3262 // Masks must be compatible.
  if (ExtraMask & ExistingMask)
    return SDValue();
3266 SDValue OldBFIVal = BFI.getOperand(0);
3267 SDValue NewBFIVal = BFI.getOperand(1);
  if (Extended) {
    // We skipped a ZERO_EXTEND above, so the input to the MaskedBFIs should be
3270 // 32-bit and we'll be forming a 64-bit MaskedBFI. The MaskedBFI arguments
3271 // need to be made compatible.
3272 assert(VT == MVT::i64 && BFI.getValueType() == MVT::i32
3273 && "Invalid types for BFI");
3274 OldBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, OldBFIVal);
    NewBFIVal = DAG.getNode(ISD::ANY_EXTEND, DL, VT, NewBFIVal);
  }
3278 // We need the MaskedBFI to be combined with a mask of the *same* value.
  if (PossExtraMask.getOperand(0) != OldBFIVal)
    return SDValue();
3282 BFI = DAG.getNode(AArch64ISD::BFI, DL, VT,
3283 OldBFIVal, NewBFIVal,
3284 BFI.getOperand(2), BFI.getOperand(3));
3286 // If the masking is trivial, we don't need to create it.
  if ((ExtraMask | ExistingMask) == (-1ULL >> (64 - VT.getSizeInBits())))
    return BFI;
3290 return DAG.getNode(ISD::AND, DL, VT, BFI,
3291 DAG.getConstant(ExtraMask | ExistingMask, VT));
3294 /// An EXTR instruction is made up of two shifts, ORed together. This helper
3295 /// searches for and classifies those shifts.
static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount,
                         bool &FromHi) {
  if (N.getOpcode() == ISD::SHL)
    FromHi = false;
  else if (N.getOpcode() == ISD::SRL)
    FromHi = true;
  else
    return false;

  if (!isa<ConstantSDNode>(N.getOperand(1)))
    return false;

  ShiftAmount = N->getConstantOperandVal(1);
  Src = N->getOperand(0);
  return true;
}
3313 /// EXTR instruction extracts a contiguous chunk of bits from two existing
3314 /// registers viewed as a high/low pair. This function looks for the pattern:
3315 /// (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) and replaces it with an
/// EXTR. Can't quite be done in TableGen because the two immediates aren't
/// independent.
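///
/// For example (i32): (or (shl X, 8), (srl Y, 24)) becomes (EXTR X, Y, 24),
/// i.e. the value extracted from the X:Y register pair starting at bit 24.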
3318 static SDValue tryCombineToEXTR(SDNode *N,
3319 TargetLowering::DAGCombinerInfo &DCI) {
3320 SelectionDAG &DAG = DCI.DAG;
3322 EVT VT = N->getValueType(0);
3324 assert(N->getOpcode() == ISD::OR && "Unexpected root");
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  SDValue LHS;
  uint32_t ShiftLHS = 0;
  bool LHSFromHi = false;
  if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi))
    return SDValue();

  SDValue RHS;
  uint32_t ShiftRHS = 0;
  bool RHSFromHi = false;
  if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi))
    return SDValue();
3341 // If they're both trying to come from the high part of the register, they're
3342 // not really an EXTR.
3343 if (LHSFromHi == RHSFromHi)
3346 if (ShiftLHS + ShiftRHS != VT.getSizeInBits())
3350 std::swap(LHS, RHS);
3351 std::swap(ShiftLHS, ShiftRHS);
3354 return DAG.getNode(AArch64ISD::EXTR, DL, VT,
3356 DAG.getConstant(ShiftRHS, MVT::i64));
/// Target-specific DAG combine xforms for ISD::OR
3360 static SDValue PerformORCombine(SDNode *N,
3361 TargetLowering::DAGCombinerInfo &DCI,
3362 const AArch64Subtarget *Subtarget) {
3364 SelectionDAG &DAG = DCI.DAG;
3366 EVT VT = N->getValueType(0);
if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
3371 // Attempt to recognise bitfield-insert operations.
3372 SDValue Res = tryCombineToBFI(N, DCI, Subtarget);
// Attempt to combine an existing MaskedBFI operation into one with a larger
// mask.
3378 Res = tryCombineToLargerBFI(N, DCI, Subtarget);
3382 Res = tryCombineToEXTR(N, DCI);
3386 if (!Subtarget->hasNEON())
3389 // Attempt to use vector immediate-form BSL
3390 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
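// For example, with A being a constant splat of 0x00ff in each lane, the BSL
// picks the low byte of each lane from B and the remaining bits from C.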
3392 SDValue N0 = N->getOperand(0);
3393 if (N0.getOpcode() != ISD::AND)
3396 SDValue N1 = N->getOperand(1);
3397 if (N1.getOpcode() != ISD::AND)
3400 if (VT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
3402 unsigned SplatBitSize;
3404 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
3406 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
3409 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
3411 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
3413 !HasAnyUndefs && SplatBits0 == ~SplatBits1) {
3414 // Canonicalize the vector type to make instruction selection simpler.
3415 EVT CanonicalVT = VT.is128BitVector() ? MVT::v16i8 : MVT::v8i8;
3416 SDValue Result = DAG.getNode(AArch64ISD::NEON_BSL, DL, CanonicalVT,
3417 N0->getOperand(1), N0->getOperand(0),
3419 return DAG.getNode(ISD::BITCAST, DL, VT, Result);
/// Target-specific DAG combine xforms for ISD::SRA
3428 static SDValue PerformSRACombine(SDNode *N,
3429 TargetLowering::DAGCombinerInfo &DCI) {
3431 SelectionDAG &DAG = DCI.DAG;
3433 EVT VT = N->getValueType(0);
// We're looking for an SRA/SHL pair that together form an SBFX.
3437 if (VT != MVT::i32 && VT != MVT::i64)
3440 if (!isa<ConstantSDNode>(N->getOperand(1)))
3443 uint64_t ExtraSignBits = N->getConstantOperandVal(1);
3444 SDValue Shift = N->getOperand(0);
3446 if (Shift.getOpcode() != ISD::SHL)
3449 if (!isa<ConstantSDNode>(Shift->getOperand(1)))
3452 uint64_t BitsOnLeft = Shift->getConstantOperandVal(1);
3453 uint64_t Width = VT.getSizeInBits() - ExtraSignBits;
3454 uint64_t LSB = VT.getSizeInBits() - Width - BitsOnLeft;
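// For example, on i32 (sra (shl x, 24), 27) has ExtraSignBits == 27 and
// BitsOnLeft == 24, giving Width == 5 and LSB == 3: a sign-extended extract
// of bits [7:3] of x, i.e. an SBFX with LSB 3 and MSB 7.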
3456 if (LSB > VT.getSizeInBits() || Width > VT.getSizeInBits())
3459 return DAG.getNode(AArch64ISD::SBFX, DL, VT, Shift.getOperand(0),
3460 DAG.getConstant(LSB, MVT::i64),
3461 DAG.getConstant(LSB + Width - 1, MVT::i64));
3464 /// Check if this is a valid build_vector for the immediate operand of
3465 /// a vector shift operation, where all the elements of the build_vector
3466 /// must have the same constant integer value.
3467 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
3468 // Ignore bit_converts.
3469 while (Op.getOpcode() == ISD::BITCAST)
3470 Op = Op.getOperand(0);
3471 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
3472 APInt SplatBits, SplatUndef;
3473 unsigned SplatBitSize;
3475 if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
3476 HasAnyUndefs, ElementBits) ||
3477 SplatBitSize > ElementBits)
3479 Cnt = SplatBits.getSExtValue();
3483 /// Check if this is a valid build_vector for the immediate operand of
3484 /// a vector shift left operation. That value must be in the range:
3485 /// 0 <= Value < ElementBits
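/// For example, for v4i32 a splat of 31 is a valid shift-left immediate but a
/// splat of 32 is not.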
3486 static bool isVShiftLImm(SDValue Op, EVT VT, int64_t &Cnt) {
3487 assert(VT.isVector() && "vector shift count is not a vector type");
3488 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3489 if (!getVShiftImm(Op, ElementBits, Cnt))
3491 return (Cnt >= 0 && Cnt < ElementBits);
3494 /// Check if this is a valid build_vector for the immediate operand of a
3495 /// vector shift right operation. The value must be in the range:
3496 /// 1 <= Value <= ElementBits
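/// Unlike shifts left, a right shift by the full element width is encodable
/// (e.g. a splat of 32 is valid for v4i32), which is why ElementBits itself
/// is accepted here.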
3497 static bool isVShiftRImm(SDValue Op, EVT VT, int64_t &Cnt) {
3498 assert(VT.isVector() && "vector shift count is not a vector type");
3499 unsigned ElementBits = VT.getVectorElementType().getSizeInBits();
3500 if (!getVShiftImm(Op, ElementBits, Cnt))
3502 return (Cnt >= 1 && Cnt <= ElementBits);
3505 /// Checks for immediate versions of vector shifts and lowers them.
3506 static SDValue PerformShiftCombine(SDNode *N,
3507 TargetLowering::DAGCombinerInfo &DCI,
3508 const AArch64Subtarget *ST) {
3509 SelectionDAG &DAG = DCI.DAG;
3510 EVT VT = N->getValueType(0);
3511 if (N->getOpcode() == ISD::SRA && (VT == MVT::i32 || VT == MVT::i64))
3512 return PerformSRACombine(N, DCI);
3514 // Nothing to be done for scalar shifts.
3515 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
3516 if (!VT.isVector() || !TLI.isTypeLegal(VT))
3519 assert(ST->hasNEON() && "unexpected vector shift");
3522 switch (N->getOpcode()) {
3524 llvm_unreachable("unexpected shift opcode");
3527 if (isVShiftLImm(N->getOperand(1), VT, Cnt)) {
3529 DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
3530 DAG.getConstant(Cnt, MVT::i32));
3531 return DAG.getNode(ISD::SHL, SDLoc(N), VT, N->getOperand(0), RHS);
3537 if (isVShiftRImm(N->getOperand(1), VT, Cnt)) {
3539 DAG.getNode(AArch64ISD::NEON_VDUP, SDLoc(N->getOperand(1)), VT,
3540 DAG.getConstant(Cnt, MVT::i32));
3541 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, N->getOperand(0), RHS);
/// AArch64-specific DAG combining for intrinsics.
3550 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
3551 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
3555 // Don't do anything for most intrinsics.
3558 case Intrinsic::arm_neon_vqshifts:
3559 case Intrinsic::arm_neon_vqshiftu:
3560 EVT VT = N->getOperand(1).getValueType();
3562 if (!isVShiftLImm(N->getOperand(2), VT, Cnt))
3564 unsigned VShiftOpc = (IntNo == Intrinsic::arm_neon_vqshifts)
3565 ? AArch64ISD::NEON_QSHLs
3566 : AArch64ISD::NEON_QSHLu;
3567 return DAG.getNode(VShiftOpc, SDLoc(N), N->getValueType(0),
3568 N->getOperand(1), DAG.getConstant(Cnt, MVT::i32));
3574 /// Target-specific DAG combine function for NEON load/store intrinsics
3575 /// to merge base address updates.
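/// For example, if a vld2 of two 128-bit vectors (32 bytes) is followed by an
/// independent ADD of #32 to the same base pointer, the ADD can be folded
/// into a single post-incremented LD2 with writeback.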
3576 static SDValue CombineBaseUpdate(SDNode *N,
3577 TargetLowering::DAGCombinerInfo &DCI) {
3578 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
3581 SelectionDAG &DAG = DCI.DAG;
3582 bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
3583 N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
3584 unsigned AddrOpIdx = (isIntrinsic ? 2 : 1);
3585 SDValue Addr = N->getOperand(AddrOpIdx);
3587 // Search for a use of the address operand that is an increment.
3588 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
3589 UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
3591 if (User->getOpcode() != ISD::ADD ||
3592 UI.getUse().getResNo() != Addr.getResNo())
3595 // Check that the add is independent of the load/store. Otherwise, folding
3596 // it would create a cycle.
3597 if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
3600 // Find the new opcode for the updating load/store.
bool isLoad = true;
bool isLaneOp = false;
3603 unsigned NewOpc = 0;
3604 unsigned NumVecs = 0;
3606 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
3608 default: llvm_unreachable("unexpected intrinsic for Neon base update");
3609 case Intrinsic::arm_neon_vld1: NewOpc = AArch64ISD::NEON_LD1_UPD;
3611 case Intrinsic::arm_neon_vld2: NewOpc = AArch64ISD::NEON_LD2_UPD;
3613 case Intrinsic::arm_neon_vld3: NewOpc = AArch64ISD::NEON_LD3_UPD;
3615 case Intrinsic::arm_neon_vld4: NewOpc = AArch64ISD::NEON_LD4_UPD;
3617 case Intrinsic::arm_neon_vst1: NewOpc = AArch64ISD::NEON_ST1_UPD;
3618 NumVecs = 1; isLoad = false; break;
3619 case Intrinsic::arm_neon_vst2: NewOpc = AArch64ISD::NEON_ST2_UPD;
3620 NumVecs = 2; isLoad = false; break;
3621 case Intrinsic::arm_neon_vst3: NewOpc = AArch64ISD::NEON_ST3_UPD;
3622 NumVecs = 3; isLoad = false; break;
3623 case Intrinsic::arm_neon_vst4: NewOpc = AArch64ISD::NEON_ST4_UPD;
3624 NumVecs = 4; isLoad = false; break;
3625 case Intrinsic::aarch64_neon_vld1x2: NewOpc = AArch64ISD::NEON_LD1x2_UPD;
3627 case Intrinsic::aarch64_neon_vld1x3: NewOpc = AArch64ISD::NEON_LD1x3_UPD;
3629 case Intrinsic::aarch64_neon_vld1x4: NewOpc = AArch64ISD::NEON_LD1x4_UPD;
3631 case Intrinsic::aarch64_neon_vst1x2: NewOpc = AArch64ISD::NEON_ST1x2_UPD;
3632 NumVecs = 2; isLoad = false; break;
3633 case Intrinsic::aarch64_neon_vst1x3: NewOpc = AArch64ISD::NEON_ST1x3_UPD;
3634 NumVecs = 3; isLoad = false; break;
3635 case Intrinsic::aarch64_neon_vst1x4: NewOpc = AArch64ISD::NEON_ST1x4_UPD;
3636 NumVecs = 4; isLoad = false; break;
3637 case Intrinsic::arm_neon_vld2lane: NewOpc = AArch64ISD::NEON_LD2LN_UPD;
3638 NumVecs = 2; isLaneOp = true; break;
3639 case Intrinsic::arm_neon_vld3lane: NewOpc = AArch64ISD::NEON_LD3LN_UPD;
3640 NumVecs = 3; isLaneOp = true; break;
3641 case Intrinsic::arm_neon_vld4lane: NewOpc = AArch64ISD::NEON_LD4LN_UPD;
3642 NumVecs = 4; isLaneOp = true; break;
3643 case Intrinsic::arm_neon_vst2lane: NewOpc = AArch64ISD::NEON_ST2LN_UPD;
3644 NumVecs = 2; isLoad = false; isLaneOp = true; break;
3645 case Intrinsic::arm_neon_vst3lane: NewOpc = AArch64ISD::NEON_ST3LN_UPD;
3646 NumVecs = 3; isLoad = false; isLaneOp = true; break;
3647 case Intrinsic::arm_neon_vst4lane: NewOpc = AArch64ISD::NEON_ST4LN_UPD;
3648 NumVecs = 4; isLoad = false; isLaneOp = true; break;
3652 switch (N->getOpcode()) {
3653 default: llvm_unreachable("unexpected opcode for Neon base update");
3654 case AArch64ISD::NEON_LD2DUP: NewOpc = AArch64ISD::NEON_LD2DUP_UPD;
3656 case AArch64ISD::NEON_LD3DUP: NewOpc = AArch64ISD::NEON_LD3DUP_UPD;
3658 case AArch64ISD::NEON_LD4DUP: NewOpc = AArch64ISD::NEON_LD4DUP_UPD;
3663 // Find the size of memory referenced by the load/store.
3666 VecTy = N->getValueType(0);
3668 VecTy = N->getOperand(AddrOpIdx + 1).getValueType();
3669 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
3671 NumBytes /= VecTy.getVectorNumElements();
3673 // If the increment is a constant, it must match the memory ref size.
3674 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
3675 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
3676 uint32_t IncVal = CInc->getZExtValue();
3677 if (IncVal != NumBytes)
3679 Inc = DAG.getTargetConstant(IncVal, MVT::i32);
3682 // Create the new updating load/store node.
3684 unsigned NumResultVecs = (isLoad ? NumVecs : 0);
3686 for (n = 0; n < NumResultVecs; ++n)
3688 Tys[n++] = MVT::i64;
3689 Tys[n] = MVT::Other;
3690 SDVTList SDTys = DAG.getVTList(Tys, NumResultVecs + 2);
3691 SmallVector<SDValue, 8> Ops;
3692 Ops.push_back(N->getOperand(0)); // incoming chain
3693 Ops.push_back(N->getOperand(AddrOpIdx));
3695 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands(); ++i) {
3696 Ops.push_back(N->getOperand(i));
3698 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N);
3699 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys,
3700 Ops.data(), Ops.size(),
3701 MemInt->getMemoryVT(),
3702 MemInt->getMemOperand());
3705 std::vector<SDValue> NewResults;
3706 for (unsigned i = 0; i < NumResultVecs; ++i) {
3707 NewResults.push_back(SDValue(UpdN.getNode(), i));
3709 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
3710 DCI.CombineTo(N, NewResults);
3711 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
3718 /// For a VDUPLANE node N, check if its source operand is a vldN-lane (N > 1)
3719 /// intrinsic, and if all the other uses of that intrinsic are also VDUPLANEs.
3720 /// If so, combine them to a vldN-dup operation and return true.
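/// For example, a vld2lane whose two vector results each feed only VDUPLANEs
/// of the loaded lane can be rewritten as an LD2DUP (load two elements and
/// replicate them to all lanes), leaving the lane load itself dead.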
3721 static SDValue CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
3722 SelectionDAG &DAG = DCI.DAG;
3723 EVT VT = N->getValueType(0);
// Check if the VDUPLANE operand is a vldN-lane intrinsic.
3726 SDNode *VLD = N->getOperand(0).getNode();
3727 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
3729 unsigned NumVecs = 0;
3730 unsigned NewOpc = 0;
3731 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
3732 if (IntNo == Intrinsic::arm_neon_vld2lane) {
3734 NewOpc = AArch64ISD::NEON_LD2DUP;
3735 } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
3737 NewOpc = AArch64ISD::NEON_LD3DUP;
3738 } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
3740 NewOpc = AArch64ISD::NEON_LD4DUP;
3745 // First check that all the vldN-lane uses are VDUPLANEs and that the lane
3746 // numbers match the load.
3747 unsigned VLDLaneNo =
3748 cast<ConstantSDNode>(VLD->getOperand(NumVecs + 3))->getZExtValue();
3749 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
3751 // Ignore uses of the chain result.
3752 if (UI.getUse().getResNo() == NumVecs)
3755 if (User->getOpcode() != AArch64ISD::NEON_VDUPLANE ||
3756 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
3760 // Create the vldN-dup node.
3763 for (n = 0; n < NumVecs; ++n)
3765 Tys[n] = MVT::Other;
3766 SDVTList SDTys = DAG.getVTList(Tys, NumVecs + 1);
3767 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
3768 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
3769 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys, Ops, 2,
3770 VLDMemInt->getMemoryVT(),
3771 VLDMemInt->getMemOperand());
3774 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
3776 unsigned ResNo = UI.getUse().getResNo();
3777 // Ignore uses of the chain result.
3778 if (ResNo == NumVecs)
3781 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
3784 // Now the vldN-lane intrinsic is dead except for its chain result.
3785 // Update uses of the chain.
3786 std::vector<SDValue> VLDDupResults;
3787 for (unsigned n = 0; n < NumVecs; ++n)
3788 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
3789 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
3790 DCI.CombineTo(VLD, VLDDupResults);
3792 return SDValue(N, 0);
3796 AArch64TargetLowering::PerformDAGCombine(SDNode *N,
3797 DAGCombinerInfo &DCI) const {
3798 switch (N->getOpcode()) {
3800 case ISD::AND: return PerformANDCombine(N, DCI);
3801 case ISD::OR: return PerformORCombine(N, DCI, getSubtarget());
3805 return PerformShiftCombine(N, DCI, getSubtarget());
3806 case ISD::INTRINSIC_WO_CHAIN:
3807 return PerformIntrinsicCombine(N, DCI.DAG);
3808 case AArch64ISD::NEON_VDUPLANE:
3809 return CombineVLDDUP(N, DCI);
3810 case AArch64ISD::NEON_LD2DUP:
3811 case AArch64ISD::NEON_LD3DUP:
3812 case AArch64ISD::NEON_LD4DUP:
3813 return CombineBaseUpdate(N, DCI);
3814 case ISD::INTRINSIC_VOID:
3815 case ISD::INTRINSIC_W_CHAIN:
3816 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
3817 case Intrinsic::arm_neon_vld1:
3818 case Intrinsic::arm_neon_vld2:
3819 case Intrinsic::arm_neon_vld3:
3820 case Intrinsic::arm_neon_vld4:
3821 case Intrinsic::arm_neon_vst1:
3822 case Intrinsic::arm_neon_vst2:
3823 case Intrinsic::arm_neon_vst3:
3824 case Intrinsic::arm_neon_vst4:
3825 case Intrinsic::arm_neon_vld2lane:
3826 case Intrinsic::arm_neon_vld3lane:
3827 case Intrinsic::arm_neon_vld4lane:
3828 case Intrinsic::aarch64_neon_vld1x2:
3829 case Intrinsic::aarch64_neon_vld1x3:
3830 case Intrinsic::aarch64_neon_vld1x4:
3831 case Intrinsic::aarch64_neon_vst1x2:
3832 case Intrinsic::aarch64_neon_vst1x3:
3833 case Intrinsic::aarch64_neon_vst1x4:
3834 case Intrinsic::arm_neon_vst2lane:
3835 case Intrinsic::arm_neon_vst3lane:
3836 case Intrinsic::arm_neon_vst4lane:
3837 return CombineBaseUpdate(N, DCI);
3846 AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
3847 VT = VT.getScalarType();
3852 switch (VT.getSimpleVT().SimpleTy) {
// Check whether a BUILD_VECTOR can be represented as a vector shuffle. If so,
// try to call LowerVECTOR_SHUFFLE to lower it.
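// For example, a v4i32 BUILD_VECTOR whose operands are lanes 0 and 1 of A
// followed by lanes 0 and 1 of B is the same as a vector_shuffle<0, 1, 4, 5>
// of A and B, which the shuffle lowering already knows how to handle.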
3868 bool AArch64TargetLowering::isKnownShuffleVector(SDValue Op, SelectionDAG &DAG,
3869 SDValue &Res) const {
3871 EVT VT = Op.getValueType();
3872 unsigned NumElts = VT.getVectorNumElements();
3873 unsigned V0NumElts = 0;
// Check that all elements are extracted from at most two vectors.
3878 for (unsigned i = 0; i < NumElts; ++i) {
3879 SDValue Elt = Op.getOperand(i);
3880 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
3883 if (V0.getNode() == 0) {
3884 V0 = Elt.getOperand(0);
3885 V0NumElts = V0.getValueType().getVectorNumElements();
3887 if (Elt.getOperand(0) == V0) {
3888 Mask[i] = (cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue());
3890 } else if (V1.getNode() == 0) {
3891 V1 = Elt.getOperand(0);
3893 if (Elt.getOperand(0) == V1) {
3894 unsigned Lane = cast<ConstantSDNode>(Elt->getOperand(1))->getZExtValue();
3895 Mask[i] = (Lane + V0NumElts);
3902 if (!V1.getNode() && V0NumElts == NumElts * 2) {
3903 V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
3904 DAG.getConstant(NumElts, MVT::i64));
3905 V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V0,
3906 DAG.getConstant(0, MVT::i64));
3907 V0NumElts = V0.getValueType().getVectorNumElements();
3910 if (V1.getNode() && NumElts == V0NumElts &&
3911 V0NumElts == V1.getValueType().getVectorNumElements()) {
3912 SDValue Shuffle = DAG.getVectorShuffle(VT, DL, V0, V1, Mask);
3913 Res = LowerVECTOR_SHUFFLE(Shuffle, DAG);
3919 // If this is a case we can't handle, return null and let the default
3920 // expansion code take care of it.
3922 AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
3923 const AArch64Subtarget *ST) const {
3925 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
3927 EVT VT = Op.getValueType();
3929 APInt SplatBits, SplatUndef;
3930 unsigned SplatBitSize;
bool UseNeonMov = VT.getSizeInBits() >= 64;
3935 // Note we favor lowering MOVI over MVNI.
// This has implications for the definition of the TableGen patterns that
// select BIC immediate instructions but not ORR immediate instructions.
3938 // If this lowering order is changed, TableGen patterns for BIC immediate and
3939 // ORR immediate instructions have to be updated.
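// For example, a v4i32 splat of 0xffffff00 has no MOVI encoding, but its
// bitwise complement 0x000000ff does, so that splat is materialized with
// MVNI.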
3941 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
3942 if (SplatBitSize <= 64) {
3943 // First attempt to use vector immediate-form MOVI
3946 unsigned OpCmode = 0;
3948 if (isNeonModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
3949 SplatBitSize, DAG, VT.is128BitVector(),
3950 Neon_Mov_Imm, NeonMovVT, Imm, OpCmode)) {
3951 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
3952 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
3954 if (ImmVal.getNode() && OpCmodeVal.getNode()) {
3955 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MOVIMM, DL, NeonMovVT,
3956 ImmVal, OpCmodeVal);
3957 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
3961 // Then attempt to use vector immediate-form MVNI
3962 uint64_t NegatedImm = (~SplatBits).getZExtValue();
3963 if (isNeonModifiedImm(NegatedImm, SplatUndef.getZExtValue(), SplatBitSize,
3964 DAG, VT.is128BitVector(), Neon_Mvn_Imm, NeonMovVT,
3966 SDValue ImmVal = DAG.getTargetConstant(Imm, MVT::i32);
3967 SDValue OpCmodeVal = DAG.getConstant(OpCmode, MVT::i32);
3968 if (ImmVal.getNode() && OpCmodeVal.getNode()) {
3969 SDValue NeonMov = DAG.getNode(AArch64ISD::NEON_MVNIMM, DL, NeonMovVT,
3970 ImmVal, OpCmodeVal);
3971 return DAG.getNode(ISD::BITCAST, DL, VT, NeonMov);
3975 // Attempt to use vector immediate-form FMOV
3976 if (((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) ||
3977 (VT == MVT::v2f64 && SplatBitSize == 64)) {
3979 SplatBitSize == 32 ? APFloat::IEEEsingle : APFloat::IEEEdouble,
3982 if (A64Imms::isFPImm(RealVal, ImmVal)) {
3983 SDValue Val = DAG.getTargetConstant(ImmVal, MVT::i32);
3984 return DAG.getNode(AArch64ISD::NEON_FMOVIMM, DL, VT, Val);
3990 unsigned NumElts = VT.getVectorNumElements();
3991 bool isOnlyLowElement = true;
3992 bool usesOnlyOneValue = true;
3993 bool hasDominantValue = false;
3994 bool isConstant = true;
// Map of the number of times a particular SDValue appears in the
// BUILD_VECTOR.
3998 DenseMap<SDValue, unsigned> ValueCounts;
4000 for (unsigned i = 0; i < NumElts; ++i) {
4001 SDValue V = Op.getOperand(i);
4002 if (V.getOpcode() == ISD::UNDEF)
4005 isOnlyLowElement = false;
4006 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
4009 ValueCounts.insert(std::make_pair(V, 0));
4010 unsigned &Count = ValueCounts[V];
4012 // Is this value dominant? (takes up more than half of the lanes)
4013 if (++Count > (NumElts / 2)) {
4014 hasDominantValue = true;
4018 if (ValueCounts.size() != 1)
4019 usesOnlyOneValue = false;
4020 if (!Value.getNode() && ValueCounts.size() > 0)
4021 Value = ValueCounts.begin()->first;
4023 if (ValueCounts.size() == 0)
4024 return DAG.getUNDEF(VT);
// Loads are better lowered with insert_vector_elt, so keep going if we hit
// that case.
4028 if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
4029 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Value);
4031 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4032 if (hasDominantValue && EltSize <= 64) {
4033 // Use VDUP for non-constant splats.
4037 // If we are DUPing a value that comes directly from a vector, we could
4038 // just use DUPLANE. We can only do this if the lane being extracted
4039 // is at a constant index, as the DUP from lane instructions only have
4040 // constant-index forms.
4041 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
4042 isa<ConstantSDNode>(Value->getOperand(1))) {
4043 N = DAG.getNode(AArch64ISD::NEON_VDUPLANE, DL, VT,
4044 Value->getOperand(0), Value->getOperand(1));
4046 N = DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
4048 if (!usesOnlyOneValue) {
4049 // The dominant value was splatted as 'N', but we now have to insert
4050 // all differing elements.
4051 for (unsigned I = 0; I < NumElts; ++I) {
if (Op.getOperand(I) == Value)
  continue;
SmallVector<SDValue, 3> Ops;
Ops.push_back(N);
4056 Ops.push_back(Op.getOperand(I));
4057 Ops.push_back(DAG.getConstant(I, MVT::i64));
4058 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, &Ops[0], 3);
4063 if (usesOnlyOneValue && isConstant) {
4064 return DAG.getNode(AArch64ISD::NEON_VDUP, DL, VT, Value);
4067 // If all elements are constants and the case above didn't get hit, fall back
// to the default expansion, which will generate a load from the constant
// pool.
// Try to lower this via the VECTOR_SHUFFLE lowering path.
SDValue Shuf;
if (isKnownShuffleVector(Op, DAG, Shuf))
4078 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
4079 // know the default expansion would otherwise fall back on something even
4080 // worse. For a vector with one or two non-undef values, that's
4081 // scalar_to_vector for the elements followed by a shuffle (provided the
4082 // shuffle is valid for the target) and materialization element by element
4083 // on the stack followed by a load for everything else.
4084 if (!isConstant && !usesOnlyOneValue) {
4085 SDValue Vec = DAG.getUNDEF(VT);
4086 for (unsigned i = 0 ; i < NumElts; ++i) {
4087 SDValue V = Op.getOperand(i);
4088 if (V.getOpcode() == ISD::UNDEF)
4090 SDValue LaneIdx = DAG.getConstant(i, MVT::i64);
4091 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V, LaneIdx);
4098 /// isREVMask - Check if a vector shuffle corresponds to a REV
4099 /// instruction with the specified blocksize. (The order of the elements
4100 /// within each block of the vector is reversed.)
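/// For example, for v8i8 and BlockSize == 32 the mask <3, 2, 1, 0, 7, 6, 5, 4>
/// matches, since each 32-bit block has its four bytes reversed.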
4101 static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
4102 assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
4103 "Only possible block sizes for REV are: 16, 32, 64");
4105 unsigned EltSz = VT.getVectorElementType().getSizeInBits();
4109 unsigned NumElts = VT.getVectorNumElements();
4110 unsigned BlockElts = M[0] + 1;
4111 // If the first shuffle index is UNDEF, be optimistic.
4113 BlockElts = BlockSize / EltSz;
4115 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
4118 for (unsigned i = 0; i < NumElts; ++i) {
4120 continue; // ignore UNDEF indices
4121 if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
// isPermuteMask - Check whether the vector shuffle matches one of the UZP,
// ZIP, or TRN instructions.
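// For v4i16, for instance, UZP1 corresponds to the mask <0, 2, 4, 6>, ZIP1 to
// <0, 4, 1, 5>, and TRN1 to <0, 4, 2, 6>.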
4130 static unsigned isPermuteMask(ArrayRef<int> M, EVT VT) {
4131 unsigned NumElts = VT.getVectorNumElements();
4135 bool ismatch = true;
4138 for (unsigned i = 0; i < NumElts; ++i) {
4139 if ((unsigned)M[i] != i * 2) {
4145 return AArch64ISD::NEON_UZP1;
4149 for (unsigned i = 0; i < NumElts; ++i) {
4150 if ((unsigned)M[i] != i * 2 + 1) {
4156 return AArch64ISD::NEON_UZP2;
4160 for (unsigned i = 0; i < NumElts; ++i) {
4161 if ((unsigned)M[i] != i / 2 + NumElts * (i % 2)) {
4167 return AArch64ISD::NEON_ZIP1;
4171 for (unsigned i = 0; i < NumElts; ++i) {
4172 if ((unsigned)M[i] != (NumElts + i) / 2 + NumElts * (i % 2)) {
4178 return AArch64ISD::NEON_ZIP2;
4182 for (unsigned i = 0; i < NumElts; ++i) {
4183 if ((unsigned)M[i] != i + (NumElts - 1) * (i % 2)) {
4189 return AArch64ISD::NEON_TRN1;
4193 for (unsigned i = 0; i < NumElts; ++i) {
4194 if ((unsigned)M[i] != 1 + i + (NumElts - 1) * (i % 2)) {
4200 return AArch64ISD::NEON_TRN2;
4206 AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
4207 SelectionDAG &DAG) const {
4208 SDValue V1 = Op.getOperand(0);
4209 SDValue V2 = Op.getOperand(1);
4211 EVT VT = Op.getValueType();
4212 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
4214 // Convert shuffles that are directly supported on NEON to target-specific
4215 // DAG nodes, instead of keeping them as shuffles and matching them again
4216 // during code selection. This is more efficient and avoids the possibility
4217 // of inconsistencies between legalization and selection.
4218 ArrayRef<int> ShuffleMask = SVN->getMask();
4220 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4224 if (isREVMask(ShuffleMask, VT, 64))
4225 return DAG.getNode(AArch64ISD::NEON_REV64, dl, VT, V1);
4226 if (isREVMask(ShuffleMask, VT, 32))
4227 return DAG.getNode(AArch64ISD::NEON_REV32, dl, VT, V1);
4228 if (isREVMask(ShuffleMask, VT, 16))
4229 return DAG.getNode(AArch64ISD::NEON_REV16, dl, VT, V1);
4231 unsigned ISDNo = isPermuteMask(ShuffleMask, VT);
4233 return DAG.getNode(ISDNo, dl, VT, V1, V2);
// If the elements of the shuffle mask are all the same constant, we can
// transform it into either NEON_VDUP or NEON_VDUPLANE.
4237 if (ShuffleVectorSDNode::isSplatMask(&ShuffleMask[0], VT)) {
4238 int Lane = SVN->getSplatIndex();
4239 // If this is undef splat, generate it via "just" vdup, if possible.
4240 if (Lane == -1) Lane = 0;
4242 // Test if V1 is a SCALAR_TO_VECTOR.
4243 if (V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
4244 return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT, V1.getOperand(0));
4246 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR.
4247 if (V1.getOpcode() == ISD::BUILD_VECTOR) {
4248 bool IsScalarToVector = true;
4249 for (unsigned i = 0, e = V1.getNumOperands(); i != e; ++i)
4250 if (V1.getOperand(i).getOpcode() != ISD::UNDEF &&
4251 i != (unsigned)Lane) {
4252 IsScalarToVector = false;
4255 if (IsScalarToVector)
4256 return DAG.getNode(AArch64ISD::NEON_VDUP, dl, VT,
4257 V1.getOperand(Lane));
// Test if V1 is an EXTRACT_SUBVECTOR.
4261 if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
4262 int ExtLane = cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue();
4263 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1.getOperand(0),
4264 DAG.getConstant(Lane + ExtLane, MVT::i64));
4266 // Test if V1 is a CONCAT_VECTORS.
4267 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
4268 V1.getOperand(1).getOpcode() == ISD::UNDEF) {
4269 SDValue Op0 = V1.getOperand(0);
4270 assert((unsigned)Lane < Op0.getValueType().getVectorNumElements() &&
4271 "Invalid vector lane access");
4272 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, Op0,
4273 DAG.getConstant(Lane, MVT::i64));
4276 return DAG.getNode(AArch64ISD::NEON_VDUPLANE, dl, VT, V1,
4277 DAG.getConstant(Lane, MVT::i64));
4280 int Length = ShuffleMask.size();
4281 int V1EltNum = V1.getValueType().getVectorNumElements();
// If the number of V1 elements is the same as the number of shuffle mask
// elements and the shuffle mask values are sequential, we can transform
// it into NEON_VEXTRACT.
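// For example, a v8i8 shuffle of V1 and V2 with the sequential mask
// <3, 4, 5, 6, 7, 8, 9, 10> becomes a NEON_VEXTRACT with a byte index of 3.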
4286 if (V1EltNum == Length) {
4287 // Check if the shuffle mask is sequential.
4288 bool IsSequential = true;
4289 int CurMask = ShuffleMask[0];
4290 for (int I = 0; I < Length; ++I) {
4291 if (ShuffleMask[I] != CurMask) {
4292 IsSequential = false;
4298 assert((EltSize % 8 == 0) && "Bitsize of vector element is incorrect");
4299 unsigned VecSize = EltSize * V1EltNum;
4300 unsigned Index = (EltSize/8) * ShuffleMask[0];
4301 if (VecSize == 64 || VecSize == 128)
4302 return DAG.getNode(AArch64ISD::NEON_VEXTRACT, dl, VT, V1, V2,
4303 DAG.getConstant(Index, MVT::i64));
// For a shuffle mask like "0, 1, 2, 3, 4, 5, 13, 7", try to generate an
// insert by element from V2 into V1.
// If the shuffle mask is like "0, 1, 10, 11, 12, 13, 14, 15", V2 is the
// better vector to insert into than V1, since it needs fewer inserts; so we
// count the elements to be inserted for both V1 and V2 and pick the one that
// needs fewer inserts as the insert target.

// Collect the elements that need to be inserted and their indices.
4315 SmallVector<int, 8> NV1Elt;
4316 SmallVector<int, 8> N1Index;
4317 SmallVector<int, 8> NV2Elt;
4318 SmallVector<int, 8> N2Index;
4319 for (int I = 0; I != Length; ++I) {
4320 if (ShuffleMask[I] != I) {
4321 NV1Elt.push_back(ShuffleMask[I]);
4322 N1Index.push_back(I);
4325 for (int I = 0; I != Length; ++I) {
4326 if (ShuffleMask[I] != (I + V1EltNum)) {
4327 NV2Elt.push_back(ShuffleMask[I]);
4328 N2Index.push_back(I);
4332 // Decide which to be inserted. If all lanes mismatch, neither V1 nor V2
4333 // will be inserted.
4335 SmallVector<int, 8> InsMasks = NV1Elt;
4336 SmallVector<int, 8> InsIndex = N1Index;
4337 if ((int)NV1Elt.size() != Length || (int)NV2Elt.size() != Length) {
4338 if (NV1Elt.size() > NV2Elt.size()) {
4344 InsV = DAG.getNode(ISD::UNDEF, dl, VT);
4347 for (int I = 0, E = InsMasks.size(); I != E; ++I) {
4349 int Mask = InsMasks[I];
4350 if (Mask >= V1EltNum) {
// Any value type smaller than i32 is illegal in AArch64, and this lowering
// function is called after the legalize pass, so we need to legalize the
// extracted element type to a 32- or 64-bit type first.
4358 if (VT.getVectorElementType().isFloatingPoint())
4359 EltVT = (EltSize == 64) ? MVT::f64 : MVT::f32;
4361 EltVT = (EltSize == 64) ? MVT::i64 : MVT::i32;
4364 ExtV = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, ExtV,
4365 DAG.getConstant(Mask, MVT::i64));
4366 InsV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, InsV, ExtV,
4367 DAG.getConstant(InsIndex[I], MVT::i64));
4373 AArch64TargetLowering::ConstraintType
4374 AArch64TargetLowering::getConstraintType(const std::string &Constraint) const {
4375 if (Constraint.size() == 1) {
4376 switch (Constraint[0]) {
4378 case 'w': // An FP/SIMD vector register
4379 return C_RegisterClass;
4380 case 'I': // Constant that can be used with an ADD instruction
4381 case 'J': // Constant that can be used with a SUB instruction
4382 case 'K': // Constant that can be used with a 32-bit logical instruction
4383 case 'L': // Constant that can be used with a 64-bit logical instruction
4384 case 'M': // Constant that can be used as a 32-bit MOV immediate
4385 case 'N': // Constant that can be used as a 64-bit MOV immediate
4386 case 'Y': // Floating point constant zero
4387 case 'Z': // Integer constant zero
4389 case 'Q': // A memory reference with base register and no offset
4391 case 'S': // A symbolic address
4396 // FIXME: Ump, Utf, Usa, Ush
4397 // Ump: A memory address suitable for ldp/stp in SI, DI, SF and DF modes,
4398 // whatever they may be
4399 // Utf: A memory address suitable for ldp/stp in TF mode, whatever it may be
4400 // Usa: An absolute symbolic address
4401 // Ush: The high part (bits 32:12) of a pc-relative symbolic address
4402 assert(Constraint != "Ump" && Constraint != "Utf" && Constraint != "Usa"
4403 && Constraint != "Ush" && "Unimplemented constraints");
4405 return TargetLowering::getConstraintType(Constraint);
4408 TargetLowering::ConstraintWeight
4409 AArch64TargetLowering::getSingleConstraintMatchWeight(AsmOperandInfo &Info,
4410 const char *Constraint) const {
4412 llvm_unreachable("Constraint weight unimplemented");
4416 AArch64TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
4417 std::string &Constraint,
4418 std::vector<SDValue> &Ops,
4419 SelectionDAG &DAG) const {
4420 SDValue Result(0, 0);
4422 // Only length 1 constraints are C_Other.
4423 if (Constraint.size() != 1) return;
// Only C_Other constraints get lowered like this. That means constants for
// us, so return early if there's no hope the constraint can be lowered.
4428 switch(Constraint[0]) {
4430 case 'I': case 'J': case 'K': case 'L':
4431 case 'M': case 'N': case 'Z': {
4432 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
4436 uint64_t CVal = C->getZExtValue();
4439 switch (Constraint[0]) {
4441 // FIXME: 'M' and 'N' are MOV pseudo-insts -- unsupported in assembly. 'J'
4442 // is a peculiarly useless SUB constraint.
4443 llvm_unreachable("Unimplemented C_Other constraint");
4449 if (A64Imms::isLogicalImm(32, CVal, Bits))
4453 if (A64Imms::isLogicalImm(64, CVal, Bits))
4462 Result = DAG.getTargetConstant(CVal, Op.getValueType());
4466 // An absolute symbolic address or label reference.
4467 if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
4468 Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
4469 GA->getValueType(0));
4470 } else if (const BlockAddressSDNode *BA
4471 = dyn_cast<BlockAddressSDNode>(Op)) {
4472 Result = DAG.getTargetBlockAddress(BA->getBlockAddress(),
4473 BA->getValueType(0));
4474 } else if (const ExternalSymbolSDNode *ES
4475 = dyn_cast<ExternalSymbolSDNode>(Op)) {
4476 Result = DAG.getTargetExternalSymbol(ES->getSymbol(),
4477 ES->getValueType(0));
4483 if (const ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op)) {
4484 if (CFP->isExactlyValue(0.0)) {
4485 Result = DAG.getTargetConstantFP(0.0, CFP->getValueType(0));
4492 if (Result.getNode()) {
4493 Ops.push_back(Result);
4497 // It's an unknown constraint for us. Let generic code have a go.
4498 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
4501 std::pair<unsigned, const TargetRegisterClass*>
4502 AArch64TargetLowering::getRegForInlineAsmConstraint(
4503 const std::string &Constraint,
4505 if (Constraint.size() == 1) {
4506 switch (Constraint[0]) {
4508 if (VT.getSizeInBits() <= 32)
4509 return std::make_pair(0U, &AArch64::GPR32RegClass);
4510 else if (VT == MVT::i64)
4511 return std::make_pair(0U, &AArch64::GPR64RegClass);
4515 return std::make_pair(0U, &AArch64::FPR16RegClass);
4516 else if (VT == MVT::f32)
4517 return std::make_pair(0U, &AArch64::FPR32RegClass);
4518 else if (VT.getSizeInBits() == 64)
4519 return std::make_pair(0U, &AArch64::FPR64RegClass);
4520 else if (VT.getSizeInBits() == 128)
4521 return std::make_pair(0U, &AArch64::FPR128RegClass);
4526 // Use the default implementation in TargetLowering to convert the register
4527 // constraint into a member of a register class.
4528 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
4531 /// Represent NEON load and store intrinsics as MemIntrinsicNodes.
4532 /// The associated MachineMemOperands record the alignment specified
4533 /// in the intrinsic calls.
4534 bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
4536 unsigned Intrinsic) const {
4537 switch (Intrinsic) {
4538 case Intrinsic::arm_neon_vld1:
4539 case Intrinsic::arm_neon_vld2:
4540 case Intrinsic::arm_neon_vld3:
4541 case Intrinsic::arm_neon_vld4:
4542 case Intrinsic::aarch64_neon_vld1x2:
4543 case Intrinsic::aarch64_neon_vld1x3:
4544 case Intrinsic::aarch64_neon_vld1x4:
4545 case Intrinsic::arm_neon_vld2lane:
4546 case Intrinsic::arm_neon_vld3lane:
4547 case Intrinsic::arm_neon_vld4lane: {
4548 Info.opc = ISD::INTRINSIC_W_CHAIN;
4549 // Conservatively set memVT to the entire set of vectors loaded.
4550 uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
4551 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
4552 Info.ptrVal = I.getArgOperand(0);
4554 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
4555 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
4556 Info.vol = false; // volatile loads with NEON intrinsics not supported
4557 Info.readMem = true;
4558 Info.writeMem = false;
4561 case Intrinsic::arm_neon_vst1:
4562 case Intrinsic::arm_neon_vst2:
4563 case Intrinsic::arm_neon_vst3:
4564 case Intrinsic::arm_neon_vst4:
4565 case Intrinsic::aarch64_neon_vst1x2:
4566 case Intrinsic::aarch64_neon_vst1x3:
4567 case Intrinsic::aarch64_neon_vst1x4:
4568 case Intrinsic::arm_neon_vst2lane:
4569 case Intrinsic::arm_neon_vst3lane:
4570 case Intrinsic::arm_neon_vst4lane: {
4571 Info.opc = ISD::INTRINSIC_VOID;
4572 // Conservatively set memVT to the entire set of vectors stored.
4573 unsigned NumElts = 0;
4574 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
4575 Type *ArgTy = I.getArgOperand(ArgI)->getType();
4576 if (!ArgTy->isVectorTy())
4578 NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
4580 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
4581 Info.ptrVal = I.getArgOperand(0);
4583 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
4584 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
4585 Info.vol = false; // volatile stores with NEON intrinsics not supported
4586 Info.readMem = false;
4587 Info.writeMem = true;