//===- ARMISelLowering.cpp - ARM DAG Lowering Implementation --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "ARMISelLowering.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMPerfectShuffle.h"
#include "ARMRegisterInfo.h"
#include "ARMSelectionDAGInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "arm-isel"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
STATISTIC(NumConstpoolPromoted,
          "Number of constants with their storage promoted into constant pools");
static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));

static cl::opt<bool> EnableConstpoolPromotion(
    "arm-promote-constant", cl::Hidden,
    cl::desc("Enable / disable promotion of unnamed_addr constants into "
             "constant pools"),
    cl::init(false)); // FIXME: set to true by default once PR32780 is fixed
static cl::opt<unsigned> ConstpoolPromotionMaxSize(
    "arm-promote-constant-max-size", cl::Hidden,
    cl::desc("Maximum size of constant to promote into a constant pool"),
    cl::init(64));
static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
    "arm-promote-constant-max-total", cl::Hidden,
    cl::desc("Maximum size of ALL constants to promote into a constant pool"),
    cl::init(128));

static cl::opt<unsigned>
MVEMaxSupportedInterleaveFactor("mve-max-interleave-factor", cl::Hidden,
  cl::desc("Maximum interleave factor for MVE VLDn to generate."),
  cl::init(2));

// The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};

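// Configure the legalization actions for a NEON vector type: promote wide
// loads/stores and bitwise operations to the given companion types, and mark
// the remaining operations as Legal, Custom or Expand as appropriate.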
void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
                                       MVT PromotedBitwiseVT) {
  if (VT != PromotedLdStVT) {
    setOperationAction(ISD::LOAD, VT, Promote);
    AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);

    setOperationAction(ISD::STORE, VT, Promote);
    AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
  }

  MVT ElemTy = VT.getVectorElementType();
  if (ElemTy != MVT::f64)
    setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  if (ElemTy == MVT::i32) {
    setOperationAction(ISD::SINT_TO_FP, VT, Custom);
    setOperationAction(ISD::UINT_TO_FP, VT, Custom);
    setOperationAction(ISD::FP_TO_SINT, VT, Custom);
    setOperationAction(ISD::FP_TO_UINT, VT, Custom);
  } else {
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
  }
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
  if (VT.isInteger()) {
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
  }

  // Promote all bit-wise operations.
  if (VT.isInteger() && VT != PromotedBitwiseVT) {
    setOperationAction(ISD::AND, VT, Promote);
    AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
    setOperationAction(ISD::OR, VT, Promote);
    AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
    setOperationAction(ISD::XOR, VT, Promote);
    AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
  }

  // Neon does not support vector divide/remainder operations.
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::FDIV, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);
  setOperationAction(ISD::SDIVREM, VT, Expand);
  setOperationAction(ISD::UDIVREM, VT, Expand);

  if (!VT.isFloatingPoint() &&
      VT != MVT::v2i64 && VT != MVT::v1i64)
    for (auto Opcode : {ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);
  if (!VT.isFloatingPoint())
    for (auto Opcode : {ISD::SADDSAT, ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT})
      setOperationAction(Opcode, VT, Legal);
}

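// Register a 64-bit (D register) vector type for NEON, promoting its
// loads/stores to f64 and its bitwise operations to v2i32.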
void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPRRegClass);
  addTypeForNEON(VT, MVT::f64, MVT::v2i32);
}

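// Register a 128-bit (Q register, i.e. a D-register pair) vector type for
// NEON, promoting its loads/stores to v2f64 and its bitwise ops to v4i32.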
void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &ARM::DPairRegClass);
  addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
}

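// Mark every operation on VT as Expand, then re-mark the simple data-movement
// operations (bitcast, load, store, undef) as Legal.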
void ARMTargetLowering::setAllExpand(MVT VT) {
  for (unsigned Opc = 0; Opc < ISD::BUILTIN_OP_END; ++Opc)
    setOperationAction(Opc, VT, Expand);

  // We support these really simple operations even on types where all
  // the actual arithmetic has to be broken down into simpler
  // operations or turned into library calls.
  setOperationAction(ISD::BITCAST, VT, Legal);
  setOperationAction(ISD::LOAD, VT, Legal);
  setOperationAction(ISD::STORE, VT, Legal);
  setOperationAction(ISD::UNDEF, VT, Legal);
}

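// Set the same legalize action for any-, zero- and sign-extending loads from
// type From to type To.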
void ARMTargetLowering::addAllExtLoads(const MVT From, const MVT To,
                                       LegalizeAction Action) {
  setLoadExtAction(ISD::EXTLOAD, From, To, Action);
  setLoadExtAction(ISD::ZEXTLOAD, From, To, Action);
  setLoadExtAction(ISD::SEXTLOAD, From, To, Action);
}

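// Register the MVE (M-profile Vector Extension) vector types and configure
// their operation actions; HasMVEFP selects whether the floating-point part
// of MVE is available.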
void ARMTargetLowering::addMVEVectorTypes(bool HasMVEFP) {
  const MVT IntTypes[] = { MVT::v16i8, MVT::v8i16, MVT::v4i32 };

  for (auto VT : IntTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::SHL, VT, Custom);
    setOperationAction(ISD::SRA, VT, Custom);
    setOperationAction(ISD::SRL, VT, Custom);
    setOperationAction(ISD::SMIN, VT, Legal);
    setOperationAction(ISD::SMAX, VT, Legal);
    setOperationAction(ISD::UMIN, VT, Legal);
    setOperationAction(ISD::UMAX, VT, Legal);
    setOperationAction(ISD::ABS, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);
    setOperationAction(ISD::CTLZ, VT, Legal);
    setOperationAction(ISD::CTTZ, VT, Custom);
    setOperationAction(ISD::BITREVERSE, VT, Legal);
    setOperationAction(ISD::BSWAP, VT, Legal);
    setOperationAction(ISD::SADDSAT, VT, Legal);
    setOperationAction(ISD::UADDSAT, VT, Legal);
    setOperationAction(ISD::SSUBSAT, VT, Legal);
    setOperationAction(ISD::USUBSAT, VT, Legal);

    // No native support for these.
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);

    // Vector reductions
    setOperationAction(ISD::VECREDUCE_ADD, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMAX, VT, Legal);
    setOperationAction(ISD::VECREDUCE_SMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_UMIN, VT, Legal);
    setOperationAction(ISD::VECREDUCE_MUL, VT, Custom);
    setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
    setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
    setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

    if (!HasMVEFP) {
      setOperationAction(ISD::SINT_TO_FP, VT, Expand);
      setOperationAction(ISD::UINT_TO_FP, VT, Expand);
      setOperationAction(ISD::FP_TO_SINT, VT, Expand);
      setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    }

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

  const MVT FloatTypes[] = { MVT::v8f16, MVT::v4f32 };
  for (auto VT : FloatTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    if (!HasMVEFP)
      setAllExpand(VT);

    // These are legal or custom whether we have MVE.fp or not
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT.getVectorElementType(), Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Legal);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::MLOAD, VT, Custom);
    setOperationAction(ISD::MSTORE, VT, Legal);

    // Pre and Post inc are supported on loads and stores
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }

    if (HasMVEFP) {
      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);
      setOperationAction(ISD::FROUND, VT, Legal);
      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMUL, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

      // No native support for these.
      setOperationAction(ISD::FDIV, VT, Expand);
      setOperationAction(ISD::FREM, VT, Expand);
      setOperationAction(ISD::FSQRT, VT, Expand);
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FPOW, VT, Expand);
      setOperationAction(ISD::FLOG, VT, Expand);
      setOperationAction(ISD::FLOG2, VT, Expand);
      setOperationAction(ISD::FLOG10, VT, Expand);
      setOperationAction(ISD::FEXP, VT, Expand);
      setOperationAction(ISD::FEXP2, VT, Expand);
      setOperationAction(ISD::FNEARBYINT, VT, Expand);
    }
  }

  // Custom Expand smaller than legal vector reductions to prevent false zero
  // items being added.
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v4f16, Custom);
  setOperationAction(ISD::VECREDUCE_FADD, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMUL, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMIN, MVT::v2f16, Custom);
  setOperationAction(ISD::VECREDUCE_FMAX, MVT::v2f16, Custom);

  // We 'support' these types up to bitcast/load/store level, regardless of
  // MVE integer-only / float support. Only doing FP data processing on the FP
  // vector types is inhibited at integer-only level.
  const MVT LongTypes[] = { MVT::v2i64, MVT::v2f64 };
  for (auto VT : LongTypes) {
    addRegisterClass(VT, &ARM::MQPRRegClass);
    setAllExpand(VT);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  }

  // We can do bitwise operations on v2i64 vectors
  setOperationAction(ISD::AND, MVT::v2i64, Legal);
  setOperationAction(ISD::OR, MVT::v2i64, Legal);
  setOperationAction(ISD::XOR, MVT::v2i64, Legal);

  // It is legal to extload from v4i8 to v4i16 or v4i32.
  addAllExtLoads(MVT::v8i16, MVT::v8i8, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i16, Legal);
  addAllExtLoads(MVT::v4i32, MVT::v4i8, Legal);

  // It is legal to sign extend from v4i8/v4i16 to v4i32 or v8i8 to v8i16.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v4i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v8i16, Legal);

  // Some truncating stores are legal too.
  setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
  setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
  setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);

  // Pre and Post inc on these are legal, given the correct extends
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    for (auto VT : {MVT::v8i8, MVT::v4i8, MVT::v4i16}) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
      setIndexedMaskedLoadAction(im, VT, Legal);
      setIndexedMaskedStoreAction(im, VT, Legal);
    }
  }

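  // MVE predicate vectors (one i1 per lane) live in the VCCR register class,
  // which is backed by the VPR predicate register.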
  const MVT pTypes[] = {MVT::v16i1, MVT::v8i1, MVT::v4i1};
  for (auto VT : pTypes) {
    addRegisterClass(VT, &ARM::VCCRRegClass);
    setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
    setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Custom);
    setOperationAction(ISD::STORE, VT, Custom);
  }
}

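// The constructor configures calling conventions, runtime library calls and
// legalization actions based on the selected subtarget features.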
ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
                                     const ARMSubtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  RegInfo = Subtarget->getRegisterInfo();
  Itins = Subtarget->getInstrItineraryData();

  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetIOS() &&
      !Subtarget->isTargetWatchOS()) {
    bool IsHFTarget = TM.Options.FloatABIType == FloatABI::Hard;
    for (int LCID = 0; LCID < RTLIB::UNKNOWN_LIBCALL; ++LCID)
      setLibcallCallingConv(static_cast<RTLIB::Libcall>(LCID),
                            IsHFTarget ? CallingConv::ARM_AAPCS_VFP
                                       : CallingConv::ARM_AAPCS);
  }

  if (Subtarget->isTargetMachO()) {
    // Uses VFP for Thumb libfuncs if available.
    if (Subtarget->isThumb() && Subtarget->hasVFP2Base() &&
        Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const ISD::CondCode Cond;
      } LibraryCalls[] = {
        // Single-precision floating-point arithmetic.
        { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },

        // Double-precision floating-point arithmetic.
        { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
        { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
        { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
        { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },

        // Single-precision comparisons.
        { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE },
        { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE },
        { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE },
        { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE },
        { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE },
        { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE },
        { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },

        // Double-precision comparisons.
        { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE },
        { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE },
        { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE },
        { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE },
        { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE },
        { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE },
        { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },

        // Floating-point to integer conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID },
        { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },

        // Conversions between floating types.
        { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID },
        { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID },

        // Integer to floating-point conversions.
        // i64 conversions are done via library routines even when generating VFP
        // instructions, so use the same ones.
        // FIXME: There appears to be some naming inconsistency in ARM libgcc:
        // e.g., __floatunsidf vs. __floatunssidfvfp.
        { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
        { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID },
        { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }

    // These libcalls are not available in 32-bit.
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
  }

  if (Subtarget->isAAPCS_ABI() &&
      (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
       Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
      const ISD::CondCode Cond;
    } LibraryCalls[] = {
      // Double-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 2
      { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Double-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 3
      { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Single-precision floating-point arithmetic helper functions
      // RTABI chapter 4.1.2, Table 4
      { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Single-precision floating-point comparison helper functions
      // RTABI chapter 4.1.2, Table 5
      { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
      { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
      { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },

      // Floating-point to integer conversions.
      // RTABI chapter 4.1.2, Table 6
      { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Conversions between floating types.
      // RTABI chapter 4.1.2, Table 7
      { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer to floating-point conversions.
      // RTABI chapter 4.1.2, Table 8
      { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Long long helper functions
      // RTABI chapter 4.2, Table 9
      { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },

      // Integer division functions
      // RTABI chapter 4.3.1
      { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
      if (LC.Cond != ISD::SETCC_INVALID)
        setCmpLibcallCC(LC.Op, LC.Cond);
    }

    // EABI dependent RTLIB
    if (TM.Options.EABIVersion == EABI::EABI4 ||
        TM.Options.EABIVersion == EABI::EABI5) {
      static const struct {
        const RTLIB::Libcall Op;
        const char *const Name;
        const CallingConv::ID CC;
        const ISD::CondCode Cond;
      } MemOpsLibraryCalls[] = {
        // Memory operations
        // RTABI chapter 4.3.4
        { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
        { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
      };

      for (const auto &LC : MemOpsLibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
        if (LC.Cond != ISD::SETCC_INVALID)
          setCmpLibcallCC(LC.Op, LC.Cond);
      }
    }
  }

  if (Subtarget->isTargetWindows()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
      { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  // Use divmod compiler-rt calls for iOS 5.0 and later.
  if (Subtarget->isTargetMachO() &&
      !(Subtarget->isTargetIOS() &&
        Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
    setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
    setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
  }

  // The half <-> float conversion functions are always soft-float on
  // non-watchos platforms, but are needed for some targets which use a
  // hard-float calling convention by default.
  if (!Subtarget->isTargetWatchABI()) {
    if (Subtarget->isAAPCS_ABI()) {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
    } else {
      setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
      setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
    }
  }

  // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
  // a __gnu_ prefix (which is the default).
  if (Subtarget->isTargetAEABI()) {
    static const struct {
      const RTLIB::Libcall Op;
      const char * const Name;
      const CallingConv::ID CC;
    } LibraryCalls[] = {
      { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
      { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
    };

    for (const auto &LC : LibraryCalls) {
      setLibcallName(LC.Op, LC.Name);
      setLibcallCallingConv(LC.Op, LC.CC);
    }
  }

  if (Subtarget->isThumb1Only())
    addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
  else
    addRegisterClass(MVT::i32, &ARM::GPRRegClass);

  if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only() &&
      Subtarget->hasFPRegs()) {
    addRegisterClass(MVT::f32, &ARM::SPRRegClass);
    addRegisterClass(MVT::f64, &ARM::DPRRegClass);
    if (!Subtarget->hasVFP2Base())
      setAllExpand(MVT::f32);
    if (!Subtarget->hasFP64())
      setAllExpand(MVT::f64);
  }

  if (Subtarget->hasFullFP16()) {
    addRegisterClass(MVT::f16, &ARM::HPRRegClass);
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::f16, Custom);

    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
  }

  if (Subtarget->hasBF16()) {
    addRegisterClass(MVT::bf16, &ARM::HPRRegClass);
    setAllExpand(MVT::bf16);
    if (!Subtarget->hasFullFP16())
      setOperationAction(ISD::BITCAST, MVT::bf16, Custom);
  }

  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(VT, InnerVT, Expand);
      addAllExtLoads(VT, InnerVT, Expand);
    }

    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);

    setOperationAction(ISD::BSWAP, VT, Expand);
  }

  setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
  setOperationAction(ISD::ConstantFP, MVT::f64, Custom);

  setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
  setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);

  if (Subtarget->hasMVEIntegerOps())
    addMVEVectorTypes(Subtarget->hasMVEFloatOps());

  // Combine low-overhead loop intrinsics so that we can lower i1 types.
  if (Subtarget->hasLOB()) {
    setTargetDAGCombine(ISD::BRCOND);
    setTargetDAGCombine(ISD::BR_CC);
  }

  if (Subtarget->hasNEON()) {
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);

    if (Subtarget->hasFullFP16()) {
      addQRTypeForNEON(MVT::v8f16);
      addDRTypeForNEON(MVT::v4f16);
    }

    if (Subtarget->hasBF16()) {
      addQRTypeForNEON(MVT::v8bf16);
      addDRTypeForNEON(MVT::v4bf16);
    }
  }

  if (Subtarget->hasMVEIntegerOps() || Subtarget->hasNEON()) {
    // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
    // none of Neon, MVE or VFP supports any arithmetic operations on it.
    setOperationAction(ISD::FADD, MVT::v2f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
    // FIXME: Code duplication: FDIV and FREM are expanded always, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
    setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a way when "copysign" appears in DAG with vector
    // operands.
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
    // FIXME: Code duplication: SETCC has custom operation action, see
    // ARMTargetLowering::addTypeForNEON method for details.
    setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
    // FIXME: Create unittest for FNEG and for FABS.
    setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
    setOperationAction(ISD::FABS, MVT::v2f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
    // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
    setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
    setOperationAction(ISD::FMA, MVT::v2f64, Expand);
  }

  if (Subtarget->hasNEON()) {
    // The same with v4f32. But keep in mind that vadd, vsub, vmul are natively
    // supported for v4f32.
    setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);

    // Mark v2f32 intrinsics.
    setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
    setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
    setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
    setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
    setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
    setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
    setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
    setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);

    // Neon does not support some operations on v1i64 and v2i64 types.
    setOperationAction(ISD::MUL, MVT::v1i64, Expand);
    // Custom handling for some quad-vector types to detect VMULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    // Custom handling for some vector types to avoid expensive expansions
    setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
    setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
    setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    // Neon does not have single instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source, and nor does
    // it have a FP_TO_[SU]INT instruction with a narrower destination than
    // source.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Custom);

    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);

    // NEON does not have single instruction CTPOP for vectors with element
    // types wider than 8-bits. However, custom lowering can leverage the
    // v8i8/v16i8 vcnt instruction.
    setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
    setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
    setOperationAction(ISD::CTPOP, MVT::v1i64, Custom);
    setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);

    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);

    // NEON does not have single instruction CTTZ for vectors.
    setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);

    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);

    // NEON only has FMA instructions as of VFP4.
    if (!Subtarget->hasVFP4Base()) {
      setOperationAction(ISD::FMA, MVT::v2f32, Expand);
      setOperationAction(ISD::FMA, MVT::v4f32, Expand);
    }

    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::FP_TO_SINT);
    setTargetDAGCombine(ISD::FP_TO_UINT);
    setTargetDAGCombine(ISD::FDIV);
    setTargetDAGCombine(ISD::LOAD);

    // It is legal to extload from v4i8 to v4i16 or v4i32.
    for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
                   MVT::v2i32}) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
        setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
      }
    }
  }

  if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine(ISD::BUILD_VECTOR);
    setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
    setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
    setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
    setTargetDAGCombine(ISD::STORE);
    setTargetDAGCombine(ISD::SIGN_EXTEND);
    setTargetDAGCombine(ISD::ZERO_EXTEND);
    setTargetDAGCombine(ISD::ANY_EXTEND);
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
    setTargetDAGCombine(ISD::INTRINSIC_VOID);
    setTargetDAGCombine(ISD::VECREDUCE_ADD);
    setTargetDAGCombine(ISD::ADD);
    setTargetDAGCombine(ISD::BITCAST);
  }
  if (Subtarget->hasMVEIntegerOps()) {
    setTargetDAGCombine(ISD::SMIN);
    setTargetDAGCombine(ISD::UMIN);
    setTargetDAGCombine(ISD::SMAX);
    setTargetDAGCombine(ISD::UMAX);
    setTargetDAGCombine(ISD::FP_EXTEND);
  }

  if (!Subtarget->hasFP64()) {
    // When targeting a floating-point unit with only single-precision
    // operations, f64 is legal for the few double-precision instructions which
    // are present. However, no double-precision operations other than moves,
    // loads and stores are provided by the hardware.
    setOperationAction(ISD::FADD, MVT::f64, Expand);
    setOperationAction(ISD::FSUB, MVT::f64, Expand);
    setOperationAction(ISD::FMUL, MVT::f64, Expand);
    setOperationAction(ISD::FMA, MVT::f64, Expand);
    setOperationAction(ISD::FDIV, MVT::f64, Expand);
    setOperationAction(ISD::FREM, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FGETSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);
    setOperationAction(ISD::FABS, MVT::f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FPOW, MVT::f64, Expand);
    setOperationAction(ISD::FLOG, MVT::f64, Expand);
    setOperationAction(ISD::FLOG2, MVT::f64, Expand);
    setOperationAction(ISD::FLOG10, MVT::f64, Expand);
    setOperationAction(ISD::FEXP, MVT::f64, Expand);
    setOperationAction(ISD::FEXP2, MVT::f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
    setOperationAction(ISD::FRINT, MVT::f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
  }

  if (!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) {
    setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Custom);
    if (Subtarget->hasFullFP16()) {
      setOperationAction(ISD::FP_ROUND, MVT::f16, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f16, Custom);
    }
  }

  if (!Subtarget->hasFP16()) {
    setOperationAction(ISD::FP_EXTEND, MVT::f32, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f32, Custom);
  }

  computeRegisterProperties(Subtarget->getRegisterInfo());

  // ARM does not have floating-point extending loads.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
  }

  // ... or truncating stores
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);

  // ARM does not have i1 sign extending load.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // ARM supports all 4 flavors of integer indexed load / store.
  if (!Subtarget->isThumb1Only()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, MVT::i1, Legal);
      setIndexedLoadAction(im, MVT::i8, Legal);
      setIndexedLoadAction(im, MVT::i16, Legal);
      setIndexedLoadAction(im, MVT::i32, Legal);
      setIndexedStoreAction(im, MVT::i1, Legal);
      setIndexedStoreAction(im, MVT::i8, Legal);
      setIndexedStoreAction(im, MVT::i16, Legal);
      setIndexedStoreAction(im, MVT::i32, Legal);
    }
  } else {
    // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
    setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
    setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
  }

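  // Custom lower the overflow-checking and carry-using arithmetic nodes so
  // they can map onto the flag-setting ADDS/SUBS and ADC/SBC forms.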
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);

  setOperationAction(ISD::ADDCARRY, MVT::i32, Custom);
  setOperationAction(ISD::SUBCARRY, MVT::i32, Custom);
  if (Subtarget->hasDSP()) {
    setOperationAction(ISD::SADDSAT, MVT::i8, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::i8, Custom);
    setOperationAction(ISD::SADDSAT, MVT::i16, Custom);
    setOperationAction(ISD::SSUBSAT, MVT::i16, Custom);
  }
  if (Subtarget->hasBaseDSP()) {
    setOperationAction(ISD::SADDSAT, MVT::i32, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::i32, Legal);
  }

  // i64 operation support.
  setOperationAction(ISD::MUL, MVT::i64, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
    setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
  }
  if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
      || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
    setOperationAction(ISD::MULHS, MVT::i32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL, MVT::i64, Custom);
  setOperationAction(ISD::SRA, MVT::i64, Custom);
  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
  setOperationAction(ISD::LOAD, MVT::i64, Custom);
  setOperationAction(ISD::STORE, MVT::i64, Custom);

  // MVE lowers 64 bit shifts to lsll and lsrl
  // assuming that ISD::SRL and SRA of i64 are already marked custom
  if (Subtarget->hasMVEIntegerOps())
    setOperationAction(ISD::SHL, MVT::i64, Custom);

  // Expand to __aeabi_l{lsl,lsr,asr} calls for Thumb1.
  if (Subtarget->isThumb1Only()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
    setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
  }

  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
    setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);

  // ARM does not have ROTL.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }
  setOperationAction(ISD::CTTZ, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only()) {
    setOperationAction(ISD::CTLZ, MVT::i32, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, LibCall);
  }

  // @llvm.readcyclecounter requires the Performance Monitors extension.
  // Default to the 0 expansion on unsupported platforms.
  // FIXME: Technically there are older ARM CPUs that have
  // implementation-specific ways of obtaining this information.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  // Only ARMv6 has BSWAP.
  if (!Subtarget->hasV6Ops())
    setOperationAction(ISD::BSWAP, MVT::i32, Expand);

  bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
                                        : Subtarget->hasDivideInARMMode();
  if (!hasDivide) {
    // These are expanded into libcalls if the cpu doesn't have HW divider.
    setOperationAction(ISD::SDIV, MVT::i32, LibCall);
    setOperationAction(ISD::UDIV, MVT::i32, LibCall);
  }

  if (Subtarget->isTargetWindows() && !Subtarget->hasDivideInThumbMode()) {
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);

    setOperationAction(ISD::SDIV, MVT::i64, Custom);
    setOperationAction(ISD::UDIV, MVT::i64, Custom);
  }

  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);

  // Register based DivRem for AEABI (RTABI 4.2)
  if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
      Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
      Subtarget->isTargetWindows()) {
    setOperationAction(ISD::SREM, MVT::i64, Custom);
    setOperationAction(ISD::UREM, MVT::i64, Custom);
    HasStandaloneRem = false;

    if (Subtarget->isTargetWindows()) {
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const CallingConv::ID CC;
      } LibraryCalls[] = {
        { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS },

        { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
      }
    } else {
      static const struct {
        const RTLIB::Libcall Op;
        const char * const Name;
        const CallingConv::ID CC;
      } LibraryCalls[] = {
        { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
        { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS },

        { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
        { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS },
      };

      for (const auto &LC : LibraryCalls) {
        setLibcallName(LC.Op, LC.Name);
        setLibcallCallingConv(LC.Op, LC.CC);
      }
    }

    setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
    setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
    setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
  } else {
    setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
    setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  }

  if (Subtarget->getTargetTriple().isOSMSVCRT()) {
    // MSVCRT doesn't have powi; fall back to pow
    setLibcallName(RTLIB::POWI_F32, nullptr);
    setLibcallName(RTLIB::POWI_F64, nullptr);
  }

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // Use the default implementation.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetWindows())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
  // the default expansion.
  InsertFencesForAtomic = false;
  if (Subtarget->hasAnyDataBarrier() &&
      (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
    // ATOMIC_FENCE needs custom lowering; the others should have been expanded
    // to ldrex/strex loops already.
    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
    if (!Subtarget->isThumb() || !Subtarget->isMClass())
      setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);

    // On v8, we have particularly efficient implementations of atomic fences
    // if they can be combined with nearby atomic loads and stores.
    if (!Subtarget->hasAcquireRelease() ||
        getTargetMachine().getOptLevel() == 0) {
      // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
      InsertFencesForAtomic = true;
    }
  } else {
    // If there's anything we can use as a barrier, go through custom lowering
    // for ATOMIC_FENCE.
    // If target has DMB in thumb, Fences can be inserted.
    if (Subtarget->hasDataBarrier())
      InsertFencesForAtomic = true;

    setOperationAction(ISD::ATOMIC_FENCE, MVT::Other,
                       Subtarget->hasAnyDataBarrier() ? Custom : Expand);

    // Set them all for expansion, which will force libcalls.
    setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
    setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
    // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
    // Unordered/Monotonic case.
    if (!InsertFencesForAtomic) {
      setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
      setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
    }
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
  if (!Subtarget->hasV6Ops()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
  }
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  if (!Subtarget->useSoftFloat() && Subtarget->hasFPRegs() &&
      !Subtarget->isThumb1Only()) {
    // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
    // iff target supports vfp2.
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);
    setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
  }

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (Subtarget->useSjLjEH())
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");

1344 setOperationAction(ISD::SETCC, MVT::i32, Expand);
1345 setOperationAction(ISD::SETCC, MVT::f32, Expand);
1346 setOperationAction(ISD::SETCC, MVT::f64, Expand);
1347 setOperationAction(ISD::SELECT, MVT::i32, Custom);
1348 setOperationAction(ISD::SELECT, MVT::f32, Custom);
1349 setOperationAction(ISD::SELECT, MVT::f64, Custom);
1350 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1351 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1352 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1353 if (Subtarget->hasFullFP16()) {
1354 setOperationAction(ISD::SETCC, MVT::f16, Expand);
1355 setOperationAction(ISD::SELECT, MVT::f16, Custom);
1356 setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
1359 setOperationAction(ISD::SETCCCARRY, MVT::i32, Custom);
1361 setOperationAction(ISD::BRCOND, MVT::Other, Custom);
1362 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1363 if (Subtarget->hasFullFP16())
1364 setOperationAction(ISD::BR_CC, MVT::f16, Custom);
1365 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1366 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1367 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
1369 // We don't support sin/cos/fmod/copysign/pow
1370 setOperationAction(ISD::FSIN, MVT::f64, Expand);
1371 setOperationAction(ISD::FSIN, MVT::f32, Expand);
1372 setOperationAction(ISD::FCOS, MVT::f32, Expand);
1373 setOperationAction(ISD::FCOS, MVT::f64, Expand);
1374 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1375 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1376 setOperationAction(ISD::FREM, MVT::f64, Expand);
1377 setOperationAction(ISD::FREM, MVT::f32, Expand);
1378 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2Base() &&
1379 !Subtarget->isThumb1Only()) {
1380 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
1381 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
1383 setOperationAction(ISD::FPOW, MVT::f64, Expand);
1384 setOperationAction(ISD::FPOW, MVT::f32, Expand);
1386 if (!Subtarget->hasVFP4Base()) {
1387 setOperationAction(ISD::FMA, MVT::f64, Expand);
1388 setOperationAction(ISD::FMA, MVT::f32, Expand);
1391 // Various VFP goodness
1392 if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
1393 // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
1394 if (!Subtarget->hasFPARMv8Base() || !Subtarget->hasFP64()) {
1395 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1396 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1399 // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
1400 if (!Subtarget->hasFP16()) {
1401 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1402 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1405 // Strict floating-point comparisons need custom lowering.
1406 setOperationAction(ISD::STRICT_FSETCC, MVT::f16, Custom);
1407 setOperationAction(ISD::STRICT_FSETCCS, MVT::f16, Custom);
1408 setOperationAction(ISD::STRICT_FSETCC, MVT::f32, Custom);
1409 setOperationAction(ISD::STRICT_FSETCCS, MVT::f32, Custom);
1410 setOperationAction(ISD::STRICT_FSETCC, MVT::f64, Custom);
1411 setOperationAction(ISD::STRICT_FSETCCS, MVT::f64, Custom);
1414 // Use __sincos_stret if available.
1415 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1416 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1417 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1418 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1421 // FP-ARMv8 implements a lot of rounding-like FP operations.
1422 if (Subtarget->hasFPARMv8Base()) {
1423 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1424 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1425 setOperationAction(ISD::FROUND, MVT::f32, Legal);
1426 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1427 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1428 setOperationAction(ISD::FRINT, MVT::f32, Legal);
1429 setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
1430 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
1431 if (Subtarget->hasNEON()) {
1432 setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
1433 setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
1434 setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
1435 setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
1438 if (Subtarget->hasFP64()) {
1439 setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1440 setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1441 setOperationAction(ISD::FROUND, MVT::f64, Legal);
1442 setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1443 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1444 setOperationAction(ISD::FRINT, MVT::f64, Legal);
1445 setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1446 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1450 // f16 operations often need to be promoted to f32 so that library calls can be used.
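// For example, given the Promote action below, an f16 FREM is first promoted
// to f32; f32 FREM was marked Expand above, so the operation should
// ultimately become a call to the f32 fmod library routine.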
1451 if (Subtarget->hasFullFP16()) {
1452 setOperationAction(ISD::FREM, MVT::f16, Promote);
1453 setOperationAction(ISD::FCOPYSIGN, MVT::f16, Expand);
1454 setOperationAction(ISD::FSIN, MVT::f16, Promote);
1455 setOperationAction(ISD::FCOS, MVT::f16, Promote);
1456 setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
1457 setOperationAction(ISD::FPOWI, MVT::f16, Promote);
1458 setOperationAction(ISD::FPOW, MVT::f16, Promote);
1459 setOperationAction(ISD::FEXP, MVT::f16, Promote);
1460 setOperationAction(ISD::FEXP2, MVT::f16, Promote);
1461 setOperationAction(ISD::FLOG, MVT::f16, Promote);
1462 setOperationAction(ISD::FLOG10, MVT::f16, Promote);
1463 setOperationAction(ISD::FLOG2, MVT::f16, Promote);
1465 setOperationAction(ISD::FROUND, MVT::f16, Legal);
1468 if (Subtarget->hasNEON()) {
1469 // vmin and vmax aren't available in a scalar form, so we can use
1470 // a NEON instruction with an undef lane instead. This has a performance
1471 // penalty on some cores, so we don't do this unless we have been
1472 // asked to by the core tuning model.
1473 if (Subtarget->useNEONForSinglePrecisionFP()) {
1474 setOperationAction(ISD::FMINIMUM, MVT::f32, Legal);
1475 setOperationAction(ISD::FMAXIMUM, MVT::f32, Legal);
1476 setOperationAction(ISD::FMINIMUM, MVT::f16, Legal);
1477 setOperationAction(ISD::FMAXIMUM, MVT::f16, Legal);
1479 setOperationAction(ISD::FMINIMUM, MVT::v2f32, Legal);
1480 setOperationAction(ISD::FMAXIMUM, MVT::v2f32, Legal);
1481 setOperationAction(ISD::FMINIMUM, MVT::v4f32, Legal);
1482 setOperationAction(ISD::FMAXIMUM, MVT::v4f32, Legal);
1484 if (Subtarget->hasFullFP16()) {
1485 setOperationAction(ISD::FMINNUM, MVT::v4f16, Legal);
1486 setOperationAction(ISD::FMAXNUM, MVT::v4f16, Legal);
1487 setOperationAction(ISD::FMINNUM, MVT::v8f16, Legal);
1488 setOperationAction(ISD::FMAXNUM, MVT::v8f16, Legal);
1490 setOperationAction(ISD::FMINIMUM, MVT::v4f16, Legal);
1491 setOperationAction(ISD::FMAXIMUM, MVT::v4f16, Legal);
1492 setOperationAction(ISD::FMINIMUM, MVT::v8f16, Legal);
1493 setOperationAction(ISD::FMAXIMUM, MVT::v8f16, Legal);
1497 // We have target-specific dag combine patterns for the following nodes:
1498 // ARMISD::VMOVRRD - No need to call setTargetDAGCombine
1499 setTargetDAGCombine(ISD::ADD);
1500 setTargetDAGCombine(ISD::SUB);
1501 setTargetDAGCombine(ISD::MUL);
1502 setTargetDAGCombine(ISD::AND);
1503 setTargetDAGCombine(ISD::OR);
1504 setTargetDAGCombine(ISD::XOR);
1506 if (Subtarget->hasMVEIntegerOps())
1507 setTargetDAGCombine(ISD::VSELECT);
1509 if (Subtarget->hasV6Ops())
1510 setTargetDAGCombine(ISD::SRL);
1511 if (Subtarget->isThumb1Only())
1512 setTargetDAGCombine(ISD::SHL);
1514 setStackPointerRegisterToSaveRestore(ARM::SP);
1516 if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
1517 !Subtarget->hasVFP2Base() || Subtarget->hasMinSize())
1518 setSchedulingPreference(Sched::RegPressure);
1520 setSchedulingPreference(Sched::Hybrid);
1522 //// temporary - rewrite interface to use type
1523 MaxStoresPerMemset = 8;
1524 MaxStoresPerMemsetOptSize = 4;
1525 MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
1526 MaxStoresPerMemcpyOptSize = 2;
1527 MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
1528 MaxStoresPerMemmoveOptSize = 2;
1530 // On ARM arguments smaller than 4 bytes are extended, so all arguments
1531 // are at least 4 bytes aligned.
1532 setMinStackArgumentAlignment(Align(4));
1534 // Prefer likely predicted branches to selects on out-of-order cores.
1535 PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
1537 setPrefLoopAlignment(Align(1ULL << Subtarget->getPrefLoopLogAlignment()));
1539 setMinFunctionAlignment(Subtarget->isThumb() ? Align(2) : Align(4));
1541 if (Subtarget->isThumb() || Subtarget->isThumb2())
1542 setTargetDAGCombine(ISD::ABS);
1545 bool ARMTargetLowering::useSoftFloat() const {
1546 return Subtarget->useSoftFloat();
1549 // FIXME: It might make sense to define the representative register class as the
1550 // nearest super-register that has a non-null superset. For example, DPR_VFP2 is
1551 // a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
1552 // SPR's representative would be DPR_VFP2. This would work well if register
1553 // pressure tracking were modified such that a register use would increment the
1554 // pressure of the register class's representative and all of its super
1555 // classes' representatives transitively. We have not implemented this because
1556 // of the difficulty prior to coalescing of modeling operand register classes
1557 // due to the common occurrence of cross-class copies and subregister insertions and extractions.
1559 std::pair<const TargetRegisterClass *, uint8_t>
1560 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
1562 const TargetRegisterClass *RRC = nullptr;
1564 switch (VT.SimpleTy) {
1566 return TargetLowering::findRepresentativeClass(TRI, VT);
1567 // Use DPR as representative register class for all floating point
1568 // and vector types. Since there are 32 SPR registers and 32 DPR registers,
1569 // the cost is 1 for both f32 and f64.
1570 case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
1571 case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
1572 RRC = &ARM::DPRRegClass;
1573 // When NEON is used for SP, only half of the register file is available
1574 // because operations that define both SP and DP results will be constrained
1575 // to the VFP2 class (D0-D15). We currently model this constraint prior to
1576 // coalescing by double-counting the SP regs. See the FIXME above.
1577 if (Subtarget->useNEONForSinglePrecisionFP())
1580 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1581 case MVT::v4f32: case MVT::v2f64:
1582 RRC = &ARM::DPRRegClass;
1586 RRC = &ARM::DPRRegClass;
1590 RRC = &ARM::DPRRegClass;
1594 return std::make_pair(RRC, Cost);
1597 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
1598 switch ((ARMISD::NodeType)Opcode) {
1599 case ARMISD::FIRST_NUMBER: break;
1600 case ARMISD::Wrapper: return "ARMISD::Wrapper";
1601 case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
1602 case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
1603 case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
1604 case ARMISD::CALL: return "ARMISD::CALL";
1605 case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
1606 case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
1607 case ARMISD::tSECALL: return "ARMISD::tSECALL";
1608 case ARMISD::BRCOND: return "ARMISD::BRCOND";
1609 case ARMISD::BR_JT: return "ARMISD::BR_JT";
1610 case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
1611 case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
1612 case ARMISD::SERET_FLAG: return "ARMISD::SERET_FLAG";
1613 case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG";
1614 case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
1615 case ARMISD::CMP: return "ARMISD::CMP";
1616 case ARMISD::CMN: return "ARMISD::CMN";
1617 case ARMISD::CMPZ: return "ARMISD::CMPZ";
1618 case ARMISD::CMPFP: return "ARMISD::CMPFP";
1619 case ARMISD::CMPFPE: return "ARMISD::CMPFPE";
1620 case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
1621 case ARMISD::CMPFPEw0: return "ARMISD::CMPFPEw0";
1622 case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
1623 case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
1625 case ARMISD::CMOV: return "ARMISD::CMOV";
1626 case ARMISD::SUBS: return "ARMISD::SUBS";
1628 case ARMISD::SSAT: return "ARMISD::SSAT";
1629 case ARMISD::USAT: return "ARMISD::USAT";
1631 case ARMISD::ASRL: return "ARMISD::ASRL";
1632 case ARMISD::LSRL: return "ARMISD::LSRL";
1633 case ARMISD::LSLL: return "ARMISD::LSLL";
1635 case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
1636 case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
1637 case ARMISD::RRX: return "ARMISD::RRX";
1639 case ARMISD::ADDC: return "ARMISD::ADDC";
1640 case ARMISD::ADDE: return "ARMISD::ADDE";
1641 case ARMISD::SUBC: return "ARMISD::SUBC";
1642 case ARMISD::SUBE: return "ARMISD::SUBE";
1643 case ARMISD::LSLS: return "ARMISD::LSLS";
1645 case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
1646 case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";
1647 case ARMISD::VMOVhr: return "ARMISD::VMOVhr";
1648 case ARMISD::VMOVrh: return "ARMISD::VMOVrh";
1649 case ARMISD::VMOVSR: return "ARMISD::VMOVSR";
1651 case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
1652 case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
1653 case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";
1655 case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";
1657 case ARMISD::THREAD_POINTER: return "ARMISD::THREAD_POINTER";
1659 case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";
1661 case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
1663 case ARMISD::PRELOAD: return "ARMISD::PRELOAD";
1665 case ARMISD::LDRD: return "ARMISD::LDRD";
1666 case ARMISD::STRD: return "ARMISD::STRD";
1668 case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK";
1669 case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK";
1671 case ARMISD::PREDICATE_CAST: return "ARMISD::PREDICATE_CAST";
1672 case ARMISD::VECTOR_REG_CAST: return "ARMISD::VECTOR_REG_CAST";
1673 case ARMISD::VCMP: return "ARMISD::VCMP";
1674 case ARMISD::VCMPZ: return "ARMISD::VCMPZ";
1675 case ARMISD::VTST: return "ARMISD::VTST";
1677 case ARMISD::VSHLs: return "ARMISD::VSHLs";
1678 case ARMISD::VSHLu: return "ARMISD::VSHLu";
1679 case ARMISD::VSHLIMM: return "ARMISD::VSHLIMM";
1680 case ARMISD::VSHRsIMM: return "ARMISD::VSHRsIMM";
1681 case ARMISD::VSHRuIMM: return "ARMISD::VSHRuIMM";
1682 case ARMISD::VRSHRsIMM: return "ARMISD::VRSHRsIMM";
1683 case ARMISD::VRSHRuIMM: return "ARMISD::VRSHRuIMM";
1684 case ARMISD::VRSHRNIMM: return "ARMISD::VRSHRNIMM";
1685 case ARMISD::VQSHLsIMM: return "ARMISD::VQSHLsIMM";
1686 case ARMISD::VQSHLuIMM: return "ARMISD::VQSHLuIMM";
1687 case ARMISD::VQSHLsuIMM: return "ARMISD::VQSHLsuIMM";
1688 case ARMISD::VQSHRNsIMM: return "ARMISD::VQSHRNsIMM";
1689 case ARMISD::VQSHRNuIMM: return "ARMISD::VQSHRNuIMM";
1690 case ARMISD::VQSHRNsuIMM: return "ARMISD::VQSHRNsuIMM";
1691 case ARMISD::VQRSHRNsIMM: return "ARMISD::VQRSHRNsIMM";
1692 case ARMISD::VQRSHRNuIMM: return "ARMISD::VQRSHRNuIMM";
1693 case ARMISD::VQRSHRNsuIMM: return "ARMISD::VQRSHRNsuIMM";
1694 case ARMISD::VSLIIMM: return "ARMISD::VSLIIMM";
1695 case ARMISD::VSRIIMM: return "ARMISD::VSRIIMM";
1696 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
1697 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
1698 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
1699 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
1700 case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM";
1701 case ARMISD::VDUP: return "ARMISD::VDUP";
1702 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
1703 case ARMISD::VEXT: return "ARMISD::VEXT";
1704 case ARMISD::VREV64: return "ARMISD::VREV64";
1705 case ARMISD::VREV32: return "ARMISD::VREV32";
1706 case ARMISD::VREV16: return "ARMISD::VREV16";
1707 case ARMISD::VZIP: return "ARMISD::VZIP";
1708 case ARMISD::VUZP: return "ARMISD::VUZP";
1709 case ARMISD::VTRN: return "ARMISD::VTRN";
1710 case ARMISD::VTBL1: return "ARMISD::VTBL1";
1711 case ARMISD::VTBL2: return "ARMISD::VTBL2";
1712 case ARMISD::VMOVN: return "ARMISD::VMOVN";
1713 case ARMISD::VQMOVNs: return "ARMISD::VQMOVNs";
1714 case ARMISD::VQMOVNu: return "ARMISD::VQMOVNu";
1715 case ARMISD::VCVTN: return "ARMISD::VCVTN";
1716 case ARMISD::VCVTL: return "ARMISD::VCVTL";
1717 case ARMISD::VMULLs: return "ARMISD::VMULLs";
1718 case ARMISD::VMULLu: return "ARMISD::VMULLu";
1719 case ARMISD::VADDVs: return "ARMISD::VADDVs";
1720 case ARMISD::VADDVu: return "ARMISD::VADDVu";
1721 case ARMISD::VADDLVs: return "ARMISD::VADDLVs";
1722 case ARMISD::VADDLVu: return "ARMISD::VADDLVu";
1723 case ARMISD::VADDLVAs: return "ARMISD::VADDLVAs";
1724 case ARMISD::VADDLVAu: return "ARMISD::VADDLVAu";
1725 case ARMISD::VADDLVps: return "ARMISD::VADDLVps";
1726 case ARMISD::VADDLVpu: return "ARMISD::VADDLVpu";
1727 case ARMISD::VADDLVAps: return "ARMISD::VADDLVAps";
1728 case ARMISD::VADDLVApu: return "ARMISD::VADDLVApu";
1729 case ARMISD::VMLAVs: return "ARMISD::VMLAVs";
1730 case ARMISD::VMLAVu: return "ARMISD::VMLAVu";
1731 case ARMISD::VMLALVs: return "ARMISD::VMLALVs";
1732 case ARMISD::VMLALVu: return "ARMISD::VMLALVu";
1733 case ARMISD::VMLALVAs: return "ARMISD::VMLALVAs";
1734 case ARMISD::VMLALVAu: return "ARMISD::VMLALVAu";
1735 case ARMISD::UMAAL: return "ARMISD::UMAAL";
1736 case ARMISD::UMLAL: return "ARMISD::UMLAL";
1737 case ARMISD::SMLAL: return "ARMISD::SMLAL";
1738 case ARMISD::SMLALBB: return "ARMISD::SMLALBB";
1739 case ARMISD::SMLALBT: return "ARMISD::SMLALBT";
1740 case ARMISD::SMLALTB: return "ARMISD::SMLALTB";
1741 case ARMISD::SMLALTT: return "ARMISD::SMLALTT";
1742 case ARMISD::SMULWB: return "ARMISD::SMULWB";
1743 case ARMISD::SMULWT: return "ARMISD::SMULWT";
1744 case ARMISD::SMLALD: return "ARMISD::SMLALD";
1745 case ARMISD::SMLALDX: return "ARMISD::SMLALDX";
1746 case ARMISD::SMLSLD: return "ARMISD::SMLSLD";
1747 case ARMISD::SMLSLDX: return "ARMISD::SMLSLDX";
1748 case ARMISD::SMMLAR: return "ARMISD::SMMLAR";
1749 case ARMISD::SMMLSR: return "ARMISD::SMMLSR";
1750 case ARMISD::QADD16b: return "ARMISD::QADD16b";
1751 case ARMISD::QSUB16b: return "ARMISD::QSUB16b";
1752 case ARMISD::QADD8b: return "ARMISD::QADD8b";
1753 case ARMISD::QSUB8b: return "ARMISD::QSUB8b";
1754 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
1755 case ARMISD::BFI: return "ARMISD::BFI";
1756 case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
1757 case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
1758 case ARMISD::VBSL: return "ARMISD::VBSL";
1759 case ARMISD::MEMCPY: return "ARMISD::MEMCPY";
1760 case ARMISD::VLD1DUP: return "ARMISD::VLD1DUP";
1761 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
1762 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
1763 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
1764 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
1765 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
1766 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
1767 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
1768 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
1769 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
1770 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
1771 case ARMISD::VLD1DUP_UPD: return "ARMISD::VLD1DUP_UPD";
1772 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
1773 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
1774 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
1775 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
1776 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
1777 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
1778 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
1779 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
1780 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
1781 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
1782 case ARMISD::WLS: return "ARMISD::WLS";
1783 case ARMISD::LE: return "ARMISD::LE";
1784 case ARMISD::LOOP_DEC: return "ARMISD::LOOP_DEC";
1785 case ARMISD::CSINV: return "ARMISD::CSINV";
1786 case ARMISD::CSNEG: return "ARMISD::CSNEG";
1787 case ARMISD::CSINC: return "ARMISD::CSINC";
1792 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1795 return getPointerTy(DL);
1797 // MVE has a predicate register.
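// For example, a SETCC on v4i32 produces a v4i1 result here, rather than the
// usual integer vector of the same width as the operands.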
1798 if (Subtarget->hasMVEIntegerOps() &&
1799 (VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8))
1800 return MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
1801 return VT.changeVectorElementTypeToInteger();
1804 /// getRegClassFor - Return the register class that should be used for the
1805 /// specified value type.
1806 const TargetRegisterClass *
1807 ARMTargetLowering::getRegClassFor(MVT VT, bool isDivergent) const {
1809 // Map v4i64 to QQ registers but do not make the type legal. Similarly map
1810 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
1811 // load / store 4 to 8 consecutive NEON D registers, or 2 to 4 consecutive MVE Q registers.
1813 if (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) {
1814 if (VT == MVT::v4i64)
1815 return &ARM::QQPRRegClass;
1816 if (VT == MVT::v8i64)
1817 return &ARM::QQQQPRRegClass;
1819 return TargetLowering::getRegClassFor(VT);
1822 // memcpy, and other memory intrinsics, typically tries to use LDM/STM if the
1823 // source/dest is aligned and the copy size is large enough. We therefore want
1824 // to align such objects passed to memory intrinsics.
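// For example, a buffer that is only copied with @llvm.memcpy may have its
// alignment raised to 8 bytes on v6+ (non-M-class) cores so that an aligned
// LDM/STM copy can be used; the actual re-alignment is performed by the
// caller of this hook based on the MinSize/PrefAlign values returned here.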
1825 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
1826 unsigned &PrefAlign) const {
1827 if (!isa<MemIntrinsic>(CI))
1830 // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
1831 // cycle faster than 4-byte aligned LDM.
1832 PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
1836 // Create a fast isel object.
1838 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
1839 const TargetLibraryInfo *libInfo) const {
1840 return ARM::createFastISel(funcInfo, libInfo);
1843 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
1844 unsigned NumVals = N->getNumValues();
1846 return Sched::RegPressure;
1848 for (unsigned i = 0; i != NumVals; ++i) {
1849 EVT VT = N->getValueType(i);
1850 if (VT == MVT::Glue || VT == MVT::Other)
1852 if (VT.isFloatingPoint() || VT.isVector())
1856 if (!N->isMachineOpcode())
1857 return Sched::RegPressure;
1859 // Loads are scheduled for latency even if their instruction itinerary
1860 // is not available.
1861 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1862 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1864 if (MCID.getNumDefs() == 0)
1865 return Sched::RegPressure;
1866 if (!Itins->isEmpty() &&
1867 Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
1870 return Sched::RegPressure;
1873 //===----------------------------------------------------------------------===//
1875 //===----------------------------------------------------------------------===//
1877 static bool isSRL16(const SDValue &Op) {
1878 if (Op.getOpcode() != ISD::SRL)
1880 if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1881 return Const->getZExtValue() == 16;
1885 static bool isSRA16(const SDValue &Op) {
1886 if (Op.getOpcode() != ISD::SRA)
1888 if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1889 return Const->getZExtValue() == 16;
1893 static bool isSHL16(const SDValue &Op) {
1894 if (Op.getOpcode() != ISD::SHL)
1896 if (auto Const = dyn_cast<ConstantSDNode>(Op.getOperand(1)))
1897 return Const->getZExtValue() == 16;
1901 // Check for a signed 16-bit value. We special case SRA because it keeps the
1902 // logic simpler when we also have to look for SRAs that aren't sign-extending
1903 // a smaller value. Without the check, we'd need to take extra care with
1904 // checking order for some operations.
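// For a 32-bit value, having 17 known sign bits means the top 17 bits are all
// copies of the sign bit, i.e. the value fits in a signed 16-bit quantity.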
1905 static bool isS16(const SDValue &Op, SelectionDAG &DAG) {
1906 if (isSRA16(Op))
1907 return isSHL16(Op.getOperand(0));
1908 return DAG.ComputeNumSignBits(Op) == 17;
1911 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
1912 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
1914 default: llvm_unreachable("Unknown condition code!");
1915 case ISD::SETNE: return ARMCC::NE;
1916 case ISD::SETEQ: return ARMCC::EQ;
1917 case ISD::SETGT: return ARMCC::GT;
1918 case ISD::SETGE: return ARMCC::GE;
1919 case ISD::SETLT: return ARMCC::LT;
1920 case ISD::SETLE: return ARMCC::LE;
1921 case ISD::SETUGT: return ARMCC::HI;
1922 case ISD::SETUGE: return ARMCC::HS;
1923 case ISD::SETULT: return ARMCC::LO;
1924 case ISD::SETULE: return ARMCC::LS;
1928 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
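/// For example, SETUEQ ("unordered or equal") cannot be expressed as a single
/// ARM condition, so it is returned as EQ with a second condition of VS.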
1929 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
1930 ARMCC::CondCodes &CondCode2) {
1931 CondCode2 = ARMCC::AL;
1933 default: llvm_unreachable("Unknown FP condition!");
1935 case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
1937 case ISD::SETOGT: CondCode = ARMCC::GT; break;
1939 case ISD::SETOGE: CondCode = ARMCC::GE; break;
1940 case ISD::SETOLT: CondCode = ARMCC::MI; break;
1941 case ISD::SETOLE: CondCode = ARMCC::LS; break;
1942 case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
1943 case ISD::SETO: CondCode = ARMCC::VC; break;
1944 case ISD::SETUO: CondCode = ARMCC::VS; break;
1945 case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
1946 case ISD::SETUGT: CondCode = ARMCC::HI; break;
1947 case ISD::SETUGE: CondCode = ARMCC::PL; break;
1949 case ISD::SETULT: CondCode = ARMCC::LT; break;
1951 case ISD::SETULE: CondCode = ARMCC::LE; break;
1953 case ISD::SETUNE: CondCode = ARMCC::NE; break;
1957 //===----------------------------------------------------------------------===//
1958 // Calling Convention Implementation
1959 //===----------------------------------------------------------------------===//
1961 /// getEffectiveCallingConv - Get the effective calling convention, taking into
1962 /// account presence of floating point hardware and calling convention
1963 /// limitations, such as support for variadic functions.
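/// For example, a plain C call on a hard-float AAPCS target with a VFP2 base
/// (and not Thumb1-only) is normally treated as ARM_AAPCS_VFP, but falls back
/// to ARM_AAPCS when the call is variadic.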
1965 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
1966 bool isVarArg) const {
1969 report_fatal_error("Unsupported calling convention");
1970 case CallingConv::ARM_AAPCS:
1971 case CallingConv::ARM_APCS:
1972 case CallingConv::GHC:
1973 case CallingConv::CFGuard_Check:
1975 case CallingConv::PreserveMost:
1976 return CallingConv::PreserveMost;
1977 case CallingConv::ARM_AAPCS_VFP:
1978 case CallingConv::Swift:
1979 return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
1980 case CallingConv::C:
1981 if (!Subtarget->isAAPCS_ABI())
1982 return CallingConv::ARM_APCS;
1983 else if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() &&
1984 getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
1986 return CallingConv::ARM_AAPCS_VFP;
1988 return CallingConv::ARM_AAPCS;
1989 case CallingConv::Fast:
1990 case CallingConv::CXX_FAST_TLS:
1991 if (!Subtarget->isAAPCS_ABI()) {
1992 if (Subtarget->hasVFP2Base() && !Subtarget->isThumb1Only() && !isVarArg)
1993 return CallingConv::Fast;
1994 return CallingConv::ARM_APCS;
1995 } else if (Subtarget->hasVFP2Base() &&
1996 !Subtarget->isThumb1Only() && !isVarArg)
1997 return CallingConv::ARM_AAPCS_VFP;
1999 return CallingConv::ARM_AAPCS;
2003 CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
2004 bool isVarArg) const {
2005 return CCAssignFnForNode(CC, false, isVarArg);
2008 CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
2009 bool isVarArg) const {
2010 return CCAssignFnForNode(CC, true, isVarArg);
2013 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
2014 /// CallingConvention.
2015 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
2017 bool isVarArg) const {
2018 switch (getEffectiveCallingConv(CC, isVarArg)) {
2020 report_fatal_error("Unsupported calling convention");
2021 case CallingConv::ARM_APCS:
2022 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
2023 case CallingConv::ARM_AAPCS:
2024 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
2025 case CallingConv::ARM_AAPCS_VFP:
2026 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
2027 case CallingConv::Fast:
2028 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
2029 case CallingConv::GHC:
2030 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
2031 case CallingConv::PreserveMost:
2032 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
2033 case CallingConv::CFGuard_Check:
2034 return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check);
2038 SDValue ARMTargetLowering::MoveToHPR(const SDLoc &dl, SelectionDAG &DAG,
2039 MVT LocVT, MVT ValVT, SDValue Val) const {
2040 Val = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocVT.getSizeInBits()),
2042 if (Subtarget->hasFullFP16()) {
2043 Val = DAG.getNode(ARMISD::VMOVhr, dl, ValVT, Val);
2045 Val = DAG.getNode(ISD::TRUNCATE, dl,
2046 MVT::getIntegerVT(ValVT.getSizeInBits()), Val);
2047 Val = DAG.getNode(ISD::BITCAST, dl, ValVT, Val);
2052 SDValue ARMTargetLowering::MoveFromHPR(const SDLoc &dl, SelectionDAG &DAG,
2053 MVT LocVT, MVT ValVT,
2054 SDValue Val) const {
2055 if (Subtarget->hasFullFP16()) {
2056 Val = DAG.getNode(ARMISD::VMOVrh, dl,
2057 MVT::getIntegerVT(LocVT.getSizeInBits()), Val);
2059 Val = DAG.getNode(ISD::BITCAST, dl,
2060 MVT::getIntegerVT(ValVT.getSizeInBits()), Val);
2061 Val = DAG.getNode(ISD::ZERO_EXTEND, dl,
2062 MVT::getIntegerVT(LocVT.getSizeInBits()), Val);
2064 return DAG.getNode(ISD::BITCAST, dl, LocVT, Val);
2067 /// LowerCallResult - Lower the result values of a call into the
2068 /// appropriate copies out of appropriate physical registers.
2069 SDValue ARMTargetLowering::LowerCallResult(
2070 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
2071 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2072 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
2073 SDValue ThisVal) const {
2074 // Assign locations to each value returned by this call.
2075 SmallVector<CCValAssign, 16> RVLocs;
2076 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2078 CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));
2080 // Copy all of the result registers out of their specified physreg.
2081 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2082 CCValAssign VA = RVLocs[i];
2084 // Pass 'this' value directly from the argument to return value, to avoid
2085 // reg unit interference
2086 if (i == 0 && isThisReturn) {
2087 assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
2088 "unexpected return calling convention register assignment");
2089 InVals.push_back(ThisVal);
2094 if (VA.needsCustom() &&
2095 (VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2f64)) {
2096 // Handle f64 or half of a v2f64.
2097 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
2099 Chain = Lo.getValue(1);
2100 InFlag = Lo.getValue(2);
2101 VA = RVLocs[++i]; // skip ahead to next loc
2102 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
2104 Chain = Hi.getValue(1);
2105 InFlag = Hi.getValue(2);
2106 if (!Subtarget->isLittle())
2108 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
2110 if (VA.getLocVT() == MVT::v2f64) {
2111 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
2112 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
2113 DAG.getConstant(0, dl, MVT::i32));
2115 VA = RVLocs[++i]; // skip ahead to next loc
2116 Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
2117 Chain = Lo.getValue(1);
2118 InFlag = Lo.getValue(2);
2119 VA = RVLocs[++i]; // skip ahead to next loc
2120 Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
2121 Chain = Hi.getValue(1);
2122 InFlag = Hi.getValue(2);
2123 if (!Subtarget->isLittle())
2125 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
2126 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
2127 DAG.getConstant(1, dl, MVT::i32));
2130 Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
2132 Chain = Val.getValue(1);
2133 InFlag = Val.getValue(2);
2136 switch (VA.getLocInfo()) {
2137 default: llvm_unreachable("Unknown loc info!");
2138 case CCValAssign::Full: break;
2139 case CCValAssign::BCvt:
2140 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
2144 // f16 arguments have their size extended to 4 bytes and passed as if they
2145 // had been copied to the LSBs of a 32-bit register.
2146 // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI)
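// For example, under the soft-float ABI an f16 return value arrives in the
// low 16 bits of r0 as an i32; MoveToHPR below converts it back to an f16
// value of the expected type.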
2147 if (VA.needsCustom() &&
2148 (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
2149 Val = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Val);
2151 InVals.push_back(Val);
2157 /// LowerMemOpCallTo - Store the argument to the stack.
2158 SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
2159 SDValue Arg, const SDLoc &dl,
2161 const CCValAssign &VA,
2162 ISD::ArgFlagsTy Flags) const {
2163 unsigned LocMemOffset = VA.getLocMemOffset();
2164 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
2165 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
2167 return DAG.getStore(
2168 Chain, dl, Arg, PtrOff,
2169 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
2172 void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
2173 SDValue Chain, SDValue &Arg,
2174 RegsToPassVector &RegsToPass,
2175 CCValAssign &VA, CCValAssign &NextVA,
2177 SmallVectorImpl<SDValue> &MemOpChains,
2178 ISD::ArgFlagsTy Flags) const {
2179 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
2180 DAG.getVTList(MVT::i32, MVT::i32), Arg);
2181 unsigned id = Subtarget->isLittle() ? 0 : 1;
2182 RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
2184 if (NextVA.isRegLoc())
2185 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
2187 assert(NextVA.isMemLoc());
2188 if (!StackPtr.getNode())
2189 StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
2190 getPointerTy(DAG.getDataLayout()));
2192 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
2198 /// LowerCall - Lower a call into a callseq_start <-
2199 /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter nodes.
2202 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2203 SmallVectorImpl<SDValue> &InVals) const {
2204 SelectionDAG &DAG = CLI.DAG;
2206 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2207 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2208 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2209 SDValue Chain = CLI.Chain;
2210 SDValue Callee = CLI.Callee;
2211 bool &isTailCall = CLI.IsTailCall;
2212 CallingConv::ID CallConv = CLI.CallConv;
2213 bool doesNotRet = CLI.DoesNotReturn;
2214 bool isVarArg = CLI.IsVarArg;
2216 MachineFunction &MF = DAG.getMachineFunction();
2217 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2218 MachineFunction::CallSiteInfo CSInfo;
2219 bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
2220 bool isThisReturn = false;
2221 bool isCmseNSCall = false;
2222 bool PreferIndirect = false;
2224 // Determine whether this is a non-secure function call.
2225 if (CLI.CB && CLI.CB->getAttributes().hasFnAttribute("cmse_nonsecure_call"))
2226 isCmseNSCall = true;
2228 // Disable tail calls if they're not supported.
2229 if (!Subtarget->supportsTailCall())
2232 // For both the non-secure calls and the returns from a CMSE entry function,
2233 // the function needs to do some extra work after the call, or before the
2234 // return, respectively; thus it cannot end with a tail call.
2235 if (isCmseNSCall || AFI->isCmseNSEntryFunction())
2238 if (isa<GlobalAddressSDNode>(Callee)) {
2239 // If we're optimizing for minimum size and the function is called three or
2240 // more times in this block, we can improve codesize by calling indirectly
2241 // as BLXr has a 16-bit encoding.
2242 auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
2244 auto *BB = CLI.CB->getParent();
2245 PreferIndirect = Subtarget->isThumb() && Subtarget->hasMinSize() &&
2246 count_if(GV->users(), [&BB](const User *U) {
2247 return isa<Instruction>(U) &&
2248 cast<Instruction>(U)->getParent() == BB;
2253 // Check if it's really possible to do a tail call.
2254 isTailCall = IsEligibleForTailCallOptimization(
2255 Callee, CallConv, isVarArg, isStructRet,
2256 MF.getFunction().hasStructRetAttr(), Outs, OutVals, Ins, DAG,
2258 if (!isTailCall && CLI.CB && CLI.CB->isMustTailCall())
2259 report_fatal_error("failed to perform tail call elimination on a call "
2260 "site marked musttail");
2261 // We don't support GuaranteedTailCallOpt for ARM, only automatically
2262 // detected sibcalls.
2267 // Analyze operands of the call, assigning locations to each operand.
2268 SmallVector<CCValAssign, 16> ArgLocs;
2269 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
2271 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));
2273 // Get a count of how many bytes are to be pushed on the stack.
2274 unsigned NumBytes = CCInfo.getNextStackOffset();
2277 // For tail calls, memory operands are available in our caller's stack.
2280 // Adjust the stack pointer for the new arguments...
2281 // These operations are automatically eliminated by the prolog/epilog pass
2282 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, dl);
2286 DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));
2288 RegsToPassVector RegsToPass;
2289 SmallVector<SDValue, 8> MemOpChains;
2291 // Walk the register/memloc assignments, inserting copies/loads. In the case
2292 // of tail call optimization, arguments are handled later.
2293 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2295 ++i, ++realArgIdx) {
2296 CCValAssign &VA = ArgLocs[i];
2297 SDValue Arg = OutVals[realArgIdx];
2298 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2299 bool isByVal = Flags.isByVal();
2301 // Promote the value if needed.
2302 switch (VA.getLocInfo()) {
2303 default: llvm_unreachable("Unknown loc info!");
2304 case CCValAssign::Full: break;
2305 case CCValAssign::SExt:
2306 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
2308 case CCValAssign::ZExt:
2309 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
2311 case CCValAssign::AExt:
2312 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
2314 case CCValAssign::BCvt:
2315 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2319 // f16 arguments have their size extended to 4 bytes and passed as if they
2320 // had been copied to the LSBs of a 32-bit register.
2321 // For that, it's passed extended to i32 (soft ABI) or to f32 (hard ABI)
2322 if (VA.needsCustom() &&
2323 (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16)) {
2324 Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg);
2326 // f16 arguments could have been extended prior to argument lowering.
2327 // Mask such arguments if this is a CMSE non-secure call.
2328 auto ArgVT = Outs[realArgIdx].ArgVT;
2329 if (isCmseNSCall && (ArgVT == MVT::f16)) {
2330 auto LocBits = VA.getLocVT().getSizeInBits();
2331 auto MaskValue = APInt::getLowBitsSet(LocBits, ArgVT.getSizeInBits());
2333 DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits));
2334 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg);
2335 Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask);
2336 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2340 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
2341 if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
2342 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2343 DAG.getConstant(0, dl, MVT::i32));
2344 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2345 DAG.getConstant(1, dl, MVT::i32));
2347 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass, VA, ArgLocs[++i],
2348 StackPtr, MemOpChains, Flags);
2350 VA = ArgLocs[++i]; // skip ahead to next loc
2351 if (VA.isRegLoc()) {
2352 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass, VA, ArgLocs[++i],
2353 StackPtr, MemOpChains, Flags);
2355 assert(VA.isMemLoc());
2357 MemOpChains.push_back(
2358 LowerMemOpCallTo(Chain, StackPtr, Op1, dl, DAG, VA, Flags));
2360 } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) {
2361 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
2362 StackPtr, MemOpChains, Flags);
2363 } else if (VA.isRegLoc()) {
2364 if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
2365 Outs[0].VT == MVT::i32) {
2366 assert(VA.getLocVT() == MVT::i32 &&
2367 "unexpected calling convention register assignment");
2368 assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
2369 "unexpected use of 'returned'");
2370 isThisReturn = true;
2372 const TargetOptions &Options = DAG.getTarget().Options;
2373 if (Options.EmitCallSiteInfo)
2374 CSInfo.emplace_back(VA.getLocReg(), i);
2375 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2376 } else if (isByVal) {
2377 assert(VA.isMemLoc());
2378 unsigned offset = 0;
2380 // True if this byval aggregate will be split between registers and the stack.
2382 unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
2383 unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
2385 if (CurByValIdx < ByValArgsCount) {
2387 unsigned RegBegin, RegEnd;
2388 CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
2391 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
2393 for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
2394 SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
2395 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
2397 DAG.getLoad(PtrVT, dl, Chain, AddArg, MachinePointerInfo(),
2398 DAG.InferPtrAlign(AddArg));
2399 MemOpChains.push_back(Load.getValue(1));
2400 RegsToPass.push_back(std::make_pair(j, Load));
2403 // If the parameter size exceeds the register area, the "offset" value
2404 // helps us calculate the stack slot for the remaining part properly.
2405 offset = RegEnd - RegBegin;
2407 CCInfo.nextInRegsParam();
2410 if (Flags.getByValSize() > 4*offset) {
2411 auto PtrVT = getPointerTy(DAG.getDataLayout());
2412 unsigned LocMemOffset = VA.getLocMemOffset();
2413 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
2414 SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
2415 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
2416 SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
2417 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
2420 DAG.getConstant(Flags.getNonZeroByValAlign().value(), dl, MVT::i32);
2422 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
2423 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
2424 MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
2427 } else if (!isTailCall) {
2428 assert(VA.isMemLoc());
2430 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2431 dl, DAG, VA, Flags));
2435 if (!MemOpChains.empty())
2436 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2438 // Build a sequence of copy-to-reg nodes chained together with token chain
2439 // and flag operands which copy the outgoing args into the appropriate regs.
2441 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2442 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2443 RegsToPass[i].second, InFlag);
2444 InFlag = Chain.getValue(1);
2447 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
2448 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
2449 // node so that legalize doesn't hack it.
2450 bool isDirect = false;
2452 const TargetMachine &TM = getTargetMachine();
2453 const Module *Mod = MF.getFunction().getParent();
2454 const GlobalValue *GV = nullptr;
2455 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2456 GV = G->getGlobal();
2458 !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();
2460 bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
2461 bool isLocalARMFunc = false;
2462 auto PtrVt = getPointerTy(DAG.getDataLayout());
2464 if (Subtarget->genLongCalls()) {
2465 assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
2466 "long-calls codegen is not position independent!");
2467 // Handle a global address or an external symbol. If it's not one of
2468 // those, the target's already in a register, so we don't need to do anything extra.
2470 if (isa<GlobalAddressSDNode>(Callee)) {
2471 // Create a constant pool entry for the callee address
2472 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2473 ARMConstantPoolValue *CPV =
2474 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
2476 // Get the address of the callee into a register
2477 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
2478 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2479 Callee = DAG.getLoad(
2480 PtrVt, dl, DAG.getEntryNode(), CPAddr,
2481 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2482 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
2483 const char *Sym = S->getSymbol();
2485 // Create a constant pool entry for the callee address
2486 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2487 ARMConstantPoolValue *CPV =
2488 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2489 ARMPCLabelIndex, 0);
2490 // Get the address of the callee into a register
2491 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
2492 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2493 Callee = DAG.getLoad(
2494 PtrVt, dl, DAG.getEntryNode(), CPAddr,
2495 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2497 } else if (isa<GlobalAddressSDNode>(Callee)) {
2498 if (!PreferIndirect) {
2500 bool isDef = GV->isStrongDefinitionForLinker();
2502 // ARM call to a local ARM function is predicable.
2503 isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
2504 // tBX takes a register source operand.
2505 if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2506 assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
2507 Callee = DAG.getNode(
2508 ARMISD::WrapperPIC, dl, PtrVt,
2509 DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
2510 Callee = DAG.getLoad(
2511 PtrVt, dl, DAG.getEntryNode(), Callee,
2512 MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2513 /* Alignment = */ 0, MachineMemOperand::MODereferenceable |
2514 MachineMemOperand::MOInvariant);
2515 } else if (Subtarget->isTargetCOFF()) {
2516 assert(Subtarget->isTargetWindows() &&
2517 "Windows is the only supported COFF target");
2518 unsigned TargetFlags = ARMII::MO_NO_FLAG;
2519 if (GV->hasDLLImportStorageClass())
2520 TargetFlags = ARMII::MO_DLLIMPORT;
2521 else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
2522 TargetFlags = ARMII::MO_COFFSTUB;
2523 Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*offset=*/0,
2525 if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
2527 DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
2528 DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
2529 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2531 Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
2534 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2536 // tBX takes a register source operand.
2537 const char *Sym = S->getSymbol();
2538 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2539 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2540 ARMConstantPoolValue *CPV =
2541 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2542 ARMPCLabelIndex, 4);
2543 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, Align(4));
2544 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2545 Callee = DAG.getLoad(
2546 PtrVt, dl, DAG.getEntryNode(), CPAddr,
2547 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2548 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2549 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
2551 Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
2556 assert(!isARMFunc && !isDirect &&
2557 "Cannot handle call to ARM function or direct call");
2559 DiagnosticInfoUnsupported Diag(DAG.getMachineFunction().getFunction(),
2560 "call to non-secure function would "
2561 "require passing arguments on stack",
2563 DAG.getContext()->diagnose(Diag);
2566 DiagnosticInfoUnsupported Diag(
2567 DAG.getMachineFunction().getFunction(),
2568 "call to non-secure function would return value through pointer",
2570 DAG.getContext()->diagnose(Diag);
2574 // FIXME: handle tail calls differently.
2576 if (Subtarget->isThumb()) {
2578 CallOpc = ARMISD::tSECALL;
2579 else if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
2580 CallOpc = ARMISD::CALL_NOLINK;
2582 CallOpc = ARMISD::CALL;
2584 if (!isDirect && !Subtarget->hasV5TOps())
2585 CallOpc = ARMISD::CALL_NOLINK;
2586 else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
2587 // Emit regular call when code size is the priority
2588 !Subtarget->hasMinSize())
2589 // "mov lr, pc; b _foo" to avoid confusing the RSP
2590 CallOpc = ARMISD::CALL_NOLINK;
2592 CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
2595 std::vector<SDValue> Ops;
2596 Ops.push_back(Chain);
2597 Ops.push_back(Callee);
2599 // Add argument registers to the end of the list so that they are known live into the call.
2601 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2602 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2603 RegsToPass[i].second.getValueType()));
2605 // Add a register mask operand representing the call-preserved registers.
2607 const uint32_t *Mask;
2608 const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
2610 // For 'this' returns, use the R0-preserving mask if applicable
2611 Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
2613 // Set isThisReturn to false if the calling convention is not one that
2614 // allows 'returned' to be modeled in this way, so LowerCallResult does
2615 // not try to pass 'this' straight through
2616 isThisReturn = false;
2617 Mask = ARI->getCallPreservedMask(MF, CallConv);
2620 Mask = ARI->getCallPreservedMask(MF, CallConv);
2622 assert(Mask && "Missing call preserved mask for calling convention");
2623 Ops.push_back(DAG.getRegisterMask(Mask));
2626 if (InFlag.getNode())
2627 Ops.push_back(InFlag);
2629 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2631 MF.getFrameInfo().setHasTailCall();
2632 SDValue Ret = DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
2633 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
2637 // Returns a chain and a flag for retval copy to use.
2638 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
2639 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
2640 InFlag = Chain.getValue(1);
2641 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
2643 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
2644 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
2646 InFlag = Chain.getValue(1);
2648 // Handle result values, copying them out of physregs into vregs that we return.
2650 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
2651 InVals, isThisReturn,
2652 isThisReturn ? OutVals[0] : SDValue());
2655 /// HandleByVal - Every parameter *after* a byval parameter is passed
2656 /// on the stack. Remember the next parameter register to allocate,
2657 /// and then confiscate the rest of the parameter registers to ensure this.
2659 void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
2660 Align Alignment) const {
2661 // Byval (as with any stack) slots are always at least 4 byte aligned.
2662 Alignment = std::max(Alignment, Align(4));
2664 unsigned Reg = State->AllocateReg(GPRArgRegs);
2668 unsigned AlignInRegs = Alignment.value() / 4;
2669 unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
2670 for (unsigned i = 0; i < Waste; ++i)
2671 Reg = State->AllocateReg(GPRArgRegs);
2676 unsigned Excess = 4 * (ARM::R4 - Reg);
2678 // Special case when NSAA != SP and the parameter size is greater than the
2679 // size of all remaining GPR registers. In that case we cannot split the
2680 // parameter; we must send it entirely to the stack. We also must set the
2681 // NCRN to R4, wasting all remaining registers.
2682 const unsigned NSAAOffset = State->getNextStackOffset();
2683 if (NSAAOffset != 0 && Size > Excess) {
2684 while (State->AllocateReg(GPRArgRegs))
2689 // The first register for a byval parameter is the first register that
2690 // wasn't allocated before this call, so it would be "reg".
2691 // If the parameter is small enough to fit in the range [reg, r4), then
2692 // the end (one past the last) register would be reg + param-size-in-regs;
2693 // otherwise the parameter is split between registers and the stack, and
2694 // the end register would be r4 in that case.
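// For example (a sketch assuming 4-byte alignment and NSAA == SP): a 12-byte
// byval whose first free register is r2 gets r2-r3, the remaining 4 bytes go
// on the stack, and Size is reduced to 4 below.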
2695 unsigned ByValRegBegin = Reg;
2696 unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
2697 State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
2698 // Note that the first register was already allocated at the beginning of
2699 // this function, so allocate only the remaining registers we need.
2700 for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
2701 State->AllocateReg(GPRArgRegs);
2702 // A byval parameter that is split between registers and memory needs its
2703 // size truncated here.
2704 // In the case where the entire structure fits in registers, we set the
2705 // size in memory to zero.
2706 Size = std::max<int>(Size - Excess, 0);
2709 /// MatchingStackOffset - Return true if the given stack call argument is
2710 /// already available in the same position (relatively) of the caller's
2711 /// incoming argument stack.
2713 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2714 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
2715 const TargetInstrInfo *TII) {
2716 unsigned Bytes = Arg.getValueSizeInBits() / 8;
2717 int FI = std::numeric_limits<int>::max();
2718 if (Arg.getOpcode() == ISD::CopyFromReg) {
2719 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2720 if (!Register::isVirtualRegister(VR))
2722 MachineInstr *Def = MRI->getVRegDef(VR);
2725 if (!Flags.isByVal()) {
2726 if (!TII->isLoadFromStackSlot(*Def, FI))
2731 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2732 if (Flags.isByVal())
2733 // ByVal argument is passed in as a pointer but it's now being
2734 // dereferenced. e.g.
2735 // define @foo(%struct.X* %A) {
2736 // tail call @bar(%struct.X* byval %A)
2739 SDValue Ptr = Ld->getBasePtr();
2740 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
2743 FI = FINode->getIndex();
2747 assert(FI != std::numeric_limits<int>::max());
2748 if (!MFI.isFixedObjectIndex(FI))
2750 return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
2753 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2754 /// for tail call optimization. Targets which want to do tail call
2755 /// optimization should implement this function.
2756 bool ARMTargetLowering::IsEligibleForTailCallOptimization(
2757 SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
2758 bool isCalleeStructRet, bool isCallerStructRet,
2759 const SmallVectorImpl<ISD::OutputArg> &Outs,
2760 const SmallVectorImpl<SDValue> &OutVals,
2761 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
2762 const bool isIndirect) const {
2763 MachineFunction &MF = DAG.getMachineFunction();
2764 const Function &CallerF = MF.getFunction();
2765 CallingConv::ID CallerCC = CallerF.getCallingConv();
2767 assert(Subtarget->supportsTailCall());
2769 // Indirect tail calls cannot be optimized for Thumb1 if the args
2770 // to the call take up r0-r3. The reason is that there are no legal registers
2771 // left to hold the pointer to the function to be called.
2772 if (Subtarget->isThumb1Only() && Outs.size() >= 4 &&
2773 (!isa<GlobalAddressSDNode>(Callee.getNode()) || isIndirect))
2776 // Look for obvious safe cases to perform tail call optimization that do not
2777 // require ABI changes. This is what gcc calls sibcall.
2779 // Exception-handling functions need a special set of instructions to indicate
2780 // a return to the hardware. Tail-calling another function would probably break this.
2782 if (CallerF.hasFnAttribute("interrupt"))
2785 // Also avoid sibcall optimization if either caller or callee uses struct
2786 // return semantics.
2787 if (isCalleeStructRet || isCallerStructRet)
2790 // Externally-defined functions with weak linkage should not be
2791 // tail-called on ARM when the OS does not support dynamic
2792 // pre-emption of symbols, as the AAELF spec requires normal calls
2793 // to undefined weak functions to be replaced with a NOP or jump to the
2794 // next instruction. The behaviour of branch instructions in this
2795 // situation (as used for tail calls) is implementation-defined, so we
2796 // cannot rely on the linker replacing the tail call with a return.
2797 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2798 const GlobalValue *GV = G->getGlobal();
2799 const Triple &TT = getTargetMachine().getTargetTriple();
2800 if (GV->hasExternalWeakLinkage() &&
2801 (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
2805 // Check that the call results are passed in the same way.
2806 LLVMContext &C = *DAG.getContext();
2807 if (!CCState::resultsCompatible(
2808 getEffectiveCallingConv(CalleeCC, isVarArg),
2809 getEffectiveCallingConv(CallerCC, CallerF.isVarArg()), MF, C, Ins,
2810 CCAssignFnForReturn(CalleeCC, isVarArg),
2811 CCAssignFnForReturn(CallerCC, CallerF.isVarArg())))
2813 // The callee has to preserve all registers the caller needs to preserve.
2814 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2815 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2816 if (CalleeCC != CallerCC) {
2817 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2818 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2822 // If Caller's vararg or byval argument has been split between registers and
2823 // stack, do not perform tail call, since part of the argument is in caller's
2824 // local frame.
2825 const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
2826 if (AFI_Caller->getArgRegsSaveSize())
2829 // If the callee takes no arguments then go on to check the results of the
2830 // call.
2831 if (!Outs.empty()) {
2832 // Check if stack adjustment is needed. For now, do not do this if any
2833 // argument is passed on the stack.
2834 SmallVector<CCValAssign, 16> ArgLocs;
2835 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
2836 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
2837 if (CCInfo.getNextStackOffset()) {
2838 // Check if the arguments are already laid out in the same way as
2839 // the caller's fixed stack objects.
2840 MachineFrameInfo &MFI = MF.getFrameInfo();
2841 const MachineRegisterInfo *MRI = &MF.getRegInfo();
2842 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2843 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2845 ++i, ++realArgIdx) {
2846 CCValAssign &VA = ArgLocs[i];
2847 EVT RegVT = VA.getLocVT();
2848 SDValue Arg = OutVals[realArgIdx];
2849 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2850 if (VA.getLocInfo() == CCValAssign::Indirect)
2852 if (VA.needsCustom() && (RegVT == MVT::f64 || RegVT == MVT::v2f64)) {
2853 // f64 and vector types are split into multiple registers or
2854 // register/stack-slot combinations. The types will not match
2855 // the registers; give up on memory f64 refs until we figure
2856 // out what to do about this.
2859 if (!ArgLocs[++i].isRegLoc())
2861 if (RegVT == MVT::v2f64) {
2862 if (!ArgLocs[++i].isRegLoc())
2864 if (!ArgLocs[++i].isRegLoc())
2867 } else if (!VA.isRegLoc()) {
2868 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
2875 const MachineRegisterInfo &MRI = MF.getRegInfo();
2876 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
2884 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2885 MachineFunction &MF, bool isVarArg,
2886 const SmallVectorImpl<ISD::OutputArg> &Outs,
2887 LLVMContext &Context) const {
2888 SmallVector<CCValAssign, 16> RVLocs;
2889 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2890 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2893 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
2894 const SDLoc &DL, SelectionDAG &DAG) {
2895 const MachineFunction &MF = DAG.getMachineFunction();
2896 const Function &F = MF.getFunction();
2898 StringRef IntKind = F.getFnAttribute("interrupt").getValueAsString();
2900 // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
2901 // version of the "preferred return address". These offsets affect the return
2902 // instruction if this is a return from PL1 without hypervisor extensions.
2903 // IRQ/FIQ: +4 "subs pc, lr, #4"
2904 // SWI: 0 "subs pc, lr, #0"
2905 // ABORT: +4 "subs pc, lr, #4"
2906 // UNDEF: +4/+2 "subs pc, lr, #0"
2907 // UNDEF varies depending on whether the exception came from ARM or Thumb
2908 // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
2911 if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
2914 else if (IntKind == "SWI" || IntKind == "UNDEF")
2917 report_fatal_error("Unsupported interrupt attribute. If present, value "
2918 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2920 RetOps.insert(RetOps.begin() + 1,
2921 DAG.getConstant(LROffset, DL, MVT::i32, false));
2923 return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
2927 ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2929 const SmallVectorImpl<ISD::OutputArg> &Outs,
2930 const SmallVectorImpl<SDValue> &OutVals,
2931 const SDLoc &dl, SelectionDAG &DAG) const {
2932 // CCValAssign - represent the assignment of the return value to a location.
2933 SmallVector<CCValAssign, 16> RVLocs;
2935 // CCState - Info about the registers and stack slots.
2936 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2939 // Analyze outgoing return values.
2940 CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2943 SmallVector<SDValue, 4> RetOps;
2944 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2945 bool isLittleEndian = Subtarget->isLittle();
2947 MachineFunction &MF = DAG.getMachineFunction();
2948 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2949 AFI->setReturnRegsCount(RVLocs.size());
2951 // Report an error if a CMSE entry function returns a structure through the first ptr arg.
2952 if (AFI->isCmseNSEntryFunction() && MF.getFunction().hasStructRetAttr()) {
2953 // Note: using an empty SDLoc(), as the first line of the function is a
2954 // better place to report than the last line.
2955 DiagnosticInfoUnsupported Diag(
2956 DAG.getMachineFunction().getFunction(),
2957 "secure entry function would return value through pointer",
2958 SDLoc().getDebugLoc());
2959 DAG.getContext()->diagnose(Diag);
2962 // Copy the result values into the output registers.
2963 for (unsigned i = 0, realRVLocIdx = 0;
2965 ++i, ++realRVLocIdx) {
2966 CCValAssign &VA = RVLocs[i];
2967 assert(VA.isRegLoc() && "Can only return in registers!");
2969 SDValue Arg = OutVals[realRVLocIdx];
2970 bool ReturnF16 = false;
2972 if (Subtarget->hasFullFP16() && Subtarget->isTargetHardFloat()) {
2973 // Half-precision return values can be returned like this:
2975 // t11: f16 = fadd ...
2976 // t12: i16 = bitcast t11
2977 // t13: i32 = zero_extend t12
2978 // t14: f32 = bitcast t13 <~~~~~~~ Arg
2980 // to avoid code generation for bitcasts, we simply set Arg to the node
2981 // that produces the f16 value, t11 in this case.
2983 if (Arg.getValueType() == MVT::f32 && Arg.getOpcode() == ISD::BITCAST) {
2984 SDValue ZE = Arg.getOperand(0);
2985 if (ZE.getOpcode() == ISD::ZERO_EXTEND && ZE.getValueType() == MVT::i32) {
2986 SDValue BC = ZE.getOperand(0);
2987 if (BC.getOpcode() == ISD::BITCAST && BC.getValueType() == MVT::i16) {
2988 Arg = BC.getOperand(0);
2995 switch (VA.getLocInfo()) {
2996 default: llvm_unreachable("Unknown loc info!");
2997 case CCValAssign::Full: break;
2998 case CCValAssign::BCvt:
3000 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
3004 // Mask f16 return values if this is a CMSE nonsecure entry function.
3005 auto RetVT = Outs[realRVLocIdx].ArgVT;
3006 if (AFI->isCmseNSEntryFunction() && (RetVT == MVT::f16)) {
3007 if (VA.needsCustom() && VA.getValVT() == MVT::f16) {
3008 Arg = MoveFromHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), Arg);
3010 auto LocBits = VA.getLocVT().getSizeInBits();
3011 auto MaskValue = APInt::getLowBitsSet(LocBits, RetVT.getSizeInBits());
3013 DAG.getConstant(MaskValue, dl, MVT::getIntegerVT(LocBits));
3014 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::getIntegerVT(LocBits), Arg);
3015 Arg = DAG.getNode(ISD::AND, dl, MVT::getIntegerVT(LocBits), Arg, Mask);
3016 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
3020 if (VA.needsCustom() &&
3021 (VA.getLocVT() == MVT::v2f64 || VA.getLocVT() == MVT::f64)) {
3022 if (VA.getLocVT() == MVT::v2f64) {
3023 // Extract the first half and return it in two registers.
3024 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
3025 DAG.getConstant(0, dl, MVT::i32));
3026 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
3027 DAG.getVTList(MVT::i32, MVT::i32), Half);
3030 DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3031 HalfGPRs.getValue(isLittleEndian ? 0 : 1), Flag);
3032 Flag = Chain.getValue(1);
3033 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3034 VA = RVLocs[++i]; // skip ahead to next loc
3036 DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3037 HalfGPRs.getValue(isLittleEndian ? 1 : 0), Flag);
3038 Flag = Chain.getValue(1);
3039 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3040 VA = RVLocs[++i]; // skip ahead to next loc
3042 // Extract the 2nd half and fall through to handle it as an f64 value.
3043 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
3044 DAG.getConstant(1, dl, MVT::i32));
3046 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
3047 // available.
3048 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
3049 DAG.getVTList(MVT::i32, MVT::i32), Arg);
3050 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3051 fmrrd.getValue(isLittleEndian ? 0 : 1), Flag);
3052 Flag = Chain.getValue(1);
3053 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
3054 VA = RVLocs[++i]; // skip ahead to next loc
3055 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
3056 fmrrd.getValue(isLittleEndian ? 1 : 0), Flag);
3058 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
3060 // Guarantee that all emitted copies are glued together so that nothing
3061 // can be scheduled between them.
3062 Flag = Chain.getValue(1);
3063 RetOps.push_back(DAG.getRegister(
3064 VA.getLocReg(), ReturnF16 ? Arg.getValueType() : VA.getLocVT()));
3066 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
3067 const MCPhysReg *I =
3068 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
3071 if (ARM::GPRRegClass.contains(*I))
3072 RetOps.push_back(DAG.getRegister(*I, MVT::i32));
3073 else if (ARM::DPRRegClass.contains(*I))
3074 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
3076 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
3080 // Update chain and glue.
3083 RetOps.push_back(Flag);
3085 // CPUs which aren't M-class use a special sequence to return from
3086 // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
3087 // though we use "subs pc, lr, #N").
3089 // M-class CPUs actually use a normal return sequence with a special
3090 // (hardware-provided) value in LR, so the normal code path works.
3091 if (DAG.getMachineFunction().getFunction().hasFnAttribute("interrupt") &&
3092 !Subtarget->isMClass()) {
3093 if (Subtarget->isThumb1Only())
3094 report_fatal_error("interrupt attribute is not supported in Thumb1");
3095 return LowerInterruptReturn(RetOps, dl, DAG);
3098 ARMISD::NodeType RetNode = AFI->isCmseNSEntryFunction() ? ARMISD::SERET_FLAG :
3100 return DAG.getNode(RetNode, dl, MVT::Other, RetOps);
3103 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
3104 if (N->getNumValues() != 1)
3106 if (!N->hasNUsesOfValue(1, 0))
3109 SDValue TCChain = Chain;
3110 SDNode *Copy = *N->use_begin();
3111 if (Copy->getOpcode() == ISD::CopyToReg) {
3112 // If the copy has a glue operand, we conservatively assume it isn't safe to
3113 // perform a tail call.
3114 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
3116 TCChain = Copy->getOperand(0);
3117 } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
3118 SDNode *VMov = Copy;
3119 // f64 returned in a pair of GPRs.
3120 SmallPtrSet<SDNode*, 2> Copies;
3121 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
3123 if (UI->getOpcode() != ISD::CopyToReg)
3127 if (Copies.size() > 2)
3130 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
3132 SDValue UseChain = UI->getOperand(0);
3133 if (Copies.count(UseChain.getNode()))
3137 // We are at the top of this chain.
3138 // If the copy has a glue operand, we conservatively assume it
3139 // isn't safe to perform a tail call.
3140 if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
3146 } else if (Copy->getOpcode() == ISD::BITCAST) {
3147 // f32 returned in a single GPR.
3148 if (!Copy->hasOneUse())
3150 Copy = *Copy->use_begin();
3151 if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
3153 // If the copy has a glue operand, we conservatively assume it isn't safe to
3154 // perform a tail call.
3155 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
3157 TCChain = Copy->getOperand(0);
3162 bool HasRet = false;
3163 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
3165 if (UI->getOpcode() != ARMISD::RET_FLAG &&
3166 UI->getOpcode() != ARMISD::INTRET_FLAG)
3178 bool ARMTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3179 if (!Subtarget->supportsTailCall())
3182 if (!CI->isTailCall())
3188 // Writing a 64-bit value requires splitting it into two 32-bit values first,
3189 // and passing the low and high parts through.
3190 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
3192 SDValue WriteValue = Op->getOperand(2);
3194 // This function is only supposed to be called for an i64-typed argument.
3195 assert(WriteValue.getValueType() == MVT::i64
3196 && "LowerWRITE_REGISTER called for non-i64 type argument.");
3198 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
3199 DAG.getConstant(0, DL, MVT::i32));
3200 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
3201 DAG.getConstant(1, DL, MVT::i32));
3202 SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
3203 return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
3206 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
3207 // their target counterpart wrapped in the ARMISD::Wrapper node. Suppose N is
3208 // one of the above mentioned nodes. It has to be wrapped because otherwise
3209 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
3210 // be used to form an addressing mode. These wrapped nodes will be selected
3211 // into MOVi.
3212 SDValue ARMTargetLowering::LowerConstantPool(SDValue Op,
3213 SelectionDAG &DAG) const {
3214 EVT PtrVT = Op.getValueType();
3215 // FIXME there is no actual debug info here
3217 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
3220 // When generating execute-only code, Constant Pools must be promoted to the
3221 // global data section. It's a bit ugly that we can't share them across basic
3222 // blocks, but this way we guarantee that execute-only behaves correctly with
3223 // position-independent addressing modes.
3224 if (Subtarget->genExecuteOnly()) {
3225 auto AFI = DAG.getMachineFunction().getInfo<ARMFunctionInfo>();
3226 auto T = const_cast<Type*>(CP->getType());
3227 auto C = const_cast<Constant*>(CP->getConstVal());
3228 auto M = const_cast<Module*>(DAG.getMachineFunction().
3229 getFunction().getParent());
3230 auto GV = new GlobalVariable(
3231 *M, T, /*isConstant=*/true, GlobalVariable::InternalLinkage, C,
3232 Twine(DAG.getDataLayout().getPrivateGlobalPrefix()) + "CP" +
3233 Twine(DAG.getMachineFunction().getFunctionNumber()) + "_" +
3234 Twine(AFI->createPICLabelUId())
3236 SDValue GA = DAG.getTargetGlobalAddress(dyn_cast<GlobalValue>(GV),
3238 return LowerGlobalAddress(GA, DAG);
3241 if (CP->isMachineConstantPoolEntry())
3243 DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT, CP->getAlign());
3245 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT, CP->getAlign());
3246 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
3249 unsigned ARMTargetLowering::getJumpTableEncoding() const {
3250 return MachineJumpTableInfo::EK_Inline;
3253 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
3254 SelectionDAG &DAG) const {
3255 MachineFunction &MF = DAG.getMachineFunction();
3256 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3257 unsigned ARMPCLabelIndex = 0;
3259 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3260 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
3262 bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI();
3263 if (!IsPositionIndependent) {
3264 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, Align(4));
3266 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
3267 ARMPCLabelIndex = AFI->createPICLabelUId();
3268 ARMConstantPoolValue *CPV =
3269 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
3270 ARMCP::CPBlockAddress, PCAdj);
3271 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3273 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
3274 SDValue Result = DAG.getLoad(
3275 PtrVT, DL, DAG.getEntryNode(), CPAddr,
3276 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3277 if (!IsPositionIndependent)
3279 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
3280 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
3283 /// Convert a TLS address reference into the correct sequence of loads
3284 /// and calls to compute the variable's address for Darwin, and return an
3285 /// SDValue containing the final node.
3287 /// Darwin only has one TLS scheme which must be capable of dealing with the
3288 /// fully general situation, in the worst case. This means:
3289 /// + "extern __thread" declaration.
3290 /// + Defined in a possibly unknown dynamic library.
3292 /// The general system is that each __thread variable has a [3 x i32] descriptor
3293 /// which contains information used by the runtime to calculate the address. The
3294 /// only part of this the compiler needs to know about is the first word, which
3295 /// contains a function pointer that must be called with the address of the
3296 /// entire descriptor in "r0".
3298 /// Since this descriptor may be in a different unit, in general access must
3299 /// proceed along the usual ARM rules. A common sequence to produce is:
3301 /// movw rT1, :lower16:_var$non_lazy_ptr
3302 /// movt rT1, :upper16:_var$non_lazy_ptr
3303 /// ldr r0, [rT1]
3304 /// ldr rT2, [r0]
3305 /// blx rT2
3306 /// [...address now in r0...]
3308 ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
3309 SelectionDAG &DAG) const {
3310 assert(Subtarget->isTargetDarwin() &&
3311 "This function expects a Darwin target");
3314 // The first step is to get the address of the actual global symbol. This is
3315 // where the TLS descriptor lives.
3315 // the TLS descriptor lives.
3316 SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
3318 // The first entry in the descriptor is a function pointer that we must call
3319 // to obtain the address of the variable.
3320 SDValue Chain = DAG.getEntryNode();
3321 SDValue FuncTLVGet = DAG.getLoad(
3322 MVT::i32, DL, Chain, DescAddr,
3323 MachinePointerInfo::getGOT(DAG.getMachineFunction()),
3324 /* Alignment = */ 4,
3325 MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
3326 MachineMemOperand::MOInvariant);
3327 Chain = FuncTLVGet.getValue(1);
3329 MachineFunction &F = DAG.getMachineFunction();
3330 MachineFrameInfo &MFI = F.getFrameInfo();
3331 MFI.setAdjustsStack(true);
3333 // TLS calls preserve all registers except those that absolutely must be
3334 // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
3335 // picky, TLS errors are usually low frequency).
3337 getTargetMachine().getSubtargetImpl(F.getFunction())->getRegisterInfo();
3338 auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
3339 const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());
3341 // Finally, we can make the call. This is just a degenerate version of a
3342 // normal ARM call node: r0 takes the address of the descriptor, and the
3343 // address of the variable in this thread is returned in r0.
3344 Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue());
3346 DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
3347 Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),
3348 DAG.getRegisterMask(Mask), Chain.getValue(1));
3349 return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
3353 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
3354 SelectionDAG &DAG) const {
3355 assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
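// Roughly, the emitted sequence looks like this (illustrative pseudo-code,
// register names arbitrary):
//   rTEB = mrc p15, #0, c13, c0, #2          ; current TEB (TPIDRURW)
//   rArr = load [rTEB + 0x2c]                ; ThreadLocalStoragePointer
//   rIdx = load [_tls_index]                 ; this module's TLS index
//   rTls = load [rArr + rIdx * 4]            ; base of this module's TLS block
//   rOff = load <constpool: var(secrel32)>   ; offset of the variable
//   result = rTls + rOff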
3357 SDValue Chain = DAG.getEntryNode();
3358 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3361 // Load the current TEB (thread environment block)
3362 SDValue Ops[] = {Chain,
3363 DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32),
3364 DAG.getTargetConstant(15, DL, MVT::i32),
3365 DAG.getTargetConstant(0, DL, MVT::i32),
3366 DAG.getTargetConstant(13, DL, MVT::i32),
3367 DAG.getTargetConstant(0, DL, MVT::i32),
3368 DAG.getTargetConstant(2, DL, MVT::i32)};
3369 SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
3370 DAG.getVTList(MVT::i32, MVT::Other), Ops);
3372 SDValue TEB = CurrentTEB.getValue(0);
3373 Chain = CurrentTEB.getValue(1);
3375 // Load the ThreadLocalStoragePointer from the TEB
3376 // A pointer to the TLS array is located at offset 0x2c from the TEB.
3378 DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
3379 TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
3381 // The pointer to the thread's TLS data area is located at an offset of the
3382 // TLS index scaled by 4 into the TLS array.
3384 // Load the TLS index from the C runtime
3386 DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
3387 TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
3388 TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());
3390 SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
3391 DAG.getConstant(2, DL, MVT::i32));
3392 SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
3393 DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
3394 MachinePointerInfo());
3396 // Get the offset of the start of the .tls section (section base)
3397 const auto *GA = cast<GlobalAddressSDNode>(Op);
3398 auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
3399 SDValue Offset = DAG.getLoad(
3401 DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
3402 DAG.getTargetConstantPool(CPV, PtrVT, Align(4))),
3403 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3405 return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
3408 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
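// Roughly, this produces (illustrative only):
//   ldr  r0, <constpool: var(tlsgd) - (Lpc + 8)>   ; 8 in ARM mode, 4 in Thumb
// Lpc:
//   add  r0, pc, r0          ; r0 = address of the TLS descriptor
//   bl   __tls_get_addr      ; returns the variable's address in r0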
3410 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
3411 SelectionDAG &DAG) const {
3413 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3414 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
3415 MachineFunction &MF = DAG.getMachineFunction();
3416 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3417 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3418 ARMConstantPoolValue *CPV =
3419 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
3420 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
3421 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3422 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
3423 Argument = DAG.getLoad(
3424 PtrVT, dl, DAG.getEntryNode(), Argument,
3425 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3426 SDValue Chain = Argument.getValue(1);
3428 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3429 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
3431 // call __tls_get_addr.
3434 Entry.Node = Argument;
3435 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
3436 Args.push_back(Entry);
3438 // FIXME: is there useful debug info available here?
3439 TargetLowering::CallLoweringInfo CLI(DAG);
3440 CLI.setDebugLoc(dl).setChain(Chain).setLibCallee(
3441 CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
3442 DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));
3444 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
3445 return CallResult.first;
3448 // Lower ISD::GlobalTLSAddress using the "initial exec" or
3449 // "local exec" model.
3451 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
3453 TLSModel::Model model) const {
3454 const GlobalValue *GV = GA->getGlobal();
3457 SDValue Chain = DAG.getEntryNode();
3458 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3459 // Get the Thread Pointer
3460 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
3462 if (model == TLSModel::InitialExec) {
3463 MachineFunction &MF = DAG.getMachineFunction();
3464 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3465 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3466 // Initial exec model.
3467 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
3468 ARMConstantPoolValue *CPV =
3469 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
3470 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
3472 Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3473 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
3474 Offset = DAG.getLoad(
3475 PtrVT, dl, Chain, Offset,
3476 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3477 Chain = Offset.getValue(1);
3479 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3480 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
3482 Offset = DAG.getLoad(
3483 PtrVT, dl, Chain, Offset,
3484 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3487 assert(model == TLSModel::LocalExec);
3488 ARMConstantPoolValue *CPV =
3489 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
3490 Offset = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3491 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
3492 Offset = DAG.getLoad(
3493 PtrVT, dl, Chain, Offset,
3494 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3497 // The address of the thread-local variable is the thread pointer plus the
3498 // offset of the variable.
3499 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
3503 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
3504 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3505 if (DAG.getTarget().useEmulatedTLS())
3506 return LowerToTLSEmulatedModel(GA, DAG);
3508 if (Subtarget->isTargetDarwin())
3509 return LowerGlobalTLSAddressDarwin(Op, DAG);
3511 if (Subtarget->isTargetWindows())
3512 return LowerGlobalTLSAddressWindows(Op, DAG);
3514 // TODO: implement the "local dynamic" model
3515 assert(Subtarget->isTargetELF() && "Only ELF implemented here");
3516 TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
3519 case TLSModel::GeneralDynamic:
3520 case TLSModel::LocalDynamic:
3521 return LowerToTLSGeneralDynamicModel(GA, DAG);
3522 case TLSModel::InitialExec:
3523 case TLSModel::LocalExec:
3524 return LowerToTLSExecModels(GA, DAG, model);
3526 llvm_unreachable("bogus TLS model");
3529 /// Return true if all users of V are within function F, looking through
3530 /// ConstantExprs.
3531 static bool allUsersAreInFunction(const Value *V, const Function *F) {
3532 SmallVector<const User*,4> Worklist;
3533 for (auto *U : V->users())
3534 Worklist.push_back(U);
3535 while (!Worklist.empty()) {
3536 auto *U = Worklist.pop_back_val();
3537 if (isa<ConstantExpr>(U)) {
3538 for (auto *UU : U->users())
3539 Worklist.push_back(UU);
3543 auto *I = dyn_cast<Instruction>(U);
3544 if (!I || I->getParent()->getParent() != F)
3550 static SDValue promoteToConstantPool(const ARMTargetLowering *TLI,
3551 const GlobalValue *GV, SelectionDAG &DAG,
3552 EVT PtrVT, const SDLoc &dl) {
3553 // If we're creating a pool entry for a constant global with unnamed address,
3554 // and the global is small enough, we can emit it inline into the constant pool
3555 // to save ourselves an indirection.
3557 // This is a win if the constant is only used in one function (so it doesn't
3558 // need to be duplicated) or duplicating the constant wouldn't increase code
3559 // size (implying the constant is no larger than 4 bytes).
3560 const Function &F = DAG.getMachineFunction().getFunction();
3562 // We rely on this decision to inline being idempotent and unrelated to the
3563 // use-site. We know that if we inline a variable at one use site, we'll
3564 // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
3565 // doesn't know about this optimization, so bail out if it's enabled, else
3566 // we could decide to inline here (and thus never emit the GV) while
3567 // fast-isel generated code would still require the GV.
3568 if (!EnableConstpoolPromotion ||
3569 DAG.getMachineFunction().getTarget().Options.EnableFastISel)
3572 auto *GVar = dyn_cast<GlobalVariable>(GV);
3573 if (!GVar || !GVar->hasInitializer() ||
3574 !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
3575 !GVar->hasLocalLinkage())
3578 // If we inline a value that contains relocations, we move the relocations
3579 // from .data to .text. This is not allowed in position-independent code.
3580 auto *Init = GVar->getInitializer();
3581 if ((TLI->isPositionIndependent() || TLI->getSubtarget()->isROPI()) &&
3582 Init->needsRelocation())
3585 // The constant islands pass can only really deal with alignment requests
3586 // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
3587 // any type wanting greater alignment requirements than 4 bytes. We also
3588 // can only promote constants that are multiples of 4 bytes in size or
3589 // are paddable to a multiple of 4. Currently we only try to pad constants
3590 // that are strings, for simplicity.
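// Illustrative example (hypothetical initializer): a 6-byte string
// c"hello\00" has RequiredPadding == 2, so two extra zero bytes are appended
// to give a PaddedSize of 8 before the entry is placed in the pool.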
3591 auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
3592 unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
3593 Align PrefAlign = DAG.getDataLayout().getPreferredAlign(GVar);
3594 unsigned RequiredPadding = 4 - (Size % 4);
3595 bool PaddingPossible =
3596 RequiredPadding == 4 || (CDAInit && CDAInit->isString());
3597 if (!PaddingPossible || PrefAlign > 4 || Size > ConstpoolPromotionMaxSize ||
3601 unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3602 MachineFunction &MF = DAG.getMachineFunction();
3603 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3605 // We can't bloat the constant pool too much, else the ConstantIslands pass
3606 // may fail to converge. If we haven't promoted this global yet (it may have
3607 // multiple uses), and promoting it would increase the constant pool size
3608 // (Size > 4), ensure we have space to do so up to ConstpoolPromotionMaxTotal.
3609 if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
3610 if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
3611 ConstpoolPromotionMaxTotal)
3614 // This is only valid if all users are in a single function; we can't clone
3615 // the constant in general. The LLVM IR unnamed_addr allows merging
3616 // constants, but not cloning them.
3618 // We could potentially allow cloning if we could prove all uses of the
3619 // constant in the current function don't care about the address, like
3620 // printf format strings. But that isn't implemented for now.
3621 if (!allUsersAreInFunction(GVar, &F))
3624 // We're going to inline this global. Pad it out if needed.
3625 if (RequiredPadding != 4) {
3626 StringRef S = CDAInit->getAsString();
3628 SmallVector<uint8_t,16> V(S.size());
3629 std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
3630 while (RequiredPadding--)
3632 Init = ConstantDataArray::get(*DAG.getContext(), V);
3635 auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
3636 SDValue CPAddr = DAG.getTargetConstantPool(CPVal, PtrVT, Align(4));
3637 if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
3638 AFI->markGlobalAsPromotedToConstantPool(GVar);
3639 AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
3642 ++NumConstpoolPromoted;
3643 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3646 bool ARMTargetLowering::isReadOnly(const GlobalValue *GV) const {
3647 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3648 if (!(GV = GA->getBaseObject()))
3650 if (const auto *V = dyn_cast<GlobalVariable>(GV))
3651 return V->isConstant();
3652 return isa<Function>(GV);
3655 SDValue ARMTargetLowering::LowerGlobalAddress(SDValue Op,
3656 SelectionDAG &DAG) const {
3657 switch (Subtarget->getTargetTriple().getObjectFormat()) {
3658 default: llvm_unreachable("unknown object format");
3660 return LowerGlobalAddressWindows(Op, DAG);
3662 return LowerGlobalAddressELF(Op, DAG);
3664 return LowerGlobalAddressDarwin(Op, DAG);
3668 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
3669 SelectionDAG &DAG) const {
3670 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3672 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3673 const TargetMachine &TM = getTargetMachine();
3674 bool IsRO = isReadOnly(GV);
3676 // Only call promoteToConstantPool if we are not generating an execute-only (XO) text section.
3677 if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
3678 if (SDValue V = promoteToConstantPool(this, GV, DAG, PtrVT, dl))
3681 if (isPositionIndependent()) {
3682 bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
3683 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0,
3684 UseGOT_PREL ? ARMII::MO_GOT : 0);
3685 SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
3688 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
3689 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3691 } else if (Subtarget->isROPI() && IsRO) {
3693 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
3694 SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
3696 } else if (Subtarget->isRWPI() && !IsRO) {
3699 if (Subtarget->useMovt()) {
3701 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_SBREL);
3702 RelAddr = DAG.getNode(ARMISD::Wrapper, dl, PtrVT, G);
3703 } else { // use literal pool for address constant
3704 ARMConstantPoolValue *CPV =
3705 ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
3706 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3707 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3708 RelAddr = DAG.getLoad(
3709 PtrVT, dl, DAG.getEntryNode(), CPAddr,
3710 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3712 SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
3713 SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, RelAddr);
3717 // If we have T2 ops, we can materialize the address directly via movt/movw
3718 // pair. This is always cheaper.
3719 if (Subtarget->useMovt()) {
3721 // FIXME: Once remat is capable of dealing with instructions with register
3722 // operands, expand this into two nodes.
3723 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
3724 DAG.getTargetGlobalAddress(GV, dl, PtrVT));
3726 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, Align(4));
3727 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3729 PtrVT, dl, DAG.getEntryNode(), CPAddr,
3730 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3734 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
3735 SelectionDAG &DAG) const {
3736 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3737 "ROPI/RWPI not currently supported for Darwin");
3738 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3740 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3742 if (Subtarget->useMovt())
3745 // FIXME: Once remat is capable of dealing with instructions with register
3746 // operands, expand this into multiple nodes
3748 isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;
3750 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
3751 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
3753 if (Subtarget->isGVIndirectSymbol(GV))
3754 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
3755 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3759 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
3760 SelectionDAG &DAG) const {
3761 assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
3762 assert(Subtarget->useMovt() &&
3763 "Windows on ARM expects to use movw/movt");
3764 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3765 "ROPI/RWPI not currently supported for Windows");
3767 const TargetMachine &TM = getTargetMachine();
3768 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3769 ARMII::TOF TargetFlags = ARMII::MO_NO_FLAG;
3770 if (GV->hasDLLImportStorageClass())
3771 TargetFlags = ARMII::MO_DLLIMPORT;
3772 else if (!TM.shouldAssumeDSOLocal(*GV->getParent(), GV))
3773 TargetFlags = ARMII::MO_COFFSTUB;
3774 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3780 // FIXME: Once remat is capable of dealing with instructions with register
3781 // operands, expand this into two nodes.
3782 Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
3783 DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*offset=*/0,
3785 if (TargetFlags & (ARMII::MO_DLLIMPORT | ARMII::MO_COFFSTUB))
3786 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
3787 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3792 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
3794 SDValue Val = DAG.getConstant(0, dl, MVT::i32);
3795 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
3796 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
3797 Op.getOperand(1), Val);
3801 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
3803 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
3804 Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
3807 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
3808 SelectionDAG &DAG) const {
3810 return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
3814 SDValue ARMTargetLowering::LowerINTRINSIC_VOID(
3815 SDValue Op, SelectionDAG &DAG, const ARMSubtarget *Subtarget) const {
3817 cast<ConstantSDNode>(
3818 Op.getOperand(Op.getOperand(0).getValueType() == MVT::Other))
3822 return SDValue(); // Don't custom lower most intrinsics.
3823 case Intrinsic::arm_gnu_eabi_mcount: {
3824 MachineFunction &MF = DAG.getMachineFunction();
3825 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3827 SDValue Chain = Op.getOperand(0);
3828 // call "\01__gnu_mcount_nc"
3829 const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
3830 const uint32_t *Mask =
3831 ARI->getCallPreservedMask(DAG.getMachineFunction(), CallingConv::C);
3832 assert(Mask && "Missing call preserved mask for calling convention");
3833 // Mark LR as an implicit live-in.
3834 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
3835 SDValue ReturnAddress =
3836 DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, PtrVT);
3837 constexpr EVT ResultTys[] = {MVT::Other, MVT::Glue};
3839 DAG.getTargetExternalSymbol("\01__gnu_mcount_nc", PtrVT, 0);
3840 SDValue RegisterMask = DAG.getRegisterMask(Mask);
3841 if (Subtarget->isThumb())
3844 ARM::tBL_PUSHLR, dl, ResultTys,
3845 {ReturnAddress, DAG.getTargetConstant(ARMCC::AL, dl, PtrVT),
3846 DAG.getRegister(0, PtrVT), Callee, RegisterMask, Chain}),
3849 DAG.getMachineNode(ARM::BL_PUSHLR, dl, ResultTys,
3850 {ReturnAddress, Callee, RegisterMask, Chain}),
3857 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
3858 const ARMSubtarget *Subtarget) const {
3859 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3862 default: return SDValue(); // Don't custom lower most intrinsics.
3863 case Intrinsic::thread_pointer: {
3864 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3865 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
3867 case Intrinsic::arm_cls: {
3868 const SDValue &Operand = Op.getOperand(1);
3869 const EVT VTy = Op.getValueType();
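// cls(x) is expanded as ctlz(((x ^ (x >> 31)) << 1) | 1): xor-ing with the
// sign-extended sign bit turns the leading sign-copies into leading zeroes,
// and the shift-left/or-1 removes the sign bit itself from the count while
// keeping the ctlz operand non-zero.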
3871 DAG.getNode(ISD::SRA, dl, VTy, Operand, DAG.getConstant(31, dl, VTy));
3872 SDValue XOR = DAG.getNode(ISD::XOR, dl, VTy, SRA, Operand);
3874 DAG.getNode(ISD::SHL, dl, VTy, XOR, DAG.getConstant(1, dl, VTy));
3876 DAG.getNode(ISD::OR, dl, VTy, SHL, DAG.getConstant(1, dl, VTy));
3877 SDValue Result = DAG.getNode(ISD::CTLZ, dl, VTy, OR);
3880 case Intrinsic::arm_cls64: {
3881 // cls(x) = if cls(hi(x)) != 31 then cls(hi(x))
3882 // else 31 + clz(if hi(x) == 0 then lo(x) else not(lo(x)))
3883 const SDValue &Operand = Op.getOperand(1);
3884 const EVT VTy = Op.getValueType();
3886 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand,
3887 DAG.getConstant(1, dl, VTy));
3888 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VTy, Operand,
3889 DAG.getConstant(0, dl, VTy));
3890 SDValue Constant0 = DAG.getConstant(0, dl, VTy);
3891 SDValue Constant1 = DAG.getConstant(1, dl, VTy);
3892 SDValue Constant31 = DAG.getConstant(31, dl, VTy);
3893 SDValue SRAHi = DAG.getNode(ISD::SRA, dl, VTy, Hi, Constant31);
3894 SDValue XORHi = DAG.getNode(ISD::XOR, dl, VTy, SRAHi, Hi);
3895 SDValue SHLHi = DAG.getNode(ISD::SHL, dl, VTy, XORHi, Constant1);
3896 SDValue ORHi = DAG.getNode(ISD::OR, dl, VTy, SHLHi, Constant1);
3897 SDValue CLSHi = DAG.getNode(ISD::CTLZ, dl, VTy, ORHi);
3899 DAG.getSetCC(dl, MVT::i1, CLSHi, Constant31, ISD::CondCode::SETEQ);
3901 DAG.getSetCC(dl, MVT::i1, Hi, Constant0, ISD::CondCode::SETEQ);
3902 SDValue AdjustedLo =
3903 DAG.getSelect(dl, VTy, HiIsZero, Lo, DAG.getNOT(dl, Lo, VTy));
3904 SDValue CLZAdjustedLo = DAG.getNode(ISD::CTLZ, dl, VTy, AdjustedLo);
3906 DAG.getSelect(dl, VTy, CheckLo,
3907 DAG.getNode(ISD::ADD, dl, VTy, CLZAdjustedLo, Constant31), CLSHi);
3910 case Intrinsic::eh_sjlj_lsda: {
3911 MachineFunction &MF = DAG.getMachineFunction();
3912 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3913 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3914 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3916 bool IsPositionIndependent = isPositionIndependent();
3917 unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
3918 ARMConstantPoolValue *CPV =
3919 ARMConstantPoolConstant::Create(&MF.getFunction(), ARMPCLabelIndex,
3920 ARMCP::CPLSDA, PCAdj);
3921 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, Align(4));
3922 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3923 SDValue Result = DAG.getLoad(
3924 PtrVT, dl, DAG.getEntryNode(), CPAddr,
3925 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3927 if (IsPositionIndependent) {
3928 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3929 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
3933 case Intrinsic::arm_neon_vabs:
3934 return DAG.getNode(ISD::ABS, SDLoc(Op), Op.getValueType(),
3936 case Intrinsic::arm_neon_vmulls:
3937 case Intrinsic::arm_neon_vmullu: {
3938 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
3939 ? ARMISD::VMULLs : ARMISD::VMULLu;
3940 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3941 Op.getOperand(1), Op.getOperand(2));
3943 case Intrinsic::arm_neon_vminnm:
3944 case Intrinsic::arm_neon_vmaxnm: {
3945 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
3946 ? ISD::FMINNUM : ISD::FMAXNUM;
3947 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3948 Op.getOperand(1), Op.getOperand(2));
3950 case Intrinsic::arm_neon_vminu:
3951 case Intrinsic::arm_neon_vmaxu: {
3952 if (Op.getValueType().isFloatingPoint())
3954 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
3955 ? ISD::UMIN : ISD::UMAX;
3956 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3957 Op.getOperand(1), Op.getOperand(2));
3959 case Intrinsic::arm_neon_vmins:
3960 case Intrinsic::arm_neon_vmaxs: {
3961 // v{min,max}s is overloaded between signed integers and floats.
3962 if (!Op.getValueType().isFloatingPoint()) {
3963 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3964 ? ISD::SMIN : ISD::SMAX;
3965 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3966 Op.getOperand(1), Op.getOperand(2));
3968 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3969 ? ISD::FMINIMUM : ISD::FMAXIMUM;
3970 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3971 Op.getOperand(1), Op.getOperand(2));
3973 case Intrinsic::arm_neon_vtbl1:
3974 return DAG.getNode(ARMISD::VTBL1, SDLoc(Op), Op.getValueType(),
3975 Op.getOperand(1), Op.getOperand(2));
3976 case Intrinsic::arm_neon_vtbl2:
3977 return DAG.getNode(ARMISD::VTBL2, SDLoc(Op), Op.getValueType(),
3978 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3979 case Intrinsic::arm_mve_pred_i2v:
3980 case Intrinsic::arm_mve_pred_v2i:
3981 return DAG.getNode(ARMISD::PREDICATE_CAST, SDLoc(Op), Op.getValueType(),
3983 case Intrinsic::arm_mve_vreinterpretq:
3984 return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(Op), Op.getValueType(),
3986 case Intrinsic::arm_mve_lsll:
3987 return DAG.getNode(ARMISD::LSLL, SDLoc(Op), Op->getVTList(),
3988 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3989 case Intrinsic::arm_mve_asrl:
3990 return DAG.getNode(ARMISD::ASRL, SDLoc(Op), Op->getVTList(),
3991 Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
3995 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
3996 const ARMSubtarget *Subtarget) {
3998 ConstantSDNode *SSIDNode = cast<ConstantSDNode>(Op.getOperand(2));
3999 auto SSID = static_cast<SyncScope::ID>(SSIDNode->getZExtValue());
4000 if (SSID == SyncScope::SingleThread)
4003 if (!Subtarget->hasDataBarrier()) {
4004 // Some ARMv6 CPUs can support data barriers with an mcr instruction.
4005 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
4006 // here.
4007 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
4008 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
4009 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
4010 DAG.getConstant(0, dl, MVT::i32));
4013 ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
4014 AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
4015 ARM_MB::MemBOpt Domain = ARM_MB::ISH;
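// The fence ultimately becomes a DMB with the chosen option: "ish" by
// default, "sy" on M-class, or "ishst" for release fences on subtargets that
// prefer ISHST barriers (illustrative summary of the cases below).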
4016 if (Subtarget->isMClass()) {
4017 // Only a full system barrier exists in the M-class architectures.
4018 Domain = ARM_MB::SY;
4019 } else if (Subtarget->preferISHSTBarriers() &&
4020 Ord == AtomicOrdering::Release) {
4021 // Swift happens to implement ISHST barriers in a way that's compatible with
4022 // Release semantics but weaker than ISH so we'd be fools not to use
4023 // it. Beware: other processors probably don't!
4024 Domain = ARM_MB::ISHST;
4027 return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
4028 DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
4029 DAG.getConstant(Domain, dl, MVT::i32));
4032 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
4033 const ARMSubtarget *Subtarget) {
4034 // ARM pre-v5TE and Thumb1 do not have preload instructions.
4035 if (!(Subtarget->isThumb2() ||
4036 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
4037 // Just preserve the chain.
4038 return Op.getOperand(0);
4041 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
4043 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
4044 // ARMv7 with MP extension has PLDW.
4045 return Op.getOperand(0);
4047 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
4048 if (Subtarget->isThumb()) {
4050 isRead = ~isRead & 1;
4051 isData = ~isData & 1;
4054 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
4055 Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
4056 DAG.getConstant(isData, dl, MVT::i32));
4059 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
4060 MachineFunction &MF = DAG.getMachineFunction();
4061 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
4063 // vastart just stores the address of the VarArgsFrameIndex slot into the
4064 // memory location argument.
4066 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
4067 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
4068 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
4069 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
4070 MachinePointerInfo(SV));
4073 SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
4074 CCValAssign &NextVA,
4077 const SDLoc &dl) const {
4078 MachineFunction &MF = DAG.getMachineFunction();
4079 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
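// An f64 (or half of a v2f64) formal argument arrives as two i32 halves: the
// first is always in a GPR (VA), and the second (NextVA) is either in the
// next GPR or in a fixed stack slot. Reassemble it with VMOVDRR, swapping
// the halves on big-endian targets.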
4081 const TargetRegisterClass *RC;
4082 if (AFI->isThumb1OnlyFunction())
4083 RC = &ARM::tGPRRegClass;
4085 RC = &ARM::GPRRegClass;
4087 // Transform the arguments stored in physical registers into virtual ones.
4088 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
4089 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
4092 if (NextVA.isMemLoc()) {
4093 MachineFrameInfo &MFI = MF.getFrameInfo();
4094 int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);
4096 // Create load node to retrieve arguments from the stack.
4097 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4098 ArgValue2 = DAG.getLoad(
4099 MVT::i32, dl, Root, FIN,
4100 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
4102 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
4103 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
4105 if (!Subtarget->isLittle())
4106 std::swap (ArgValue, ArgValue2);
4107 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
4110 // The remaining GPRs hold either the beginning of variable-argument
4111 // data, or the beginning of an aggregate passed by value (usually
4112 // byval). Either way, we allocate stack slots adjacent to the data
4113 // provided by our caller, and store the unallocated registers there.
4114 // If this is a variadic function, the va_list pointer will begin with
4115 // these values; otherwise, this reassembles a (byval) structure that
4116 // was split between registers and memory.
4117 // Return: the frame index that the registers were stored into.
4118 int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
4119 const SDLoc &dl, SDValue &Chain,
4120 const Value *OrigArg,
4121 unsigned InRegsParamRecordIdx,
4122 int ArgOffset, unsigned ArgSize) const {
4123 // Currently, two use-cases are possible:
4124 // Case #1. Non-var-args function, and we meet the first byval parameter.
4125 // Set up the first unallocated register as the first byval register;
4126 // eat all remaining registers
4127 // (these two actions are performed by the HandleByVal method).
4128 // Then, here, we initialize the stack frame with
4129 // "store-reg" instructions.
4130 // Case #2. Var-args function that doesn't contain byval parameters.
4131 // The same: eat all remaining unallocated registers and
4132 // initialize the stack frame.
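// Illustrative example: for a byval record covering r2..r3 (RBegin = r2,
// REnd = r4), two i32 stores are emitted at FrameIndex + 0 and
// FrameIndex + 4, keeping the data contiguous with any part of the argument
// already passed on the stack.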
4134 MachineFunction &MF = DAG.getMachineFunction();
4135 MachineFrameInfo &MFI = MF.getFrameInfo();
4136 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4137 unsigned RBegin, REnd;
4138 if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
4139 CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
4141 unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
4142 RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
4147 ArgOffset = -4 * (ARM::R4 - RBegin);
4149 auto PtrVT = getPointerTy(DAG.getDataLayout());
4150 int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
4151 SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);
4153 SmallVector<SDValue, 4> MemOps;
4154 const TargetRegisterClass *RC =
4155 AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
4157 for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
4158 unsigned VReg = MF.addLiveIn(Reg, RC);
4159 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
4160 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
4161 MachinePointerInfo(OrigArg, 4 * i));
4162 MemOps.push_back(Store);
4163 FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
4166 if (!MemOps.empty())
4167 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
4171 // Set up the stack frame that the va_list pointer will start from.
4172 void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
4173 const SDLoc &dl, SDValue &Chain,
4175 unsigned TotalArgRegsSaveSize,
4176 bool ForceMutable) const {
4177 MachineFunction &MF = DAG.getMachineFunction();
4178 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4180 // Try to store any remaining integer argument regs
4181 // to their spots on the stack so that they may be loaded by dereferencing
4182 // the result of va_next.
4183 // If there are no regs to be stored, just point the address after the last
4184 // argument passed via the stack.
4185 int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
4186 CCInfo.getInRegsParamsCount(),
4187 CCInfo.getNextStackOffset(),
4188 std::max(4U, TotalArgRegsSaveSize));
4189 AFI->setVarArgsFrameIndex(FrameIndex);
4192 bool ARMTargetLowering::splitValueIntoRegisterParts(
4193 SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
4194 unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
4195 bool IsABIRegCopy = CC.hasValue();
4196 EVT ValueVT = Val.getValueType();
4197 if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
4198 PartVT == MVT::f32) {
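// e.g. an f16 value assigned to an f32 register part travels as
//   f16 -> (bitcast) i16 -> (any_extend) i32 -> (bitcast) f32,
// which is the chain built below.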
4199 unsigned ValueBits = ValueVT.getSizeInBits();
4200 unsigned PartBits = PartVT.getSizeInBits();
4201 Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(ValueBits), Val);
4202 Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::getIntegerVT(PartBits), Val);
4203 Val = DAG.getNode(ISD::BITCAST, DL, PartVT, Val);
4210 SDValue ARMTargetLowering::joinRegisterPartsIntoValue(
4211 SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
4212 MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
4213 bool IsABIRegCopy = CC.hasValue();
4214 if (IsABIRegCopy && (ValueVT == MVT::f16 || ValueVT == MVT::bf16) &&
4215 PartVT == MVT::f32) {
4216 unsigned ValueBits = ValueVT.getSizeInBits();
4217 unsigned PartBits = PartVT.getSizeInBits();
4218 SDValue Val = Parts[0];
4220 Val = DAG.getNode(ISD::BITCAST, DL, MVT::getIntegerVT(PartBits), Val);
4221 Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::getIntegerVT(ValueBits), Val);
4222 Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
4228 SDValue ARMTargetLowering::LowerFormalArguments(
4229 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
4230 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
4231 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
4232 MachineFunction &MF = DAG.getMachineFunction();
4233 MachineFrameInfo &MFI = MF.getFrameInfo();
4235 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
4237 // Assign locations to all of the incoming arguments.
4238 SmallVector<CCValAssign, 16> ArgLocs;
4239 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
4241 CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
4243 SmallVector<SDValue, 16> ArgValues;
4245 Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin();
4246 unsigned CurArgIdx = 0;
4248 // Initially ArgRegsSaveSize is zero.
4249 // Then we increase this value each time we meet a byval parameter.
4250 // We also increase this value in the case of a varargs function.
4251 AFI->setArgRegsSaveSize(0);
4253 // Calculate the amount of stack space that we need to allocate to store
4254 // byval and variadic arguments that are passed in registers.
4255 // We need to know this before we allocate the first byval or variadic
4256 // argument, as they will be allocated a stack slot below the CFA (Canonical
4257 // Frame Address, the stack pointer at entry to the function).
4258 unsigned ArgRegBegin = ARM::R4;
4259 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4260 if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
4263 CCValAssign &VA = ArgLocs[i];
4264 unsigned Index = VA.getValNo();
4265 ISD::ArgFlagsTy Flags = Ins[Index].Flags;
4266 if (!Flags.isByVal())
4269 assert(VA.isMemLoc() && "unexpected byval pointer in reg");
4270 unsigned RBegin, REnd;
4271 CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
4272 ArgRegBegin = std::min(ArgRegBegin, RBegin);
4274 CCInfo.nextInRegsParam();
4276 CCInfo.rewindByValRegsInfo();
4278 int lastInsIndex = -1;
4279 if (isVarArg && MFI.hasVAStart()) {
4280 unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
4281 if (RegIdx != array_lengthof(GPRArgRegs))
4282 ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
4285 unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
4286 AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
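// For example (illustrative): in a variadic function whose fixed arguments use
// r0 and r1, the first unallocated GPR is r2, so ArgRegBegin == R2 and
// TotalArgRegsSaveSize == 4 * (R4 - R2) == 8 bytes, covering the spill of r2-r3
// below the CFA.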
4287 auto PtrVT = getPointerTy(DAG.getDataLayout());
4289 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4290 CCValAssign &VA = ArgLocs[i];
4291 if (Ins[VA.getValNo()].isOrigArg()) {
4292 std::advance(CurOrigArg,
4293 Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
4294 CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
4296 // Arguments stored in registers.
4297 if (VA.isRegLoc()) {
4298 EVT RegVT = VA.getLocVT();
4300 if (VA.needsCustom() && VA.getLocVT() == MVT::v2f64) {
4301 // f64 and vector types are split up into multiple registers or
4302 // combinations of registers and stack slots.
4304 GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
4305 VA = ArgLocs[++i]; // skip ahead to next loc
4307 if (VA.isMemLoc()) {
4308 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
4309 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4310 ArgValue2 = DAG.getLoad(
4311 MVT::f64, dl, Chain, FIN,
4312 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
4314 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
4316 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
4317 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue,
4318 ArgValue1, DAG.getIntPtrConstant(0, dl));
4319 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, ArgValue,
4320 ArgValue2, DAG.getIntPtrConstant(1, dl));
4321 } else if (VA.needsCustom() && VA.getLocVT() == MVT::f64) {
4322 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
4324 const TargetRegisterClass *RC;
4326 if (RegVT == MVT::f16 || RegVT == MVT::bf16)
4327 RC = &ARM::HPRRegClass;
4328 else if (RegVT == MVT::f32)
4329 RC = &ARM::SPRRegClass;
4330 else if (RegVT == MVT::f64 || RegVT == MVT::v4f16 ||
4331 RegVT == MVT::v4bf16)
4332 RC = &ARM::DPRRegClass;
4333 else if (RegVT == MVT::v2f64 || RegVT == MVT::v8f16 ||
4334 RegVT == MVT::v8bf16)
4335 RC = &ARM::QPRRegClass;
4336 else if (RegVT == MVT::i32)
4337 RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
4338 : &ARM::GPRRegClass;
4340 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
4342 // Transform the arguments in physical registers into virtual ones.
4343 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
4344 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
4346 // If this value is passed in r0 and has the returned attribute (e.g.
4347 // C++ 'structors), record this fact for later use.
4348 if (VA.getLocReg() == ARM::R0 && Ins[VA.getValNo()].Flags.isReturned()) {
4349 AFI->setPreservesR0();
4353 // If this is an 8 or 16-bit value, it is really passed promoted
4354 // to 32 bits. Insert an assert[sz]ext to capture this, then
4355 // truncate to the right size.
4356 switch (VA.getLocInfo()) {
4357 default: llvm_unreachable("Unknown loc info!");
4358 case CCValAssign::Full: break;
4359 case CCValAssign::BCvt:
4360 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
4362 case CCValAssign::SExt:
4363 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
4364 DAG.getValueType(VA.getValVT()));
4365 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
4367 case CCValAssign::ZExt:
4368 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
4369 DAG.getValueType(VA.getValVT()));
4370 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
4374 // f16 arguments have their size extended to 4 bytes and are passed as if they
4375 // had been copied to the LSBs of a 32-bit register.
4376 // For that, they are passed extended to i32 (soft ABI) or to f32 (hard ABI).
4377 if (VA.needsCustom() &&
4378 (VA.getValVT() == MVT::f16 || VA.getValVT() == MVT::bf16))
4379 ArgValue = MoveToHPR(dl, DAG, VA.getLocVT(), VA.getValVT(), ArgValue);
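// In both cases the half value occupies the low 16 bits of a 32-bit location
// (an i32 GPR for the soft-float ABI, an f32 S register for the hard-float
// ABI); MoveToHPR takes those low 16 bits and reinterprets them as the
// f16/bf16 result.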
4381 InVals.push_back(ArgValue);
4382 } else { // VA.isRegLoc()
4384 assert(VA.isMemLoc());
4385 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
4387 int index = VA.getValNo();
4389 // Some Ins[] entries become multiple ArgLocs[] entries.
4390 // Process them only once.
4391 if (index != lastInsIndex)
4393 ISD::ArgFlagsTy Flags = Ins[index].Flags;
4394 // FIXME: For now, all byval parameter objects are marked mutable.
4395 // This can be changed with more analysis.
4396 // In case of tail call optimization mark all arguments mutable.
4397 // Since they could be overwritten by lowering of arguments in case of
4399 if (Flags.isByVal()) {
4400 assert(Ins[index].isOrigArg() &&
4401 "Byval arguments cannot be implicit");
4402 unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
4404 int FrameIndex = StoreByValRegs(
4405 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
4406 VA.getLocMemOffset(), Flags.getByValSize());
4407 InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
4408 CCInfo.nextInRegsParam();
4410 unsigned FIOffset = VA.getLocMemOffset();
4411 int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
4414 // Create load nodes to retrieve arguments from the stack.
4415 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
4416 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
4417 MachinePointerInfo::getFixedStack(
4418 DAG.getMachineFunction(), FI)));
4420 lastInsIndex = index;
4426 if (isVarArg && MFI.hasVAStart())
4427 VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
4428 CCInfo.getNextStackOffset(),
4429 TotalArgRegsSaveSize);
4431 AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
4436 /// isFloatingPointZero - Return true if this is +0.0.
4437 static bool isFloatingPointZero(SDValue Op) {
4438 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
4439 return CFP->getValueAPF().isPosZero();
4440 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
4441 // Maybe this has already been legalized into the constant pool?
4442 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
4443 SDValue WrapperOp = Op.getOperand(1).getOperand(0);
4444 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
4445 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
4446 return CFP->getValueAPF().isPosZero();
4448 } else if (Op->getOpcode() == ISD::BITCAST &&
4449 Op->getValueType(0) == MVT::f64) {
4450 // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
4451 // created by LowerConstantFP().
4452 SDValue BitcastOp = Op->getOperand(0);
4453 if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
4454 isNullConstant(BitcastOp->getOperand(0)))
4460 /// Returns an appropriate ARM CMP (cmp) and corresponding condition code for
4461 /// the given operands.
4462 SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
4463 SDValue &ARMcc, SelectionDAG &DAG,
4464 const SDLoc &dl) const {
4465 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
4466 unsigned C = RHSC->getZExtValue();
4467 if (!isLegalICmpImmediate((int32_t)C)) {
4468 // Constant does not fit, try adjusting it by one.
4473 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
4474 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
4475 RHS = DAG.getConstant(C - 1, dl, MVT::i32);
4480 if (C != 0 && isLegalICmpImmediate(C-1)) {
4481 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
4482 RHS = DAG.getConstant(C - 1, dl, MVT::i32);
4487 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
4488 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
4489 RHS = DAG.getConstant(C + 1, dl, MVT::i32);
4494 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
4495 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
4496 RHS = DAG.getConstant(C + 1, dl, MVT::i32);
4501 } else if ((ARM_AM::getShiftOpcForNode(LHS.getOpcode()) != ARM_AM::no_shift) &&
4502 (ARM_AM::getShiftOpcForNode(RHS.getOpcode()) == ARM_AM::no_shift)) {
4503 // In ARM and Thumb-2, the compare instructions can shift their second operand.
4505 CC = ISD::getSetCCSwappedOperands(CC);
4506 std::swap(LHS, RHS);
4509 // Thumb1 has very limited immediate modes, so turning an "and" into a
4510 // shift can save multiple instructions.
4512 // If we have (x & C1), and C1 is an appropriate mask, we can transform it
4513 // into "((x << n) >> n)". But that isn't necessarily profitable on its
4514 // own. If it's the operand to an unsigned comparison with an immediate,
4515 // we can eliminate one of the shifts: we transform
4516 // "((x << n) >> n) == C2" to "(x << n) == (C2 << n)".
4518 // We avoid transforming cases which aren't profitable due to encoding constraints:
4521 // 1. C2 fits into the immediate field of a cmp, and the transformed version
4522 // would not; in that case, we're essentially trading one immediate load for another.
4524 // 2. C1 is 255 or 65535, so we can use uxtb or uxth.
4525 // 3. C2 is zero; we have other code for this special case.
4527 // FIXME: Figure out profitability for Thumb2; we usually can't save an
4528 // instruction, since the AND is always one instruction anyway, but we could
4529 // use narrow instructions in some cases.
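// For example (illustrative): "(x & 0x00ffffff) == 0x1234" can be rewritten as
// "(x << 8) == 0x00123400", avoiding the materialisation of the 0x00ffffff mask
// constant on Thumb1.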
4530 if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::AND &&
4531 LHS->hasOneUse() && isa<ConstantSDNode>(LHS.getOperand(1)) &&
4532 LHS.getValueType() == MVT::i32 && isa<ConstantSDNode>(RHS) &&
4533 !isSignedIntSetCC(CC)) {
4534 unsigned Mask = cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue();
4535 auto *RHSC = cast<ConstantSDNode>(RHS.getNode());
4536 uint64_t RHSV = RHSC->getZExtValue();
4537 if (isMask_32(Mask) && (RHSV & ~Mask) == 0 && Mask != 255 && Mask != 65535) {
4538 unsigned ShiftBits = countLeadingZeros(Mask);
4539 if (RHSV && (RHSV > 255 || (RHSV << ShiftBits) <= 255)) {
4540 SDValue ShiftAmt = DAG.getConstant(ShiftBits, dl, MVT::i32);
4541 LHS = DAG.getNode(ISD::SHL, dl, MVT::i32, LHS.getOperand(0), ShiftAmt);
4542 RHS = DAG.getConstant(RHSV << ShiftBits, dl, MVT::i32);
4547 // The specific comparison "(x<<c) > 0x80000000U" can be optimized to a
4548 // single "lsls x, c+1". The shift sets the "C" and "Z" flags the same way the original cmp would for the HI condition used below.
4550 // FIXME: Add support for ARM/Thumb2; this would need isel patterns, and
4551 // some tweaks to the heuristics for the previous and->shift transform.
4552 // FIXME: Optimize cases where the LHS isn't a shift.
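// For example (illustrative): "(x << 2) >u 0x80000000" becomes a single
// "lsls <tmp>, x, #3"; the HI condition (C set and Z clear) then holds exactly
// when the original comparison does.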
4553 if (Subtarget->isThumb1Only() && LHS->getOpcode() == ISD::SHL &&
4554 isa<ConstantSDNode>(RHS) &&
4555 cast<ConstantSDNode>(RHS)->getZExtValue() == 0x80000000U &&
4556 CC == ISD::SETUGT && isa<ConstantSDNode>(LHS.getOperand(1)) &&
4557 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() < 31) {
4559 cast<ConstantSDNode>(LHS.getOperand(1))->getZExtValue() + 1;
4560 SDValue Shift = DAG.getNode(ARMISD::LSLS, dl,
4561 DAG.getVTList(MVT::i32, MVT::i32),
4563 DAG.getConstant(ShiftAmt, dl, MVT::i32));
4564 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
4565 Shift.getValue(1), SDValue());
4566 ARMcc = DAG.getConstant(ARMCC::HI, dl, MVT::i32);
4567 return Chain.getValue(1);
4570 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4572 // If the RHS is a constant zero then the V (overflow) flag will never be
4573 // set. This can allow us to simplify GE to PL or LT to MI, which can be
4574 // simpler for other passes (like the peephole optimiser) to deal with.
4575 if (isNullConstant(RHS)) {
4579 CondCode = ARMCC::PL;
4582 CondCode = ARMCC::MI;
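// Rationale: with RHS == 0 the subtraction in the compare cannot overflow, so V
// is known to be clear; GE (N == V) therefore reduces to PL (N clear) and
// LT (N != V) to MI (N set).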
4587 ARMISD::NodeType CompareType;
4590 CompareType = ARMISD::CMP;
4595 CompareType = ARMISD::CMPZ;
4598 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4599 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
4602 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
4603 SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
4604 SelectionDAG &DAG, const SDLoc &dl,
4605 bool Signaling) const {
4606 assert(Subtarget->hasFP64() || RHS.getValueType() != MVT::f64);
4608 if (!isFloatingPointZero(RHS))
4609 Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPE : ARMISD::CMPFP,
4610 dl, MVT::Glue, LHS, RHS);
4612 Cmp = DAG.getNode(Signaling ? ARMISD::CMPFPEw0 : ARMISD::CMPFPw0,
4613 dl, MVT::Glue, LHS);
4614 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
4617 /// duplicateCmp - Glue values can have only one use, so this function
4618 /// duplicates a comparison node.
4620 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
4621 unsigned Opc = Cmp.getOpcode();
4623 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
4624 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
4626 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
4627 Cmp = Cmp.getOperand(0);
4628 Opc = Cmp.getOpcode();
4629 if (Opc == ARMISD::CMPFP)
4630 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
4632 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
4633 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
4635 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
4638 // This function returns three things: the arithmetic computation itself
4639 // (Value), a comparison (OverflowCmp), and a condition code (ARMcc). The
4640 // comparison and the condition code define the case in which the arithmetic
4641 // computation *does not* overflow.
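// Callers combine the returned {Value, OverflowCmp} pair with ARMcc in a CMOV
// or BRCOND; since ARMcc describes the no-overflow case, callers that want to
// act on overflow (e.g. LowerBRCOND below) invert the condition first.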
4642 std::pair<SDValue, SDValue>
4643 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
4644 SDValue &ARMcc) const {
4645 assert(Op.getValueType() == MVT::i32 && "Unsupported value type");
4647 SDValue Value, OverflowCmp;
4648 SDValue LHS = Op.getOperand(0);
4649 SDValue RHS = Op.getOperand(1);
4652 // FIXME: We are currently always generating CMPs because we don't support
4653 // generating CMN through the backend. This is not as good as the natural
4654 // CMP case because it causes a register dependency and cannot be folded away.
4657 switch (Op.getOpcode()) {
4659 llvm_unreachable("Unknown overflow instruction!");
4661 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
4662 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
4663 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
4666 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
4667 // We use ADDC here to correspond to its use in LowerUnsignedALUO.
4668 // We do not use it in the USUBO case as Value may not be used.
4669 Value = DAG.getNode(ARMISD::ADDC, dl,
4670 DAG.getVTList(Op.getValueType(), MVT::i32), LHS, RHS)
4672 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
4675 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
4676 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
4677 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
4680 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
4681 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
4682 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
4685 // We generate a UMUL_LOHI and then check if the high word is 0.
4686 ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
4687 Value = DAG.getNode(ISD::UMUL_LOHI, dl,
4688 DAG.getVTList(Op.getValueType(), Op.getValueType()),
4690 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
4691 DAG.getConstant(0, dl, MVT::i32));
4692 Value = Value.getValue(0); // We only want the low 32 bits for the result.
4695 // We generate a SMUL_LOHI and then check if all the bits of the high word
4696 // are the same as the sign bit of the low word.
4697 ARMcc = DAG.getConstant(ARMCC::EQ, dl, MVT::i32);
4698 Value = DAG.getNode(ISD::SMUL_LOHI, dl,
4699 DAG.getVTList(Op.getValueType(), Op.getValueType()),
4701 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value.getValue(1),
4702 DAG.getNode(ISD::SRA, dl, Op.getValueType(),
4704 DAG.getConstant(31, dl, MVT::i32)));
4705 Value = Value.getValue(0); // We only want the low 32 bits for the result.
4709 return std::make_pair(Value, OverflowCmp);
4713 ARMTargetLowering::LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const {
4714 // Let legalize expand this if it isn't a legal type yet.
4715 if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
4718 SDValue Value, OverflowCmp;
4720 std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
4721 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4723 // We use 0 and 1 as false and true values.
4724 SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
4725 SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
4726 EVT VT = Op.getValueType();
4728 SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
4729 ARMcc, CCR, OverflowCmp);
4731 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
4732 return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
4735 static SDValue ConvertBooleanCarryToCarryFlag(SDValue BoolCarry,
4736 SelectionDAG &DAG) {
4737 SDLoc DL(BoolCarry);
4738 EVT CarryVT = BoolCarry.getValueType();
4740 // This converts the boolean value carry into the carry flag by doing
4741 // ARMISD::SUBC Carry, 1
4742 SDValue Carry = DAG.getNode(ARMISD::SUBC, DL,
4743 DAG.getVTList(CarryVT, MVT::i32),
4744 BoolCarry, DAG.getConstant(1, DL, CarryVT));
4745 return Carry.getValue(1);
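// Illustrative: a boolean carry of 1 gives SUBC(1, 1), which does not borrow,
// so the ARM carry flag (carry == !borrow) ends up set; a boolean 0 gives
// SUBC(0, 1), which borrows and leaves the carry flag clear.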
4748 static SDValue ConvertCarryFlagToBooleanCarry(SDValue Flags, EVT VT,
4749 SelectionDAG &DAG) {
4752 // Now convert the carry flag into a boolean carry. We do this
4753 // using ARMISD::ADDE 0, 0, Carry
4754 return DAG.getNode(ARMISD::ADDE, DL, DAG.getVTList(VT, MVT::i32),
4755 DAG.getConstant(0, DL, MVT::i32),
4756 DAG.getConstant(0, DL, MVT::i32), Flags);
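// That is, ADDE computes 0 + 0 + carry, yielding 1 when the carry flag is set
// and 0 otherwise.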
4759 SDValue ARMTargetLowering::LowerUnsignedALUO(SDValue Op,
4760 SelectionDAG &DAG) const {
4761 // Let legalize expand this if it isn't a legal type yet.
4762 if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
4765 SDValue LHS = Op.getOperand(0);
4766 SDValue RHS = Op.getOperand(1);
4769 EVT VT = Op.getValueType();
4770 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
4773 switch (Op.getOpcode()) {
4775 llvm_unreachable("Unknown overflow instruction!");
4777 Value = DAG.getNode(ARMISD::ADDC, dl, VTs, LHS, RHS);
4778 // Convert the carry flag into a boolean value.
4779 Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
4782 Value = DAG.getNode(ARMISD::SUBC, dl, VTs, LHS, RHS);
4783 // Convert the carry flag into a boolean value.
4784 Overflow = ConvertCarryFlagToBooleanCarry(Value.getValue(1), VT, DAG);
4785 // ARMISD::SUBC returns 0 when we have to borrow, so make it an overflow
4786 // value. So compute 1 - C.
4787 Overflow = DAG.getNode(ISD::SUB, dl, MVT::i32,
4788 DAG.getConstant(1, dl, MVT::i32), Overflow);
4793 return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
4796 static SDValue LowerSADDSUBSAT(SDValue Op, SelectionDAG &DAG,
4797 const ARMSubtarget *Subtarget) {
4798 EVT VT = Op.getValueType();
4799 if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
4805 bool IsAdd = Op->getOpcode() == ISD::SADDSAT;
4806 switch (VT.getSimpleVT().SimpleTy) {
4810 NewOpcode = IsAdd ? ARMISD::QADD8b : ARMISD::QSUB8b;
4813 NewOpcode = IsAdd ? ARMISD::QADD16b : ARMISD::QSUB16b;
4819 DAG.getNode(NewOpcode, dl, MVT::i32,
4820 DAG.getSExtOrTrunc(Op->getOperand(0), dl, MVT::i32),
4821 DAG.getSExtOrTrunc(Op->getOperand(1), dl, MVT::i32));
4822 return DAG.getNode(ISD::TRUNCATE, dl, VT, Add);
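// For example, an i16 saddsat is handled by sign-extending both operands to
// i32, emitting the QADD16b node, and truncating the result back to i16; the
// DSP extension checked above provides the underlying QADD16/QSUB16 and
// QADD8/QSUB8 instructions.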
4825 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
4826 SDValue Cond = Op.getOperand(0);
4827 SDValue SelectTrue = Op.getOperand(1);
4828 SDValue SelectFalse = Op.getOperand(2);
4830 unsigned Opc = Cond.getOpcode();
4832 if (Cond.getResNo() == 1 &&
4833 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
4834 Opc == ISD::USUBO)) {
4835 if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
4838 SDValue Value, OverflowCmp;
4840 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
4841 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4842 EVT VT = Op.getValueType();
4844 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
4850 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
4851 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
4853 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
4854 const ConstantSDNode *CMOVTrue =
4855 dyn_cast<ConstantSDNode>(Cond.getOperand(0));
4856 const ConstantSDNode *CMOVFalse =
4857 dyn_cast<ConstantSDNode>(Cond.getOperand(1));
4859 if (CMOVTrue && CMOVFalse) {
4860 unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
4861 unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
4865 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
4867 False = SelectFalse;
4868 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
4873 if (True.getNode() && False.getNode()) {
4874 EVT VT = Op.getValueType();
4875 SDValue ARMcc = Cond.getOperand(2);
4876 SDValue CCR = Cond.getOperand(3);
4877 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
4878 assert(True.getValueType() == VT);
4879 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
4884 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
4885 // undefined bits before doing a full-word comparison with zero.
4886 Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
4887 DAG.getConstant(1, dl, Cond.getValueType()));
4889 return DAG.getSelectCC(dl, Cond,
4890 DAG.getConstant(0, dl, Cond.getValueType()),
4891 SelectTrue, SelectFalse, ISD::SETNE);
4894 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
4895 bool &swpCmpOps, bool &swpVselOps) {
4896 // Start by selecting the GE condition code for opcodes that return true for 'equality',
4898 if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
4899 CC == ISD::SETULE || CC == ISD::SETGE || CC == ISD::SETLE)
4900 CondCode = ARMCC::GE;
4902 // and GT for opcodes that return false for 'equality'.
4903 else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
4904 CC == ISD::SETULT || CC == ISD::SETGT || CC == ISD::SETLT)
4905 CondCode = ARMCC::GT;
4907 // Since we are constrained to GE/GT, if the opcode contains 'less', we need
4908 // to swap the compare operands.
4909 if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
4910 CC == ISD::SETULT || CC == ISD::SETLE || CC == ISD::SETLT)
4913 // Both GT and GE are ordered comparisons, and return false for 'unordered'.
4914 // If we have an unordered opcode, we need to swap the operands to the VSEL
4915 // instruction (effectively negating the condition).
4917 // This also has the effect of swapping which one of 'less' or 'greater'
4918 // returns true, so we also swap the compare operands. It also switches
4919 // whether we return true for 'equality', so we compensate by picking the
4920 // opposite condition code to our original choice.
4921 if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
4922 CC == ISD::SETUGT) {
4923 swpCmpOps = !swpCmpOps;
4924 swpVselOps = !swpVselOps;
4925 CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
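// For example: SETOGT maps directly to GT; SETOLT keeps GT but swaps the
// compare operands; SETUGT swaps both the compare and the VSEL operands and
// uses GE instead.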
4928 // 'ordered' is 'anything but unordered', so use the VS condition code and
4929 // swap the VSEL operands.
4930 if (CC == ISD::SETO) {
4931 CondCode = ARMCC::VS;
4935 // 'unordered or not equal' is 'anything but equal', so use the EQ condition
4936 // code and swap the VSEL operands. Also do this if we don't care about the unordered case.
4938 if (CC == ISD::SETUNE || CC == ISD::SETNE) {
4939 CondCode = ARMCC::EQ;
4944 SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
4945 SDValue TrueVal, SDValue ARMcc, SDValue CCR,
4946 SDValue Cmp, SelectionDAG &DAG) const {
4947 if (!Subtarget->hasFP64() && VT == MVT::f64) {
4948 FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4949 DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
4950 TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4951 DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
4953 SDValue TrueLow = TrueVal.getValue(0);
4954 SDValue TrueHigh = TrueVal.getValue(1);
4955 SDValue FalseLow = FalseVal.getValue(0);
4956 SDValue FalseHigh = FalseVal.getValue(1);
4958 SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
4960 SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
4961 ARMcc, CCR, duplicateCmp(Cmp, DAG));
4963 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
4965 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
4970 static bool isGTorGE(ISD::CondCode CC) {
4971 return CC == ISD::SETGT || CC == ISD::SETGE;
4974 static bool isLTorLE(ISD::CondCode CC) {
4975 return CC == ISD::SETLT || CC == ISD::SETLE;
4978 // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
4979 // All of these conditions (and their <= and >= counterparts) will do:
4980 //          x < k ? k : x
4981 //          x > k ? x : k
4982 //          k < x ? x : k
4983 //          k > x ? k : x
4984 static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
4985 const SDValue TrueVal, const SDValue FalseVal,
4986 const ISD::CondCode CC, const SDValue K) {
4987 return (isGTorGE(CC) &&
4988 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
4990 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
4993 // Similar to isLowerSaturate(), but checks for upper-saturating conditions.
4994 static bool isUpperSaturate(const SDValue LHS, const SDValue RHS,
4995 const SDValue TrueVal, const SDValue FalseVal,
4996 const ISD::CondCode CC, const SDValue K) {
4997 return (isGTorGE(CC) &&
4998 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
5000 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
5003 // Check if two chained conditionals could be converted into SSAT or USAT.
5005 // SSAT can replace a set of two conditional selectors that bound a number to an
5006 // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples:
5008 // x < -k ? -k : (x > k ? k : x)
5009 // x < -k ? -k : (x < k ? x : k)
5010 // x > -k ? (x > k ? k : x) : -k
5011 // x < k ? (x < -k ? -k : x) : k
5014 // USAT works similarly to SSAT, but bounds the value to the interval [0, k], where k + 1 is a power of 2.
5017 // It returns true if the conversion can be done, false otherwise.
5018 // Additionally, the variable is returned in parameter V, the constant in K and
5019 // usat is set to true if the conditional represents an unsigned saturation
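// For example, with k = 127 the nested selects clamp x to [-128, 127], which
// corresponds to a signed 8-bit SSAT; with k = 255 and a lower bound of 0 they
// clamp x to [0, 255], which corresponds to an 8-bit USAT.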
5020 static bool isSaturatingConditional(const SDValue &Op, SDValue &V,
5021 uint64_t &K, bool &usat) {
5022 SDValue LHS1 = Op.getOperand(0);
5023 SDValue RHS1 = Op.getOperand(1);
5024 SDValue TrueVal1 = Op.getOperand(2);
5025 SDValue FalseVal1 = Op.getOperand(3);
5026 ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();
5028 const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
5029 if (Op2.getOpcode() != ISD::SELECT_CC)
5032 SDValue LHS2 = Op2.getOperand(0);
5033 SDValue RHS2 = Op2.getOperand(1);
5034 SDValue TrueVal2 = Op2.getOperand(2);
5035 SDValue FalseVal2 = Op2.getOperand(3);
5036 ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();
5038 // Find out which are the constants and which are the variables
5039 // in each conditional
5040 SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
5043 SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
5046 SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
5047 SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
5048 SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
5049 SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;
5051 // We must detect cases where the original operations worked with 16- or
5052 // 8-bit values. In such cases, V2Tmp != V2 because the comparison operations
5053 // must work with sign-extended values but the select operations return
5054 // the original non-extended value.
5055 SDValue V2TmpReg = V2Tmp;
5056 if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG)
5057 V2TmpReg = V2Tmp->getOperand(0);
5059 // Check that the registers and the constants have the correct values
5060 // in both conditionals
5061 if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
5065 // Figure out which conditional is saturating the lower/upper bound.
5066 const SDValue *LowerCheckOp =
5067 isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
5069 : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
5072 const SDValue *UpperCheckOp =
5073 isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
5075 : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2)
5079 if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
5082 // Check that the constant in the lower-bound check is
5083 // the opposite of the constant in the upper-bound check
5084 // in 1's complement.
5085 int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
5086 int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
5087 int64_t PosVal = std::max(Val1, Val2);
5088 int64_t NegVal = std::min(Val1, Val2);
5090 if (((Val1 > Val2 && UpperCheckOp == &Op) ||
5091 (Val1 < Val2 && UpperCheckOp == &Op2)) &&
5092 isPowerOf2_64(PosVal + 1)) {
5094 // Handle the difference between USAT (unsigned) and SSAT (signed) saturation
5097 else if (NegVal == 0)
5103 K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive
5111 // Check if a condition of the type x < k ? k : x can be converted into a
5112 // bit operation instead of conditional moves.
5113 // Currently this is allowed given:
5114 // - The conditions and values match up
5115 // - k is 0 or -1 (all ones)
5116 // This function will not check the last condition, that's up to the caller.
5117 // It returns true if the transformation can be made, and in such a case
5118 // returns x in V, and k in SatK.
5119 static bool isLowerSaturatingConditional(const SDValue &Op, SDValue &V,
5122 SDValue LHS = Op.getOperand(0);
5123 SDValue RHS = Op.getOperand(1);
5124 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
5125 SDValue TrueVal = Op.getOperand(2);
5126 SDValue FalseVal = Op.getOperand(3);
5128 SDValue *K = isa<ConstantSDNode>(LHS) ? &LHS : isa<ConstantSDNode>(RHS)
5132 // No constant operation in comparison, early out
5136 SDValue KTmp = isa<ConstantSDNode>(TrueVal) ? TrueVal : FalseVal;
5137 V = (KTmp == TrueVal) ? FalseVal : TrueVal;
5138 SDValue VTmp = (K && *K == LHS) ? RHS : LHS;
5140 // If the constant in the comparison and in the select, or the variable in the
5141 // comparison and in the select, does not match, early out.
5142 if (*K != KTmp || V != VTmp)
5145 if (isLowerSaturate(LHS, RHS, TrueVal, FalseVal, CC, *K)) {
5153 bool ARMTargetLowering::isUnsupportedFloatingType(EVT VT) const {
5155 return !Subtarget->hasVFP2Base();
5157 return !Subtarget->hasFP64();
5159 return !Subtarget->hasFullFP16();
5163 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
5164 EVT VT = Op.getValueType();
5167 // Try to convert two saturating conditional selects into a single SSAT
5169 uint64_t SatConstant;
5171 if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) &&
5172 isSaturatingConditional(Op, SatValue, SatConstant, SatUSat)) {
5174 return DAG.getNode(ARMISD::USAT, dl, VT, SatValue,
5175 DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
5177 return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue,
5178 DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
5181 // Try to convert expressions of the form x < k ? k : x (and similar forms)
5182 // into more efficient bit operations, which is possible when k is 0 or -1.
5183 // On ARM and Thumb-2, which have a flexible second operand, this will result in
5184 // single instructions. On Thumb the shift and the bit operation will be two instructions.
5186 // Only allow this transformation on full-width (32-bit) operations
5187 SDValue LowerSatConstant;
5188 if (VT == MVT::i32 &&
5189 isLowerSaturatingConditional(Op, SatValue, LowerSatConstant)) {
5190 SDValue ShiftV = DAG.getNode(ISD::SRA, dl, VT, SatValue,
5191 DAG.getConstant(31, dl, VT));
5192 if (isNullConstant(LowerSatConstant)) {
5193 SDValue NotShiftV = DAG.getNode(ISD::XOR, dl, VT, ShiftV,
5194 DAG.getAllOnesConstant(dl, VT));
5195 return DAG.getNode(ISD::AND, dl, VT, SatValue, NotShiftV);
5196 } else if (isAllOnesConstant(LowerSatConstant))
5197 return DAG.getNode(ISD::OR, dl, VT, SatValue, ShiftV);
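// These rely on the identities max(x, 0) == x & ~(x >> 31) and
// max(x, -1) == x | (x >> 31), where ">>" is an arithmetic shift right.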
5200 SDValue LHS = Op.getOperand(0);
5201 SDValue RHS = Op.getOperand(1);
5202 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
5203 SDValue TrueVal = Op.getOperand(2);
5204 SDValue FalseVal = Op.getOperand(3);
5205 ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FalseVal);
5206 ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TrueVal);
5208 if (Subtarget->hasV8_1MMainlineOps() && CFVal && CTVal &&
5209 LHS.getValueType() == MVT::i32 && RHS.getValueType() == MVT::i32) {
5210 unsigned TVal = CTVal->getZExtValue();
5211 unsigned FVal = CFVal->getZExtValue();
5212 unsigned Opcode = 0;
5214 if (TVal == ~FVal) {
5215 Opcode = ARMISD::CSINV;
5216 } else if (TVal == ~FVal + 1) {
5217 Opcode = ARMISD::CSNEG;
5218 } else if (TVal + 1 == FVal) {
5219 Opcode = ARMISD::CSINC;
5220 } else if (TVal == FVal + 1) {
5221 Opcode = ARMISD::CSINC;
5222 std::swap(TrueVal, FalseVal);
5223 std::swap(TVal, FVal);
5224 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5228 // If one of the constants is cheaper than another, materialise the
5229 // cheaper one and let the csel generate the other.
5230 if (Opcode != ARMISD::CSINC &&
5231 HasLowerConstantMaterializationCost(FVal, TVal, Subtarget)) {
5232 std::swap(TrueVal, FalseVal);
5233 std::swap(TVal, FVal);
5234 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5237 // Attempt to use ZR, checking whether TVal is 0, possibly inverting the condition
5238 // to get there. CSINC is not invertible like the other two (~(~a) == a,
5239 // -(-a) == a, but (a+1)+1 != a).
5240 if (FVal == 0 && Opcode != ARMISD::CSINC) {
5241 std::swap(TrueVal, FalseVal);
5242 std::swap(TVal, FVal);
5243 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5246 TrueVal = DAG.getRegister(ARM::ZR, MVT::i32);
5248 // Drops F's value because we can get it by inverting/negating TVal.
5252 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5253 EVT VT = TrueVal.getValueType();
5254 return DAG.getNode(Opcode, dl, VT, TrueVal, FalseVal, ARMcc, Cmp);
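// For example: (c ? x : x + 1) maps to CSINC, (c ? x : ~x) to CSINV and
// (c ? x : -x) to CSNEG, each needing only one compare and one conditional
// instruction on v8.1-M Mainline.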
5258 if (isUnsupportedFloatingType(LHS.getValueType())) {
5259 DAG.getTargetLoweringInfo().softenSetCCOperands(
5260 DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);
5262 // If softenSetCCOperands only returned one value, we should compare it to
5264 if (!RHS.getNode()) {
5265 RHS = DAG.getConstant(0, dl, LHS.getValueType());
5270 if (LHS.getValueType() == MVT::i32) {
5271 // Try to generate VSEL on ARMv8.
5272 // The VSEL instruction can't use all the usual ARM condition
5273 // codes: it only has two bits to select the condition code, so it's
5274 // constrained to use only GE, GT, VS and EQ.
5276 // To implement all the various ISD::SETXXX opcodes, we sometimes need to
5277 // swap the operands of the previous compare instruction (effectively
5278 // inverting the compare condition, swapping 'less' and 'greater') and
5279 // sometimes need to swap the operands to the VSEL (which inverts the
5280 // condition in the sense of firing whenever the previous condition didn't)
5281 if (Subtarget->hasFPARMv8Base() && (TrueVal.getValueType() == MVT::f16 ||
5282 TrueVal.getValueType() == MVT::f32 ||
5283 TrueVal.getValueType() == MVT::f64)) {
5284 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
5285 if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
5286 CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
5287 CC = ISD::getSetCCInverse(CC, LHS.getValueType());
5288 std::swap(TrueVal, FalseVal);
5293 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5294 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5295 // Choose GE over PL, which vsel does not support
5296 if (cast<ConstantSDNode>(ARMcc)->getZExtValue() == ARMCC::PL)
5297 ARMcc = DAG.getConstant(ARMCC::GE, dl, MVT::i32);
5298 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
5301 ARMCC::CondCodes CondCode, CondCode2;
5302 FPCCToARMCC(CC, CondCode, CondCode2);
5304 // Normalize the fp compare. If RHS is zero we prefer to keep it there so we
5305 // match CMPFPw0 instead of CMPFP, though we don't do this for f16 because we
5306 // must use VSEL (limited condition codes), due to not having conditional f16 moves.
5308 if (Subtarget->hasFPARMv8Base() &&
5309 !(isFloatingPointZero(RHS) && TrueVal.getValueType() != MVT::f16) &&
5310 (TrueVal.getValueType() == MVT::f16 ||
5311 TrueVal.getValueType() == MVT::f32 ||
5312 TrueVal.getValueType() == MVT::f64)) {
5313 bool swpCmpOps = false;
5314 bool swpVselOps = false;
5315 checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
5317 if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
5318 CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
5320 std::swap(LHS, RHS);
5322 std::swap(TrueVal, FalseVal);
5326 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
5327 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
5328 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5329 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
5330 if (CondCode2 != ARMCC::AL) {
5331 SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
5332 // FIXME: Needs another CMP because flag can have but one use.
5333 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
5334 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
5339 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
5340 /// to morph to an integer compare sequence.
5341 static bool canChangeToInt(SDValue Op, bool &SeenZero,
5342 const ARMSubtarget *Subtarget) {
5343 SDNode *N = Op.getNode();
5344 if (!N->hasOneUse())
5345 // Otherwise it requires moving the value from fp to integer registers.
5347 if (!N->getNumValues())
5349 EVT VT = Op.getValueType();
5350 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
5351 // f32 case is generally profitable. f64 case only makes sense when vcmpe +
5352 // vmrs are very slow, e.g. cortex-a8.
5355 if (isFloatingPointZero(Op)) {
5359 return ISD::isNormalLoad(N);
5362 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
5363 if (isFloatingPointZero(Op))
5364 return DAG.getConstant(0, SDLoc(Op), MVT::i32);
5366 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
5367 return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
5368 Ld->getPointerInfo(), Ld->getAlignment(),
5369 Ld->getMemOperand()->getFlags());
5371 llvm_unreachable("Unknown VFP cmp argument!");
5374 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
5375 SDValue &RetVal1, SDValue &RetVal2) {
5378 if (isFloatingPointZero(Op)) {
5379 RetVal1 = DAG.getConstant(0, dl, MVT::i32);
5380 RetVal2 = DAG.getConstant(0, dl, MVT::i32);
5384 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
5385 SDValue Ptr = Ld->getBasePtr();
5387 DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
5388 Ld->getAlignment(), Ld->getMemOperand()->getFlags());
5390 EVT PtrType = Ptr.getValueType();
5391 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
5392 SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
5393 PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
5394 RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
5395 Ld->getPointerInfo().getWithOffset(4), NewAlign,
5396 Ld->getMemOperand()->getFlags());
5400 llvm_unreachable("Unknown VFP cmp argument!");
5403 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
5404 /// f32 and even f64 comparisons to integer ones.
5406 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
5407 SDValue Chain = Op.getOperand(0);
5408 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
5409 SDValue LHS = Op.getOperand(2);
5410 SDValue RHS = Op.getOperand(3);
5411 SDValue Dest = Op.getOperand(4);
5414 bool LHSSeenZero = false;
5415 bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
5416 bool RHSSeenZero = false;
5417 bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
5418 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
5419 // If unsafe fp math optimization is enabled and there are no other uses of
5420 // the CMP operands, and the condition code is EQ or NE, we can optimize it
5421 // to an integer comparison.
5422 if (CC == ISD::SETOEQ)
5424 else if (CC == ISD::SETUNE)
5427 SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
5429 if (LHS.getValueType() == MVT::f32) {
5430 LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
5431 bitcastf32Toi32(LHS, DAG), Mask);
5432 RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
5433 bitcastf32Toi32(RHS, DAG), Mask);
5434 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5435 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5436 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
5437 Chain, Dest, ARMcc, CCR, Cmp);
5442 expandf64Toi32(LHS, DAG, LHS1, LHS2);
5443 expandf64Toi32(RHS, DAG, RHS1, RHS2);
5444 LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
5445 RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
5446 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
5447 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
5448 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
5449 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
5450 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
5456 SDValue ARMTargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
5457 SDValue Chain = Op.getOperand(0);
5458 SDValue Cond = Op.getOperand(1);
5459 SDValue Dest = Op.getOperand(2);
5462 // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
5464 unsigned Opc = Cond.getOpcode();
5465 bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
5466 !Subtarget->isThumb1Only();
5467 if (Cond.getResNo() == 1 &&
5468 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
5469 Opc == ISD::USUBO || OptimizeMul)) {
5470 // Only lower legal XALUO ops.
5471 if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
5474 // The actual operation with overflow check.
5475 SDValue Value, OverflowCmp;
5477 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
5479 // Reverse the condition code.
5480 ARMCC::CondCodes CondCode =
5481 (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
5482 CondCode = ARMCC::getOppositeCondition(CondCode);
5483 ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
5484 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5486 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
5493 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
5494 SDValue Chain = Op.getOperand(0);
5495 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
5496 SDValue LHS = Op.getOperand(2);
5497 SDValue RHS = Op.getOperand(3);
5498 SDValue Dest = Op.getOperand(4);
5501 if (isUnsupportedFloatingType(LHS.getValueType())) {
5502 DAG.getTargetLoweringInfo().softenSetCCOperands(
5503 DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS);
5505 // If softenSetCCOperands only returned one value, we should compare it to
5507 if (!RHS.getNode()) {
5508 RHS = DAG.getConstant(0, dl, LHS.getValueType());
5513 // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
5515 unsigned Opc = LHS.getOpcode();
5516 bool OptimizeMul = (Opc == ISD::SMULO || Opc == ISD::UMULO) &&
5517 !Subtarget->isThumb1Only();
5518 if (LHS.getResNo() == 1 && (isOneConstant(RHS) || isNullConstant(RHS)) &&
5519 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
5520 Opc == ISD::USUBO || OptimizeMul) &&
5521 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
5522 // Only lower legal XALUO ops.
5523 if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
5526 // The actual operation with overflow check.
5527 SDValue Value, OverflowCmp;
5529 std::tie(Value, OverflowCmp) = getARMXALUOOp(LHS.getValue(0), DAG, ARMcc);
5531 if ((CC == ISD::SETNE) != isOneConstant(RHS)) {
5532 // Reverse the condition code.
5533 ARMCC::CondCodes CondCode =
5534 (ARMCC::CondCodes)cast<const ConstantSDNode>(ARMcc)->getZExtValue();
5535 CondCode = ARMCC::getOppositeCondition(CondCode);
5536 ARMcc = DAG.getConstant(CondCode, SDLoc(ARMcc), MVT::i32);
5538 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5540 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other, Chain, Dest, ARMcc, CCR,
5544 if (LHS.getValueType() == MVT::i32) {
5546 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
5547 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5548 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
5549 Chain, Dest, ARMcc, CCR, Cmp);
5552 if (getTargetMachine().Options.UnsafeFPMath &&
5553 (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
5554 CC == ISD::SETNE || CC == ISD::SETUNE)) {
5555 if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
5559 ARMCC::CondCodes CondCode, CondCode2;
5560 FPCCToARMCC(CC, CondCode, CondCode2);
5562 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
5563 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
5564 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5565 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
5566 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
5567 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
5568 if (CondCode2 != ARMCC::AL) {
5569 ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
5570 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
5571 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
5576 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
5577 SDValue Chain = Op.getOperand(0);
5578 SDValue Table = Op.getOperand(1);
5579 SDValue Index = Op.getOperand(2);
5582 EVT PTy = getPointerTy(DAG.getDataLayout());
5583 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
5584 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
5585 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
5586 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
5587 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Index);
5588 if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
5589 // Thumb2 and ARMv8-M use a two-level jump. That is, they jump into the jump
5590 // table, which does another jump to the destination. This also makes it easier
5591 // to translate it to TBB / TBH later (Thumb2 only).
5592 // FIXME: This might not work if the function is extremely large.
5593 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
5594 Addr, Op.getOperand(2), JTI);
5596 if (isPositionIndependent() || Subtarget->isROPI()) {
5598 DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
5599 MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
5600 Chain = Addr.getValue(1);
5601 Addr = DAG.getNode(ISD::ADD, dl, PTy, Table, Addr);
5602 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
5605 DAG.getLoad(PTy, dl, Chain, Addr,
5606 MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
5607 Chain = Addr.getValue(1);
5608 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
5612 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
5613 EVT VT = Op.getValueType();
5616 if (Op.getValueType().getVectorElementType() == MVT::i32) {
5617 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
5619 return DAG.UnrollVectorOp(Op.getNode());
5622 const bool HasFullFP16 =
5623 static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();
5626 const EVT OpTy = Op.getOperand(0).getValueType();
5627 if (OpTy == MVT::v4f32)
5629 else if (OpTy == MVT::v4f16 && HasFullFP16)
5631 else if (OpTy == MVT::v8f16 && HasFullFP16)
5634 llvm_unreachable("Invalid type for custom lowering!");
5636 if (VT != MVT::v4i16 && VT != MVT::v8i16)
5637 return DAG.UnrollVectorOp(Op.getNode());
5639 Op = DAG.getNode(Op.getOpcode(), dl, NewTy, Op.getOperand(0));
5640 return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
5643 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
5644 EVT VT = Op.getValueType();
5646 return LowerVectorFP_TO_INT(Op, DAG);
5648 bool IsStrict = Op->isStrictFPOpcode();
5649 SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
5651 if (isUnsupportedFloatingType(SrcVal.getValueType())) {
5653 if (Op.getOpcode() == ISD::FP_TO_SINT ||
5654 Op.getOpcode() == ISD::STRICT_FP_TO_SINT)
5655 LC = RTLIB::getFPTOSINT(SrcVal.getValueType(),
5658 LC = RTLIB::getFPTOUINT(SrcVal.getValueType(),
5661 MakeLibCallOptions CallOptions;
5662 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
5664 std::tie(Result, Chain) = makeLibCall(DAG, LC, Op.getValueType(), SrcVal,
5665 CallOptions, Loc, Chain);
5666 return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result;
5669 // FIXME: Remove this when we have strict fp instruction selection patterns
5673 DAG.getNode(Op.getOpcode() == ISD::STRICT_FP_TO_SINT ? ISD::FP_TO_SINT
5675 Loc, Op.getValueType(), SrcVal);
5676 return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
5682 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
5683 EVT VT = Op.getValueType();
5686 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
5687 if (VT.getVectorElementType() == MVT::f32)
5689 return DAG.UnrollVectorOp(Op.getNode());
5692 assert((Op.getOperand(0).getValueType() == MVT::v4i16 ||
5693 Op.getOperand(0).getValueType() == MVT::v8i16) &&
5694 "Invalid type for custom lowering!");
5696 const bool HasFullFP16 =
5697 static_cast<const ARMSubtarget&>(DAG.getSubtarget()).hasFullFP16();
5700 if (VT == MVT::v4f32)
5701 DestVecType = MVT::v4i32;
5702 else if (VT == MVT::v4f16 && HasFullFP16)
5703 DestVecType = MVT::v4i16;
5704 else if (VT == MVT::v8f16 && HasFullFP16)
5705 DestVecType = MVT::v8i16;
5707 return DAG.UnrollVectorOp(Op.getNode());
5711 switch (Op.getOpcode()) {
5712 default: llvm_unreachable("Invalid opcode!");
5713 case ISD::SINT_TO_FP:
5714 CastOpc = ISD::SIGN_EXTEND;
5715 Opc = ISD::SINT_TO_FP;
5717 case ISD::UINT_TO_FP:
5718 CastOpc = ISD::ZERO_EXTEND;
5719 Opc = ISD::UINT_TO_FP;
5723 Op = DAG.getNode(CastOpc, dl, DestVecType, Op.getOperand(0));
5724 return DAG.getNode(Opc, dl, VT, Op);
5727 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
5728 EVT VT = Op.getValueType();
5730 return LowerVectorINT_TO_FP(Op, DAG);
5731 if (isUnsupportedFloatingType(VT)) {
5733 if (Op.getOpcode() == ISD::SINT_TO_FP)
5734 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
5737 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
5739 MakeLibCallOptions CallOptions;
5740 return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
5741 CallOptions, SDLoc(Op)).first;
5747 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
5748 // Implement fcopysign with a fabs and a conditional fneg.
5749 SDValue Tmp0 = Op.getOperand(0);
5750 SDValue Tmp1 = Op.getOperand(1);
5752 EVT VT = Op.getValueType();
5753 EVT SrcVT = Tmp1.getValueType();
5754 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
5755 Tmp0.getOpcode() == ARMISD::VMOVDRR;
5756 bool UseNEON = !InGPR && Subtarget->hasNEON();
5759 // Use VBSL to copy the sign bit.
5760 unsigned EncodedVal = ARM_AM::createVMOVModImm(0x6, 0x80);
5761 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
5762 DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
5763 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
5765 Mask = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
5766 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
5767 DAG.getConstant(32, dl, MVT::i32));
5768 else /*if (VT == MVT::f32)*/
5769 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
5770 if (SrcVT == MVT::f32) {
5771 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
5773 Tmp1 = DAG.getNode(ARMISD::VSHLIMM, dl, OpVT,
5774 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
5775 DAG.getConstant(32, dl, MVT::i32));
5776 } else if (VT == MVT::f32)
5777 Tmp1 = DAG.getNode(ARMISD::VSHRuIMM, dl, MVT::v1i64,
5778 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
5779 DAG.getConstant(32, dl, MVT::i32));
5780 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
5781 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
5783 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff),
5785 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
5786 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
5787 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
5789 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
5790 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
5791 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
5792 if (VT == MVT::f32) {
5793 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
5794 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
5795 DAG.getConstant(0, dl, MVT::i32));
5797 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
5803 // Bitcast operand 1 to i32.
5804 if (SrcVT == MVT::f64)
5805 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
5807 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
5809 // Or in the signbit with integer operations.
5810 SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
5811 SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
5812 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
5813 if (VT == MVT::f32) {
5814 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
5815 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
5816 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
5817 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
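// That is, for f32 this computes copysign(x, y) as
// (bits(x) & 0x7fffffff) | (bits(y) & 0x80000000).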
5820 // f64: Or the high part with signbit and then combine two parts.
5821 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
5823 SDValue Lo = Tmp0.getValue(0);
5824 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
5825 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
5826 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
5829 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
5830 MachineFunction &MF = DAG.getMachineFunction();
5831 MachineFrameInfo &MFI = MF.getFrameInfo();
5832 MFI.setReturnAddressIsTaken(true);
5834 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
5837 EVT VT = Op.getValueType();
5839 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5841 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
5842 SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
5843 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
5844 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
5845 MachinePointerInfo());
5848 // Return LR, which contains the return address. Mark it an implicit live-in.
5849 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
5850 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
5853 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
5854 const ARMBaseRegisterInfo &ARI =
5855 *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
5856 MachineFunction &MF = DAG.getMachineFunction();
5857 MachineFrameInfo &MFI = MF.getFrameInfo();
5858 MFI.setFrameAddressIsTaken(true);
5860 EVT VT = Op.getValueType();
5861 SDLoc dl(Op); // FIXME probably not meaningful
5862 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
5863 Register FrameReg = ARI.getFrameRegister(MF);
5864 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
5866 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
5867 MachinePointerInfo());
5871 // FIXME? Maybe this could be a TableGen attribute on some registers and
5872 // this table could be generated automatically from RegInfo.
5873 Register ARMTargetLowering::getRegisterByName(const char* RegName, LLT VT,
5874 const MachineFunction &MF) const {
5875 Register Reg = StringSwitch<unsigned>(RegName)
5876 .Case("sp", ARM::SP)
5880 report_fatal_error(Twine("Invalid register name \""
5881 + StringRef(RegName) + "\"."));
5884 // The result is a 64-bit value, so split it into two 32-bit values and return them as a pair.
5886 static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
5887 SelectionDAG &DAG) {
5890 // This function is only supposed to be called for i64 type destination.
5891 assert(N->getValueType(0) == MVT::i64
5892 && "ExpandREAD_REGISTER called for non-i64 type result.");
5894 SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
5895 DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
5899 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
5901 Results.push_back(Read.getOperand(0));
5904 /// \p BC is a bitcast that is about to be turned into a VMOVDRR.
5905 /// When \p DstVT, the destination type of \p BC, is on the vector
5906 /// register bank and the source of bitcast, \p Op, operates on the same bank,
5907 /// it might be possible to combine them, such that everything stays on the
5908 /// vector register bank.
5909 /// \returns The node that would replace \p BC if the combine succeeds.
5911 static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
5912 SelectionDAG &DAG) {
5913 SDValue Op = BC->getOperand(0);
5914 EVT DstVT = BC->getValueType(0);
5916 // The only vector instruction that can produce a scalar (remember,
5917 // since the bitcast was about to be turned into VMOVDRR, the source
5918 // type is i64) from a vector is EXTRACT_VECTOR_ELT.
5919 // Moreover, we can do this combine only if there is one use.
5920 // Finally, if the destination type is not a vector, there is not
5921 // much point in forcing everything onto the vector bank.
5922 if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5926 // If the index is not constant, we will introduce an additional
5927 // multiply that will stick.
5928 // Give up in that case.
5929 ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
5932 unsigned DstNumElt = DstVT.getVectorNumElements();
5934 // Compute the new index.
5935 const APInt &APIntIndex = Index->getAPIntValue();
5936 APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
5937 NewIndex *= APIntIndex;
5938 // Check if the new constant index fits into i32.
5939 if (NewIndex.getBitWidth() > 32)
5942 // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
5943 // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
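// e.g. v2f32 (bitcast (i64 extractelt v2i64 src, 1)) becomes
// v2f32 (extract_subvector (v4f32 (bitcast src)), 2).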
5945 SDValue ExtractSrc = Op.getOperand(0);
5946 EVT VecVT = EVT::getVectorVT(
5947 *DAG.getContext(), DstVT.getScalarType(),
5948 ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
5949 SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
5950 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
5951 DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
5954 /// ExpandBITCAST - If the target supports VFP, this function is called to
5955 /// expand a bit convert where either the source or destination type is i64 to
5956 /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
5957 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
5958 /// vectors), since the legalizer won't know what to do with that.
5959 SDValue ARMTargetLowering::ExpandBITCAST(SDNode *N, SelectionDAG &DAG,
5960 const ARMSubtarget *Subtarget) const {
5961 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
5963 SDValue Op = N->getOperand(0);
5965 // This function is only supposed to be called for i16 and i64 types, either
5966 // as the source or destination of the bit convert.
5967 EVT SrcVT = Op.getValueType();
5968 EVT DstVT = N->getValueType(0);
5970 if ((SrcVT == MVT::i16 || SrcVT == MVT::i32) &&
5971 (DstVT == MVT::f16 || DstVT == MVT::bf16))
5972 return MoveToHPR(SDLoc(N), DAG, MVT::i32, DstVT.getSimpleVT(),
5973 DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), MVT::i32, Op));
5975 if ((DstVT == MVT::i16 || DstVT == MVT::i32) &&
5976 (SrcVT == MVT::f16 || SrcVT == MVT::bf16))
5978 ISD::TRUNCATE, SDLoc(N), DstVT,
5979 MoveFromHPR(SDLoc(N), DAG, MVT::i32, SrcVT.getSimpleVT(), Op));
5981 if (!(SrcVT == MVT::i64 || DstVT == MVT::i64))
5984 // Turn i64->f64 into VMOVDRR.
5985 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
5986 // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
5987 // if we can combine the bitcast with its source.
5988 if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
5991 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
5992 DAG.getConstant(0, dl, MVT::i32));
5993 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
5994 DAG.getConstant(1, dl, MVT::i32));
5995 return DAG.getNode(ISD::BITCAST, dl, DstVT,
5996 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
5999 // Turn f64->i64 into VMOVRRD.
6000 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
6002 if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
6003 SrcVT.getVectorNumElements() > 1)
6004 Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
6005 DAG.getVTList(MVT::i32, MVT::i32),
6006 DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
6008 Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
6009 DAG.getVTList(MVT::i32, MVT::i32), Op);
6010 // Merge the pieces into a single i64 value.
6011 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
6017 /// getZeroVector - Returns a vector of specified type with all zero elements.
6018 /// Zero vectors are used to represent vector negation and in those cases
6019 /// will be implemented with the NEON VNEG instruction. However, VNEG does
6020 /// not support i64 elements, so sometimes the zero vectors will need to be
6021 /// explicitly constructed. Regardless, use a canonical VMOV to create the zero vector.
6023 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
6024 assert(VT.isVector() && "Expected a vector type");
6025 // The canonical modified immediate encoding of a zero vector is....0!
6026 SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
6027 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
6028 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
6029 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
6032 /// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
6033 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
6034 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
6035 SelectionDAG &DAG) const {
6036 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
6037 EVT VT = Op.getValueType();
6038 unsigned VTBits = VT.getSizeInBits();
6040 SDValue ShOpLo = Op.getOperand(0);
6041 SDValue ShOpHi = Op.getOperand(1);
6042 SDValue ShAmt = Op.getOperand(2);
6044 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
6045 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
6047 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
6049 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
6050 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
6051 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
6052 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
6053 DAG.getConstant(VTBits, dl, MVT::i32));
6054 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
6055 SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
6056 SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
6057 SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
6058 ISD::SETGE, ARMcc, DAG, dl);
6059 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
6062 SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
6063 SDValue HiBigShift = Opc == ISD::SRA
6064 ? DAG.getNode(Opc, dl, VT, ShOpHi,
6065 DAG.getConstant(VTBits - 1, dl, VT))
6066 : DAG.getConstant(0, dl, VT);
6067 SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
6068 ISD::SETGE, ARMcc, DAG, dl);
6069 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
6072 SDValue Ops[2] = { Lo, Hi };
6073 return DAG.getMergeValues(Ops, dl);
6076 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
6077 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
6078 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
6079 SelectionDAG &DAG) const {
6080 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
6081 EVT VT = Op.getValueType();
6082 unsigned VTBits = VT.getSizeInBits();
6084 SDValue ShOpLo = Op.getOperand(0);
6085 SDValue ShOpHi = Op.getOperand(1);
6086 SDValue ShAmt = Op.getOperand(2);
6088 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
6090 assert(Op.getOpcode() == ISD::SHL_PARTS);
6091 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
6092 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
6093 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
6094 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
6095 SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
6097 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
6098 DAG.getConstant(VTBits, dl, MVT::i32));
6099 SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
6100 SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
6101 ISD::SETGE, ARMcc, DAG, dl);
6102 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
6105 SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
6106 ISD::SETGE, ARMcc, DAG, dl);
6107 SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
6108 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
6109 DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);
6111 SDValue Ops[2] = { Lo, Hi };
6112 return DAG.getMergeValues(Ops, dl);
6115 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
6116 SelectionDAG &DAG) const {
6117 // The rounding mode is in bits 23:22 of the FPSCR.
6118 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
6119 // The formula we use to implement this is (((FPSCR + (1 << 22)) >> 22) & 3)
6120 // so that the shift + and get folded into a bitfield extract.
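// e.g. an FPSCR rounding field of 0b10 (round toward minus infinity) gives
// ((2 + 1) & 3) == 3, which is the FLT_ROUNDS value for that mode.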
6122 SDValue Chain = Op.getOperand(0);
6123 SDValue Ops[] = {Chain,
6124 DAG.getConstant(Intrinsic::arm_get_fpscr, dl, MVT::i32)};
6127 DAG.getNode(ISD::INTRINSIC_W_CHAIN, dl, {MVT::i32, MVT::Other}, Ops);
6128 Chain = FPSCR.getValue(1);
6129 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
6130 DAG.getConstant(1U << 22, dl, MVT::i32));
6131 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
6132 DAG.getConstant(22, dl, MVT::i32));
6133 SDValue And = DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
6134 DAG.getConstant(3, dl, MVT::i32));
6135 return DAG.getMergeValues({And, Chain}, dl);
6138 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
6139 const ARMSubtarget *ST) {
6141 EVT VT = N->getValueType(0);
6142 if (VT.isVector() && ST->hasNEON()) {
6144 // Compute the least significant set bit: LSB = X & -X
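// e.g. X == 0b01100 gives -X == ...10100, so X & -X == 0b00100.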
6145 SDValue X = N->getOperand(0);
6146 SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
6147 SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);
6149 EVT ElemTy = VT.getVectorElementType();
6151 if (ElemTy == MVT::i8) {
6152 // Compute with: cttz(x) = ctpop(lsb - 1)
6153 SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6154 DAG.getTargetConstant(1, dl, ElemTy));
6155 SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
6156 return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
6159 if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
6160 (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
6161 // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
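// e.g. for a 32-bit element with lsb == 0b100, ctlz(lsb) == 29, so cttz == 31 - 29 == 2.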
6162 unsigned NumBits = ElemTy.getSizeInBits();
6163 SDValue WidthMinus1 =
6164 DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6165 DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
6166 SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
6167 return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
6170 // Compute with: cttz(x) = ctpop(lsb - 1)
6174 if (ElemTy == MVT::i64) {
6175 // Load constant 0xffff'ffff'ffff'ffff to register.
6176 SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6177 DAG.getTargetConstant(0x1eff, dl, MVT::i32));
6178 Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
6180 SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
6181 DAG.getTargetConstant(1, dl, ElemTy));
6182 Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
6184 return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
6187 if (!ST->hasV6T2Ops())
6190 SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
6191 return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
6194 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
6195 const ARMSubtarget *ST) {
6196 EVT VT = N->getValueType(0);
6199 assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
6200 assert((VT == MVT::v1i64 || VT == MVT::v2i64 || VT == MVT::v2i32 ||
6201 VT == MVT::v4i32 || VT == MVT::v4i16 || VT == MVT::v8i16) &&
6202 "Unexpected type for custom ctpop lowering");
6204 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6205 EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
6206 SDValue Res = DAG.getBitcast(VT8Bit, N->getOperand(0));
6207 Res = DAG.getNode(ISD::CTPOP, DL, VT8Bit, Res);
6209 // Widen v8i8/v16i8 CTPOP result to VT by repeatedly widening pairwise adds.
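// e.g. for a v8i16 result, the v16i8 popcounts are widened with a single
// arm_neon_vpaddlu (pairwise add long) into eight i16 sums.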
6210 unsigned EltSize = 8;
6211 unsigned NumElts = VT.is64BitVector() ? 8 : 16;
6212 while (EltSize != VT.getScalarSizeInBits()) {
6213 SmallVector<SDValue, 8> Ops;
6214 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddlu, DL,
6215 TLI.getPointerTy(DAG.getDataLayout())));
6220 MVT WidenVT = MVT::getVectorVT(MVT::getIntegerVT(EltSize), NumElts);
6221 Res = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, WidenVT, Ops);
6227 /// getVShiftImm - Check if this is a valid build_vector for the immediate
6228 /// operand of a vector shift operation, where all the elements of the
6229 /// build_vector must have the same constant integer value.
6230 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
6231 // Ignore bit_converts.
6232 while (Op.getOpcode() == ISD::BITCAST)
6233 Op = Op.getOperand(0);
6234 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
6235 APInt SplatBits, SplatUndef;
6236 unsigned SplatBitSize;
6239 !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs,
6241 SplatBitSize > ElementBits)
6243 Cnt = SplatBits.getSExtValue();
6247 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
6248 /// operand of a vector shift left operation. That value must be in the range:
6249 /// 0 <= Value < ElementBits for a left shift; or
6250 /// 0 <= Value <= ElementBits for a long left shift.
6251 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
6252 assert(VT.isVector() && "vector shift count is not a vector type");
6253 int64_t ElementBits = VT.getScalarSizeInBits();
6254 if (!getVShiftImm(Op, ElementBits, Cnt))
6256 return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
6259 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
6260 /// operand of a vector shift right operation. For a shift opcode, the value
6261 /// is positive, but for an intrinsic the value must be negative. The
6262 /// absolute value must be in the range:
6263 /// 1 <= |Value| <= ElementBits for a right shift; or
6264 /// 1 <= |Value| <= ElementBits/2 for a narrow right shift.
6265 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
6267 assert(VT.isVector() && "vector shift count is not a vector type");
6268 int64_t ElementBits = VT.getScalarSizeInBits();
6269 if (!getVShiftImm(Op, ElementBits, Cnt))
6272 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits / 2 : ElementBits));
6273 if (Cnt >= -(isNarrow ? ElementBits / 2 : ElementBits) && Cnt <= -1) {
6280 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
6281 const ARMSubtarget *ST) {
6282 EVT VT = N->getValueType(0);
6289 // We essentially have two forms here. Shift by an immediate and shift by a
6290 // vector register (there are also shift by a gpr, but that is just handled
6291 // with a tablegen pattern). We cannot easily match shift by an immediate in
6292 // tablegen so we do that here and generate a VSHLIMM/VSHRsIMM/VSHRuIMM.
6293 // For shifting by a vector, we don't have VSHR, only VSHL (which can be
6294 // signed or unsigned, and a negative shift indicates a shift right).
6295 if (N->getOpcode() == ISD::SHL) {
6296 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt))
6297 return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
6298 DAG.getConstant(Cnt, dl, MVT::i32));
6299 return DAG.getNode(ARMISD::VSHLu, dl, VT, N->getOperand(0),
6303 assert((N->getOpcode() == ISD::SRA || N->getOpcode() == ISD::SRL) &&
6304 "unexpected vector shift opcode");
6306 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
6307 unsigned VShiftOpc =
6308 (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
6309 return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
6310 DAG.getConstant(Cnt, dl, MVT::i32));
6313 // Other right shifts we don't have operations for (we use a shift left by a
6314 // negative number).
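// e.g. an SRL by a splat of 3 becomes a VSHLu by a splat of -3.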
6315 EVT ShiftVT = N->getOperand(1).getValueType();
6316 SDValue NegatedCount = DAG.getNode(
6317 ISD::SUB, dl, ShiftVT, getZeroVector(ShiftVT, DAG, dl), N->getOperand(1));
6318 unsigned VShiftOpc =
6319 (N->getOpcode() == ISD::SRA ? ARMISD::VSHLs : ARMISD::VSHLu);
6320 return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0), NegatedCount);
6323 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
6324 const ARMSubtarget *ST) {
6325 EVT VT = N->getValueType(0);
6328 // We can get here for a node like i32 = ISD::SHL i32, i64
6332 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA ||
6333 N->getOpcode() == ISD::SHL) &&
6334 "Unknown shift to lower!");
6336 unsigned ShOpc = N->getOpcode();
6337 if (ST->hasMVEIntegerOps()) {
6338 SDValue ShAmt = N->getOperand(1);
6339 unsigned ShPartsOpc = ARMISD::LSLL;
6340 ConstantSDNode *Con = dyn_cast<ConstantSDNode>(ShAmt);
6342 // If the shift amount is wider than 64 bits, or is a constant that is zero
6343 // or at least 32, then use the default lowering.
6344 if (ShAmt->getValueType(0).getSizeInBits() > 64 ||
6345 (Con && (Con->getZExtValue() == 0 || Con->getZExtValue() >= 32)))
6348 // Extract the lower 32 bits of the shift amount if it's not an i32
6349 if (ShAmt->getValueType(0) != MVT::i32)
6350 ShAmt = DAG.getZExtOrTrunc(ShAmt, dl, MVT::i32);
6352 if (ShOpc == ISD::SRL) {
6354 // There is no t2LSRLr instruction so negate and perform an lsll if the
6355 // shift amount is in a register, emulating a right shift.
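// e.g. for a shift amount n held in a register, x LSR n is computed as LSLL(x, 0 - n).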
6356 ShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
6357 DAG.getConstant(0, dl, MVT::i32), ShAmt);
6359 // Else generate an lsrl on the immediate shift amount
6360 ShPartsOpc = ARMISD::LSRL;
6361 } else if (ShOpc == ISD::SRA)
6362 ShPartsOpc = ARMISD::ASRL;
6364 // Lower 32 bits of the destination/source
6365 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
6366 DAG.getConstant(0, dl, MVT::i32));
6367 // Upper 32 bits of the destination/source
6368 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
6369 DAG.getConstant(1, dl, MVT::i32));
6371 // Generate the shift operation as computed above
6372 Lo = DAG.getNode(ShPartsOpc, dl, DAG.getVTList(MVT::i32, MVT::i32), Lo, Hi,
6374 // The upper 32 bits come from the second return value of lsll
6375 Hi = SDValue(Lo.getNode(), 1);
6376 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6379 // We only lower SRA, SRL of 1 here, all others use generic lowering.
6380 if (!isOneConstant(N->getOperand(1)) || N->getOpcode() == ISD::SHL)
6383 // If we are in thumb mode, we don't have RRX.
6384 if (ST->isThumb1Only())
6387 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
6388 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
6389 DAG.getConstant(0, dl, MVT::i32));
6390 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
6391 DAG.getConstant(1, dl, MVT::i32));
6393 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
6394 // captures the shifted-out bit in the carry flag.
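// e.g. for an i64 SRL by 1, the bit shifted out of Hi becomes the carry, and
// the RRX below rotates it into the top bit of Lo.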
6395 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
6396 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);
6398 // The low part is an ARMISD::RRX operand, which shifts the carry in.
6399 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
6401 // Merge the pieces into a single i64 value.
6402 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
6405 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG,
6406 const ARMSubtarget *ST) {
6407 bool Invert = false;
6409 unsigned Opc = ARMCC::AL;
6411 SDValue Op0 = Op.getOperand(0);
6412 SDValue Op1 = Op.getOperand(1);
6413 SDValue CC = Op.getOperand(2);
6414 EVT VT = Op.getValueType();
6415 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
6420 CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
6422 assert(ST->hasMVEIntegerOps() &&
6423 "No hardware support for integer vector comparison!");
6425 if (Op.getValueType().getVectorElementType() != MVT::i1)
6428 // Make sure we expand floating point setcc to scalar if we do not have
6429 // mve.fp, so that we can handle them from there.
6430 if (Op0.getValueType().isFloatingPoint() && !ST->hasMVEFloatOps())
6436 if (Op0.getValueType().getVectorElementType() == MVT::i64 &&
6437 (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) {
6438 // Special-case integer 64-bit equality comparisons. They aren't legal,
6439 // but they can be lowered with a few vector instructions.
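// The equality is checked on i32 halves: ANDing the compare result with its
// own VREV64 leaves a 64-bit lane all-ones only when both 32-bit halves matched.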
6440 unsigned CmpElements = CmpVT.getVectorNumElements() * 2;
6441 EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements);
6442 SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0);
6443 SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1);
6444 SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1,
6445 DAG.getCondCode(ISD::SETEQ));
6446 SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp);
6447 SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed);
6448 Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged);
6449 if (SetCCOpcode == ISD::SETNE)
6450 Merged = DAG.getNOT(dl, Merged, CmpVT);
6451 Merged = DAG.getSExtOrTrunc(Merged, dl, VT);
6455 if (CmpVT.getVectorElementType() == MVT::i64)
6456 // 64-bit comparisons are not legal in general.
6459 if (Op1.getValueType().isFloatingPoint()) {
6460 switch (SetCCOpcode) {
6461 default: llvm_unreachable("Illegal FP comparison");
6464 if (ST->hasMVEFloatOps()) {
6465 Opc = ARMCC::NE; break;
6467 Invert = true; LLVM_FALLTHROUGH;
6470 case ISD::SETEQ: Opc = ARMCC::EQ; break;
6472 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
6474 case ISD::SETGT: Opc = ARMCC::GT; break;
6476 case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH;
6478 case ISD::SETGE: Opc = ARMCC::GE; break;
6479 case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
6480 case ISD::SETULE: Invert = true; Opc = ARMCC::GT; break;
6481 case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
6482 case ISD::SETULT: Invert = true; Opc = ARMCC::GE; break;
6483 case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
6485 // Expand this to (OLT | OGT).
6486 SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0,
6487 DAG.getConstant(ARMCC::GT, dl, MVT::i32));
6488 SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
6489 DAG.getConstant(ARMCC::GT, dl, MVT::i32));
6490 SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1);
6492 Result = DAG.getNOT(dl, Result, VT);
6495 case ISD::SETUO: Invert = true; LLVM_FALLTHROUGH;
6497 // Expand this to (OLT | OGE).
6498 SDValue TmpOp0 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op1, Op0,
6499 DAG.getConstant(ARMCC::GT, dl, MVT::i32));
6500 SDValue TmpOp1 = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
6501 DAG.getConstant(ARMCC::GE, dl, MVT::i32));
6502 SDValue Result = DAG.getNode(ISD::OR, dl, CmpVT, TmpOp0, TmpOp1);
6504 Result = DAG.getNOT(dl, Result, VT);
6509 // Integer comparisons.
6510 switch (SetCCOpcode) {
6511 default: llvm_unreachable("Illegal integer comparison");
6513 if (ST->hasMVEIntegerOps()) {
6514 Opc = ARMCC::NE; break;
6516 Invert = true; LLVM_FALLTHROUGH;
6518 case ISD::SETEQ: Opc = ARMCC::EQ; break;
6519 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
6520 case ISD::SETGT: Opc = ARMCC::GT; break;
6521 case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH;
6522 case ISD::SETGE: Opc = ARMCC::GE; break;
6523 case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
6524 case ISD::SETUGT: Opc = ARMCC::HI; break;
6525 case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
6526 case ISD::SETUGE: Opc = ARMCC::HS; break;
6529 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
6530 if (ST->hasNEON() && Opc == ARMCC::EQ) {
6532 if (ISD::isBuildVectorAllZeros(Op1.getNode()))
6534 else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
6537 // Ignore bitconvert.
6538 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
6539 AndOp = AndOp.getOperand(0);
6541 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
6542 Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
6543 Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
6544 SDValue Result = DAG.getNode(ARMISD::VTST, dl, CmpVT, Op0, Op1);
6546 Result = DAG.getNOT(dl, Result, VT);
6553 std::swap(Op0, Op1);
6555 // If one of the operands is a constant vector zero, attempt to fold the
6556 // comparison to a specialized compare-against-zero form.
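// e.g. x >= 0 becomes VCMPZ(x, GE), while 0 >= x is rewritten as x <= 0, i.e. VCMPZ(x, LE).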
6558 if (ISD::isBuildVectorAllZeros(Op1.getNode()))
6560 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
6561 if (Opc == ARMCC::GE)
6563 else if (Opc == ARMCC::GT)
6569 if (SingleOp.getNode()) {
6570 Result = DAG.getNode(ARMISD::VCMPZ, dl, CmpVT, SingleOp,
6571 DAG.getConstant(Opc, dl, MVT::i32));
6573 Result = DAG.getNode(ARMISD::VCMP, dl, CmpVT, Op0, Op1,
6574 DAG.getConstant(Opc, dl, MVT::i32));
6577 Result = DAG.getSExtOrTrunc(Result, dl, VT);
6580 Result = DAG.getNOT(dl, Result, VT);
6585 static SDValue LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) {
6586 SDValue LHS = Op.getOperand(0);
6587 SDValue RHS = Op.getOperand(1);
6588 SDValue Carry = Op.getOperand(2);
6589 SDValue Cond = Op.getOperand(3);
6592 assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
6594 // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we
6595 // have to invert the carry first.
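// e.g. an incoming borrow of 1 (ISD convention) becomes a carry-in of 0 for the SBC-style SUBE.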
6596 Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
6597 DAG.getConstant(1, DL, MVT::i32), Carry);
6598 // This converts the boolean value carry into the carry flag.
6599 Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);
6601 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
6602 SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);
6604 SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
6605 SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
6606 SDValue ARMcc = DAG.getConstant(
6607 IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
6608 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
6609 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
6610 Cmp.getValue(1), SDValue());
6611 return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
6612 CCR, Chain.getValue(1));
6615 /// isVMOVModifiedImm - Check if the specified splat value corresponds to a
6616 /// valid vector constant for a NEON or MVE instruction with a "modified
6617 /// immediate" operand (e.g., VMOV). If so, return the encoded value.
6618 static SDValue isVMOVModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
6619 unsigned SplatBitSize, SelectionDAG &DAG,
6620 const SDLoc &dl, EVT &VT, EVT VectorVT,
6621 VMOVModImmType type) {
6622 unsigned OpCmode, Imm;
6623 bool is128Bits = VectorVT.is128BitVector();
6625 // SplatBitSize is set to the smallest size that splats the vector, so a
6626 // zero vector will always have SplatBitSize == 8. However, NEON modified
6627 // immediate instructions other than VMOV do not support the 8-bit encoding
6628 // of a zero vector, and the default encoding of zero is supposed to be the 32-bit version.
6633 switch (SplatBitSize) {
6635 if (type != VMOVModImm)
6637 // Any 1-byte value is OK. Op=0, Cmode=1110.
6638 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
6641 VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
6645 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
6646 VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
6647 if ((SplatBits & ~0xff) == 0) {
6648 // Value = 0x00nn: Op=x, Cmode=100x.
6653 if ((SplatBits & ~0xff00) == 0) {
6654 // Value = 0xnn00: Op=x, Cmode=101x.
6656 Imm = SplatBits >> 8;
6662 // NEON's 32-bit VMOV supports splat values where:
6663 // * only one byte is nonzero, or
6664 // * the least significant byte is 0xff and the second byte is nonzero, or
6665 // * the least significant 2 bytes are 0xff and the third is nonzero.
6666 VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
6667 if ((SplatBits & ~0xff) == 0) {
6668 // Value = 0x000000nn: Op=x, Cmode=000x.
6673 if ((SplatBits & ~0xff00) == 0) {
6674 // Value = 0x0000nn00: Op=x, Cmode=001x.
6676 Imm = SplatBits >> 8;
6679 if ((SplatBits & ~0xff0000) == 0) {
6680 // Value = 0x00nn0000: Op=x, Cmode=010x.
6682 Imm = SplatBits >> 16;
6685 if ((SplatBits & ~0xff000000) == 0) {
6686 // Value = 0xnn000000: Op=x, Cmode=011x.
6688 Imm = SplatBits >> 24;
6692 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
6693 if (type == OtherModImm) return SDValue();
6695 if ((SplatBits & ~0xffff) == 0 &&
6696 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
6697 // Value = 0x0000nnff: Op=x, Cmode=1100.
6699 Imm = SplatBits >> 8;
6703 // cmode == 0b1101 is not supported for MVE VMVN
6704 if (type == MVEVMVNModImm)
6707 if ((SplatBits & ~0xffffff) == 0 &&
6708 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
6709 // Value = 0x00nnffff: Op=x, Cmode=1101.
6711 Imm = SplatBits >> 16;
6715 // Note: there are a few 32-bit splat values (specifically: 00ffff00,
6716 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
6717 // VMOV.I32. A (very) minor optimization would be to replicate the value
6718 // and fall through here to test for a valid 64-bit splat. But, then the
6719 // caller would also need to check and handle the change in size.
6723 if (type != VMOVModImm)
6725 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
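// e.g. a splat of 0x00ff00ff00ff00ff encodes as Imm == 0x55 (one bit per byte).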
6726 uint64_t BitMask = 0xff;
6728 unsigned ImmMask = 1;
6730 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
6731 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
6734 } else if ((SplatBits & BitMask) != 0) {
6741 if (DAG.getDataLayout().isBigEndian()) {
6742 // Reverse the order of elements within the vector.
6743 unsigned BytesPerElem = VectorVT.getScalarSizeInBits() / 8;
6744 unsigned Mask = (1 << BytesPerElem) - 1;
6745 unsigned NumElems = 8 / BytesPerElem;
6746 unsigned NewImm = 0;
6747 for (unsigned ElemNum = 0; ElemNum < NumElems; ++ElemNum) {
6748 unsigned Elem = ((Imm >> ElemNum * BytesPerElem) & Mask);
6749 NewImm |= Elem << (NumElems - ElemNum - 1) * BytesPerElem;
6754 // Op=1, Cmode=1110.
6756 VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
6761 llvm_unreachable("unexpected size for isVMOVModifiedImm");
6764 unsigned EncodedVal = ARM_AM::createVMOVModImm(OpCmode, Imm);
6765 return DAG.getTargetConstant(EncodedVal, dl, MVT::i32);
6768 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
6769 const ARMSubtarget *ST) const {
6770 EVT VT = Op.getValueType();
6771 bool IsDouble = (VT == MVT::f64);
6772 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
6773 const APFloat &FPVal = CFP->getValueAPF();
6775 // Prevent floating-point constants from using literal loads
6776 // when execute-only is enabled.
6777 if (ST->genExecuteOnly()) {
6778 // If we can represent the constant as an immediate, don't lower it
6779 if (isFPImmLegal(FPVal, VT))
6781 // Otherwise, construct as integer, and move to float register
6782 APInt INTVal = FPVal.bitcastToAPInt();
6784 switch (VT.getSimpleVT().SimpleTy) {
6786 llvm_unreachable("Unknown floating point type!");
6789 SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32);
6790 SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32);
6791 return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi);
6794 return DAG.getNode(ARMISD::VMOVSR, DL, VT,
6795 DAG.getConstant(INTVal, DL, MVT::i32));
6799 if (!ST->hasVFP3Base())
6802 // Use the default (constant pool) lowering for double constants when we have a single-precision-only FPU.
6804 if (IsDouble && !Subtarget->hasFP64())
6807 // Try splatting with a VMOV.f32...
6808 int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
6811 if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
6812 // We have code in place to select a valid ConstantFP already, no need to do any extra work.
6817 // It's a float and we are trying to use NEON operations where
6818 // possible. Lower it to a splat followed by an extract.
6820 SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32);
6821 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
6823 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
6824 DAG.getConstant(0, DL, MVT::i32));
6827 // The rest of our options are NEON only; make sure that's allowed before proceeding.
6829 if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
6833 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
6835 // It wouldn't really be worth bothering for doubles except for one very
6836 // important value, which does happen to match: 0.0. So bail out if the two 32-bit halves differ.
6838 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
6841 // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
6842 SDValue NewVal = isVMOVModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op),
6843 VMovVT, VT, VMOVModImm);
6844 if (NewVal != SDValue()) {
6846 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
6849 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
6851 // It's a float: cast and extract a vector element.
6852 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
6854 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
6855 DAG.getConstant(0, DL, MVT::i32));
6858 // Finally, try a VMVN.i32
6859 NewVal = isVMOVModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT,
6861 if (NewVal != SDValue()) {
6863 SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
6866 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
6868 // It's a float: cast and extract a vector element.
6869 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
6871 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
6872 DAG.getConstant(0, DL, MVT::i32));
6878 // Check if a VEXT instruction can handle the shuffle mask when the
6879 // vector sources of the shuffle are the same.
6880 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
6881 unsigned NumElts = VT.getVectorNumElements();
6883 // Assume that the first shuffle index is not UNDEF. Fail if it is.
6889 // If this is a VEXT shuffle, the immediate value is the index of the first
6890 // element. The other shuffle indices must be the successive elements after the first one.
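// e.g. for v8i8 with Imm == 3 the expected mask is <3, 4, 5, 6, 7, 0, 1, 2>.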
6892 unsigned ExpectedElt = Imm;
6893 for (unsigned i = 1; i < NumElts; ++i) {
6894 // Increment the expected index. If it wraps around, just follow it
6895 // back to index zero and keep going.
6897 if (ExpectedElt == NumElts)
6900 if (M[i] < 0) continue; // ignore UNDEF indices
6901 if (ExpectedElt != static_cast<unsigned>(M[i]))
6908 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
6909 bool &ReverseVEXT, unsigned &Imm) {
6910 unsigned NumElts = VT.getVectorNumElements();
6911 ReverseVEXT = false;
6913 // Assume that the first shuffle index is not UNDEF. Fail if it is.
6919 // If this is a VEXT shuffle, the immediate value is the index of the first
6920 // element. The other shuffle indices must be the successive elements after the first one.
6922 unsigned ExpectedElt = Imm;
6923 for (unsigned i = 1; i < NumElts; ++i) {
6924 // Increment the expected index. If it wraps around, it may still be
6925 // a VEXT but the source vectors must be swapped.
6927 if (ExpectedElt == NumElts * 2) {
6932 if (M[i] < 0) continue; // ignore UNDEF indices
6933 if (ExpectedElt != static_cast<unsigned>(M[i]))
6937 // Adjust the index value if the source operands will be swapped.
6944 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
6945 /// instruction with the specified blocksize. (The order of the elements
6946 /// within each block of the vector is reversed.)
6947 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
6948 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
6949 "Only possible block sizes for VREV are: 16, 32, 64");
6951 unsigned EltSz = VT.getScalarSizeInBits();
6955 unsigned NumElts = VT.getVectorNumElements();
6956 unsigned BlockElts = M[0] + 1;
6957 // If the first shuffle index is UNDEF, be optimistic.
6959 BlockElts = BlockSize / EltSz;
6961 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
6964 for (unsigned i = 0; i < NumElts; ++i) {
6965 if (M[i] < 0) continue; // ignore UNDEF indices
6966 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
6973 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
6974 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
6975 // range, then 0 is placed into the resulting vector. So pretty much any mask
6976 // of 8 elements can work here.
6977 return VT == MVT::v8i8 && M.size() == 8;
6980 static unsigned SelectPairHalf(unsigned Elements, ArrayRef<int> Mask,
6982 if (Mask.size() == Elements * 2)
6983 return Index / Elements;
6984 return Mask[Index] == 0 ? 0 : 1;
6987 // Checks whether the shuffle mask represents a vector transpose (VTRN) by
6988 // checking that pairs of elements in the shuffle mask represent the same index
6989 // in each vector, incrementing the expected index by 2 at each step.
6990 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
6991 // v1={a,b,c,d}, v2={e,f,g,h} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
6993 // WhichResult gives the offset for each element in the mask based on which
6994 // of the two results it belongs to.
6996 // The transpose can be represented either as:
6997 // result1 = shufflevector v1, v2, result1_shuffle_mask
6998 // result2 = shufflevector v1, v2, result2_shuffle_mask
6999 // where v1/v2 and the shuffle masks have the same number of elements
7000 // (here WhichResult (see below) indicates which result is being checked)
7003 // results = shufflevector v1, v2, shuffle_mask
7004 // where both results are returned in one vector and the shuffle mask has twice
7005 // as many elements as v1/v2 (here WhichResult will always be 0 if true); here
7006 // we check the low half and high half of the shuffle mask as if each were a separate mask.
7008 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
7009 unsigned EltSz = VT.getScalarSizeInBits();
7013 unsigned NumElts = VT.getVectorNumElements();
7014 if (M.size() != NumElts && M.size() != NumElts*2)
7017 // If the mask is twice as long as the input vector then we need to check the
7018 // upper and lower parts of the mask with a matching value for WhichResult
7019 // FIXME: A mask with only even values will be rejected in case the first
7020 // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only
7021 // M[0] is used to determine WhichResult
7022 for (unsigned i = 0; i < M.size(); i += NumElts) {
7023 WhichResult = SelectPairHalf(NumElts, M, i);
7024 for (unsigned j = 0; j < NumElts; j += 2) {
7025 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
7026 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult))
7031 if (M.size() == NumElts*2)
7037 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
7038 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
7039 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
7040 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
7041 unsigned EltSz = VT.getScalarSizeInBits();
7045 unsigned NumElts = VT.getVectorNumElements();
7046 if (M.size() != NumElts && M.size() != NumElts*2)
7049 for (unsigned i = 0; i < M.size(); i += NumElts) {
7050 WhichResult = SelectPairHalf(NumElts, M, i);
7051 for (unsigned j = 0; j < NumElts; j += 2) {
7052 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
7053 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult))
7058 if (M.size() == NumElts*2)
7064 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
7065 // that the mask elements are either all even and in steps of size 2 or all odd
7066 // and in steps of size 2.
7067 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
7068 // v1={a,b,c,d}, v2={e,f,g,h} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
7070 // Requires similar checks to those of isVTRNMask with
7071 // respect to how the results are returned.
7072 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
7073 unsigned EltSz = VT.getScalarSizeInBits();
7077 unsigned NumElts = VT.getVectorNumElements();
7078 if (M.size() != NumElts && M.size() != NumElts*2)
7081 for (unsigned i = 0; i < M.size(); i += NumElts) {
7082 WhichResult = SelectPairHalf(NumElts, M, i);
7083 for (unsigned j = 0; j < NumElts; ++j) {
7084 if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult)
7089 if (M.size() == NumElts*2)
7092 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
7093 if (VT.is64BitVector() && EltSz == 32)
7099 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
7100 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
7101 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
7102 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
7103 unsigned EltSz = VT.getScalarSizeInBits();
7107 unsigned NumElts = VT.getVectorNumElements();
7108 if (M.size() != NumElts && M.size() != NumElts*2)
7111 unsigned Half = NumElts / 2;
7112 for (unsigned i = 0; i < M.size(); i += NumElts) {
7113 WhichResult = SelectPairHalf(NumElts, M, i);
7114 for (unsigned j = 0; j < NumElts; j += Half) {
7115 unsigned Idx = WhichResult;
7116 for (unsigned k = 0; k < Half; ++k) {
7117 int MIdx = M[i + j + k];
7118 if (MIdx >= 0 && (unsigned) MIdx != Idx)
7125 if (M.size() == NumElts*2)
7128 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
7129 if (VT.is64BitVector() && EltSz == 32)
7135 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking
7136 // that pairs of elements of the shufflemask represent the same index in each
7137 // vector incrementing sequentially through the vectors.
7138 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
7139 // v1={a,b,c,d}, v2={e,f,g,h} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
7141 // Requires similar checks to those of isVTRNMask with respect to how the results are returned.
7143 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
7144 unsigned EltSz = VT.getScalarSizeInBits();
7148 unsigned NumElts = VT.getVectorNumElements();
7149 if (M.size() != NumElts && M.size() != NumElts*2)
7152 for (unsigned i = 0; i < M.size(); i += NumElts) {
7153 WhichResult = SelectPairHalf(NumElts, M, i);
7154 unsigned Idx = WhichResult * NumElts / 2;
7155 for (unsigned j = 0; j < NumElts; j += 2) {
7156 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
7157 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts))
7163 if (M.size() == NumElts*2)
7166 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
7167 if (VT.is64BitVector() && EltSz == 32)
7173 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
7174 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
7175 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
7176 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
7177 unsigned EltSz = VT.getScalarSizeInBits();
7181 unsigned NumElts = VT.getVectorNumElements();
7182 if (M.size() != NumElts && M.size() != NumElts*2)
7185 for (unsigned i = 0; i < M.size(); i += NumElts) {
7186 WhichResult = SelectPairHalf(NumElts, M, i);
7187 unsigned Idx = WhichResult * NumElts / 2;
7188 for (unsigned j = 0; j < NumElts; j += 2) {
7189 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
7190 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx))
7196 if (M.size() == NumElts*2)
7199 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
7200 if (VT.is64BitVector() && EltSz == 32)
7206 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
7207 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
7208 static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
7209 unsigned &WhichResult,
7212 if (isVTRNMask(ShuffleMask, VT, WhichResult))
7213 return ARMISD::VTRN;
7214 if (isVUZPMask(ShuffleMask, VT, WhichResult))
7215 return ARMISD::VUZP;
7216 if (isVZIPMask(ShuffleMask, VT, WhichResult))
7217 return ARMISD::VZIP;
7220 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
7221 return ARMISD::VTRN;
7222 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
7223 return ARMISD::VUZP;
7224 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
7225 return ARMISD::VZIP;
7230 /// \return true if this is a reverse operation on a vector.
7231 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
7232 unsigned NumElts = VT.getVectorNumElements();
7233 // Make sure the mask has the right size.
7234 if (NumElts != M.size())
7237 // Look for <15, ..., 3, -1, 1, 0>.
7238 for (unsigned i = 0; i != NumElts; ++i)
7239 if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
7245 static bool isVMOVNMask(ArrayRef<int> M, EVT VT, bool Top) {
7246 unsigned NumElts = VT.getVectorNumElements();
7247 // Make sure the mask has the right size.
7248 if (NumElts != M.size() || (VT != MVT::v8i16 && VT != MVT::v16i8))
7252 // If Top, look for <0, N, 2, N+2, 4, N+4, ..>;
7253 // this inserts Input2 into Input1.
7255 // Otherwise, look for <0, N+1, 2, N+3, 4, N+5, ..>;
7256 // this inserts Input1 into Input2.
7257 unsigned Offset = Top ? 0 : 1;
7258 for (unsigned i = 0; i < NumElts; i+=2) {
7259 if (M[i] >= 0 && M[i] != (int)i)
7261 if (M[i+1] >= 0 && M[i+1] != (int)(NumElts + i + Offset))
7268 // Reconstruct an MVE VCVT from a BuildVector of scalar fptrunc, all extracted
7269 // from a pair of inputs. For example:
7270 // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0),
7271 // FP_ROUND(EXTRACT_ELT(Y, 0),
7272 // FP_ROUND(EXTRACT_ELT(X, 1),
7273 // FP_ROUND(EXTRACT_ELT(Y, 1), ...)
7274 static SDValue LowerBuildVectorOfFPTrunc(SDValue BV, SelectionDAG &DAG,
7275 const ARMSubtarget *ST) {
7276 assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
7277 if (!ST->hasMVEFloatOps())
7281 EVT VT = BV.getValueType();
7282 if (VT != MVT::v8f16)
7285 // We are looking for a buildvector of fptrunc elements, where all the
7286 // elements are extracted alternately from the two sources. Check that the first two
7287 // items are valid enough and extract some info from them (they are checked
7288 // properly in the loop below).
7289 if (BV.getOperand(0).getOpcode() != ISD::FP_ROUND ||
7290 BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7291 BV.getOperand(0).getOperand(0).getConstantOperandVal(1) != 0)
7293 if (BV.getOperand(1).getOpcode() != ISD::FP_ROUND ||
7294 BV.getOperand(1).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
7295 BV.getOperand(1).getOperand(0).getConstantOperandVal(1) != 0)
7297 SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0);
7298 SDValue Op1 = BV.getOperand(1).getOperand(0).getOperand(0);
7299 if (Op0.getValueType() != MVT::v4f32 || Op1.getValueType() != MVT::v4f32)
7302 // Check all the values in the BuildVector line up with our expectations.
7303 for (unsigned i = 1; i < 4; i++) {
7304 auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) {
7305 return Trunc.getOpcode() == ISD::FP_ROUND &&
7306 Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7307 Trunc.getOperand(0).getOperand(0) == Op &&
7308 Trunc.getOperand(0).getConstantOperandVal(1) == Idx;
7310 if (!Check(BV.getOperand(i * 2 + 0), Op0, i))
7312 if (!Check(BV.getOperand(i * 2 + 1), Op1, i))
7316 SDValue N1 = DAG.getNode(ARMISD::VCVTN, dl, VT, DAG.getUNDEF(VT), Op0,
7317 DAG.getConstant(0, dl, MVT::i32));
7318 return DAG.getNode(ARMISD::VCVTN, dl, VT, N1, Op1,
7319 DAG.getConstant(1, dl, MVT::i32));
7322 // Reconstruct an MVE VCVT from a BuildVector of scalar fpext, all extracted
7323 // from a single input on alternating lanes. For example:
7324 // BUILDVECTOR(FP_ROUND(EXTRACT_ELT(X, 0),
7325 // FP_ROUND(EXTRACT_ELT(X, 2),
7326 // FP_ROUND(EXTRACT_ELT(X, 4), ...)
7327 static SDValue LowerBuildVectorOfFPExt(SDValue BV, SelectionDAG &DAG,
7328 const ARMSubtarget *ST) {
7329 assert(BV.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
7330 if (!ST->hasMVEFloatOps())
7334 EVT VT = BV.getValueType();
7335 if (VT != MVT::v4f32)
7338 // We are looking for a buildvector of fpext elements, where all the
7339 // elements are alternating lanes from a single source. For example <0,2,4,6>
7340 // or <1,3,5,7>. Check the first two items are valid enough and extract some
7341 // info from them (they are checked properly in the loop below).
7342 if (BV.getOperand(0).getOpcode() != ISD::FP_EXTEND ||
7343 BV.getOperand(0).getOperand(0).getOpcode() != ISD::EXTRACT_VECTOR_ELT)
7345 SDValue Op0 = BV.getOperand(0).getOperand(0).getOperand(0);
7346 int Offset = BV.getOperand(0).getOperand(0).getConstantOperandVal(1);
7347 if (Op0.getValueType() != MVT::v8f16 || (Offset != 0 && Offset != 1))
7350 // Check all the values in the BuildVector line up with our expectations.
7351 for (unsigned i = 1; i < 4; i++) {
7352 auto Check = [](SDValue Trunc, SDValue Op, unsigned Idx) {
7353 return Trunc.getOpcode() == ISD::FP_EXTEND &&
7354 Trunc.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7355 Trunc.getOperand(0).getOperand(0) == Op &&
7356 Trunc.getOperand(0).getConstantOperandVal(1) == Idx;
7358 if (!Check(BV.getOperand(i), Op0, 2 * i + Offset))
7362 return DAG.getNode(ARMISD::VCVTL, dl, VT, Op0,
7363 DAG.getConstant(Offset, dl, MVT::i32));
7366 // If N is an integer constant that can be moved into a register in one
7367 // instruction, return an SDValue of such a constant (will become a MOV
7368 // instruction). Otherwise return null.
7369 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
7370 const ARMSubtarget *ST, const SDLoc &dl) {
7372 if (!isa<ConstantSDNode>(N))
7374 Val = cast<ConstantSDNode>(N)->getZExtValue();
7376 if (ST->isThumb1Only()) {
7377 if (Val <= 255 || ~Val <= 255)
7378 return DAG.getConstant(Val, dl, MVT::i32);
7380 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
7381 return DAG.getConstant(Val, dl, MVT::i32);
7386 static SDValue LowerBUILD_VECTOR_i1(SDValue Op, SelectionDAG &DAG,
7387 const ARMSubtarget *ST) {
7389 EVT VT = Op.getValueType();
7391 assert(ST->hasMVEIntegerOps() && "LowerBUILD_VECTOR_i1 called without MVE!");
7393 unsigned NumElts = VT.getVectorNumElements();
7395 unsigned BitsPerBool;
7399 } else if (NumElts == 8) {
7402 } else if (NumElts == 16) {
7408 // If this is a single value copied into all lanes (a splat), we can just sign
7409 // extend that single value
7410 SDValue FirstOp = Op.getOperand(0);
7411 if (!isa<ConstantSDNode>(FirstOp) &&
7412 std::all_of(std::next(Op->op_begin()), Op->op_end(),
7413 [&FirstOp](SDUse &U) {
7414 return U.get().isUndef() || U.get() == FirstOp;
7416 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32, FirstOp,
7417 DAG.getValueType(MVT::i1));
7418 return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), Ext);
7421 // First create base with bits set where known
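// Each lane occupies BitsPerBool bits of the 16-bit MVE predicate; lanes that
// are compile-time constants contribute their bits here, and the remaining
// lanes are filled in below with INSERT_VECTOR_ELT.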
7422 unsigned Bits32 = 0;
7423 for (unsigned i = 0; i < NumElts; ++i) {
7424 SDValue V = Op.getOperand(i);
7425 if (!isa<ConstantSDNode>(V) && !V.isUndef())
7427 bool BitSet = V.isUndef() ? false : cast<ConstantSDNode>(V)->getZExtValue();
7429 Bits32 |= BoolMask << (i * BitsPerBool);
7432 // Add in unknown nodes
7433 SDValue Base = DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT,
7434 DAG.getConstant(Bits32, dl, MVT::i32));
7435 for (unsigned i = 0; i < NumElts; ++i) {
7436 SDValue V = Op.getOperand(i);
7437 if (isa<ConstantSDNode>(V) || V.isUndef())
7439 Base = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Base, V,
7440 DAG.getConstant(i, dl, MVT::i32));
7446 // If this is a case we can't handle, return null and let the default
7447 // expansion code take care of it.
7448 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
7449 const ARMSubtarget *ST) const {
7450 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
7452 EVT VT = Op.getValueType();
7454 if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
7455 return LowerBUILD_VECTOR_i1(Op, DAG, ST);
7457 APInt SplatBits, SplatUndef;
7458 unsigned SplatBitSize;
7460 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
7461 if (SplatUndef.isAllOnesValue())
7462 return DAG.getUNDEF(VT);
7464 if ((ST->hasNEON() && SplatBitSize <= 64) ||
7465 (ST->hasMVEIntegerOps() && SplatBitSize <= 64)) {
7466 // Check if an immediate VMOV works.
7469 isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
7470 SplatBitSize, DAG, dl, VmovVT, VT, VMOVModImm);
7472 if (Val.getNode()) {
7473 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
7474 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
7477 // Try an immediate VMVN.
7478 uint64_t NegatedImm = (~SplatBits).getZExtValue();
7479 Val = isVMOVModifiedImm(
7480 NegatedImm, SplatUndef.getZExtValue(), SplatBitSize, DAG, dl, VmovVT,
7481 VT, ST->hasMVEIntegerOps() ? MVEVMVNModImm : VMVNModImm);
7482 if (Val.getNode()) {
7483 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
7484 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
7487 // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
7488 if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
7489 int ImmVal = ARM_AM::getFP32Imm(SplatBits);
7491 SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
7492 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
7498 // Scan through the operands to see if only one value is used.
7500 // As an optimisation, even if more than one value is used it may be more
7501 // profitable to splat with one value and then change some lanes.
7503 // Heuristically we decide to do this if the vector has a "dominant" value,
7504 // defined as splatted to more than half of the lanes.
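// e.g. <a, a, b, a> has dominant value 'a' (3 of 4 lanes); it is splatted with
// VDUP and the differing lane is then fixed up with an INSERT_VECTOR_ELT.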
7505 unsigned NumElts = VT.getVectorNumElements();
7506 bool isOnlyLowElement = true;
7507 bool usesOnlyOneValue = true;
7508 bool hasDominantValue = false;
7509 bool isConstant = true;
7511 // Map of the number of times a particular SDValue appears in the value list.
7513 DenseMap<SDValue, unsigned> ValueCounts;
7515 for (unsigned i = 0; i < NumElts; ++i) {
7516 SDValue V = Op.getOperand(i);
7520 isOnlyLowElement = false;
7521 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
7524 ValueCounts.insert(std::make_pair(V, 0));
7525 unsigned &Count = ValueCounts[V];
7527 // Is this value dominant? (takes up more than half of the lanes)
7528 if (++Count > (NumElts / 2)) {
7529 hasDominantValue = true;
7533 if (ValueCounts.size() != 1)
7534 usesOnlyOneValue = false;
7535 if (!Value.getNode() && !ValueCounts.empty())
7536 Value = ValueCounts.begin()->first;
7538 if (ValueCounts.empty())
7539 return DAG.getUNDEF(VT);
7541 // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
7542 // Keep going if we are hitting this case.
7543 if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
7544 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
7546 unsigned EltSize = VT.getScalarSizeInBits();
7548 // Use VDUP for non-constant splats. For f32 constant splats, reduce to
7549 // i32 and try again.
7550 if (hasDominantValue && EltSize <= 32) {
7554 // If we are VDUPing a value that comes directly from a vector, that will
7555 // cause an unnecessary move to and from a GPR, where instead we could
7556 // just use VDUPLANE. We can only do this if the lane being extracted
7557 // is at a constant index, as the VDUP from lane instructions only have
7558 // constant-index forms.
7559 ConstantSDNode *constIndex;
7560 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
7561 (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) {
7562 // We need to create a new undef vector to use for the VDUPLANE if the
7563 // size of the vector from which we get the value is different than the
7564 // size of the vector that we need to create. We will insert the element
7565 // such that the register coalescer will remove unnecessary copies.
7566 if (VT != Value->getOperand(0).getValueType()) {
7567 unsigned index = constIndex->getAPIntValue().getLimitedValue() %
7568 VT.getVectorNumElements();
7569 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
7570 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
7571 Value, DAG.getConstant(index, dl, MVT::i32)),
7572 DAG.getConstant(index, dl, MVT::i32));
7574 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
7575 Value->getOperand(0), Value->getOperand(1));
7577 N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
7579 if (!usesOnlyOneValue) {
7580 // The dominant value was splatted as 'N', but we now have to insert
7581 // all differing elements.
7582 for (unsigned I = 0; I < NumElts; ++I) {
7583 if (Op.getOperand(I) == Value)
7585 SmallVector<SDValue, 3> Ops;
7587 Ops.push_back(Op.getOperand(I));
7588 Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
7589 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
    if (VT.getVectorElementType().isFloatingPoint()) {
      SmallVector<SDValue, 8> Ops;
      MVT FVT = VT.getVectorElementType().getSimpleVT();
      assert(FVT == MVT::f32 || FVT == MVT::f16);
      MVT IVT = (FVT == MVT::f32) ? MVT::i32 : MVT::i16;
      for (unsigned i = 0; i < NumElts; ++i)
        Ops.push_back(DAG.getNode(ISD::BITCAST, dl, IVT,
                                  Op.getOperand(i)));
      EVT VecVT = EVT::getVectorVT(*DAG.getContext(), IVT, NumElts);
      SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
      Val = LowerBUILD_VECTOR(Val, DAG, ST);
      if (Val.getNode())
        return DAG.getNode(ISD::BITCAST, dl, VT, Val);
    }
    if (usesOnlyOneValue) {
      SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
      if (isConstant && Val.getNode())
        return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
    }
  }

  // If all elements are constants and the case above didn't get hit, fall back
  // to the default expansion, which will generate a load from the constant
  // pool.
  if (isConstant)
    return SDValue();
  // Reconstruct the BUILDVECTOR to one of the legal shuffles (such as vext and
  // vmovn). Empirical tests suggest this is rarely worth it for vectors of
  // length <= 2.
  if (NumElts >= 4)
    if (SDValue shuffle = ReconstructShuffle(Op, DAG))
      return shuffle;
  // Attempt to turn a buildvector of scalar fptrunc's or fpext's back into
  // the equivalent vector fptrunc/fpext (VCVT) operations.
  if (SDValue VCVT = LowerBuildVectorOfFPTrunc(Op, DAG, Subtarget))
    return VCVT;
  if (SDValue VCVT = LowerBuildVectorOfFPExt(Op, DAG, Subtarget))
    return VCVT;
  if (ST->hasNEON() && VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) {
    // If we haven't found an efficient lowering, try splitting a 128-bit vector
    // into two 64-bit vectors; we might discover a better way to lower it.
    SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts);
    EVT ExtVT = VT.getVectorElementType();
    EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2);
    SDValue Lower =
        DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2));
    if (Lower.getOpcode() == ISD::BUILD_VECTOR)
      Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
    SDValue Upper = DAG.getBuildVector(
        HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
    if (Upper.getOpcode() == ISD::BUILD_VECTOR)
      Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
    if (Lower && Upper)
      return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper);
  }
  // Vectors with 32- or 64-bit elements can be built by directly assigning
  // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands
  // will be legalized.
  if (EltSize >= 32) {
    // Do the expansion with floating-point types, since that is what the VFP
    // registers are defined to use, and since i64 is not legal.
    EVT EltVT = EVT::getFloatingPointVT(EltSize);
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
    SmallVector<SDValue, 8> Ops;
    for (unsigned i = 0; i < NumElts; ++i)
      Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
    SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
    return DAG.getNode(ISD::BITCAST, dl, VT, Val);
  }

  // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
  // know the default expansion would otherwise fall back on something even
  // worse. For a vector with one or two non-undef values, that's
  // scalar_to_vector for the elements followed by a shuffle (provided the
  // shuffle is valid for the target) and materialization element by element
  // on the stack followed by a load for everything else.
  if (!isConstant && !usesOnlyOneValue) {
    SDValue Vec = DAG.getUNDEF(VT);
    for (unsigned i = 0 ; i < NumElts; ++i) {
      SDValue V = Op.getOperand(i);
      if (V.isUndef())
        continue;
      SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
    }
    return Vec;
  }

  return SDValue();
}
7689 // Gather data to see if the operation can be modelled as a
7690 // shuffle in combination with VEXTs.
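// For example, a BUILD_VECTOR whose operands are all EXTRACT_VECTOR_ELTs of
// two source vectors A and B (say <A[1], A[2], B[0], B[3]>) can often be
// emitted as a single VECTOR_SHUFFLE of A and B, possibly after VEXTing or
// bitcasting the sources into a compatible shape.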
7691 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
7692 SelectionDAG &DAG) const {
7693 assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
7695 EVT VT = Op.getValueType();
7696 unsigned NumElts = VT.getVectorNumElements();
  struct ShuffleSourceInfo {
    SDValue Vec;
    unsigned MinElt = std::numeric_limits<unsigned>::max();
    unsigned MaxElt = 0;

    // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
    // be compatible with the shuffle we intend to construct. As a result
    // ShuffleVec will be some sliding window into the original Vec.
    SDValue ShuffleVec;

    // Code should guarantee that element i in Vec starts at element "WindowBase
    // + i * WindowScale" in ShuffleVec.
    int WindowBase = 0;
    int WindowScale = 1;

    ShuffleSourceInfo(SDValue Vec) : Vec(Vec), ShuffleVec(Vec) {}

    bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
  };
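  // For example, if Vec is a v4i32 source viewed as v8i16 shuffle lanes,
  // WindowScale is 2 and element i of Vec occupies ShuffleVec lanes
  // [WindowBase + 2*i, WindowBase + 2*i + 1].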
  // First gather all vectors used as an immediate source for this BUILD_VECTOR
  // node.
  SmallVector<ShuffleSourceInfo, 2> Sources;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.isUndef())
      continue;
    else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
      // A shuffle can only come from building a vector from various
      // elements of other vectors.
      return SDValue();
    } else if (!isa<ConstantSDNode>(V.getOperand(1))) {
      // Furthermore, shuffles require a constant mask, whereas extractelts
      // accept variable indices.
      return SDValue();
    }
7735 // Add this element source to the list if it's not already there.
7736 SDValue SourceVec = V.getOperand(0);
7737 auto Source = llvm::find(Sources, SourceVec);
7738 if (Source == Sources.end())
7739 Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
7741 // Update the minimum and maximum lane number seen.
7742 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
7743 Source->MinElt = std::min(Source->MinElt, EltNo);
7744 Source->MaxElt = std::max(Source->MaxElt, EltNo);
  // Currently only do something sane when at most two source vectors
  // are involved.
  if (Sources.size() > 2)
    return SDValue();
7752 // Find out the smallest element size among result and two sources, and use
7753 // it as element size to build the shuffle_vector.
7754 EVT SmallestEltTy = VT.getVectorElementType();
7755 for (auto &Source : Sources) {
7756 EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
7757 if (SrcEltTy.bitsLT(SmallestEltTy))
7758 SmallestEltTy = SrcEltTy;
7760 unsigned ResMultiplier =
7761 VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
7762 NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
7763 EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
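  // For example, when building a 128-bit v4i32 from elements of a v8i16
  // source, SmallestEltTy is i16, ResMultiplier is 2 and ShuffleVT is v8i16:
  // each 32-bit result lane is assembled from two 16-bit shuffle lanes.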
7765 // If the source vector is too wide or too narrow, we may nevertheless be able
7766 // to construct a compatible shuffle either by concatenating it with UNDEF or
7767 // extracting a suitable range of elements.
  for (auto &Src : Sources) {
    EVT SrcVT = Src.ShuffleVec.getValueType();
    if (SrcVT.getSizeInBits() == VT.getSizeInBits())
      continue;

    // This stage of the search produces a source with the same element type as
    // the original, but with a total width matching the BUILD_VECTOR output.
    EVT EltVT = SrcVT.getVectorElementType();
    unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
    EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);

    if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
      if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits())
        return SDValue();
      // We can pad out the smaller vector for free, so if it's part of a
      // shuffle...
      Src.ShuffleVec =
          DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
                      DAG.getUNDEF(Src.ShuffleVec.getValueType()));
      continue;
    }

    if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits())
      return SDValue();

    if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
      // Span too large for a VEXT to cope
      return SDValue();
    }

    if (Src.MinElt >= NumSrcElts) {
      // The extraction can just take the second half
      Src.ShuffleVec =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(NumSrcElts, dl, MVT::i32));
      Src.WindowBase = -NumSrcElts;
    } else if (Src.MaxElt < NumSrcElts) {
      // The extraction can just take the first half
      Src.ShuffleVec =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(0, dl, MVT::i32));
    } else {
      // An actual VEXT is needed
      SDValue VEXTSrc1 =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(0, dl, MVT::i32));
      SDValue VEXTSrc2 =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(NumSrcElts, dl, MVT::i32));

      Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1,
                                   VEXTSrc2,
                                   DAG.getConstant(Src.MinElt, dl, MVT::i32));
      Src.WindowBase = -Src.MinElt;
    }
  }
7826 // Another possible incompatibility occurs from the vector element types. We
7827 // can fix this by bitcasting the source vectors to the same type we intend
7829 for (auto &Src : Sources) {
7830 EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
7831 if (SrcEltTy == SmallestEltTy)
7833 assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
7834 Src.ShuffleVec = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, ShuffleVT, Src.ShuffleVec);
7835 Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
7836 Src.WindowBase *= Src.WindowScale;
  // Final sanity check before we try to actually produce a shuffle.
  LLVM_DEBUG(for (auto Src
                  : Sources)
                 assert(Src.ShuffleVec.getValueType() == ShuffleVT););
7844 // The stars all align, our next step is to produce the mask for the shuffle.
7845 SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
7846 int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
7847 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
7848 SDValue Entry = Op.getOperand(i);
    if (Entry.isUndef())
      continue;
7852 auto Src = llvm::find(Sources, Entry.getOperand(0));
7853 int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
    // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
    // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
    // segment.
7858 EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
7859 int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
7860 VT.getScalarSizeInBits());
7861 int LanesDefined = BitsDefined / BitsPerShuffleLane;
7863 // This source is expected to fill ResMultiplier lanes of the final shuffle,
7864 // starting at the appropriate offset.
7865 int *LaneMask = &Mask[i * ResMultiplier];
7867 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
7868 ExtractBase += NumElts * (Src - Sources.begin());
    for (int j = 0; j < LanesDefined; ++j)
      LaneMask[j] = ExtractBase + j;
  }
7874 // We can't handle more than two sources. This should have already
7875 // been checked before this point.
7876 assert(Sources.size() <= 2 && "Too many sources!");
7878 SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
7879 for (unsigned i = 0; i < Sources.size(); ++i)
7880 ShuffleOps[i] = Sources[i].ShuffleVec;
  SDValue Shuffle = buildLegalVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
                                            ShuffleOps[1], Mask, DAG);
  if (!Shuffle)
    return SDValue();
  return DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Shuffle);
}
enum ShuffleOpCodes {
  OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
  OP_VREV,
  OP_VDUP0,
  OP_VDUP1,
  OP_VDUP2,
  OP_VDUP3,
  OP_VEXT1,
  OP_VEXT2,
  OP_VEXT3,
  OP_VUZPL, // VUZP, left result
  OP_VUZPR, // VUZP, right result
  OP_VZIPL, // VZIP, left result
  OP_VZIPR, // VZIP, right result
  OP_VTRNL, // VTRN, left result
  OP_VTRNR  // VTRN, right result
};
7907 static bool isLegalMVEShuffleOp(unsigned PFEntry) {
7908 unsigned OpNum = (PFEntry >> 26) & 0x0F;
7921 /// isShuffleMaskLegal - Targets can use this to indicate that they only
7922 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
7923 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
7924 /// are assumed to be legal.
7925 bool ARMTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
7926 if (VT.getVectorNumElements() == 4 &&
7927 (VT.is128BitVector() || VT.is64BitVector())) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (M[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = M[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex =
        PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4 && (Subtarget->hasNEON() || isLegalMVEShuffleOp(PFEntry)))
      return true;
  }
7946 bool ReverseVEXT, isV_UNDEF;
7947 unsigned Imm, WhichResult;
7949 unsigned EltSize = VT.getScalarSizeInBits();
  if (EltSize >= 32 ||
      ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
      ShuffleVectorInst::isIdentityMask(M) ||
      isVREVMask(M, VT, 64) ||
      isVREVMask(M, VT, 32) ||
      isVREVMask(M, VT, 16))
    return true;
  else if (Subtarget->hasNEON() &&
           (isVEXTMask(M, VT, ReverseVEXT, Imm) ||
            isVTBLMask(M, VT) ||
            isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF)))
    return true;
  else if (Subtarget->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) &&
           isReverseMask(M, VT))
    return true;
  else if (Subtarget->hasMVEIntegerOps() &&
           (isVMOVNMask(M, VT, 0) || isVMOVNMask(M, VT, 1)))
    return true;
  else
    return false;
}
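// Note on the table encoding: each PerfectShuffleTable entry packs the cost
// into bits 31-30, a ShuffleOpCodes opcode into bits 29-26, and two 13-bit
// fields naming the LHS and RHS sub-shuffles to recurse on. The table index
// encodes the four mask elements in base 9 (8 meaning undef), e.g. mask
// <0,1,2,3> maps to 0*729 + 1*81 + 2*9 + 3 = 102.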
7972 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
7973 /// the specified operations to build the shuffle.
7974 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
7975 SDValue RHS, SelectionDAG &DAG,
7977 unsigned OpNum = (PFEntry >> 26) & 0x0F;
7978 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
7979 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
7981 if (OpNum == OP_COPY) {
7982 if (LHSID == (1*9+2)*9+3) return LHS;
7983 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
7987 SDValue OpLHS, OpRHS;
7988 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
7989 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
7990 EVT VT = OpLHS.getValueType();
7993 default: llvm_unreachable("Unknown shuffle opcode!");
7995 // VREV divides the vector in half and swaps within the half.
7996 if (VT.getVectorElementType() == MVT::i32 ||
7997 VT.getVectorElementType() == MVT::f32)
7998 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
7999 // vrev <4 x i16> -> VREV32
8000 if (VT.getVectorElementType() == MVT::i16)
8001 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
8002 // vrev <4 x i8> -> VREV16
8003 assert(VT.getVectorElementType() == MVT::i8);
8004 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
8009 return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
8010 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
8014 return DAG.getNode(ARMISD::VEXT, dl, VT,
8016 DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
8019 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
8020 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
8023 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
8024 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
    return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
                       OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
  }
}
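// NEON VTBL is a byte-wise table lookup: each lane of the index vector selects
// a byte from a table held in one (VTBL1) or two (VTBL2) D registers, so an
// arbitrary v8i8 shuffle can be emitted as one lookup that uses the shuffle
// mask itself as the index vector.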
8032 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
8033 ArrayRef<int> ShuffleMask,
8034 SelectionDAG &DAG) {
8035 // Check to see if we can use the VTBL instruction.
8036 SDValue V1 = Op.getOperand(0);
8037 SDValue V2 = Op.getOperand(1);
8040 SmallVector<SDValue, 8> VTBLMask;
8041 for (ArrayRef<int>::iterator
8042 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
8043 VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));
8045 if (V2.getNode()->isUndef())
8046 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
8047 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
8049 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
8050 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
8053 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
8054 SelectionDAG &DAG) {
8056 SDValue OpLHS = Op.getOperand(0);
8057 EVT VT = OpLHS.getValueType();
8059 assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
8060 "Expect an v8i16/v16i8 type");
8061 OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
8062 // For a v16i8 type: After the VREV, we have got <8, ...15, 8, ..., 0>. Now,
8063 // extract the first 8 bytes into the top double word and the last 8 bytes
8064 // into the bottom double word. The v8i16 case is similar.
8065 unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
  return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
                     DAG.getConstant(ExtractNum, DL, MVT::i32));
}
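// MVE predicate vectors map onto full integer vectors with the same number of
// lanes: v16i1 corresponds to v16i8, v8i1 to v8i16 and v4i1 to v4i32, with the
// 16-bit VPR.P0 register holding one predicate bit per byte of the 128-bit
// vector.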
8070 static EVT getVectorTyFromPredicateVector(EVT VT) {
8071 switch (VT.getSimpleVT().SimpleTy) {
8079 llvm_unreachable("Unexpected vector predicate type");
8083 static SDValue PromoteMVEPredVector(SDLoc dl, SDValue Pred, EVT VT,
8084 SelectionDAG &DAG) {
8085 // Converting from boolean predicates to integers involves creating a vector
8086 // of all ones or all zeroes and selecting the lanes based upon the real
8089 DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0xff), dl, MVT::i32);
8090 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllOnes);
8093 DAG.getTargetConstant(ARM_AM::createVMOVModImm(0xe, 0x0), dl, MVT::i32);
8094 AllZeroes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v16i8, AllZeroes);
8096 // Get full vector type from predicate type
8097 EVT NewVT = getVectorTyFromPredicateVector(VT);
8100 // If the real predicate is an v8i1 or v4i1 (not v16i1) then we need to recast
8101 // this to a v16i1. This cannot be done with an ordinary bitcast because the
8102 // sizes are not the same. We have to use a MVE specific PREDICATE_CAST node,
8103 // since we know in hardware the sizes are really the same.
8104 if (VT != MVT::v16i1)
8105 RecastV1 = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Pred);
8109 // Select either all ones or zeroes depending upon the real predicate bits.
8110 SDValue PredAsVector =
8111 DAG.getNode(ISD::VSELECT, dl, MVT::v16i8, RecastV1, AllOnes, AllZeroes);
8113 // Recast our new predicate-as-integer v16i8 vector into something
8114 // appropriate for the shuffle, i.e. v4i32 for a real v4i1 predicate.
8115 return DAG.getNode(ISD::BITCAST, dl, NewVT, PredAsVector);
8118 static SDValue LowerVECTOR_SHUFFLE_i1(SDValue Op, SelectionDAG &DAG,
8119 const ARMSubtarget *ST) {
8120 EVT VT = Op.getValueType();
8121 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
8122 ArrayRef<int> ShuffleMask = SVN->getMask();
8124 assert(ST->hasMVEIntegerOps() &&
8125 "No support for vector shuffle of boolean predicates");
8127 SDValue V1 = Op.getOperand(0);
8129 if (isReverseMask(ShuffleMask, VT)) {
8130 SDValue cast = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, V1);
8131 SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, cast);
8132 SDValue srl = DAG.getNode(ISD::SRL, dl, MVT::i32, rbit,
8133 DAG.getConstant(16, dl, MVT::i32));
8134 return DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, srl);
8137 // Until we can come up with optimised cases for every single vector
8138 // shuffle in existence we have chosen the least painful strategy. This is
8139 // to essentially promote the boolean predicate to a 8-bit integer, where
8140 // each predicate represents a byte. Then we fall back on a normal integer
8141 // vector shuffle and convert the result back into a predicate vector. In
8142 // many cases the generated code might be even better than scalar code
8143 // operating on bits. Just imagine trying to shuffle 8 arbitrary 2-bit
8144 // fields in a register into 8 other arbitrary 2-bit fields!
8145 SDValue PredAsVector = PromoteMVEPredVector(dl, V1, VT, DAG);
8146 EVT NewVT = PredAsVector.getValueType();
8149 SDValue Shuffled = DAG.getVectorShuffle(NewVT, dl, PredAsVector,
8150 DAG.getUNDEF(NewVT), ShuffleMask);
8152 // Now return the result of comparing the shuffled vector with zero,
8153 // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
  return DAG.getNode(ARMISD::VCMPZ, dl, VT, Shuffled,
                     DAG.getConstant(ARMCC::NE, dl, MVT::i32));
}
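// For example, a v16i8 shuffle with mask <0,1,2,3, 20,21,22,23, 8,9,10,11,
// 28,29,30,31> copies one whole 32-bit lane of an input into each quarter of
// the result, so it can be built from four 32-bit lane extracts and a v4i32
// build vector instead of sixteen byte-by-byte inserts.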
8158 static SDValue LowerVECTOR_SHUFFLEUsingMovs(SDValue Op,
8159 ArrayRef<int> ShuffleMask,
8160 SelectionDAG &DAG) {
8161 // Attempt to lower the vector shuffle using as many whole register movs as
8162 // possible. This is useful for types smaller than 32bits, which would
8163 // often otherwise become a series for grp movs.
8165 EVT VT = Op.getValueType();
8166 if (VT.getScalarSizeInBits() >= 32)
8169 assert((VT == MVT::v8i16 || VT == MVT::v8f16 || VT == MVT::v16i8) &&
8170 "Unexpected vector type");
8171 int NumElts = VT.getVectorNumElements();
8172 int QuarterSize = NumElts / 4;
8173 // The four final parts of the vector, as i32's
8176 // Look for full lane vmovs like <0,1,2,3> or <u,5,6,7> etc, (but not
8177 // <u,u,u,u>), returning the vmov lane index
8178 auto getMovIdx = [](ArrayRef<int> ShuffleMask, int Start, int Length) {
8179 // Detect which mov lane this would be from the first non-undef element.
8181 for (int i = 0; i < Length; i++) {
8182 if (ShuffleMask[Start + i] >= 0) {
8183 if (ShuffleMask[Start + i] % Length != i)
8185 MovIdx = ShuffleMask[Start + i] / Length;
8189 // If all items are undef, leave this for other combines
8192 // Check the remaining values are the correct part of the same mov
8193 for (int i = 1; i < Length; i++) {
8194 if (ShuffleMask[Start + i] >= 0 &&
8195 (ShuffleMask[Start + i] / Length != MovIdx ||
8196 ShuffleMask[Start + i] % Length != i))
8202 for (int Part = 0; Part < 4; ++Part) {
8203 // Does this part look like a mov
8204 int Elt = getMovIdx(ShuffleMask, Part * QuarterSize, QuarterSize);
8206 SDValue Input = Op->getOperand(0);
8208 Input = Op->getOperand(1);
8211 SDValue BitCast = DAG.getBitcast(MVT::v4i32, Input);
8212 Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, BitCast,
8213 DAG.getConstant(Elt, dl, MVT::i32));
8217 // Nothing interesting found, just return
8218 if (!Parts[0] && !Parts[1] && !Parts[2] && !Parts[3])
8221 // The other parts need to be built with the old shuffle vector, cast to a
8222 // v4i32 and extract_vector_elts
8223 if (!Parts[0] || !Parts[1] || !Parts[2] || !Parts[3]) {
8224 SmallVector<int, 16> NewShuffleMask;
8225 for (int Part = 0; Part < 4; ++Part)
8226 for (int i = 0; i < QuarterSize; i++)
8227 NewShuffleMask.push_back(
8228 Parts[Part] ? -1 : ShuffleMask[Part * QuarterSize + i]);
8229 SDValue NewShuffle = DAG.getVectorShuffle(
8230 VT, dl, Op->getOperand(0), Op->getOperand(1), NewShuffleMask);
8231 SDValue BitCast = DAG.getBitcast(MVT::v4i32, NewShuffle);
8233 for (int Part = 0; Part < 4; ++Part)
8235 Parts[Part] = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
8236 BitCast, DAG.getConstant(Part, dl, MVT::i32));
8238 // Build a vector out of the various parts and bitcast it back to the original
8240 SDValue NewVec = DAG.getBuildVector(MVT::v4i32, dl, Parts);
8241 return DAG.getBitcast(VT, NewVec);
8244 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
8245 const ARMSubtarget *ST) {
8246 SDValue V1 = Op.getOperand(0);
8247 SDValue V2 = Op.getOperand(1);
8249 EVT VT = Op.getValueType();
8250 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
8251 unsigned EltSize = VT.getScalarSizeInBits();
8253 if (ST->hasMVEIntegerOps() && EltSize == 1)
8254 return LowerVECTOR_SHUFFLE_i1(Op, DAG, ST);
8256 // Convert shuffles that are directly supported on NEON to target-specific
8257 // DAG nodes, instead of keeping them as shuffles and matching them again
8258 // during code selection. This is more efficient and avoids the possibility
8259 // of inconsistencies between legalization and selection.
8260 // FIXME: floating-point vectors should be canonicalized to integer vectors
8261 // of the same time so that they get CSEd properly.
8262 ArrayRef<int> ShuffleMask = SVN->getMask();
8264 if (EltSize <= 32) {
8265 if (SVN->isSplat()) {
8266 int Lane = SVN->getSplatIndex();
8267 // If this is undef splat, generate it via "just" vdup, if possible.
8268 if (Lane == -1) Lane = 0;
8270 // Test if V1 is a SCALAR_TO_VECTOR.
8271 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
8272 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
8274 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
8275 // (and probably will turn into a SCALAR_TO_VECTOR once legalization
8277 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
8278 !isa<ConstantSDNode>(V1.getOperand(0))) {
8279 bool IsScalarToVector = true;
8280 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
8281 if (!V1.getOperand(i).isUndef()) {
8282 IsScalarToVector = false;
8285 if (IsScalarToVector)
8286 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
8288 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
8289 DAG.getConstant(Lane, dl, MVT::i32));
8292 bool ReverseVEXT = false;
8294 if (ST->hasNEON() && isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
8297 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
8298 DAG.getConstant(Imm, dl, MVT::i32));
8301 if (isVREVMask(ShuffleMask, VT, 64))
8302 return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
8303 if (isVREVMask(ShuffleMask, VT, 32))
8304 return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
8305 if (isVREVMask(ShuffleMask, VT, 16))
8306 return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
8308 if (ST->hasNEON() && V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
8309 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
8310 DAG.getConstant(Imm, dl, MVT::i32));
8313 // Check for Neon shuffles that modify both input vectors in place.
8314 // If both results are used, i.e., if there are two shuffles with the same
8315 // source operands and with masks corresponding to both results of one of
8316 // these operations, DAG memoization will ensure that a single node is
8317 // used for both shuffles.
8318 unsigned WhichResult = 0;
8319 bool isV_UNDEF = false;
8320 if (ST->hasNEON()) {
8321 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
8322 ShuffleMask, VT, WhichResult, isV_UNDEF)) {
8325 return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
8326 .getValue(WhichResult);
8329 if (ST->hasMVEIntegerOps()) {
8330 if (isVMOVNMask(ShuffleMask, VT, 0))
8331 return DAG.getNode(ARMISD::VMOVN, dl, VT, V2, V1,
8332 DAG.getConstant(0, dl, MVT::i32));
8333 if (isVMOVNMask(ShuffleMask, VT, 1))
8334 return DAG.getNode(ARMISD::VMOVN, dl, VT, V1, V2,
8335 DAG.getConstant(1, dl, MVT::i32));
8338 // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
8339 // shuffles that produce a result larger than their operands with:
8340 // shuffle(concat(v1, undef), concat(v2, undef))
8342 // shuffle(concat(v1, v2), undef)
8343 // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
8345 // This is useful in the general case, but there are special cases where
8346 // native shuffles produce larger results: the two-result ops.
8348 // Look through the concat when lowering them:
8349 // shuffle(concat(v1, v2), undef)
8351 // concat(VZIP(v1, v2):0, :1)
8353 if (ST->hasNEON() && V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) {
8354 SDValue SubV1 = V1->getOperand(0);
8355 SDValue SubV2 = V1->getOperand(1);
8356 EVT SubVT = SubV1.getValueType();
8358 // We expect these to have been canonicalized to -1.
8359 assert(llvm::all_of(ShuffleMask, [&](int i) {
8360 return i < (int)VT.getVectorNumElements();
8361 }) && "Unexpected shuffle index into UNDEF operand!");
8363 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
8364 ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
8367 assert((WhichResult == 0) &&
8368 "In-place shuffle of concat can only have one result!");
8369 SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
8371 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
8377 // If the shuffle is not directly supported and it has 4 elements, use
8378 // the PerfectShuffle-generated table to synthesize it from other shuffles.
8379 unsigned NumElts = VT.getVectorNumElements();
8381 unsigned PFIndexes[4];
8382 for (unsigned i = 0; i != 4; ++i) {
8383 if (ShuffleMask[i] < 0)
8386 PFIndexes[i] = ShuffleMask[i];
8389 // Compute the index in the perfect shuffle table.
8390 unsigned PFTableIndex =
8391 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
8392 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
8393 unsigned Cost = (PFEntry >> 30);
8397 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
8398 else if (isLegalMVEShuffleOp(PFEntry)) {
8399 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
8400 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
8401 unsigned PFEntryLHS = PerfectShuffleTable[LHSID];
8402 unsigned PFEntryRHS = PerfectShuffleTable[RHSID];
8403 if (isLegalMVEShuffleOp(PFEntryLHS) && isLegalMVEShuffleOp(PFEntryRHS))
8404 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
8409 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
8410 if (EltSize >= 32) {
8411 // Do the expansion with floating-point types, since that is what the VFP
8412 // registers are defined to use, and since i64 is not legal.
8413 EVT EltVT = EVT::getFloatingPointVT(EltSize);
8414 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
8415 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
8416 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
8417 SmallVector<SDValue, 8> Ops;
8418 for (unsigned i = 0; i < NumElts; ++i) {
8419 if (ShuffleMask[i] < 0)
8420 Ops.push_back(DAG.getUNDEF(EltVT));
8422 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
8423 ShuffleMask[i] < (int)NumElts ? V1 : V2,
8424 DAG.getConstant(ShuffleMask[i] & (NumElts-1),
8427 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
8428 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
8431 if (ST->hasNEON() && (VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT))
8432 return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);
8434 if (ST->hasNEON() && VT == MVT::v8i8)
8435 if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
8438 if (ST->hasMVEIntegerOps())
8439 if (SDValue NewOp = LowerVECTOR_SHUFFLEUsingMovs(Op, ShuffleMask, DAG))
8445 static SDValue LowerINSERT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
8446 const ARMSubtarget *ST) {
8447 EVT VecVT = Op.getOperand(0).getValueType();
8450 assert(ST->hasMVEIntegerOps() &&
8451 "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
8454 DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
8455 unsigned Lane = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
8456 unsigned LaneWidth =
8457 getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
8458 unsigned Mask = ((1 << LaneWidth) - 1) << Lane * LaneWidth;
8459 SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::i32,
8460 Op.getOperand(1), DAG.getValueType(MVT::i1));
8461 SDValue BFI = DAG.getNode(ARMISD::BFI, dl, MVT::i32, Conv, Ext,
                            DAG.getConstant(~Mask, dl, MVT::i32));
  return DAG.getNode(ARMISD::PREDICATE_CAST, dl, Op.getValueType(), BFI);
}
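// For example, in LowerINSERT_VECTOR_ELT_i1 an insert into lane 2 of a v4i1
// has LaneWidth == 4, so the BFI rewrites predicate bits [11:8] with the
// sign-extended boolean being inserted.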
8466 SDValue ARMTargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
8467 SelectionDAG &DAG) const {
8468 // INSERT_VECTOR_ELT is legal only for immediate indexes.
8469 SDValue Lane = Op.getOperand(2);
8470 if (!isa<ConstantSDNode>(Lane))
8473 SDValue Elt = Op.getOperand(1);
8474 EVT EltVT = Elt.getValueType();
8476 if (Subtarget->hasMVEIntegerOps() &&
8477 Op.getValueType().getScalarSizeInBits() == 1)
8478 return LowerINSERT_VECTOR_ELT_i1(Op, DAG, Subtarget);
8480 if (getTypeAction(*DAG.getContext(), EltVT) ==
8481 TargetLowering::TypePromoteFloat) {
8482 // INSERT_VECTOR_ELT doesn't want f16 operands promoting to f32,
8483 // but the type system will try to do that if we don't intervene.
8484 // Reinterpret any such vector-element insertion as one with the
8485 // corresponding integer types.
8489 EVT IEltVT = MVT::getIntegerVT(EltVT.getScalarSizeInBits());
8490 assert(getTypeAction(*DAG.getContext(), IEltVT) !=
8491 TargetLowering::TypePromoteFloat);
8493 SDValue VecIn = Op.getOperand(0);
8494 EVT VecVT = VecIn.getValueType();
8495 EVT IVecVT = EVT::getVectorVT(*DAG.getContext(), IEltVT,
8496 VecVT.getVectorNumElements());
8498 SDValue IElt = DAG.getNode(ISD::BITCAST, dl, IEltVT, Elt);
8499 SDValue IVecIn = DAG.getNode(ISD::BITCAST, dl, IVecVT, VecIn);
8500 SDValue IVecOut = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, IVecVT,
8501 IVecIn, IElt, Lane);
8502 return DAG.getNode(ISD::BITCAST, dl, VecVT, IVecOut);
8508 static SDValue LowerEXTRACT_VECTOR_ELT_i1(SDValue Op, SelectionDAG &DAG,
8509 const ARMSubtarget *ST) {
8510 EVT VecVT = Op.getOperand(0).getValueType();
8513 assert(ST->hasMVEIntegerOps() &&
8514 "LowerINSERT_VECTOR_ELT_i1 called without MVE!");
8517 DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Op->getOperand(0));
8518 unsigned Lane = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
8519 unsigned LaneWidth =
8520 getVectorTyFromPredicateVector(VecVT).getScalarSizeInBits() / 8;
8521 SDValue Shift = DAG.getNode(ISD::SRL, dl, MVT::i32, Conv,
8522 DAG.getConstant(Lane * LaneWidth, dl, MVT::i32));
8526 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG,
8527 const ARMSubtarget *ST) {
8528 // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
8529 SDValue Lane = Op.getOperand(1);
8530 if (!isa<ConstantSDNode>(Lane))
8533 SDValue Vec = Op.getOperand(0);
8534 EVT VT = Vec.getValueType();
8536 if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
8537 return LowerEXTRACT_VECTOR_ELT_i1(Op, DAG, ST);
8539 if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) {
8541 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
8547 static SDValue LowerCONCAT_VECTORS_i1(SDValue Op, SelectionDAG &DAG,
8548 const ARMSubtarget *ST) {
8549 SDValue V1 = Op.getOperand(0);
8550 SDValue V2 = Op.getOperand(1);
8552 EVT VT = Op.getValueType();
8553 EVT Op1VT = V1.getValueType();
8554 EVT Op2VT = V2.getValueType();
8555 unsigned NumElts = VT.getVectorNumElements();
8557 assert(Op1VT == Op2VT && "Operand types don't match!");
8558 assert(VT.getScalarSizeInBits() == 1 &&
8559 "Unexpected custom CONCAT_VECTORS lowering");
8560 assert(ST->hasMVEIntegerOps() &&
8561 "CONCAT_VECTORS lowering only supported for MVE");
8563 SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG);
8564 SDValue NewV2 = PromoteMVEPredVector(dl, V2, Op2VT, DAG);
8566 // We now have Op1 + Op2 promoted to vectors of integers, where v8i1 gets
8567 // promoted to v8i16, etc.
8569 MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT();
8571 // Extract the vector elements from Op1 and Op2 one by one and truncate them
8572 // to be the right size for the destination. For example, if Op1 is v4i1 then
8573 // the promoted vector is v4i32. The result of concatentation gives a v8i1,
8574 // which when promoted is v8i16. That means each i32 element from Op1 needs
8575 // truncating to i16 and inserting in the result.
8576 EVT ConcatVT = MVT::getVectorVT(ElType, NumElts);
8577 SDValue ConVec = DAG.getNode(ISD::UNDEF, dl, ConcatVT);
8578 auto ExractInto = [&DAG, &dl](SDValue NewV, SDValue ConVec, unsigned &j) {
8579 EVT NewVT = NewV.getValueType();
8580 EVT ConcatVT = ConVec.getValueType();
8581 for (unsigned i = 0, e = NewVT.getVectorNumElements(); i < e; i++, j++) {
8582 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV,
8583 DAG.getIntPtrConstant(i, dl));
8584 ConVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ConcatVT, ConVec, Elt,
8585 DAG.getConstant(j, dl, MVT::i32));
8590 ConVec = ExractInto(NewV1, ConVec, j);
8591 ConVec = ExractInto(NewV2, ConVec, j);
8593 // Now return the result of comparing the subvector with zero,
8594 // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
8595 return DAG.getNode(ARMISD::VCMPZ, dl, VT, ConVec,
8596 DAG.getConstant(ARMCC::NE, dl, MVT::i32));
8599 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
8600 const ARMSubtarget *ST) {
8601 EVT VT = Op->getValueType(0);
8602 if (ST->hasMVEIntegerOps() && VT.getScalarSizeInBits() == 1)
8603 return LowerCONCAT_VECTORS_i1(Op, DAG, ST);
8605 // The only time a CONCAT_VECTORS operation can have legal types is when
8606 // two 64-bit vectors are concatenated to a 128-bit vector.
8607 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
8608 "unexpected CONCAT_VECTORS");
8610 SDValue Val = DAG.getUNDEF(MVT::v2f64);
8611 SDValue Op0 = Op.getOperand(0);
8612 SDValue Op1 = Op.getOperand(1);
8614 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
8615 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
8616 DAG.getIntPtrConstant(0, dl));
8618 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
8619 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
8620 DAG.getIntPtrConstant(1, dl));
8621 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
8624 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG,
8625 const ARMSubtarget *ST) {
8626 SDValue V1 = Op.getOperand(0);
8627 SDValue V2 = Op.getOperand(1);
8629 EVT VT = Op.getValueType();
8630 EVT Op1VT = V1.getValueType();
8631 unsigned NumElts = VT.getVectorNumElements();
8632 unsigned Index = cast<ConstantSDNode>(V2)->getZExtValue();
8634 assert(VT.getScalarSizeInBits() == 1 &&
8635 "Unexpected custom EXTRACT_SUBVECTOR lowering");
8636 assert(ST->hasMVEIntegerOps() &&
8637 "EXTRACT_SUBVECTOR lowering only supported for MVE");
8639 SDValue NewV1 = PromoteMVEPredVector(dl, V1, Op1VT, DAG);
8641 // We now have Op1 promoted to a vector of integers, where v8i1 gets
8642 // promoted to v8i16, etc.
8644 MVT ElType = getVectorTyFromPredicateVector(VT).getScalarType().getSimpleVT();
8646 EVT SubVT = MVT::getVectorVT(ElType, NumElts);
8647 SDValue SubVec = DAG.getNode(ISD::UNDEF, dl, SubVT);
8648 for (unsigned i = Index, j = 0; i < (Index + NumElts); i++, j++) {
8649 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, NewV1,
8650 DAG.getIntPtrConstant(i, dl));
8651 SubVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, SubVT, SubVec, Elt,
8652 DAG.getConstant(j, dl, MVT::i32));
8655 // Now return the result of comparing the subvector with zero,
8656 // which will generate a real predicate, i.e. v4i1, v8i1 or v16i1.
8657 return DAG.getNode(ARMISD::VCMPZ, dl, VT, SubVec,
8658 DAG.getConstant(ARMCC::NE, dl, MVT::i32));
8661 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
8662 /// element has been zero/sign-extended, depending on the isSigned parameter,
8663 /// from an integer type half its size.
8664 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
8666 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
8667 EVT VT = N->getValueType(0);
8668 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
8669 SDNode *BVN = N->getOperand(0).getNode();
8670 if (BVN->getValueType(0) != MVT::v4i32 ||
8671 BVN->getOpcode() != ISD::BUILD_VECTOR)
8673 unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
8674 unsigned HiElt = 1 - LoElt;
8675 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
8676 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
8677 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
8678 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
8679 if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
8682 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
8683 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
8686 if (Hi0->isNullValue() && Hi1->isNullValue())
8692 if (N->getOpcode() != ISD::BUILD_VECTOR)
8695 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
8696 SDNode *Elt = N->getOperand(i).getNode();
8697 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
8698 unsigned EltSize = VT.getScalarSizeInBits();
8699 unsigned HalfSize = EltSize / 2;
8701 if (!isIntN(HalfSize, C->getSExtValue()))
8704 if (!isUIntN(HalfSize, C->getZExtValue()))
8715 /// isSignExtended - Check if a node is a vector value that is sign-extended
8716 /// or a constant BUILD_VECTOR with sign-extended elements.
8717 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
8718 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
8720 if (isExtendedBUILD_VECTOR(N, DAG, true))
8725 /// isZeroExtended - Check if a node is a vector value that is zero-extended
8726 /// or a constant BUILD_VECTOR with zero-extended elements.
8727 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
8728 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
8730 if (isExtendedBUILD_VECTOR(N, DAG, false))
8735 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
8736 if (OrigVT.getSizeInBits() >= 64)
8739 assert(OrigVT.isSimple() && "Expecting a simple value type");
8741 MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
8742 switch (OrigSimpleTy) {
8743 default: llvm_unreachable("Unexpected Vector Type");
8752 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
8753 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
8754 /// We insert the required extension here to get the vector to fill a D register.
8755 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
8758 unsigned ExtOpcode) {
8759 // The vector originally had a size of OrigTy. It was then extended to ExtTy.
8760 // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
8761 // 64-bits we need to insert a new extension so that it will be 64-bits.
8762 assert(ExtTy.is128BitVector() && "Unexpected extension size");
8763 if (OrigTy.getSizeInBits() >= 64)
8766 // Must extend size to at least 64 bits to be used as an operand for VMULL.
8767 EVT NewVT = getExtensionTo64Bits(OrigTy);
8769 return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
8772 /// SkipLoadExtensionForVMULL - return a load of the original vector size that
8773 /// does not do any sign/zero extension. If the original vector is less
8774 /// than 64 bits, an appropriate extension will be added after the load to
8775 /// reach a total size of 64 bits. We have to add the extension separately
8776 /// because ARM does not have a sign/zero extending load for vectors.
8777 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
8778 EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());
8780 // The load already has the right type.
8781 if (ExtendedTy == LD->getMemoryVT())
8782 return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
8783 LD->getBasePtr(), LD->getPointerInfo(),
8784 LD->getAlignment(), LD->getMemOperand()->getFlags());
8786 // We need to create a zextload/sextload. We cannot just create a load
8787 // followed by a zext/zext node because LowerMUL is also run during normal
8788 // operation legalization where we can't create illegal types.
8789 return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
8790 LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
8791 LD->getMemoryVT(), LD->getAlignment(),
8792 LD->getMemOperand()->getFlags());
8795 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
8796 /// extending load, or BUILD_VECTOR with extended elements, return the
8797 /// unextended value. The unextended vector should be 64 bits so that it can
8798 /// be used as an operand to a VMULL instruction. If the original vector size
8799 /// before extension is less than 64 bits we add a an extension to resize
8800 /// the vector to 64 bits.
8801 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
8802 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
8803 return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
8804 N->getOperand(0)->getValueType(0),
8808 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
8809 assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) &&
8810 "Expected extending load");
8812 SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG);
8813 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1));
8814 unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
8816 DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad);
8817 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad);
8822 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will
8823 // have been legalized as a BITCAST from v4i32.
8824 if (N->getOpcode() == ISD::BITCAST) {
8825 SDNode *BVN = N->getOperand(0).getNode();
8826 assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
8827 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
8828 unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
8829 return DAG.getBuildVector(
8830 MVT::v2i32, SDLoc(N),
8831 {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
8833 // Construct a new BUILD_VECTOR with elements truncated to half the size.
8834 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
8835 EVT VT = N->getValueType(0);
8836 unsigned EltSize = VT.getScalarSizeInBits() / 2;
8837 unsigned NumElts = VT.getVectorNumElements();
8838 MVT TruncVT = MVT::getIntegerVT(EltSize);
8839 SmallVector<SDValue, 8> Ops;
8841 for (unsigned i = 0; i != NumElts; ++i) {
8842 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
8843 const APInt &CInt = C->getAPIntValue();
8844 // Element types smaller than 32 bits are not legal, so use i32 elements.
8845 // The values are implicitly truncated so sext vs. zext doesn't matter.
8846 Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
8848 return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
8851 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
8852 unsigned Opcode = N->getOpcode();
8853 if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
8854 SDNode *N0 = N->getOperand(0).getNode();
8855 SDNode *N1 = N->getOperand(1).getNode();
8856 return N0->hasOneUse() && N1->hasOneUse() &&
8857 isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
8862 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
8863 unsigned Opcode = N->getOpcode();
  if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
    SDNode *N0 = N->getOperand(0).getNode();
    SDNode *N1 = N->getOperand(1).getNode();
    return N0->hasOneUse() && N1->hasOneUse() &&
           isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
  }
  return false;
}
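// For example, (mul (sext v8i8 A to v8i16), (sext v8i8 B to v8i16)) becomes a
// single ARMISD::VMULLs of A and B, which maps onto vmull.s8 producing the
// v8i16 result directly.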
8873 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
8874 // Multiplications are only custom-lowered for 128-bit vectors so that
8875 // VMULL can be detected. Otherwise v2i64 multiplications are not legal.
8876 EVT VT = Op.getValueType();
8877 assert(VT.is128BitVector() && VT.isInteger() &&
8878 "unexpected type for custom-lowering ISD::MUL");
8879 SDNode *N0 = Op.getOperand(0).getNode();
8880 SDNode *N1 = Op.getOperand(1).getNode();
8881 unsigned NewOpc = 0;
8883 bool isN0SExt = isSignExtended(N0, DAG);
8884 bool isN1SExt = isSignExtended(N1, DAG);
8885 if (isN0SExt && isN1SExt)
8886 NewOpc = ARMISD::VMULLs;
8888 bool isN0ZExt = isZeroExtended(N0, DAG);
8889 bool isN1ZExt = isZeroExtended(N1, DAG);
8890 if (isN0ZExt && isN1ZExt)
8891 NewOpc = ARMISD::VMULLu;
8892 else if (isN1SExt || isN1ZExt) {
8893 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
8894 // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
8895 if (isN1SExt && isAddSubSExt(N0, DAG)) {
8896 NewOpc = ARMISD::VMULLs;
8898 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
8899 NewOpc = ARMISD::VMULLu;
8901 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
8903 NewOpc = ARMISD::VMULLu;
8909 if (VT == MVT::v2i64)
8910 // Fall through to expand this. It is not legal.
8913 // Other vector multiplications are legal.
8918 // Legalize to a VMULL instruction.
8921 SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
8923 Op0 = SkipExtensionForVMULL(N0, DAG);
8924 assert(Op0.getValueType().is64BitVector() &&
8925 Op1.getValueType().is64BitVector() &&
8926 "unexpected types for extended operands to VMULL");
8927 return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
8930 // Optimizing (zext A + zext B) * C, to (VMULL A, C) + (VMULL B, C) during
8931 // isel lowering to take advantage of no-stall back to back vmul + vmla.
8938 SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
8939 SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
8940 EVT Op1VT = Op1.getValueType();
8941 return DAG.getNode(N0->getOpcode(), DL, VT,
8942 DAG.getNode(NewOpc, DL, VT,
8943 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
8944 DAG.getNode(NewOpc, DL, VT,
8945 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
8948 static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
8949 SelectionDAG &DAG) {
8950 // TODO: Should this propagate fast-math-flags?
8953 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
8954 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
8955 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
8956 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
8957 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
8958 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
8959 // Get reciprocal estimate.
8960 // float4 recip = vrecpeq_f32(yf);
8961 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
8962 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
8964 // Because char has a smaller range than uchar, we can actually get away
8965 // without any newton steps. This requires that we use a weird bias
8966 // of 0xb000, however (again, this has been exhaustively tested).
8967 // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
8968 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
8969 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
8970 Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
8971 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
8972 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
8973 // Convert back to short.
8974 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
8975 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
8979 static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
8980 SelectionDAG &DAG) {
8981 // TODO: Should this propagate fast-math-flags?
8984 // Convert to float.
8985 // float4 yf = vcvt_f32_s32(vmovl_s16(y));
8986 // float4 xf = vcvt_f32_s32(vmovl_s16(x));
8987 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
8988 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
8989 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
8990 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
8992 // Use reciprocal estimate and one refinement step.
8993 // float4 recip = vrecpeq_f32(yf);
8994 // recip *= vrecpsq_f32(yf, recip);
8995 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
8996 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
8998 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
8999 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
9001 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
9002 // Because short has a smaller range than ushort, we can actually get away
9003 // with only a single newton step. This requires that we use a weird bias
9004 // of 89, however (again, this has been exhaustively tested).
9005 // float4 result = as_float4(as_int4(xf*recip) + 0x89);
9006 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
9007 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
9008 N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
9009 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
9010 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
9011 // Convert back to integer and return.
9012 // return vmovn_s32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}
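// For v8i8 divisions the operands are first widened to v8i16, split into low
// and high v4i16 halves, each half is divided using the reciprocal-estimate
// sequence above, and the two halves are concatenated and narrowed back to
// v8i8.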
9018 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG,
9019 const ARMSubtarget *ST) {
9020 EVT VT = Op.getValueType();
9021 assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
9022 "unexpected type for custom-lowering ISD::SDIV");
9025 SDValue N0 = Op.getOperand(0);
9026 SDValue N1 = Op.getOperand(1);
9029 if (VT == MVT::v8i8) {
9030 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
9031 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
9033 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
9034 DAG.getIntPtrConstant(4, dl));
9035 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
9036 DAG.getIntPtrConstant(4, dl));
9037 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
9038 DAG.getIntPtrConstant(0, dl));
9039 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
9040 DAG.getIntPtrConstant(0, dl));
9042 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
9043 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
9045 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
9046 N0 = LowerCONCAT_VECTORS(N0, DAG, ST);
9048 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
9051 return LowerSDIV_v4i16(N0, N1, dl, DAG);
9054 static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG,
9055 const ARMSubtarget *ST) {
9056 // TODO: Should this propagate fast-math-flags?
9057 EVT VT = Op.getValueType();
9058 assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
9059 "unexpected type for custom-lowering ISD::UDIV");
9062 SDValue N0 = Op.getOperand(0);
9063 SDValue N1 = Op.getOperand(1);
9066 if (VT == MVT::v8i8) {
9067 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
9068 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
9070 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
9071 DAG.getIntPtrConstant(4, dl));
9072 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
9073 DAG.getIntPtrConstant(4, dl));
9074 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
9075 DAG.getIntPtrConstant(0, dl));
9076 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
9077 DAG.getIntPtrConstant(0, dl));
9079 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
9080 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16
9082 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
9083 N0 = LowerCONCAT_VECTORS(N0, DAG, ST);
9085 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
9086 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
9092 // v4i16 sdiv ... Convert to float.
9093 // float4 yf = vcvt_f32_s32(vmovl_u16(y));
9094 // float4 xf = vcvt_f32_s32(vmovl_u16(x));
9095 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
9096 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
9097 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
9098 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
9100 // Use reciprocal estimate and two refinement steps.
9101 // float4 recip = vrecpeq_f32(yf);
9102 // recip *= vrecpsq_f32(yf, recip);
9103 // recip *= vrecpsq_f32(yf, recip);
9104 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
9105 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
9107 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
9108 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
9110 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
9111 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
9112 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
9114 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
9115 // Simply multiplying by the reciprocal estimate can leave us a few ulps
9116 // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
9117 // and that it will never cause us to return an answer too large).
9118 // float4 result = as_float4(as_int4(xf*recip) + 2);
9119 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
9120 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
9121 N1 = DAG.getConstant(2, dl, MVT::v4i32);
9122 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
9123 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
9124 // Convert back to integer and return.
9125 // return vmovn_u32(vcvt_s32_f32(result));
  N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
  N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
  return N0;
}
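// ISD::ADDCARRY/SUBCARRY use a boolean carry, while ARMISD::ADDE/SUBE consume
// and produce the CPSR carry flag, where a subtraction sets carry to mean "no
// borrow"; hence the 1 - C conversions on the subtraction path below.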
9131 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
9132 SDNode *N = Op.getNode();
9133 EVT VT = N->getValueType(0);
9134 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
9136 SDValue Carry = Op.getOperand(2);
9141 if (Op.getOpcode() == ISD::ADDCARRY) {
9142 // This converts the boolean value carry into the carry flag.
9143 Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);
9145 // Do the addition proper using the carry flag we wanted.
9146 Result = DAG.getNode(ARMISD::ADDE, DL, VTs, Op.getOperand(0),
9147 Op.getOperand(1), Carry);
9149 // Now convert the carry flag into a boolean value.
9150 Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
9152 // ARMISD::SUBE expects a carry not a borrow like ISD::SUBCARRY so we
9153 // have to invert the carry first.
9154 Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
9155 DAG.getConstant(1, DL, MVT::i32), Carry);
9156 // This converts the boolean value carry into the carry flag.
9157 Carry = ConvertBooleanCarryToCarryFlag(Carry, DAG);
9159 // Do the subtraction proper using the carry flag we wanted.
9160 Result = DAG.getNode(ARMISD::SUBE, DL, VTs, Op.getOperand(0),
9161 Op.getOperand(1), Carry);
9163 // Now convert the carry flag into a boolean value.
9164 Carry = ConvertCarryFlagToBooleanCarry(Result.getValue(1), VT, DAG);
9165 // But the carry returned by ARMISD::SUBE is not a borrow as expected
9166 // by ISD::SUBCARRY, so compute 1 - C.
9167 Carry = DAG.getNode(ISD::SUB, DL, MVT::i32,
9168 DAG.getConstant(1, DL, MVT::i32), Carry);
  // Return both values.
  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Result, Carry);
}
9175 SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
9176 assert(Subtarget->isTargetDarwin());
9178 // For iOS, we want to call an alternative entry point: __sincos_stret;
9179 // the return values are passed via sret.
9181 SDValue Arg = Op.getOperand(0);
9182 EVT ArgVT = Arg.getValueType();
9183 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
9184 auto PtrVT = getPointerTy(DAG.getDataLayout());
9186 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
9187 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9189 // Pair of floats / doubles used to pass the result.
9190 Type *RetTy = StructType::get(ArgTy, ArgTy);
9191 auto &DL = DAG.getDataLayout();
9194 bool ShouldUseSRet = Subtarget->isAPCS_ABI();
9196 if (ShouldUseSRet) {
9197 // Create stack object for sret.
9198 const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
9199 const Align StackAlign = DL.getPrefTypeAlign(RetTy);
9200 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
9201 SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL));
9205 Entry.Ty = RetTy->getPointerTo();
9206 Entry.IsSExt = false;
9207 Entry.IsZExt = false;
9208 Entry.IsSRet = true;
9209 Args.push_back(Entry);
9210 RetTy = Type::getVoidTy(*DAG.getContext());
9216 Entry.IsSExt = false;
9217 Entry.IsZExt = false;
9218 Args.push_back(Entry);
9221 (ArgVT == MVT::f64) ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
9222 const char *LibcallName = getLibcallName(LC);
9223 CallingConv::ID CC = getLibcallCallingConv(LC);
9224 SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL));
9226 TargetLowering::CallLoweringInfo CLI(DAG);
9228 .setChain(DAG.getEntryNode())
9229 .setCallee(CC, RetTy, Callee, std::move(Args))
9230 .setDiscardResult(ShouldUseSRet);
9231 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
9234 return CallResult.first;
9237 DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo());
9239 // Address of cos field.
9240 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
9241 DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
9243 DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo());
9245 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
9246 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
9247 LoadSin.getValue(0), LoadCos.getValue(0));
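/// Emit a call to the Windows division runtime helper (__rt_sdiv, __rt_udiv
/// or their 64-bit variants) for the given DIV node. Note that the operands
/// are pushed divisor-first ({1, 0}) to match the helper's argument order.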
9250 SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
9252 SDValue &Chain) const {
9253 EVT VT = Op.getValueType();
9254 assert((VT == MVT::i32 || VT == MVT::i64) &&
9255 "unexpected type for custom lowering DIV");
9258 const auto &DL = DAG.getDataLayout();
9259 const auto &TLI = DAG.getTargetLoweringInfo();
9261 const char *Name = nullptr;
9263 Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64";
9265 Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64";
9267 SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL));
9269 ARMTargetLowering::ArgListTy Args;
9271 for (auto AI : {1, 0}) {
9273 Arg.Node = Op.getOperand(AI);
9274 Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext());
9275 Args.push_back(Arg);
9278 CallLoweringInfo CLI(DAG);
9281 .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
9282 ES, std::move(Args));
9284 return LowerCallTo(CLI).first;
9287 // This is a code size optimisation: return the original SDIV node to
9288 // DAGCombiner when we don't want to expand SDIV into a sequence of
9289 // instructions, and an empty node otherwise, which will cause the
9290 // SDIV to be expanded in DAGCombine.
9292 ARMTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
9294 SmallVectorImpl<SDNode *> &Created) const {
9295 // TODO: Support SREM
9296 if (N->getOpcode() != ISD::SDIV)
9299 const auto &ST = static_cast<const ARMSubtarget&>(DAG.getSubtarget());
9300 const bool MinSize = ST.hasMinSize();
9301 const bool HasDivide = ST.isThumb() ? ST.hasDivideInThumbMode()
9302 : ST.hasDivideInARMMode();
9304 // Don't touch vector types; rewriting this may lead to scalarizing the division.
9306 if (N->getOperand(0).getValueType().isVector())
9309 // Bail if MinSize is not set; for both ARM and Thumb mode we also need
9310 // hwdiv support for this to be really profitable.
9311 if (!(MinSize && HasDivide))
9314 // ARM mode is a bit simpler than Thumb: we can handle large power
9315 // of 2 immediates with 1 mov instruction; no further checks required,
9316 // just return the sdiv node.
9318 return SDValue(N, 0);
9320 // In Thumb mode, immediates larger than 128 need a wide 4-byte MOV,
9321 // and thus lose the code size benefits of a MOVS that requires only 2 bytes.
9322 // TargetTransformInfo and 'getIntImmCodeSizeCost' could be helpful here,
9323 // but as it's doing exactly this, it's not worth the trouble to get TTI.
9324 if (Divisor.sgt(128))
9327 return SDValue(N, 0);
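/// Lower an i32 SDIV/UDIV for Windows targets: emit a divide-by-zero check on
/// the denominator (ARMISD::WIN__DBZCHK) and then call the runtime division
/// helper.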
9330 SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
9331 bool Signed) const {
9332 assert(Op.getValueType() == MVT::i32 &&
9333 "unexpected type for custom lowering DIV");
9336 SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
9337 DAG.getEntryNode(), Op.getOperand(1));
9339 return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
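/// Chain an ARMISD::WIN__DBZCHK on the denominator of N so that a
/// divide-by-zero trap is raised before the division. For an i64 denominator
/// the two 32-bit halves are ORed together before the check.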
9342 static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) {
9344 SDValue Op = N->getOperand(1);
9345 if (N->getValueType(0) == MVT::i32)
9346 return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op);
9347 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
9348 DAG.getConstant(0, DL, MVT::i32));
9349 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
9350 DAG.getConstant(1, DL, MVT::i32));
9351 return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain,
9352 DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi));
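/// Expand an i64 SDIV/UDIV for Windows targets: check the denominator for
/// zero, call the 64-bit runtime helper, and rebuild the i64 result from the
/// two 32-bit halves of the call's return value.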
9355 void ARMTargetLowering::ExpandDIV_Windows(
9356 SDValue Op, SelectionDAG &DAG, bool Signed,
9357 SmallVectorImpl<SDValue> &Results) const {
9358 const auto &DL = DAG.getDataLayout();
9359 const auto &TLI = DAG.getTargetLoweringInfo();
9361 assert(Op.getValueType() == MVT::i64 &&
9362 "unexpected type for custom lowering DIV");
9365 SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());
9367 SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
9369 SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
9370 SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
9371 DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
9372 Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);
9374 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lower, Upper));
9377 static SDValue LowerPredicateLoad(SDValue Op, SelectionDAG &DAG) {
9378 LoadSDNode *LD = cast<LoadSDNode>(Op.getNode());
9379 EVT MemVT = LD->getMemoryVT();
9380 assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
9381 "Expected a predicate type!");
9382 assert(MemVT == Op.getValueType());
9383 assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
9384 "Expected a non-extending load");
9385 assert(LD->isUnindexed() && "Expected an unindexed load");
9387 // The basic MVE VLDR on a v4i1/v8i1 actually loads the entire 16-bit
9388 // predicate, with the "v4i1" bits spread out over the 16 bits loaded. We
9389 // need to make sure that 8/4 bits are actually loaded into the correct
9390 // place, which means loading the value and then shuffling the values into
9391 // the bottom bits of the predicate.
9392 // Equally, VLDR for a v16i1 will actually load 32 bits (so will be incorrect for big-endian).
9396 SDValue Load = DAG.getExtLoad(
9397 ISD::EXTLOAD, dl, MVT::i32, LD->getChain(), LD->getBasePtr(),
9398 EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
9399 LD->getMemOperand());
9400 SDValue Pred = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::v16i1, Load);
9401 if (MemVT != MVT::v16i1)
9402 Pred = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MemVT, Pred,
9403 DAG.getConstant(0, dl, MVT::i32));
9404 return DAG.getMergeValues({Pred, Load.getValue(1)}, dl);
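/// Custom lowering of loads that need their results replaced: a volatile i64
/// load on an ARMv5TE+ (non-Thumb1) target is emitted as a single
/// ARMISD::LDRD, and the i64 result is rebuilt with BUILD_PAIR in
/// endian-correct order.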
9407 void ARMTargetLowering::LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
9408 SelectionDAG &DAG) const {
9409 LoadSDNode *LD = cast<LoadSDNode>(N);
9410 EVT MemVT = LD->getMemoryVT();
9411 assert(LD->isUnindexed() && "Loads should be unindexed at this point.");
9413 if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
9414 !Subtarget->isThumb1Only() && LD->isVolatile()) {
9416 SDValue Result = DAG.getMemIntrinsicNode(
9417 ARMISD::LDRD, dl, DAG.getVTList({MVT::i32, MVT::i32, MVT::Other}),
9418 {LD->getChain(), LD->getBasePtr()}, MemVT, LD->getMemOperand());
9419 SDValue Lo = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 0 : 1);
9420 SDValue Hi = Result.getValue(DAG.getDataLayout().isLittleEndian() ? 1 : 0);
9421 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
9422 Results.append({Pair, Result.getValue(2)});
9426 static SDValue LowerPredicateStore(SDValue Op, SelectionDAG &DAG) {
9427 StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
9428 EVT MemVT = ST->getMemoryVT();
9429 assert((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 || MemVT == MVT::v16i1) &&
9430 "Expected a predicate type!");
9431 assert(MemVT == ST->getValue().getValueType());
9432 assert(!ST->isTruncatingStore() && "Expected a non-truncating store");
9433 assert(ST->isUnindexed() && "Expected an unindexed store");
9435 // Only store the v4i1 or v8i1 worth of bits, via a buildvector with top bits
9436 // unset and a scalar store.
9438 SDValue Build = ST->getValue();
9439 if (MemVT != MVT::v16i1) {
9440 SmallVector<SDValue, 16> Ops;
9441 for (unsigned I = 0; I < MemVT.getVectorNumElements(); I++)
9442 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Build,
9443 DAG.getConstant(I, dl, MVT::i32)));
9444 for (unsigned I = MemVT.getVectorNumElements(); I < 16; I++)
9445 Ops.push_back(DAG.getUNDEF(MVT::i32));
9446 Build = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i1, Ops);
9448 SDValue GRP = DAG.getNode(ARMISD::PREDICATE_CAST, dl, MVT::i32, Build);
9449 return DAG.getTruncStore(
9450 ST->getChain(), dl, GRP, ST->getBasePtr(),
9451 EVT::getIntegerVT(*DAG.getContext(), MemVT.getSizeInBits()),
9452 ST->getMemOperand());
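/// Custom store lowering: a volatile i64 store on an ARMv5TE+ (non-Thumb1)
/// target becomes a single ARMISD::STRD, and MVE predicate stores
/// (v4i1/v8i1/v16i1) are handled by LowerPredicateStore.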
9455 static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG,
9456 const ARMSubtarget *Subtarget) {
9457 StoreSDNode *ST = cast<StoreSDNode>(Op.getNode());
9458 EVT MemVT = ST->getMemoryVT();
9459 assert(ST->isUnindexed() && "Stores should be unindexed at this point.");
9461 if (MemVT == MVT::i64 && Subtarget->hasV5TEOps() &&
9462 !Subtarget->isThumb1Only() && ST->isVolatile()) {
9463 SDNode *N = Op.getNode();
9466 SDValue Lo = DAG.getNode(
9467 ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(),
9468 DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 0 : 1, dl,
9470 SDValue Hi = DAG.getNode(
9471 ISD::EXTRACT_ELEMENT, dl, MVT::i32, ST->getValue(),
9472 DAG.getTargetConstant(DAG.getDataLayout().isLittleEndian() ? 1 : 0, dl,
9475 return DAG.getMemIntrinsicNode(ARMISD::STRD, dl, DAG.getVTList(MVT::Other),
9476 {ST->getChain(), Lo, Hi, ST->getBasePtr()},
9477 MemVT, ST->getMemOperand());
9478 } else if (Subtarget->hasMVEIntegerOps() &&
9479 ((MemVT == MVT::v4i1 || MemVT == MVT::v8i1 ||
9480 MemVT == MVT::v16i1))) {
9481 return LowerPredicateStore(Op, DAG);
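/// Return true if N is a vector of all zeros, either as a BUILD_VECTOR of
/// zero constants or as a VMOVIMM of 0.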
9487 static bool isZeroVector(SDValue N) {
9488 return (ISD::isBuildVectorAllZeros(N.getNode()) ||
9489 (N->getOpcode() == ARMISD::VMOVIMM &&
9490 isNullConstant(N->getOperand(0))));
9493 static SDValue LowerMLOAD(SDValue Op, SelectionDAG &DAG) {
9494 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
9495 MVT VT = Op.getSimpleValueType();
9496 SDValue Mask = N->getMask();
9497 SDValue PassThru = N->getPassThru();
9500 if (isZeroVector(PassThru))
9503 // MVE Masked loads use zero as the passthru value. Here we convert undef to
9504 // zero too, and other values are lowered to a select.
9505 SDValue ZeroVec = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
9506 DAG.getTargetConstant(0, dl, MVT::i32));
9507 SDValue NewLoad = DAG.getMaskedLoad(
9508 VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask, ZeroVec,
9509 N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
9510 N->getExtensionType(), N->isExpandingLoad());
9511 SDValue Combo = NewLoad;
9512 bool PassThruIsCastZero = (PassThru.getOpcode() == ISD::BITCAST ||
9513 PassThru.getOpcode() == ARMISD::VECTOR_REG_CAST) &&
9514 isZeroVector(PassThru->getOperand(0));
9515 if (!PassThru.isUndef() && !PassThruIsCastZero)
9516 Combo = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
9517 return DAG.getMergeValues({Combo, NewLoad.getValue(1)}, dl);
9520 static SDValue LowerVecReduce(SDValue Op, SelectionDAG &DAG,
9521 const ARMSubtarget *ST) {
9522 if (!ST->hasMVEIntegerOps())
9526 unsigned BaseOpcode = 0;
9527 switch (Op->getOpcode()) {
9528 default: llvm_unreachable("Expected VECREDUCE opcode");
9529 case ISD::VECREDUCE_FADD: BaseOpcode = ISD::FADD; break;
9530 case ISD::VECREDUCE_FMUL: BaseOpcode = ISD::FMUL; break;
9531 case ISD::VECREDUCE_MUL: BaseOpcode = ISD::MUL; break;
9532 case ISD::VECREDUCE_AND: BaseOpcode = ISD::AND; break;
9533 case ISD::VECREDUCE_OR: BaseOpcode = ISD::OR; break;
9534 case ISD::VECREDUCE_XOR: BaseOpcode = ISD::XOR; break;
9535 case ISD::VECREDUCE_FMAX: BaseOpcode = ISD::FMAXNUM; break;
9536 case ISD::VECREDUCE_FMIN: BaseOpcode = ISD::FMINNUM; break;
9539 SDValue Op0 = Op->getOperand(0);
9540 EVT VT = Op0.getValueType();
9541 EVT EltVT = VT.getVectorElementType();
9542 unsigned NumElts = VT.getVectorNumElements();
9543 unsigned NumActiveLanes = NumElts;
9545 assert((NumActiveLanes == 16 || NumActiveLanes == 8 || NumActiveLanes == 4 ||
9546 NumActiveLanes == 2) &&
9547 "Only expected a power-of-2 vector size");
9549 // Pairwise reduce with BaseOpcode(X, Rev(X)) until 4 items remain. Going
9550 // down to 4 vector elements allows us to easily extract the results from the lanes.
9551 while (NumActiveLanes > 4) {
9552 unsigned RevOpcode = NumActiveLanes == 16 ? ARMISD::VREV16 : ARMISD::VREV32;
9553 SDValue Rev = DAG.getNode(RevOpcode, dl, VT, Op0);
9554 Op0 = DAG.getNode(BaseOpcode, dl, VT, Op0, Rev);
9555 NumActiveLanes /= 2;
9559 if (NumActiveLanes == 4) {
9560 // The remaining 4 elements are reduced sequentially.
9561 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
9562 DAG.getConstant(0 * NumElts / 4, dl, MVT::i32));
9563 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
9564 DAG.getConstant(1 * NumElts / 4, dl, MVT::i32));
9565 SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
9566 DAG.getConstant(2 * NumElts / 4, dl, MVT::i32));
9567 SDValue Ext3 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
9568 DAG.getConstant(3 * NumElts / 4, dl, MVT::i32));
9569 SDValue Res0 = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags());
9570 SDValue Res1 = DAG.getNode(BaseOpcode, dl, EltVT, Ext2, Ext3, Op->getFlags());
9571 Res = DAG.getNode(BaseOpcode, dl, EltVT, Res0, Res1, Op->getFlags());
9573 SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
9574 DAG.getConstant(0, dl, MVT::i32));
9575 SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, Op0,
9576 DAG.getConstant(1, dl, MVT::i32));
9577 Res = DAG.getNode(BaseOpcode, dl, EltVT, Ext0, Ext1, Op->getFlags());
9580 // Result type may be wider than element type.
9581 if (EltVT != Op->getValueType(0))
9582 Res = DAG.getNode(ISD::ANY_EXTEND, dl, Op->getValueType(0), Res);
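/// Floating-point VECREDUCE lowering: uses the same strategy as
/// LowerVecReduce, but only when the MVE float operations are available.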
9586 static SDValue LowerVecReduceF(SDValue Op, SelectionDAG &DAG,
9587 const ARMSubtarget *ST) {
9588 if (!ST->hasMVEFloatOps())
9590 return LowerVecReduce(Op, DAG, ST);
9593 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
9594 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
9595 // Acquire/Release load/store is not legal for targets without a dmb or
9596 // equivalent available.
9599 // Monotonic load/store is legal for all targets.
9603 static void ReplaceREADCYCLECOUNTER(SDNode *N,
9604 SmallVectorImpl<SDValue> &Results,
9606 const ARMSubtarget *Subtarget) {
9608 // Under Power Management extensions, the cycle-count is:
9609 // mrc p15, #0, <Rt>, c9, c13, #0
9610 SDValue Ops[] = { N->getOperand(0), // Chain
9611 DAG.getTargetConstant(Intrinsic::arm_mrc, DL, MVT::i32),
9612 DAG.getTargetConstant(15, DL, MVT::i32),
9613 DAG.getTargetConstant(0, DL, MVT::i32),
9614 DAG.getTargetConstant(9, DL, MVT::i32),
9615 DAG.getTargetConstant(13, DL, MVT::i32),
9616 DAG.getTargetConstant(0, DL, MVT::i32)
9619 SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
9620 DAG.getVTList(MVT::i32, MVT::Other), Ops);
9621 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
9622 DAG.getConstant(0, DL, MVT::i32)));
9623 Results.push_back(Cycles32.getValue(1));
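/// Build a GPRPair REG_SEQUENCE holding the two 32-bit halves of the i64
/// value V, swapping the halves on big-endian targets.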
9626 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
9627 SDLoc dl(V.getNode());
9628 SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
9629 SDValue VHi = DAG.getAnyExtOrTrunc(
9630 DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
9632 bool isBigEndian = DAG.getDataLayout().isBigEndian();
9634 std::swap (VLo, VHi);
9636 DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
9637 SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
9638 SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
9639 const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
9641 DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
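/// Expand a 64-bit ATOMIC_CMP_SWAP into the CMP_SWAP_64 pseudo: the expected
/// and new values are packed into GPRPair registers, and the i64 result is
/// rebuilt from the gsub_0/gsub_1 subregisters of the pseudo's result.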
9644 static void ReplaceCMP_SWAP_64Results(SDNode *N,
9645 SmallVectorImpl<SDValue> & Results,
9646 SelectionDAG &DAG) {
9647 assert(N->getValueType(0) == MVT::i64 &&
9648 "AtomicCmpSwap on types smaller than 64 bits should be legal");
9649 SDValue Ops[] = {N->getOperand(1),
9650 createGPRPairNode(DAG, N->getOperand(2)),
9651 createGPRPairNode(DAG, N->getOperand(3)),
9653 SDNode *CmpSwap = DAG.getMachineNode(
9654 ARM::CMP_SWAP_64, SDLoc(N),
9655 DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);
9657 MachineMemOperand *MemOp = cast<MemSDNode>(N)->getMemOperand();
9658 DAG.setNodeMemRefs(cast<MachineSDNode>(CmpSwap), {MemOp});
9660 bool isBigEndian = DAG.getDataLayout().isBigEndian();
9663 DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_1 : ARM::gsub_0,
9664 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0));
9666 DAG.getTargetExtractSubreg(isBigEndian ? ARM::gsub_0 : ARM::gsub_1,
9667 SDLoc(N), MVT::i32, SDValue(CmpSwap, 0));
9668 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, SDLoc(N), MVT::i64, Lo, Hi));
9669 Results.push_back(SDValue(CmpSwap, 2));
9672 SDValue ARMTargetLowering::LowerFSETCC(SDValue Op, SelectionDAG &DAG) const {
9674 EVT VT = Op.getValueType();
9675 SDValue Chain = Op.getOperand(0);
9676 SDValue LHS = Op.getOperand(1);
9677 SDValue RHS = Op.getOperand(2);
9678 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(3))->get();
9679 bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
9681 // If we don't have instructions for this float type, then soften to a
9682 // libcall and use SETCC instead.
9683 if (isUnsupportedFloatingType(LHS.getValueType())) {
9684 DAG.getTargetLoweringInfo().softenSetCCOperands(
9685 DAG, LHS.getValueType(), LHS, RHS, CC, dl, LHS, RHS, Chain, IsSignaling);
9686 if (!RHS.getNode()) {
9687 RHS = DAG.getConstant(0, dl, LHS.getValueType());
9690 SDValue Result = DAG.getNode(ISD::SETCC, dl, VT, LHS, RHS,
9691 DAG.getCondCode(CC));
9692 return DAG.getMergeValues({Result, Chain}, dl);
9695 ARMCC::CondCodes CondCode, CondCode2;
9696 FPCCToARMCC(CC, CondCode, CondCode2);
9698 // FIXME: Chain is not handled correctly here. Currently the FPSCR is implicit
9699 // in CMPFP and CMPFPE, but instead it should be made explicit by these
9700 // instructions using a chain instead of glue. This would also fix the problem
9701 // here (and also in LowerSELECT_CC) where we generate two comparisons when
9703 SDValue True = DAG.getConstant(1, dl, VT);
9704 SDValue False = DAG.getConstant(0, dl, VT);
9705 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
9706 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
9707 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
9708 SDValue Result = getCMOV(dl, VT, False, True, ARMcc, CCR, Cmp, DAG);
9709 if (CondCode2 != ARMCC::AL) {
9710 ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
9711 Cmp = getVFPCmp(LHS, RHS, DAG, dl, IsSignaling);
9712 Result = getCMOV(dl, VT, Result, True, ARMcc, CCR, Cmp, DAG);
9714 return DAG.getMergeValues({Result, Chain}, dl);
9717 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
9718 LLVM_DEBUG(dbgs() << "Lowering node: "; Op.dump());
9719 switch (Op.getOpcode()) {
9720 default: llvm_unreachable("Don't know how to custom lower this!");
9721 case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
9722 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
9723 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
9724 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
9725 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
9726 case ISD::SELECT: return LowerSELECT(Op, DAG);
9727 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
9728 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
9729 case ISD::BR_CC: return LowerBR_CC(Op, DAG);
9730 case ISD::BR_JT: return LowerBR_JT(Op, DAG);
9731 case ISD::VASTART: return LowerVASTART(Op, DAG);
9732 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget);
9733 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget);
9734 case ISD::SINT_TO_FP:
9735 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
9736 case ISD::STRICT_FP_TO_SINT:
9737 case ISD::STRICT_FP_TO_UINT:
9738 case ISD::FP_TO_SINT:
9739 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
9740 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
9741 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
9742 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
9743 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
9744 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
9745 case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
9746 case ISD::INTRINSIC_VOID: return LowerINTRINSIC_VOID(Op, DAG, Subtarget);
9747 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
9749 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG, Subtarget);
9752 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget);
9753 case ISD::SREM: return LowerREM(Op.getNode(), DAG);
9754 case ISD::UREM: return LowerREM(Op.getNode(), DAG);
9755 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG);
9756 case ISD::SRL_PARTS:
9757 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG);
9759 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
9760 case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget);
9761 case ISD::SETCC: return LowerVSETCC(Op, DAG, Subtarget);
9762 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
9763 case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget);
9764 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget);
9765 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
9766 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op, DAG, Subtarget);
9767 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
9768 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG, Subtarget);
9769 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG, Subtarget);
9770 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
9771 case ISD::MUL: return LowerMUL(Op, DAG);
9773 if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
9774 return LowerDIV_Windows(Op, DAG, /* Signed */ true);
9775 return LowerSDIV(Op, DAG, Subtarget);
9777 if (Subtarget->isTargetWindows() && !Op.getValueType().isVector())
9778 return LowerDIV_Windows(Op, DAG, /* Signed */ false);
9779 return LowerUDIV(Op, DAG, Subtarget);
9781 case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
9784 return LowerSignedALUO(Op, DAG);
9787 return LowerUnsignedALUO(Op, DAG);
9790 return LowerSADDSUBSAT(Op, DAG, Subtarget);
9792 return LowerPredicateLoad(Op, DAG);
9794 return LowerSTORE(Op, DAG, Subtarget);
9796 return LowerMLOAD(Op, DAG);
9797 case ISD::VECREDUCE_MUL:
9798 case ISD::VECREDUCE_AND:
9799 case ISD::VECREDUCE_OR:
9800 case ISD::VECREDUCE_XOR:
9801 return LowerVecReduce(Op, DAG, Subtarget);
9802 case ISD::VECREDUCE_FADD:
9803 case ISD::VECREDUCE_FMUL:
9804 case ISD::VECREDUCE_FMIN:
9805 case ISD::VECREDUCE_FMAX:
9806 return LowerVecReduceF(Op, DAG, Subtarget);
9807 case ISD::ATOMIC_LOAD:
9808 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG);
9809 case ISD::FSINCOS: return LowerFSINCOS(Op, DAG);
9811 case ISD::UDIVREM: return LowerDivRem(Op, DAG);
9812 case ISD::DYNAMIC_STACKALLOC:
9813 if (Subtarget->isTargetWindows())
9814 return LowerDYNAMIC_STACKALLOC(Op, DAG);
9815 llvm_unreachable("Don't know how to custom lower this!");
9816 case ISD::STRICT_FP_ROUND:
9817 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
9818 case ISD::STRICT_FP_EXTEND:
9819 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
9820 case ISD::STRICT_FSETCC:
9821 case ISD::STRICT_FSETCCS: return LowerFSETCC(Op, DAG);
9822 case ARMISD::WIN__DBZCHK: return SDValue();
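/// Expand the arm.smlald/smlaldx/smlsld/smlsldx intrinsics, which accumulate
/// into a 64-bit value, into the corresponding ARMISD nodes operating on the
/// two 32-bit halves of the i64 accumulator.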
9826 static void ReplaceLongIntrinsic(SDNode *N, SmallVectorImpl<SDValue> &Results,
9827 SelectionDAG &DAG) {
9828 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
9830 if (IntNo == Intrinsic::arm_smlald)
9831 Opc = ARMISD::SMLALD;
9832 else if (IntNo == Intrinsic::arm_smlaldx)
9833 Opc = ARMISD::SMLALDX;
9834 else if (IntNo == Intrinsic::arm_smlsld)
9835 Opc = ARMISD::SMLSLD;
9836 else if (IntNo == Intrinsic::arm_smlsldx)
9837 Opc = ARMISD::SMLSLDX;
9842 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
9844 DAG.getConstant(0, dl, MVT::i32));
9845 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
9847 DAG.getConstant(1, dl, MVT::i32));
9849 SDValue LongMul = DAG.getNode(Opc, dl,
9850 DAG.getVTList(MVT::i32, MVT::i32),
9851 N->getOperand(1), N->getOperand(2),
9853 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64,
9854 LongMul.getValue(0), LongMul.getValue(1)));
9857 /// ReplaceNodeResults - Replace the results of a node with an illegal result
9858 /// type with new values built out of custom code.
9859 void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
9860 SmallVectorImpl<SDValue> &Results,
9861 SelectionDAG &DAG) const {
9863 switch (N->getOpcode()) {
9865 llvm_unreachable("Don't know how to custom expand this!");
9866 case ISD::READ_REGISTER:
9867 ExpandREAD_REGISTER(N, Results, DAG);
9870 Res = ExpandBITCAST(N, DAG, Subtarget);
9875 Res = Expand64BitShift(N, DAG, Subtarget);
9879 Res = LowerREM(N, DAG);
9883 Res = LowerDivRem(SDValue(N, 0), DAG);
9884 assert(Res.getNumOperands() == 2 && "DivRem needs two values");
9885 Results.push_back(Res.getValue(0));
9886 Results.push_back(Res.getValue(1));
9890 Res = LowerSADDSUBSAT(SDValue(N, 0), DAG, Subtarget);
9892 case ISD::READCYCLECOUNTER:
9893 ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
9897 assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
9898 return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
9900 case ISD::ATOMIC_CMP_SWAP:
9901 ReplaceCMP_SWAP_64Results(N, Results, DAG);
9903 case ISD::INTRINSIC_WO_CHAIN:
9904 return ReplaceLongIntrinsic(N, Results, DAG);
9906 lowerABS(N, Results, DAG);
9909 LowerLOAD(N, Results, DAG);
9913 Results.push_back(Res);
9916 //===----------------------------------------------------------------------===//
9917 // ARM Scheduler Hooks
9918 //===----------------------------------------------------------------------===//
9920 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
9921 /// registers the function context.
9922 void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
9923 MachineBasicBlock *MBB,
9924 MachineBasicBlock *DispatchBB,
9926 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
9927 "ROPI/RWPI not currently supported with SjLj");
9928 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
9929 DebugLoc dl = MI.getDebugLoc();
9930 MachineFunction *MF = MBB->getParent();
9931 MachineRegisterInfo *MRI = &MF->getRegInfo();
9932 MachineConstantPool *MCP = MF->getConstantPool();
9933 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
9934 const Function &F = MF->getFunction();
9936 bool isThumb = Subtarget->isThumb();
9937 bool isThumb2 = Subtarget->isThumb2();
9939 unsigned PCLabelId = AFI->createPICLabelUId();
9940 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
9941 ARMConstantPoolValue *CPV =
9942 ARMConstantPoolMBB::Create(F.getContext(), DispatchBB, PCLabelId, PCAdj);
9943 unsigned CPI = MCP->getConstantPoolIndex(CPV, Align(4));
9945 const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
9946 : &ARM::GPRRegClass;
9948 // Grab constant pool and fixed stack memory operands.
9949 MachineMemOperand *CPMMO =
9950 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
9951 MachineMemOperand::MOLoad, 4, Align(4));
9953 MachineMemOperand *FIMMOSt =
9954 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
9955 MachineMemOperand::MOStore, 4, Align(4));
9957 // Load the address of the dispatch MBB into the jump buffer.
9959 // Incoming value: jbuf
9960 // ldr.n r5, LCPI1_1
9963 // str r5, [$jbuf, #+4] ; &jbuf[1]
9964 Register NewVReg1 = MRI->createVirtualRegister(TRC);
9965 BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
9966 .addConstantPoolIndex(CPI)
9967 .addMemOperand(CPMMO)
9968 .add(predOps(ARMCC::AL));
9969 // Set the low bit because of thumb mode.
9970 Register NewVReg2 = MRI->createVirtualRegister(TRC);
9971 BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
9972 .addReg(NewVReg1, RegState::Kill)
9974 .add(predOps(ARMCC::AL))
9976 Register NewVReg3 = MRI->createVirtualRegister(TRC);
9977 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
9978 .addReg(NewVReg2, RegState::Kill)
9980 BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
9981 .addReg(NewVReg3, RegState::Kill)
9983 .addImm(36) // &jbuf[1] :: pc
9984 .addMemOperand(FIMMOSt)
9985 .add(predOps(ARMCC::AL));
9986 } else if (isThumb) {
9987 // Incoming value: jbuf
9988 // ldr.n r1, LCPI1_4
9992 // add r2, $jbuf, #+4 ; &jbuf[1]
9994 Register NewVReg1 = MRI->createVirtualRegister(TRC);
9995 BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
9996 .addConstantPoolIndex(CPI)
9997 .addMemOperand(CPMMO)
9998 .add(predOps(ARMCC::AL));
9999 Register NewVReg2 = MRI->createVirtualRegister(TRC);
10000 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
10001 .addReg(NewVReg1, RegState::Kill)
10002 .addImm(PCLabelId);
10003 // Set the low bit because of thumb mode.
10004 Register NewVReg3 = MRI->createVirtualRegister(TRC);
10005 BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
10006 .addReg(ARM::CPSR, RegState::Define)
10008 .add(predOps(ARMCC::AL));
10009 Register NewVReg4 = MRI->createVirtualRegister(TRC);
10010 BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
10011 .addReg(ARM::CPSR, RegState::Define)
10012 .addReg(NewVReg2, RegState::Kill)
10013 .addReg(NewVReg3, RegState::Kill)
10014 .add(predOps(ARMCC::AL));
10015 Register NewVReg5 = MRI->createVirtualRegister(TRC);
10016 BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
10018 .addImm(36); // &jbuf[1] :: pc
10019 BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
10020 .addReg(NewVReg4, RegState::Kill)
10021 .addReg(NewVReg5, RegState::Kill)
10023 .addMemOperand(FIMMOSt)
10024 .add(predOps(ARMCC::AL));
10026 // Incoming value: jbuf
10029 // str r1, [$jbuf, #+4] ; &jbuf[1]
10030 Register NewVReg1 = MRI->createVirtualRegister(TRC);
10031 BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
10032 .addConstantPoolIndex(CPI)
10034 .addMemOperand(CPMMO)
10035 .add(predOps(ARMCC::AL));
10036 Register NewVReg2 = MRI->createVirtualRegister(TRC);
10037 BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
10038 .addReg(NewVReg1, RegState::Kill)
10040 .add(predOps(ARMCC::AL));
10041 BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
10042 .addReg(NewVReg2, RegState::Kill)
10044 .addImm(36) // &jbuf[1] :: pc
10045 .addMemOperand(FIMMOSt)
10046 .add(predOps(ARMCC::AL));
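/// Build the SjLj exception dispatch block: collect the landing pads for each
/// call site, emit a jump table that dispatches on the call-site index stored
/// in the function context, and redirect every invoke block to the new
/// dispatch block, which becomes the only landing pad in the function.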
10050 void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
10051 MachineBasicBlock *MBB) const {
10052 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
10053 DebugLoc dl = MI.getDebugLoc();
10054 MachineFunction *MF = MBB->getParent();
10055 MachineRegisterInfo *MRI = &MF->getRegInfo();
10056 MachineFrameInfo &MFI = MF->getFrameInfo();
10057 int FI = MFI.getFunctionContextIndex();
10059 const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
10060 : &ARM::GPRnopcRegClass;
10062 // Get a mapping of the call site numbers to all of the landing pads they're
10063 // associated with.
10064 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2>> CallSiteNumToLPad;
10065 unsigned MaxCSNum = 0;
10066 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
10068 if (!BB->isEHPad()) continue;
10070 // FIXME: We should assert that the EH_LABEL is the first MI in the landing pad.
10072 for (MachineBasicBlock::iterator
10073 II = BB->begin(), IE = BB->end(); II != IE; ++II) {
10074 if (!II->isEHLabel()) continue;
10076 MCSymbol *Sym = II->getOperand(0).getMCSymbol();
10077 if (!MF->hasCallSiteLandingPad(Sym)) continue;
10079 SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym);
10080 for (SmallVectorImpl<unsigned>::iterator
10081 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
10082 CSI != CSE; ++CSI) {
10083 CallSiteNumToLPad[*CSI].push_back(&*BB);
10084 MaxCSNum = std::max(MaxCSNum, *CSI);
10090 // Get an ordered list of the machine basic blocks for the jump table.
10091 std::vector<MachineBasicBlock*> LPadList;
10092 SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
10093 LPadList.reserve(CallSiteNumToLPad.size());
10094 for (unsigned I = 1; I <= MaxCSNum; ++I) {
10095 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
10096 for (SmallVectorImpl<MachineBasicBlock*>::iterator
10097 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
10098 LPadList.push_back(*II);
10099 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
10103 assert(!LPadList.empty() &&
10104 "No landing pad destinations for the dispatch jump table!");
10106 // Create the jump table and associated information.
10107 MachineJumpTableInfo *JTI =
10108 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
10109 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
10111 // Create the MBBs for the dispatch code.
10113 // Shove the dispatch's address into the return slot in the function context.
10114 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
10115 DispatchBB->setIsEHPad();
10117 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
10118 unsigned trap_opcode;
10119 if (Subtarget->isThumb())
10120 trap_opcode = ARM::tTRAP;
10122 trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
10124 BuildMI(TrapBB, dl, TII->get(trap_opcode));
10125 DispatchBB->addSuccessor(TrapBB);
10127 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
10128 DispatchBB->addSuccessor(DispContBB);
10130 // Insert the MBBs into the function.
10131 MF->insert(MF->end(), DispatchBB);
10132 MF->insert(MF->end(), DispContBB);
10133 MF->insert(MF->end(), TrapBB);
10135 // Insert code into the entry block that creates and registers the function context.
10137 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
10139 MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
10140 MachinePointerInfo::getFixedStack(*MF, FI),
10141 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, Align(4));
10143 MachineInstrBuilder MIB;
10144 MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));
10146 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
10147 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
10149 // Add a register mask with no preserved registers. This results in all
10150 // registers being marked as clobbered. This can't work if the dispatch block
10151 // is in a Thumb1 function and is linked with ARM code which uses the FP
10152 // registers, as there is no way to preserve the FP registers in Thumb1 mode.
10153 MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF));
10155 bool IsPositionIndependent = isPositionIndependent();
10156 unsigned NumLPads = LPadList.size();
10157 if (Subtarget->isThumb2()) {
10158 Register NewVReg1 = MRI->createVirtualRegister(TRC);
10159 BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
10162 .addMemOperand(FIMMOLd)
10163 .add(predOps(ARMCC::AL));
10165 if (NumLPads < 256) {
10166 BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
10168 .addImm(LPadList.size())
10169 .add(predOps(ARMCC::AL));
10171 Register VReg1 = MRI->createVirtualRegister(TRC);
10172 BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
10173 .addImm(NumLPads & 0xFFFF)
10174 .add(predOps(ARMCC::AL));
10176 unsigned VReg2 = VReg1;
10177 if ((NumLPads & 0xFFFF0000) != 0) {
10178 VReg2 = MRI->createVirtualRegister(TRC);
10179 BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
10181 .addImm(NumLPads >> 16)
10182 .add(predOps(ARMCC::AL));
10185 BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
10188 .add(predOps(ARMCC::AL));
10191 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
10194 .addReg(ARM::CPSR);
10196 Register NewVReg3 = MRI->createVirtualRegister(TRC);
10197 BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT), NewVReg3)
10198 .addJumpTableIndex(MJTI)
10199 .add(predOps(ARMCC::AL));
10201 Register NewVReg4 = MRI->createVirtualRegister(TRC);
10202 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
10203 .addReg(NewVReg3, RegState::Kill)
10205 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
10206 .add(predOps(ARMCC::AL))
10207 .add(condCodeOp());
10209 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
10210 .addReg(NewVReg4, RegState::Kill)
10212 .addJumpTableIndex(MJTI);
10213 } else if (Subtarget->isThumb()) {
10214 Register NewVReg1 = MRI->createVirtualRegister(TRC);
10215 BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
10218 .addMemOperand(FIMMOLd)
10219 .add(predOps(ARMCC::AL));
10221 if (NumLPads < 256) {
10222 BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
10225 .add(predOps(ARMCC::AL));
10227 MachineConstantPool *ConstantPool = MF->getConstantPool();
10228 Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
10229 const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
10231 // MachineConstantPool wants an explicit alignment.
10232 Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
10233 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);
10235 Register VReg1 = MRI->createVirtualRegister(TRC);
10236 BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
10237 .addReg(VReg1, RegState::Define)
10238 .addConstantPoolIndex(Idx)
10239 .add(predOps(ARMCC::AL));
10240 BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
10243 .add(predOps(ARMCC::AL));
10246 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
10249 .addReg(ARM::CPSR);
10251 Register NewVReg2 = MRI->createVirtualRegister(TRC);
10252 BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
10253 .addReg(ARM::CPSR, RegState::Define)
10256 .add(predOps(ARMCC::AL));
10258 Register NewVReg3 = MRI->createVirtualRegister(TRC);
10259 BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
10260 .addJumpTableIndex(MJTI)
10261 .add(predOps(ARMCC::AL));
10263 Register NewVReg4 = MRI->createVirtualRegister(TRC);
10264 BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
10265 .addReg(ARM::CPSR, RegState::Define)
10266 .addReg(NewVReg2, RegState::Kill)
10268 .add(predOps(ARMCC::AL));
10270 MachineMemOperand *JTMMOLd =
10271 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF),
10272 MachineMemOperand::MOLoad, 4, Align(4));
10274 Register NewVReg5 = MRI->createVirtualRegister(TRC);
10275 BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
10276 .addReg(NewVReg4, RegState::Kill)
10278 .addMemOperand(JTMMOLd)
10279 .add(predOps(ARMCC::AL));
10281 unsigned NewVReg6 = NewVReg5;
10282 if (IsPositionIndependent) {
10283 NewVReg6 = MRI->createVirtualRegister(TRC);
10284 BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
10285 .addReg(ARM::CPSR, RegState::Define)
10286 .addReg(NewVReg5, RegState::Kill)
10288 .add(predOps(ARMCC::AL));
10291 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
10292 .addReg(NewVReg6, RegState::Kill)
10293 .addJumpTableIndex(MJTI);
10295 Register NewVReg1 = MRI->createVirtualRegister(TRC);
10296 BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
10299 .addMemOperand(FIMMOLd)
10300 .add(predOps(ARMCC::AL));
10302 if (NumLPads < 256) {
10303 BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
10306 .add(predOps(ARMCC::AL));
10307 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
10308 Register VReg1 = MRI->createVirtualRegister(TRC);
10309 BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
10310 .addImm(NumLPads & 0xFFFF)
10311 .add(predOps(ARMCC::AL));
10313 unsigned VReg2 = VReg1;
10314 if ((NumLPads & 0xFFFF0000) != 0) {
10315 VReg2 = MRI->createVirtualRegister(TRC);
10316 BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
10318 .addImm(NumLPads >> 16)
10319 .add(predOps(ARMCC::AL));
10322 BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
10325 .add(predOps(ARMCC::AL));
10327 MachineConstantPool *ConstantPool = MF->getConstantPool();
10328 Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
10329 const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
10331 // MachineConstantPool wants an explicit alignment.
10332 Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
10333 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);
10335 Register VReg1 = MRI->createVirtualRegister(TRC);
10336 BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
10337 .addReg(VReg1, RegState::Define)
10338 .addConstantPoolIndex(Idx)
10340 .add(predOps(ARMCC::AL));
10341 BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
10343 .addReg(VReg1, RegState::Kill)
10344 .add(predOps(ARMCC::AL));
10347 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
10350 .addReg(ARM::CPSR);
10352 Register NewVReg3 = MRI->createVirtualRegister(TRC);
10353 BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
10355 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))
10356 .add(predOps(ARMCC::AL))
10357 .add(condCodeOp());
10358 Register NewVReg4 = MRI->createVirtualRegister(TRC);
10359 BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
10360 .addJumpTableIndex(MJTI)
10361 .add(predOps(ARMCC::AL));
10363 MachineMemOperand *JTMMOLd =
10364 MF->getMachineMemOperand(MachinePointerInfo::getJumpTable(*MF),
10365 MachineMemOperand::MOLoad, 4, Align(4));
10366 Register NewVReg5 = MRI->createVirtualRegister(TRC);
10367 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
10368 .addReg(NewVReg3, RegState::Kill)
10371 .addMemOperand(JTMMOLd)
10372 .add(predOps(ARMCC::AL));
10374 if (IsPositionIndependent) {
10375 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
10376 .addReg(NewVReg5, RegState::Kill)
10378 .addJumpTableIndex(MJTI);
10380 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
10381 .addReg(NewVReg5, RegState::Kill)
10382 .addJumpTableIndex(MJTI);
10386 // Add the jump table entries as successors to the MBB.
10387 SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
10388 for (std::vector<MachineBasicBlock*>::iterator
10389 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
10390 MachineBasicBlock *CurMBB = *I;
10391 if (SeenMBBs.insert(CurMBB).second)
10392 DispContBB->addSuccessor(CurMBB);
10395 // N.B. the order the invoke BBs are processed in doesn't matter here.
10396 const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
10397 SmallVector<MachineBasicBlock*, 64> MBBLPads;
10398 for (MachineBasicBlock *BB : InvokeBBs) {
10400 // Remove the landing pad successor from the invoke block and replace it
10401 // with the new dispatch block.
10402 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
10404 while (!Successors.empty()) {
10405 MachineBasicBlock *SMBB = Successors.pop_back_val();
10406 if (SMBB->isEHPad()) {
10407 BB->removeSuccessor(SMBB);
10408 MBBLPads.push_back(SMBB);
10412 BB->addSuccessor(DispatchBB, BranchProbability::getZero());
10413 BB->normalizeSuccProbs();
10415 // Find the invoke call and mark all of the callee-saved registers as
10416 // 'implicit defined' so that they're spilled. This prevents code from
10417 // moving instructions to before the EH block, where they will never be executed.
10419 for (MachineBasicBlock::reverse_iterator
10420 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
10421 if (!II->isCall()) continue;
10423 DenseMap<unsigned, bool> DefRegs;
10424 for (MachineInstr::mop_iterator
10425 OI = II->operands_begin(), OE = II->operands_end();
10427 if (!OI->isReg()) continue;
10428 DefRegs[OI->getReg()] = true;
10431 MachineInstrBuilder MIB(*MF, &*II);
10433 for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
10434 unsigned Reg = SavedRegs[i];
10435 if (Subtarget->isThumb2() &&
10436 !ARM::tGPRRegClass.contains(Reg) &&
10437 !ARM::hGPRRegClass.contains(Reg))
10439 if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
10441 if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
10444 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
10451 // Mark all former landing pads as non-landing pads. The dispatch is the only
10452 // landing pad now.
10453 for (SmallVectorImpl<MachineBasicBlock*>::iterator
10454 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
10455 (*I)->setIsEHPad(false);
10457 // The instruction is gone now.
10458 MI.eraseFromParent();
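/// Return the successor of MBB other than Succ. MBB is expected to have
/// exactly two successors.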
10462 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
10463 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
10464 E = MBB->succ_end(); I != E; ++I)
10467 llvm_unreachable("Expecting a BB with two successors!");
10470 /// Return the load opcode for a given load size. If load size >= 8,
10471 /// a NEON opcode will be returned.
10472 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
10474 return LdSize == 16 ? ARM::VLD1q32wb_fixed
10475 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
10477 return LdSize == 4 ? ARM::tLDRi
10478 : LdSize == 2 ? ARM::tLDRHi
10479 : LdSize == 1 ? ARM::tLDRBi : 0;
10481 return LdSize == 4 ? ARM::t2LDR_POST
10482 : LdSize == 2 ? ARM::t2LDRH_POST
10483 : LdSize == 1 ? ARM::t2LDRB_POST : 0;
10484 return LdSize == 4 ? ARM::LDR_POST_IMM
10485 : LdSize == 2 ? ARM::LDRH_POST
10486 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
10489 /// Return the store opcode for a given store size. If store size >= 8,
10490 /// a NEON opcode will be returned.
10491 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
10493 return StSize == 16 ? ARM::VST1q32wb_fixed
10494 : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
10496 return StSize == 4 ? ARM::tSTRi
10497 : StSize == 2 ? ARM::tSTRHi
10498 : StSize == 1 ? ARM::tSTRBi : 0;
10500 return StSize == 4 ? ARM::t2STR_POST
10501 : StSize == 2 ? ARM::t2STRH_POST
10502 : StSize == 1 ? ARM::t2STRB_POST : 0;
10503 return StSize == 4 ? ARM::STR_POST_IMM
10504 : StSize == 2 ? ARM::STRH_POST
10505 : StSize == 1 ? ARM::STRB_POST_IMM : 0;
10508 /// Emit a post-increment load operation with given size. The instructions
10509 /// will be added to BB at Pos.
10510 static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
10511 const TargetInstrInfo *TII, const DebugLoc &dl,
10512 unsigned LdSize, unsigned Data, unsigned AddrIn,
10513 unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
10514 unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
10515 assert(LdOpc != 0 && "Should have a load opcode");
10517 BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
10518 .addReg(AddrOut, RegState::Define)
10521 .add(predOps(ARMCC::AL));
10522 } else if (IsThumb1) {
10523 // load + update AddrIn
10524 BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
10527 .add(predOps(ARMCC::AL));
10528 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
10529 .add(t1CondCodeOp())
10532 .add(predOps(ARMCC::AL));
10533 } else if (IsThumb2) {
10534 BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
10535 .addReg(AddrOut, RegState::Define)
10538 .add(predOps(ARMCC::AL));
10540 BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
10541 .addReg(AddrOut, RegState::Define)
10545 .add(predOps(ARMCC::AL));
10549 /// Emit a post-increment store operation with given size. The instructions
10550 /// will be added to BB at Pos.
10551 static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
10552 const TargetInstrInfo *TII, const DebugLoc &dl,
10553 unsigned StSize, unsigned Data, unsigned AddrIn,
10554 unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
10555 unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
10556 assert(StOpc != 0 && "Should have a store opcode");
10558 BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
10562 .add(predOps(ARMCC::AL));
10563 } else if (IsThumb1) {
10564 // store + update AddrIn
10565 BuildMI(*BB, Pos, dl, TII->get(StOpc))
10569 .add(predOps(ARMCC::AL));
10570 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut)
10571 .add(t1CondCodeOp())
10574 .add(predOps(ARMCC::AL));
10575 } else if (IsThumb2) {
10576 BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
10580 .add(predOps(ARMCC::AL));
10582 BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
10587 .add(predOps(ARMCC::AL));
10591 MachineBasicBlock *
10592 ARMTargetLowering::EmitStructByval(MachineInstr &MI,
10593 MachineBasicBlock *BB) const {
10594 // This pseudo instruction has 4 operands: dst, src, size, alignment.
10595 // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
10596 // Otherwise, we generate unrolled scalar copies.
10597 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
10598 const BasicBlock *LLVM_BB = BB->getBasicBlock();
10599 MachineFunction::iterator It = ++BB->getIterator();
10601 Register dest = MI.getOperand(0).getReg();
10602 Register src = MI.getOperand(1).getReg();
10603 unsigned SizeVal = MI.getOperand(2).getImm();
10604 unsigned Alignment = MI.getOperand(3).getImm();
10605 DebugLoc dl = MI.getDebugLoc();
10607 MachineFunction *MF = BB->getParent();
10608 MachineRegisterInfo &MRI = MF->getRegInfo();
10609 unsigned UnitSize = 0;
10610 const TargetRegisterClass *TRC = nullptr;
10611 const TargetRegisterClass *VecTRC = nullptr;
10613 bool IsThumb1 = Subtarget->isThumb1Only();
10614 bool IsThumb2 = Subtarget->isThumb2();
10615 bool IsThumb = Subtarget->isThumb();
10617 if (Alignment & 1) {
10619 } else if (Alignment & 2) {
10622 // Check whether we can use NEON instructions.
10623 if (!MF->getFunction().hasFnAttribute(Attribute::NoImplicitFloat) &&
10624 Subtarget->hasNEON()) {
10625 if ((Alignment % 16 == 0) && SizeVal >= 16)
10627 else if ((Alignment % 8 == 0) && SizeVal >= 8)
10630 // Can't use NEON instructions.
10635 // Select the correct opcode and register class for unit size load/store
10636 bool IsNeon = UnitSize >= 8;
10637 TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
10639 VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
10640 : UnitSize == 8 ? &ARM::DPRRegClass
10643 unsigned BytesLeft = SizeVal % UnitSize;
10644 unsigned LoopSize = SizeVal - BytesLeft;
10646 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
10647 // Use LDR and STR to copy.
10648 // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
10649 // [destOut] = STR_POST(scratch, destIn, UnitSize)
10650 unsigned srcIn = src;
10651 unsigned destIn = dest;
10652 for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
10653 Register srcOut = MRI.createVirtualRegister(TRC);
10654 Register destOut = MRI.createVirtualRegister(TRC);
10655 Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
10656 emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
10657 IsThumb1, IsThumb2);
10658 emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
10659 IsThumb1, IsThumb2);
10664 // Handle the leftover bytes with LDRB and STRB.
10665 // [scratch, srcOut] = LDRB_POST(srcIn, 1)
10666 // [destOut] = STRB_POST(scratch, destIn, 1)
10667 for (unsigned i = 0; i < BytesLeft; i++) {
10668 Register srcOut = MRI.createVirtualRegister(TRC);
10669 Register destOut = MRI.createVirtualRegister(TRC);
10670 Register scratch = MRI.createVirtualRegister(TRC);
10671 emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
10672 IsThumb1, IsThumb2);
10673 emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
10674 IsThumb1, IsThumb2);
10678 MI.eraseFromParent(); // The instruction is gone now.
10682 // Expand the pseudo op to a loop.
10685 // movw varEnd, # --> with thumb2
10687 // ldrcp varEnd, idx --> without thumb2
10688 // fallthrough --> loopMBB
10690 // PHI varPhi, varEnd, varLoop
10691 // PHI srcPhi, src, srcLoop
10692 // PHI destPhi, dst, destLoop
10693 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
10694 // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
10695 // subs varLoop, varPhi, #UnitSize
10697 // fallthrough --> exitMBB
10699 // epilogue to handle left-over bytes
10700 // [scratch, srcOut] = LDRB_POST(srcLoop, 1)
10701 // [destOut] = STRB_POST(scratch, destLoop, 1)
10702 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
10703 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
10704 MF->insert(It, loopMBB);
10705 MF->insert(It, exitMBB);
10707 // Transfer the remainder of BB and its successor edges to exitMBB.
10708 exitMBB->splice(exitMBB->begin(), BB,
10709 std::next(MachineBasicBlock::iterator(MI)), BB->end());
10710 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
10712 // Load an immediate to varEnd.
10713 Register varEnd = MRI.createVirtualRegister(TRC);
10714 if (Subtarget->useMovt()) {
10715 unsigned Vtmp = varEnd;
10716 if ((LoopSize & 0xFFFF0000) != 0)
10717 Vtmp = MRI.createVirtualRegister(TRC);
10718 BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16), Vtmp)
10719 .addImm(LoopSize & 0xFFFF)
10720 .add(predOps(ARMCC::AL));
10722 if ((LoopSize & 0xFFFF0000) != 0)
10723 BuildMI(BB, dl, TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16), varEnd)
10725 .addImm(LoopSize >> 16)
10726 .add(predOps(ARMCC::AL));
10728 MachineConstantPool *ConstantPool = MF->getConstantPool();
10729 Type *Int32Ty = Type::getInt32Ty(MF->getFunction().getContext());
10730 const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
10732 // MachineConstantPool wants an explicit alignment.
10733 Align Alignment = MF->getDataLayout().getPrefTypeAlign(Int32Ty);
10734 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Alignment);
10735 MachineMemOperand *CPMMO =
10736 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
10737 MachineMemOperand::MOLoad, 4, Align(4));
10740 BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci))
10741 .addReg(varEnd, RegState::Define)
10742 .addConstantPoolIndex(Idx)
10743 .add(predOps(ARMCC::AL))
10744 .addMemOperand(CPMMO);
10746 BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp))
10747 .addReg(varEnd, RegState::Define)
10748 .addConstantPoolIndex(Idx)
10750 .add(predOps(ARMCC::AL))
10751 .addMemOperand(CPMMO);
10753 BB->addSuccessor(loopMBB);
10755 // Generate the loop body:
10756 // varPhi = PHI(varLoop, varEnd)
10757 // srcPhi = PHI(srcLoop, src)
10758 // destPhi = PHI(destLoop, dst)
10759 MachineBasicBlock *entryBB = BB;
10761 Register varLoop = MRI.createVirtualRegister(TRC);
10762 Register varPhi = MRI.createVirtualRegister(TRC);
10763 Register srcLoop = MRI.createVirtualRegister(TRC);
10764 Register srcPhi = MRI.createVirtualRegister(TRC);
10765 Register destLoop = MRI.createVirtualRegister(TRC);
10766 Register destPhi = MRI.createVirtualRegister(TRC);
10768 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
10769 .addReg(varLoop).addMBB(loopMBB)
10770 .addReg(varEnd).addMBB(entryBB);
10771 BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
10772 .addReg(srcLoop).addMBB(loopMBB)
10773 .addReg(src).addMBB(entryBB);
10774 BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
10775 .addReg(destLoop).addMBB(loopMBB)
10776 .addReg(dest).addMBB(entryBB);
10778 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
10779 // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
10780 Register scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
10781 emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
10782 IsThumb1, IsThumb2);
10783 emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
10784 IsThumb1, IsThumb2);
10786 // Decrement loop variable by UnitSize.
10788 BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop)
10789 .add(t1CondCodeOp())
10792 .add(predOps(ARMCC::AL));
10794 MachineInstrBuilder MIB =
10795 BuildMI(*BB, BB->end(), dl,
10796 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
10799 .add(predOps(ARMCC::AL))
10800 .add(condCodeOp());
10801 MIB->getOperand(5).setReg(ARM::CPSR);
10802 MIB->getOperand(5).setIsDef(true);
10804 BuildMI(*BB, BB->end(), dl,
10805 TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
10806 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
10808 // loopMBB can loop back to loopMBB or fall through to exitMBB.
10809 BB->addSuccessor(loopMBB);
10810 BB->addSuccessor(exitMBB);
10812 // Add epilogue to handle BytesLeft.
10814 auto StartOfExit = exitMBB->begin();
10816 // [scratch, srcOut] = LDRB_POST(srcLoop, 1)
10817 // [destOut] = STRB_POST(scratch, destLoop, 1)
10818 unsigned srcIn = srcLoop;
10819 unsigned destIn = destLoop;
10820 for (unsigned i = 0; i < BytesLeft; i++) {
10821 Register srcOut = MRI.createVirtualRegister(TRC);
10822 Register destOut = MRI.createVirtualRegister(TRC);
10823 Register scratch = MRI.createVirtualRegister(TRC);
10824 emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
10825 IsThumb1, IsThumb2);
10826 emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
10827 IsThumb1, IsThumb2);
10832 MI.eraseFromParent(); // The instruction is gone now.
10836 MachineBasicBlock *
10837 ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
10838 MachineBasicBlock *MBB) const {
10839 const TargetMachine &TM = getTargetMachine();
10840 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
10841 DebugLoc DL = MI.getDebugLoc();
10843 assert(Subtarget->isTargetWindows() &&
10844 "__chkstk is only supported on Windows");
10845 assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
10847 // __chkstk takes the number of words to allocate on the stack in R4, and
10848 // returns the stack adjustment in number of bytes in R4. This will not
10849 // clobber any other registers (other than the obvious lr).
10851 // Although, technically, IP should be considered a register which may be
10852 // clobbered, the call itself will not touch it. Windows on ARM is a pure
10853 // thumb-2 environment, so there is no interworking required. As a result, we
10854 // do not expect a veneer to be emitted by the linker, clobbering IP.
10856 // Each module receives its own copy of __chkstk, so no import thunk is
10857 // required, again, ensuring that IP is not clobbered.
10859 // Finally, although some linkers may theoretically provide a trampoline for
10860 // out of range calls (which is quite common due to a 32M range limitation of
10861 // branches for Thumb), we can generate the long-call version via
10862 // -mcmodel=large, alleviating the need for the trampoline which may clobber IP.
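//
// As an illustrative sketch (not the exact encoding emitted here), for the
// small/medium/kernel code models the lowering below amounts to:
//   bl    __chkstk        ; R4 = word count on entry, byte count on return
//   sub.w sp, sp, r4
// while the large code model materialises the address of __chkstk with a
// movw/movt pair and uses blx instead.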
10865 switch (TM.getCodeModel()) {
10866 case CodeModel::Tiny:
10867 llvm_unreachable("Tiny code model not available on ARM.");
10868 case CodeModel::Small:
10869 case CodeModel::Medium:
10870 case CodeModel::Kernel:
10871 BuildMI(*MBB, MI, DL, TII.get(ARM::tBL))
10872 .add(predOps(ARMCC::AL))
10873 .addExternalSymbol("__chkstk")
10874 .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
10875 .addReg(ARM::R4, RegState::Implicit | RegState::Define)
10877 RegState::Implicit | RegState::Define | RegState::Dead)
10879 RegState::Implicit | RegState::Define | RegState::Dead);
10881 case CodeModel::Large: {
10882 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
10883 Register Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
10885 BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg)
10886 .addExternalSymbol("__chkstk");
10887 BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr))
10888 .add(predOps(ARMCC::AL))
10889 .addReg(Reg, RegState::Kill)
10890 .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
10891 .addReg(ARM::R4, RegState::Implicit | RegState::Define)
10893 RegState::Implicit | RegState::Define | RegState::Dead)
10895 RegState::Implicit | RegState::Define | RegState::Dead);
10900 BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr), ARM::SP)
10901 .addReg(ARM::SP, RegState::Kill)
10902 .addReg(ARM::R4, RegState::Kill)
10903 .setMIFlags(MachineInstr::FrameSetup)
10904 .add(predOps(ARMCC::AL))
10905 .add(condCodeOp());
10907 MI.eraseFromParent();
10911 MachineBasicBlock *
10912 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
10913 MachineBasicBlock *MBB) const {
10914 DebugLoc DL = MI.getDebugLoc();
10915 MachineFunction *MF = MBB->getParent();
10916 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
10918 MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock();
10919 MF->insert(++MBB->getIterator(), ContBB);
10920 ContBB->splice(ContBB->begin(), MBB,
10921 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
10922 ContBB->transferSuccessorsAndUpdatePHIs(MBB);
10923 MBB->addSuccessor(ContBB);
10925 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
10926 BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0));
10927 MF->push_back(TrapBB);
10928 MBB->addSuccessor(TrapBB);
10930 BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8))
10931 .addReg(MI.getOperand(0).getReg())
10933 .add(predOps(ARMCC::AL));
10934 BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc))
10937 .addReg(ARM::CPSR);
10939 MI.eraseFromParent();
10943 // The CPSR operand of SelectItr might be missing a kill marker
10944 // because there were multiple uses of CPSR, and ISel didn't know
10945 // which to mark. Figure out whether SelectItr should have had a
10946 // kill marker, and set it if it should. Returns the correct kill marker value.
10948 static bool checkAndUpdateCPSRKill(MachineBasicBlock::iterator SelectItr,
10949 MachineBasicBlock* BB,
10950 const TargetRegisterInfo* TRI) {
10951 // Scan forward through BB for a use/def of CPSR.
10952 MachineBasicBlock::iterator miI(std::next(SelectItr));
10953 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
10954 const MachineInstr& mi = *miI;
10955 if (mi.readsRegister(ARM::CPSR))
10957 if (mi.definesRegister(ARM::CPSR))
10958 break; // Should have kill-flag - update below.
10961 // If we hit the end of the block, check whether CPSR is live into a successor.
10963 if (miI == BB->end()) {
10964 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
10965 sEnd = BB->succ_end();
10966 sItr != sEnd; ++sItr) {
10967 MachineBasicBlock* succ = *sItr;
10968 if (succ->isLiveIn(ARM::CPSR))
10973 // We found a def, or hit the end of the basic block and CPSR wasn't live
10974 // out. SelectMI should have a kill flag on CPSR.
10975 SelectItr->addRegisterKilled(ARM::CPSR, TRI);
10979 MachineBasicBlock *
10980 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
10981 MachineBasicBlock *BB) const {
10982 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
10983 DebugLoc dl = MI.getDebugLoc();
10984 bool isThumb2 = Subtarget->isThumb2();
10985 switch (MI.getOpcode()) {
10988 llvm_unreachable("Unexpected instr type to insert");
10991 // Thumb1 post-indexed loads are really just single-register LDMs.
10992 case ARM::tLDR_postidx: {
10993 MachineOperand Def(MI.getOperand(1));
10994 BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD))
10996 .add(MI.getOperand(2)) // Rn
10997 .add(MI.getOperand(3)) // PredImm
10998 .add(MI.getOperand(4)) // PredReg
10999 .add(MI.getOperand(0)) // Rt
11001 MI.eraseFromParent();
11005 // The Thumb2 pre-indexed stores have the same MI operands, they just
11006 // define them differently in the .td files from the isel patterns, so
11007 // they need pseudos.
11008 case ARM::t2STR_preidx:
11009 MI.setDesc(TII->get(ARM::t2STR_PRE));
11011 case ARM::t2STRB_preidx:
11012 MI.setDesc(TII->get(ARM::t2STRB_PRE));
11014 case ARM::t2STRH_preidx:
11015 MI.setDesc(TII->get(ARM::t2STRH_PRE));
11018 case ARM::STRi_preidx:
11019 case ARM::STRBi_preidx: {
11020 unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
11021 : ARM::STRB_PRE_IMM;
11022 // Decode the offset.
11023 unsigned Offset = MI.getOperand(4).getImm();
11024 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
11025 Offset = ARM_AM::getAM2Offset(Offset);
11029 MachineMemOperand *MMO = *MI.memoperands_begin();
11030 BuildMI(*BB, MI, dl, TII->get(NewOpc))
11031 .add(MI.getOperand(0)) // Rn_wb
11032 .add(MI.getOperand(1)) // Rt
11033 .add(MI.getOperand(2)) // Rn
11034 .addImm(Offset) // offset (skip GPR==zero_reg)
11035 .add(MI.getOperand(5)) // pred
11036 .add(MI.getOperand(6))
11037 .addMemOperand(MMO);
11038 MI.eraseFromParent();
11041 case ARM::STRr_preidx:
11042 case ARM::STRBr_preidx:
11043 case ARM::STRH_preidx: {
11045 switch (MI.getOpcode()) {
11046 default: llvm_unreachable("unexpected opcode!");
11047 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
11048 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
11049 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
11051 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
11052 for (unsigned i = 0; i < MI.getNumOperands(); ++i)
11053 MIB.add(MI.getOperand(i));
11054 MI.eraseFromParent();
11058 case ARM::tMOVCCr_pseudo: {
11059 // To "insert" a SELECT_CC instruction, we actually have to insert the
11060 // diamond control-flow pattern. The incoming instruction knows the
11061 // destination vreg to set, the condition code register to branch on, the
11062 // true/false values to select between, and a branch opcode to use.
11063 const BasicBlock *LLVM_BB = BB->getBasicBlock();
11064 MachineFunction::iterator It = ++BB->getIterator();
11069 // cmpTY ccX, r1, r2
11071 // fallthrough --> copy0MBB
11072 MachineBasicBlock *thisMBB = BB;
11073 MachineFunction *F = BB->getParent();
11074 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
11075 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
11076 F->insert(It, copy0MBB);
11077 F->insert(It, sinkMBB);
11079 // Check whether CPSR is live past the tMOVCCr_pseudo.
11080 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
11081 if (!MI.killsRegister(ARM::CPSR) &&
11082 !checkAndUpdateCPSRKill(MI, thisMBB, TRI)) {
11083 copy0MBB->addLiveIn(ARM::CPSR);
11084 sinkMBB->addLiveIn(ARM::CPSR);
11087 // Transfer the remainder of BB and its successor edges to sinkMBB.
11088 sinkMBB->splice(sinkMBB->begin(), BB,
11089 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11090 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
11092 BB->addSuccessor(copy0MBB);
11093 BB->addSuccessor(sinkMBB);
11095 BuildMI(BB, dl, TII->get(ARM::tBcc))
11097 .addImm(MI.getOperand(3).getImm())
11098 .addReg(MI.getOperand(4).getReg());
11101 // %FalseValue = ...
11102 // # fallthrough to sinkMBB
11105 // Update machine-CFG edges
11106 BB->addSuccessor(sinkMBB);
11109 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
11112 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg())
11113 .addReg(MI.getOperand(1).getReg())
11115 .addReg(MI.getOperand(2).getReg())
11118 MI.eraseFromParent(); // The pseudo instruction is gone now.
11123 case ARM::BCCZi64: {
11124 // If there is an unconditional branch to the other successor, remove it.
11125 BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());
11127 // Compare both parts that make up the double comparison separately for equality.
11129 bool RHSisZero = MI.getOpcode() == ARM::BCCZi64;
11131 Register LHS1 = MI.getOperand(1).getReg();
11132 Register LHS2 = MI.getOperand(2).getReg();
11134 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
11137 .add(predOps(ARMCC::AL));
11138 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
11139 .addReg(LHS2).addImm(0)
11140 .addImm(ARMCC::EQ).addReg(ARM::CPSR);
11142 Register RHS1 = MI.getOperand(3).getReg();
11143 Register RHS2 = MI.getOperand(4).getReg();
11144 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
11147 .add(predOps(ARMCC::AL));
11148 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
11149 .addReg(LHS2).addReg(RHS2)
11150 .addImm(ARMCC::EQ).addReg(ARM::CPSR);
11153 MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB();
11154 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
11155 if (MI.getOperand(0).getImm() == ARMCC::NE)
11156 std::swap(destMBB, exitMBB);
11158 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
11159 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
11161 BuildMI(BB, dl, TII->get(ARM::t2B))
11163 .add(predOps(ARMCC::AL));
11165 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB);
11167 MI.eraseFromParent(); // The pseudo instruction is gone now.
11171 case ARM::Int_eh_sjlj_setjmp:
11172 case ARM::Int_eh_sjlj_setjmp_nofp:
11173 case ARM::tInt_eh_sjlj_setjmp:
11174 case ARM::t2Int_eh_sjlj_setjmp:
11175 case ARM::t2Int_eh_sjlj_setjmp_nofp:
11178 case ARM::Int_eh_sjlj_setup_dispatch:
11179 EmitSjLjDispatchBlock(MI, BB);
11184 // To insert an ABS instruction, we have to insert the
11185 // diamond control-flow pattern. The incoming instruction knows the
11186 // source vreg to test against 0, the destination vreg to set,
11187 // the condition code register to branch on, the
11188 // true/false values to select between, and a branch opcode to use.
11193 // BCC (branch to SinkBB if V0 >= 0)
11194 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0)
11195 // SinkBB: V1 = PHI(V2, V3)
11196 const BasicBlock *LLVM_BB = BB->getBasicBlock();
11197 MachineFunction::iterator BBI = ++BB->getIterator();
11198 MachineFunction *Fn = BB->getParent();
11199 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
11200 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB);
11201 Fn->insert(BBI, RSBBB);
11202 Fn->insert(BBI, SinkBB);
11204 Register ABSSrcReg = MI.getOperand(1).getReg();
11205 Register ABSDstReg = MI.getOperand(0).getReg();
11206 bool ABSSrcKIll = MI.getOperand(1).isKill();
11207 bool isThumb2 = Subtarget->isThumb2();
11208 MachineRegisterInfo &MRI = Fn->getRegInfo();
11209 // In Thumb mode, S must not be specified if the source register is the SP or
11210 // PC and if the destination register is the SP, so restrict the register class
11211 Register NewRsbDstReg = MRI.createVirtualRegister(
11212 isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);
11214 // Transfer the remainder of BB and its successor edges to sinkMBB.
11215 SinkBB->splice(SinkBB->begin(), BB,
11216 std::next(MachineBasicBlock::iterator(MI)), BB->end());
11217 SinkBB->transferSuccessorsAndUpdatePHIs(BB);
11219 BB->addSuccessor(RSBBB);
11220 BB->addSuccessor(SinkBB);
11222 // fall through to SinkMBB
11223 RSBBB->addSuccessor(SinkBB);
11225 // insert a cmp at the end of BB
11226 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
11229 .add(predOps(ARMCC::AL));
11231 // insert a bcc with opposite CC to ARMCC::MI at the end of BB
11233 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
11234 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);
11236 // insert rsbri in RSBBB
11237 // Note: BCC and rsbri will be converted into predicated rsbmi
11238 // by if-conversion pass
11239 BuildMI(*RSBBB, RSBBB->begin(), dl,
11240 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
11241 .addReg(ABSSrcReg, ABSSrcKill ? RegState::Kill : 0)
11243 .add(predOps(ARMCC::AL))
11244 .add(condCodeOp());
11246 // insert PHI in SinkBB,
11247 // reuse ABSDstReg to not change uses of ABS instruction
11248 BuildMI(*SinkBB, SinkBB->begin(), dl,
11249 TII->get(ARM::PHI), ABSDstReg)
11250 .addReg(NewRsbDstReg).addMBB(RSBBB)
11251 .addReg(ABSSrcReg).addMBB(BB);
11253 // remove ABS instruction
11254 MI.eraseFromParent();
11256 // return last added BB
11259 case ARM::COPY_STRUCT_BYVAL_I32:
11261 return EmitStructByval(MI, BB);
11262 case ARM::WIN__CHKSTK:
11263 return EmitLowered__chkstk(MI, BB);
11264 case ARM::WIN__DBZCHK:
11265 return EmitLowered__dbzchk(MI, BB);
11269 /// Attaches vregs to MEMCPY that it will use as scratch registers
11270 /// when it is expanded into LDM/STM. This is done as a post-isel lowering
11271 /// instead of as a custom inserter because we need the use list from the SDNode.
11272 static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
11273 MachineInstr &MI, const SDNode *Node) {
11274 bool isThumb1 = Subtarget->isThumb1Only();
11276 DebugLoc DL = MI.getDebugLoc();
11277 MachineFunction *MF = MI.getParent()->getParent();
11278 MachineRegisterInfo &MRI = MF->getRegInfo();
11279 MachineInstrBuilder MIB(*MF, MI);
11281 // If the new dst/src is unused mark it as dead.
11282 if (!Node->hasAnyUseOfValue(0)) {
11283 MI.getOperand(0).setIsDead(true);
11285 if (!Node->hasAnyUseOfValue(1)) {
11286 MI.getOperand(1).setIsDead(true);
11289 // The MEMCPY both defines and kills the scratch registers.
11290 for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
11291 Register TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
11292 : &ARM::GPRRegClass);
11293 MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
11297 void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
11298 SDNode *Node) const {
11299 if (MI.getOpcode() == ARM::MEMCPY) {
11300 attachMEMCPYScratchRegs(Subtarget, MI, Node);
11304 const MCInstrDesc *MCID = &MI.getDesc();
11305 // Adjust instructions that potentially set the 's' bit after isel, i.e. ADC, SBC, RSB,
11306 // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
11307 // operand is still set to noreg. If needed, set the optional operand's
11308 // register to CPSR, and remove the redundant implicit def.
11310 // e.g. ADCS (..., implicit-def CPSR) -> ADC (... opt:def CPSR).
11312 // Rename pseudo opcodes.
11313 unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
11316 const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
11317 MCID = &TII->get(NewOpc);
11319 assert(MCID->getNumOperands() ==
11320 MI.getDesc().getNumOperands() + 5 - MI.getDesc().getSize()
11321 && "converted opcode should be the same except for cc_out"
11322 " (and, on Thumb1, pred)");
11326 // Add the optional cc_out operand
11327 MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
11329 // On Thumb1, move all input operands to the end, then add the predicate
11330 if (Subtarget->isThumb1Only()) {
11331 for (unsigned c = MCID->getNumOperands() - 4; c--;) {
11332 MI.addOperand(MI.getOperand(1));
11333 MI.RemoveOperand(1);
11336 // Restore the ties
11337 for (unsigned i = MI.getNumOperands(); i--;) {
11338 const MachineOperand& op = MI.getOperand(i);
11339 if (op.isReg() && op.isUse()) {
11340 int DefIdx = MCID->getOperandConstraint(i, MCOI::TIED_TO);
11342 MI.tieOperands(DefIdx, i);
11346 MI.addOperand(MachineOperand::CreateImm(ARMCC::AL));
11347 MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/false));
11350 ccOutIdx = MCID->getNumOperands() - 1;
11352 ccOutIdx = MCID->getNumOperands() - 1;
11354 // Any ARM instruction that sets the 's' bit should specify an optional
11355 // "cc_out" operand in the last operand position.
11356 if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
11357 assert(!NewOpc && "Optional cc_out operand required");
11360 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
11361 // since we already have an optional CPSR def.
11362 bool definesCPSR = false;
11363 bool deadCPSR = false;
11364 for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
11366 const MachineOperand &MO = MI.getOperand(i);
11367 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
11368 definesCPSR = true;
11371 MI.RemoveOperand(i);
11375 if (!definesCPSR) {
11376 assert(!NewOpc && "Optional cc_out operand required");
11379 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
11381 assert(!MI.getOperand(ccOutIdx).getReg() &&
11382 "expect uninitialized optional cc_out operand");
11383 // Thumb1 instructions must have the S bit even if the CPSR is dead.
11384 if (!Subtarget->isThumb1Only())
11388 // If this instruction was defined with an optional CPSR def and its dag node
11389 // had a live implicit CPSR def, then activate the optional CPSR def.
11390 MachineOperand &MO = MI.getOperand(ccOutIdx);
11391 MO.setReg(ARM::CPSR);
11395 //===----------------------------------------------------------------------===//
11396 // ARM Optimization Hooks
11397 //===----------------------------------------------------------------------===//
11399 // Helper function that checks if N is a null or all ones constant.
11400 static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
11401 return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
11404 // Return true if N is conditionally 0 or all ones.
11405 // Detects these expressions where cc is an i1 value:
11407 // (select cc 0, y) [AllOnes=0]
11408 // (select cc y, 0) [AllOnes=0]
11409 // (zext cc) [AllOnes=0]
11410 // (sext cc) [AllOnes=0/1]
11411 // (select cc -1, y) [AllOnes=1]
11412 // (select cc y, -1) [AllOnes=1]
11414 // Invert is set when N is the null/all ones constant when CC is false.
11415 // OtherOp is set to the alternative value of N.
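// For example (a sketch of the cases above): given (select cc, 0, y) with
// AllOnes=false, this returns true with CC=cc, Invert=false and OtherOp=y;
// for (select cc, y, 0) it instead sets Invert=true, because N is only the
// zero constant when CC is false.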
11416 static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
11417 SDValue &CC, bool &Invert,
11419 SelectionDAG &DAG) {
11420 switch (N->getOpcode()) {
11421 default: return false;
11422 case ISD::SELECT: {
11423 CC = N->getOperand(0);
11424 SDValue N1 = N->getOperand(1);
11425 SDValue N2 = N->getOperand(2);
11426 if (isZeroOrAllOnes(N1, AllOnes)) {
11431 if (isZeroOrAllOnes(N2, AllOnes)) {
11438 case ISD::ZERO_EXTEND:
11439 // (zext cc) can never be the all ones value.
11443 case ISD::SIGN_EXTEND: {
11445 EVT VT = N->getValueType(0);
11446 CC = N->getOperand(0);
11447 if (CC.getValueType() != MVT::i1 || CC.getOpcode() != ISD::SETCC)
11451 // When looking for an AllOnes constant, N is an sext, and the 'other'
11453 OtherOp = DAG.getConstant(0, dl, VT);
11454 else if (N->getOpcode() == ISD::ZERO_EXTEND)
11455 // When looking for a 0 constant, N can be zext or sext.
11456 OtherOp = DAG.getConstant(1, dl, VT);
11458 OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
11465 // Combine a constant select operand into its use:
11467 // (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
11468 // (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
11469 // (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1]
11470 // (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
11471 // (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
11473 // The transform is rejected if the select doesn't have a constant operand that
11474 // is null, or all ones when AllOnes is set.
11476 // Also recognize sext/zext from i1:
11478 // (add (zext cc), x) -> (select cc (add x, 1), x)
11479 // (add (sext cc), x) -> (select cc (add x, -1), x)
11481 // These transformations eventually create predicated instructions.
11483 // @param N The node to transform.
11484 // @param Slct The N operand that is a select.
11485 // @param OtherOp The other N operand (x above).
11486 // @param DCI Context.
11487 // @param AllOnes Require the select constant to be all ones instead of null.
11488 // @returns The new node, or SDValue() on failure.
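// As an illustrative example (AllOnes=false):
//   (add (select cc, 0, c), x)
// is rewritten to
//   (select cc, x, (add x, c))
// so the add is only materialised on the path where the select did not
// produce the identity value.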
11490 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
11491 TargetLowering::DAGCombinerInfo &DCI,
11492 bool AllOnes = false) {
11493 SelectionDAG &DAG = DCI.DAG;
11494 EVT VT = N->getValueType(0);
11495 SDValue NonConstantVal;
11497 bool SwapSelectOps;
11498 if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
11499 NonConstantVal, DAG))
11502 // Slct is now known to be the desired identity constant when CC is true.
11503 SDValue TrueVal = OtherOp;
11504 SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
11505 OtherOp, NonConstantVal);
11506 // Unless SwapSelectOps says CC should be false.
11508 std::swap(TrueVal, FalseVal);
11510 return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
11511 CCOp, TrueVal, FalseVal);
11514 // Attempt combineSelectAndUse on each operand of a commutative operator N.
11516 SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
11517 TargetLowering::DAGCombinerInfo &DCI) {
11518 SDValue N0 = N->getOperand(0);
11519 SDValue N1 = N->getOperand(1);
11520 if (N0.getNode()->hasOneUse())
11521 if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
11523 if (N1.getNode()->hasOneUse())
11524 if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
11529 static bool IsVUZPShuffleNode(SDNode *N) {
11530 // VUZP shuffle node.
11531 if (N->getOpcode() == ARMISD::VUZP)
11534 // "VUZP" on i32 is an alias for VTRN.
11535 if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
11541 static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
11542 TargetLowering::DAGCombinerInfo &DCI,
11543 const ARMSubtarget *Subtarget) {
11544 // Look for ADD(VUZP.0, VUZP.1).
11545 if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
11549 // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
11550 if (!N->getValueType(0).is64BitVector())
11554 SelectionDAG &DAG = DCI.DAG;
11555 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11557 SDNode *Unzip = N0.getNode();
11558 EVT VT = N->getValueType(0);
11560 SmallVector<SDValue, 8> Ops;
11561 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
11562 TLI.getPointerTy(DAG.getDataLayout())));
11563 Ops.push_back(Unzip->getOperand(0));
11564 Ops.push_back(Unzip->getOperand(1));
11566 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
11569 static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
11570 TargetLowering::DAGCombinerInfo &DCI,
11571 const ARMSubtarget *Subtarget) {
11572 // Check for two extended operands.
11573 if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
11574 N1.getOpcode() == ISD::SIGN_EXTEND) &&
11575 !(N0.getOpcode() == ISD::ZERO_EXTEND &&
11576 N1.getOpcode() == ISD::ZERO_EXTEND))
11579 SDValue N00 = N0.getOperand(0);
11580 SDValue N10 = N1.getOperand(0);
11582 // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1))
11583 if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
11587 // We only recognize Q register paddl here; this can't be reached until
11588 // after type legalization.
11589 if (!N00.getValueType().is64BitVector() ||
11590 !N0.getValueType().is128BitVector())
11593 // Generate vpaddl.
11594 SelectionDAG &DAG = DCI.DAG;
11595 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11597 EVT VT = N->getValueType(0);
11599 SmallVector<SDValue, 8> Ops;
11600 // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
11602 if (N0.getOpcode() == ISD::SIGN_EXTEND)
11603 Opcode = Intrinsic::arm_neon_vpaddls;
11605 Opcode = Intrinsic::arm_neon_vpaddlu;
11606 Ops.push_back(DAG.getConstant(Opcode, dl,
11607 TLI.getPointerTy(DAG.getDataLayout())));
11608 EVT ElemTy = N00.getValueType().getVectorElementType();
11609 unsigned NumElts = VT.getVectorNumElements();
11610 EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
11611 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
11612 N00.getOperand(0), N00.getOperand(1));
11613 Ops.push_back(Concat);
11615 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
11618 // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
11619 // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
11620 // much easier to match.
11622 AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
11623 TargetLowering::DAGCombinerInfo &DCI,
11624 const ARMSubtarget *Subtarget) {
11625 // Only perform this optimization after legalize, and if NEON is available. We
11626 // also expect both operands to be BUILD_VECTORs.
11627 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
11628 || N0.getOpcode() != ISD::BUILD_VECTOR
11629 || N1.getOpcode() != ISD::BUILD_VECTOR)
11632 // Check output type since VPADDL operand elements can only be 8, 16, or 32.
11633 EVT VT = N->getValueType(0);
11634 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
11637 // Check that the vector operands are of the right form.
11638 // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
11639 // operands, where N is the size of the formed vector.
11640 // Each EXTRACT_VECTOR should have the same input vector and odd or even
11641 // index such that we have a pairwise add pattern.
11643 // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
11644 if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
11646 SDValue Vec = N0->getOperand(0)->getOperand(0);
11647 SDNode *V = Vec.getNode();
11648 unsigned nextIndex = 0;
11650 // For each operand of the ADD that is a BUILD_VECTOR,
11651 // check that each of its operands is an EXTRACT_VECTOR with
11652 // the same vector and the appropriate index.
11653 for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
11654 if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
11655 && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
11657 SDValue ExtVec0 = N0->getOperand(i);
11658 SDValue ExtVec1 = N1->getOperand(i);
11660 // First operand is the vector; verify it's the same.
11661 if (V != ExtVec0->getOperand(0).getNode() ||
11662 V != ExtVec1->getOperand(0).getNode())
11665 // Second is the constant; verify it's correct.
11666 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
11667 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
11669 // For the constant, we want to see all the even or all the odd.
11670 if (!C0 || !C1 || C0->getZExtValue() != nextIndex
11671 || C1->getZExtValue() != nextIndex+1)
11674 // Increment index.
11680 // Don't generate vpaddl+vmovn; we'll match it to vpadd later. Also make sure
11681 // we're using the entire input vector, otherwise there's a size/legality
11682 // mismatch somewhere.
11683 if (nextIndex != Vec.getValueType().getVectorNumElements() ||
11684 Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
11687 // Create VPADDL node.
11688 SelectionDAG &DAG = DCI.DAG;
11689 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11693 // Build operand list.
11694 SmallVector<SDValue, 8> Ops;
11695 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
11696 TLI.getPointerTy(DAG.getDataLayout())));
11698 // Input is the vector.
11699 Ops.push_back(Vec);
11701 // Get widened type and narrowed type.
11703 unsigned numElem = VT.getVectorNumElements();
11705 EVT inputLaneType = Vec.getValueType().getVectorElementType();
11706 switch (inputLaneType.getSimpleVT().SimpleTy) {
11707 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
11708 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
11709 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
11711 llvm_unreachable("Invalid vector element type for padd optimization.");
11714 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
11715 unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
11716 return DAG.getNode(ExtOp, dl, VT, tmp);
11719 static SDValue findMUL_LOHI(SDValue V) {
11720 if (V->getOpcode() == ISD::UMUL_LOHI ||
11721 V->getOpcode() == ISD::SMUL_LOHI)
11726 static SDValue AddCombineTo64BitSMLAL16(SDNode *AddcNode, SDNode *AddeNode,
11727 TargetLowering::DAGCombinerInfo &DCI,
11728 const ARMSubtarget *Subtarget) {
11729 if (!Subtarget->hasBaseDSP())
11732 // SMLALBB, SMLALBT, SMLALTB, SMLALTT multiply two 16-bit values and
11733 // accumulate the product into a 64-bit value. The 16-bit values will
11734 // be sign extended somehow or SRA'd into 32-bit values
11735 // (addc (adde (mul 16bit, 16bit), lo), hi)
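// For example (sketch): if both multiplicands are known 16-bit sign-extended
// values, the adds collapse into a single SMLALBB; if either operand is an
// (sra x, 16), the matching T variant (SMLALBT/SMLALTB/SMLALTT) is chosen
// below instead.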
11736 SDValue Mul = AddcNode->getOperand(0);
11737 SDValue Lo = AddcNode->getOperand(1);
11738 if (Mul.getOpcode() != ISD::MUL) {
11739 Lo = AddcNode->getOperand(0);
11740 Mul = AddcNode->getOperand(1);
11741 if (Mul.getOpcode() != ISD::MUL)
11745 SDValue SRA = AddeNode->getOperand(0);
11746 SDValue Hi = AddeNode->getOperand(1);
11747 if (SRA.getOpcode() != ISD::SRA) {
11748 SRA = AddeNode->getOperand(1);
11749 Hi = AddeNode->getOperand(0);
11750 if (SRA.getOpcode() != ISD::SRA)
11753 if (auto Const = dyn_cast<ConstantSDNode>(SRA.getOperand(1))) {
11754 if (Const->getZExtValue() != 31)
11759 if (SRA.getOperand(0) != Mul)
11762 SelectionDAG &DAG = DCI.DAG;
11763 SDLoc dl(AddcNode);
11764 unsigned Opcode = 0;
11768 if (isS16(Mul.getOperand(0), DAG) && isS16(Mul.getOperand(1), DAG)) {
11769 Opcode = ARMISD::SMLALBB;
11770 Op0 = Mul.getOperand(0);
11771 Op1 = Mul.getOperand(1);
11772 } else if (isS16(Mul.getOperand(0), DAG) && isSRA16(Mul.getOperand(1))) {
11773 Opcode = ARMISD::SMLALBT;
11774 Op0 = Mul.getOperand(0);
11775 Op1 = Mul.getOperand(1).getOperand(0);
11776 } else if (isSRA16(Mul.getOperand(0)) && isS16(Mul.getOperand(1), DAG)) {
11777 Opcode = ARMISD::SMLALTB;
11778 Op0 = Mul.getOperand(0).getOperand(0);
11779 Op1 = Mul.getOperand(1);
11780 } else if (isSRA16(Mul.getOperand(0)) && isSRA16(Mul.getOperand(1))) {
11781 Opcode = ARMISD::SMLALTT;
11782 Op0 = Mul->getOperand(0).getOperand(0);
11783 Op1 = Mul->getOperand(1).getOperand(0);
11789 SDValue SMLAL = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
11791 // Replace the ADD nodes' uses with the MLA node's values.
11792 SDValue HiMLALResult(SMLAL.getNode(), 1);
11793 SDValue LoMLALResult(SMLAL.getNode(), 0);
11795 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
11796 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);
11798 // Return original node to notify the driver to stop replacing.
11799 SDValue resNode(AddcNode, 0);
11803 static SDValue AddCombineTo64bitMLAL(SDNode *AddeSubeNode,
11804 TargetLowering::DAGCombinerInfo &DCI,
11805 const ARMSubtarget *Subtarget) {
11806 // Look for multiply add opportunities.
11807 // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where
11808 // each add node consumes a value from ISD::UMUL_LOHI and there is
11809 // a glue link from the first add to the second add.
11810 // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
11811 // a S/UMLAL instruction.
11814 // (The :lo result of the UMUL_LOHI and loAdd feed the ADDC; its :hi result and hiAdd feed the glued ADDE.)
11820 // In the special case where only the higher part of a signed result is used
11821 // and the add to the low part of the result of ISD::UMUL_LOHI adds or subtracts
11822 // a constant with the exact value of 0x80000000, we recognize we are dealing
11823 // with a "rounded multiply and add" (or subtract) and transform it into
11824 // either an ARMISD::SMMLAR or ARMISD::SMMLSR respectively.
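// For example (illustrative): a DAG computing
//   hiAdd + hi(a * b + 0x80000000)
// where the low half of the SMUL_LOHI is only used to add 0x80000000 and
// feed the carry into the high half, is a rounded multiply-accumulate and
// becomes a single SMMLAR; the subtracting form becomes SMMLSR.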
11826 assert((AddeSubeNode->getOpcode() == ARMISD::ADDE ||
11827 AddeSubeNode->getOpcode() == ARMISD::SUBE) &&
11828 "Expect an ADDE or SUBE");
11830 assert(AddeSubeNode->getNumOperands() == 3 &&
11831 AddeSubeNode->getOperand(2).getValueType() == MVT::i32 &&
11832 "ADDE node has the wrong inputs");
11834 // Check that we are chained to the right ADDC or SUBC node.
11835 SDNode *AddcSubcNode = AddeSubeNode->getOperand(2).getNode();
11836 if ((AddeSubeNode->getOpcode() == ARMISD::ADDE &&
11837 AddcSubcNode->getOpcode() != ARMISD::ADDC) ||
11838 (AddeSubeNode->getOpcode() == ARMISD::SUBE &&
11839 AddcSubcNode->getOpcode() != ARMISD::SUBC))
11842 SDValue AddcSubcOp0 = AddcSubcNode->getOperand(0);
11843 SDValue AddcSubcOp1 = AddcSubcNode->getOperand(1);
11845 // Check if the two operands are from the same mul_lohi node.
11846 if (AddcSubcOp0.getNode() == AddcSubcOp1.getNode())
11849 assert(AddcSubcNode->getNumValues() == 2 &&
11850 AddcSubcNode->getValueType(0) == MVT::i32 &&
11851 "Expect ADDC with two result values. First: i32");
11853 // Check that the ADDC adds the low result of the S/UMUL_LOHI. If not, it
11854 // may be an SMLAL which multiplies two 16-bit values.
11855 if (AddeSubeNode->getOpcode() == ARMISD::ADDE &&
11856 AddcSubcOp0->getOpcode() != ISD::UMUL_LOHI &&
11857 AddcSubcOp0->getOpcode() != ISD::SMUL_LOHI &&
11858 AddcSubcOp1->getOpcode() != ISD::UMUL_LOHI &&
11859 AddcSubcOp1->getOpcode() != ISD::SMUL_LOHI)
11860 return AddCombineTo64BitSMLAL16(AddcSubcNode, AddeSubeNode, DCI, Subtarget);
11862 // Check for the triangle shape.
11863 SDValue AddeSubeOp0 = AddeSubeNode->getOperand(0);
11864 SDValue AddeSubeOp1 = AddeSubeNode->getOperand(1);
11866 // Make sure that the ADDE/SUBE operands are not coming from the same node.
11867 if (AddeSubeOp0.getNode() == AddeSubeOp1.getNode())
11870 // Find the MUL_LOHI node walking up ADDE/SUBE's operands.
11871 bool IsLeftOperandMUL = false;
11872 SDValue MULOp = findMUL_LOHI(AddeSubeOp0);
11873 if (MULOp == SDValue())
11874 MULOp = findMUL_LOHI(AddeSubeOp1);
11876 IsLeftOperandMUL = true;
11877 if (MULOp == SDValue())
11880 // Figure out the right opcode.
11881 unsigned Opc = MULOp->getOpcode();
11882 unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;
11884 // Figure out the high and low input values to the MLAL node.
11885 SDValue *HiAddSub = nullptr;
11886 SDValue *LoMul = nullptr;
11887 SDValue *LowAddSub = nullptr;
11889 // Ensure that ADDE/SUBE is from high result of ISD::xMUL_LOHI.
11890 if ((AddeSubeOp0 != MULOp.getValue(1)) && (AddeSubeOp1 != MULOp.getValue(1)))
11893 if (IsLeftOperandMUL)
11894 HiAddSub = &AddeSubeOp1;
11896 HiAddSub = &AddeSubeOp0;
11898 // Ensure that LoMul and LowAddSub are taken from correct ISD::SMUL_LOHI node
11899 // whose low result is fed to the ADDC/SUBC we are checking.
11901 if (AddcSubcOp0 == MULOp.getValue(0)) {
11902 LoMul = &AddcSubcOp0;
11903 LowAddSub = &AddcSubcOp1;
11905 if (AddcSubcOp1 == MULOp.getValue(0)) {
11906 LoMul = &AddcSubcOp1;
11907 LowAddSub = &AddcSubcOp0;
11913 // If HiAddSub is the same node as ADDC/SUBC or is a predecessor of ADDC/SUBC
11914 // the replacement below will create a cycle.
11915 if (AddcSubcNode == HiAddSub->getNode() ||
11916 AddcSubcNode->isPredecessorOf(HiAddSub->getNode()))
11919 // Create the merged node.
11920 SelectionDAG &DAG = DCI.DAG;
11922 // Start building operand list.
11923 SmallVector<SDValue, 8> Ops;
11924 Ops.push_back(LoMul->getOperand(0));
11925 Ops.push_back(LoMul->getOperand(1));
11927 // Check whether we can use SMMLAR, SMMLSR or SMMULR instead. For this to be
11928 // the case, we must be doing signed multiplication and only use the higher
11929 // part of the result of the MLAL; furthermore, the LowAddSub must be a constant
11930 // addition or subtraction with the value of 0x80000000.
11931 if (Subtarget->hasV6Ops() && Subtarget->hasDSP() && Subtarget->useMulOps() &&
11932 FinalOpc == ARMISD::SMLAL && !AddeSubeNode->hasAnyUseOfValue(1) &&
11933 LowAddSub->getNode()->getOpcode() == ISD::Constant &&
11934 static_cast<ConstantSDNode *>(LowAddSub->getNode())->getZExtValue() ==
11936 Ops.push_back(*HiAddSub);
11937 if (AddcSubcNode->getOpcode() == ARMISD::SUBC) {
11938 FinalOpc = ARMISD::SMMLSR;
11940 FinalOpc = ARMISD::SMMLAR;
11942 SDValue NewNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode), MVT::i32, Ops);
11943 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), NewNode);
11945 return SDValue(AddeSubeNode, 0);
11946 } else if (AddcSubcNode->getOpcode() == ARMISD::SUBC)
11947 // SMMLS is generated during instruction selection and the rest of this
11948 // function can not handle the case where AddcSubcNode is a SUBC.
11951 // Finish building the operand list for {U/S}MLAL
11952 Ops.push_back(*LowAddSub);
11953 Ops.push_back(*HiAddSub);
11955 SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcSubcNode),
11956 DAG.getVTList(MVT::i32, MVT::i32), Ops);
11958 // Replace the ADD nodes' uses with the MLA node's values.
11959 SDValue HiMLALResult(MLALNode.getNode(), 1);
11960 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeSubeNode, 0), HiMLALResult);
11962 SDValue LoMLALResult(MLALNode.getNode(), 0);
11963 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcSubcNode, 0), LoMLALResult);
11965 // Return original node to notify the driver to stop replacing.
11966 return SDValue(AddeSubeNode, 0);
11969 static SDValue AddCombineTo64bitUMAAL(SDNode *AddeNode,
11970 TargetLowering::DAGCombinerInfo &DCI,
11971 const ARMSubtarget *Subtarget) {
11972 // UMAAL is similar to UMLAL except that it adds two unsigned values.
11973 // While trying to combine for the other MLAL nodes, first search for the
11974 // chance to use UMAAL. Check if Addc uses a node which has already
11975 // been combined into a UMLAL. The other pattern is UMLAL using Addc/Adde
11976 // as the addend, and it's handled in PerformUMLALCombine.
11978 if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
11979 return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
11981 // Check that we have a glued ADDC node.
11982 SDNode* AddcNode = AddeNode->getOperand(2).getNode();
11983 if (AddcNode->getOpcode() != ARMISD::ADDC)
11986 // Find the converted UMAAL or quit if it doesn't exist.
11987 SDNode *UmlalNode = nullptr;
11989 if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
11990 UmlalNode = AddcNode->getOperand(0).getNode();
11991 AddHi = AddcNode->getOperand(1);
11992 } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
11993 UmlalNode = AddcNode->getOperand(1).getNode();
11994 AddHi = AddcNode->getOperand(0);
11996 return AddCombineTo64bitMLAL(AddeNode, DCI, Subtarget);
11999 // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
12000 // the ADDC as well as Zero.
12001 if (!isNullConstant(UmlalNode->getOperand(3)))
12004 if ((isNullConstant(AddeNode->getOperand(0)) &&
12005 AddeNode->getOperand(1).getNode() == UmlalNode) ||
12006 (AddeNode->getOperand(0).getNode() == UmlalNode &&
12007 isNullConstant(AddeNode->getOperand(1)))) {
12008 SelectionDAG &DAG = DCI.DAG;
12009 SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
12010 UmlalNode->getOperand(2), AddHi };
12011 SDValue UMAAL = DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
12012 DAG.getVTList(MVT::i32, MVT::i32), Ops);
12014 // Replace the ADD nodes' uses with the UMAAL node's values.
12015 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
12016 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));
12018 // Return original node to notify the driver to stop replacing.
12019 return SDValue(AddeNode, 0);
12024 static SDValue PerformUMLALCombine(SDNode *N, SelectionDAG &DAG,
12025 const ARMSubtarget *Subtarget) {
12026 if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP())
12029 // Check that we have a pair of ADDC and ADDE as operands.
12030 // Both addends of the ADDE must be zero.
12031 SDNode* AddcNode = N->getOperand(2).getNode();
12032 SDNode* AddeNode = N->getOperand(3).getNode();
12033 if ((AddcNode->getOpcode() == ARMISD::ADDC) &&
12034 (AddeNode->getOpcode() == ARMISD::ADDE) &&
12035 isNullConstant(AddeNode->getOperand(0)) &&
12036 isNullConstant(AddeNode->getOperand(1)) &&
12037 (AddeNode->getOperand(2).getNode() == AddcNode))
12038 return DAG.getNode(ARMISD::UMAAL, SDLoc(N),
12039 DAG.getVTList(MVT::i32, MVT::i32),
12040 {N->getOperand(0), N->getOperand(1),
12041 AddcNode->getOperand(0), AddcNode->getOperand(1)});
12046 static SDValue PerformAddcSubcCombine(SDNode *N,
12047 TargetLowering::DAGCombinerInfo &DCI,
12048 const ARMSubtarget *Subtarget) {
12049 SelectionDAG &DAG(DCI.DAG);
12051 if (N->getOpcode() == ARMISD::SUBC) {
12052 // (SUBC (ADDE 0, 0, C), 1) -> C
12053 SDValue LHS = N->getOperand(0);
12054 SDValue RHS = N->getOperand(1);
12055 if (LHS->getOpcode() == ARMISD::ADDE &&
12056 isNullConstant(LHS->getOperand(0)) &&
12057 isNullConstant(LHS->getOperand(1)) && isOneConstant(RHS)) {
12058 return DCI.CombineTo(N, SDValue(N, 0), LHS->getOperand(2));
12062 if (Subtarget->isThumb1Only()) {
12063 SDValue RHS = N->getOperand(1);
12064 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
12065 int32_t imm = C->getSExtValue();
12066 if (imm < 0 && imm > std::numeric_limits<int>::min()) {
12068 RHS = DAG.getConstant(-imm, DL, MVT::i32);
12069 unsigned Opcode = (N->getOpcode() == ARMISD::ADDC) ? ARMISD::SUBC
12071 return DAG.getNode(Opcode, DL, N->getVTList(), N->getOperand(0), RHS);
12079 static SDValue PerformAddeSubeCombine(SDNode *N,
12080 TargetLowering::DAGCombinerInfo &DCI,
12081 const ARMSubtarget *Subtarget) {
12082 if (Subtarget->isThumb1Only()) {
12083 SelectionDAG &DAG = DCI.DAG;
12084 SDValue RHS = N->getOperand(1);
12085 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS)) {
12086 int64_t imm = C->getSExtValue();
12090 // The with-carry-in form matches bitwise not instead of the negation.
12091 // Effectively, the inverse interpretation of the carry flag already
12092 // accounts for part of the negation.
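// For example (sketch): (ADDE x, -5, carry) becomes (SUBE x, 4, carry),
// since ~(-5) == 4 and the borrow interpretation of the carry-in supplies
// the remaining -1 of the negation.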
12093 RHS = DAG.getConstant(~imm, DL, MVT::i32);
12095 unsigned Opcode = (N->getOpcode() == ARMISD::ADDE) ? ARMISD::SUBE
12097 return DAG.getNode(Opcode, DL, N->getVTList(),
12098 N->getOperand(0), RHS, N->getOperand(2));
12101 } else if (N->getOperand(1)->getOpcode() == ISD::SMUL_LOHI) {
12102 return AddCombineTo64bitMLAL(N, DCI, Subtarget);
12107 static SDValue PerformVSELECTCombine(SDNode *N,
12108 TargetLowering::DAGCombinerInfo &DCI,
12109 const ARMSubtarget *Subtarget) {
12110 // Transforms vselect(not(cond), lhs, rhs) into vselect(cond, rhs, lhs).
12112 // We need to re-implement this optimization here as the implementation in the
12113 // Target-Independent DAGCombiner does not handle the kind of constant we make
12114 // (it calls isConstOrConstSplat with AllowTruncation set to false - and for
12115 // good reason, allowing truncation there would break other targets).
12117 // Currently, this is only done for MVE, as it's the only target that benefits
12118 // from this transformation (e.g. VPNOT+VPSEL becomes a single VPSEL).
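// For example (sketch): with a v4i1 predicate p,
//   (vselect (xor p, splat(1)), a, b)
// becomes
//   (vselect p, b, a)
// so no VPNOT is needed before the VPSEL.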
12119 if (!Subtarget->hasMVEIntegerOps())
12122 if (N->getOperand(0).getOpcode() != ISD::XOR)
12124 SDValue XOR = N->getOperand(0);
12126 // Check if the XOR's RHS is either a 1, or a BUILD_VECTOR of 1s.
12127 // It is important to check with truncation allowed as the BUILD_VECTORs we
12128 // generate in those situations will truncate their operands.
12129 ConstantSDNode *Const =
12130 isConstOrConstSplat(XOR->getOperand(1), /*AllowUndefs*/ false,
12131 /*AllowTruncation*/ true);
12132 if (!Const || !Const->isOne())
12135 // Rewrite into vselect(cond, rhs, lhs).
12136 SDValue Cond = XOR->getOperand(0);
12137 SDValue LHS = N->getOperand(1);
12138 SDValue RHS = N->getOperand(2);
12139 EVT Type = N->getValueType(0);
12140 return DCI.DAG.getNode(ISD::VSELECT, SDLoc(N), Type, Cond, RHS, LHS);
12143 static SDValue PerformABSCombine(SDNode *N,
12144 TargetLowering::DAGCombinerInfo &DCI,
12145 const ARMSubtarget *Subtarget) {
12147 SelectionDAG &DAG = DCI.DAG;
12148 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
12150 if (TLI.isOperationLegal(N->getOpcode(), N->getValueType(0)))
12153 if (!TLI.expandABS(N, res, DAG))
12159 /// PerformADDECombine - Target-specific dag combine transform from
12160 /// ARMISD::ADDC, ARMISD::ADDE, and ISD::MUL_LOHI to MLAL or
12161 /// ARMISD::ADDC, ARMISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL
12162 static SDValue PerformADDECombine(SDNode *N,
12163 TargetLowering::DAGCombinerInfo &DCI,
12164 const ARMSubtarget *Subtarget) {
12165 // Only ARM and Thumb2 support UMLAL/SMLAL.
12166 if (Subtarget->isThumb1Only())
12167 return PerformAddeSubeCombine(N, DCI, Subtarget);
12169 // Only perform the checks after legalize when the pattern is available.
12170 if (DCI.isBeforeLegalize()) return SDValue();
12172 return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
12175 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
12176 /// operands N0 and N1. This is a helper for PerformADDCombine that is
12177 /// called with the default operands, and if that fails, with commuted
12179 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
12180 TargetLowering::DAGCombinerInfo &DCI,
12181 const ARMSubtarget *Subtarget){
12182 // Attempt to create vpadd for this add.
12183 if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
12186 // Attempt to create vpaddl for this add.
12187 if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
12189 if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
12193 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
12194 if (N0.getNode()->hasOneUse())
12195 if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
12200 static SDValue PerformADDVecReduce(SDNode *N,
12201 TargetLowering::DAGCombinerInfo &DCI,
12202 const ARMSubtarget *Subtarget) {
12203 if (!Subtarget->hasMVEIntegerOps() || N->getValueType(0) != MVT::i64)
12206 SDValue N0 = N->getOperand(0);
12207 SDValue N1 = N->getOperand(1);
12209 // We are looking for an i64 add of a VADDLVx. Due to these being i64's, this
12211 // t1: i32,i32 = ARMISD::VADDLVs x
12212 // t2: i64 = build_pair t1, t1:1
12213 // t3: i64 = add t2, y
12214 // We also need to check for sext / zext and commutative adds.
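// For example (sketch), the pattern above becomes roughly:
//   t4: i32,i32 = ARMISD::VADDLVAs y_lo, y_hi, x
//   t5: i64     = build_pair t4, t4:1
// folding the scalar i64 accumulation into the accumulating reduction.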
12215 auto MakeVecReduce = [&](unsigned Opcode, unsigned OpcodeA, SDValue NA,
12217 if (NB->getOpcode() != ISD::BUILD_PAIR)
12219 SDValue VecRed = NB->getOperand(0);
12220 if (VecRed->getOpcode() != Opcode || VecRed.getResNo() != 0 ||
12221 NB->getOperand(1) != SDValue(VecRed.getNode(), 1))
12225 SmallVector<SDValue, 4> Ops;
12226 Ops.push_back(DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA,
12227 DCI.DAG.getConstant(0, dl, MVT::i32)));
12228 Ops.push_back(DCI.DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, NA,
12229 DCI.DAG.getConstant(1, dl, MVT::i32)));
12230 for (unsigned i = 0, e = VecRed.getNumOperands(); i < e; i++)
12231 Ops.push_back(VecRed->getOperand(i));
12232 SDValue Red = DCI.DAG.getNode(OpcodeA, dl,
12233 DCI.DAG.getVTList({MVT::i32, MVT::i32}), Ops);
12234 return DCI.DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Red,
12235 SDValue(Red.getNode(), 1));
12238 if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N0, N1))
12240 if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N0, N1))
12242 if (SDValue M = MakeVecReduce(ARMISD::VADDLVs, ARMISD::VADDLVAs, N1, N0))
12244 if (SDValue M = MakeVecReduce(ARMISD::VADDLVu, ARMISD::VADDLVAu, N1, N0))
12246 if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N0, N1))
12248 if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N0, N1))
12250 if (SDValue M = MakeVecReduce(ARMISD::VADDLVps, ARMISD::VADDLVAps, N1, N0))
12252 if (SDValue M = MakeVecReduce(ARMISD::VADDLVpu, ARMISD::VADDLVApu, N1, N0))
12254 if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N0, N1))
12256 if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N0, N1))
12258 if (SDValue M = MakeVecReduce(ARMISD::VMLALVs, ARMISD::VMLALVAs, N1, N0))
12260 if (SDValue M = MakeVecReduce(ARMISD::VMLALVu, ARMISD::VMLALVAu, N1, N0))
12266 ARMTargetLowering::isDesirableToCommuteWithShift(const SDNode *N,
12267 CombineLevel Level) const {
12268 if (Level == BeforeLegalizeTypes)
12271 if (N->getOpcode() != ISD::SHL)
12274 if (Subtarget->isThumb1Only()) {
12275 // Avoid making expensive immediates by commuting shifts. (This logic
12276 // only applies to Thumb1 because ARM and Thumb2 immediates can be shifted and
12277 // still be encoded cheaply.)
12278 if (N->getOpcode() != ISD::SHL)
12280 SDValue N1 = N->getOperand(0);
12281 if (N1->getOpcode() != ISD::ADD && N1->getOpcode() != ISD::AND &&
12282 N1->getOpcode() != ISD::OR && N1->getOpcode() != ISD::XOR)
12284 if (auto *Const = dyn_cast<ConstantSDNode>(N1->getOperand(1))) {
12285 if (Const->getAPIntValue().ult(256))
12287 if (N1->getOpcode() == ISD::ADD && Const->getAPIntValue().slt(0) &&
12288 Const->getAPIntValue().sgt(-256))
12294 // Turn off commute-with-shift transform after legalization, so it doesn't
12295 // conflict with PerformSHLSimplify. (We could try to detect when
12296 // PerformSHLSimplify would trigger more precisely, but it isn't
12297 // really necessary.)
12301 bool ARMTargetLowering::shouldFoldConstantShiftPairToMask(
12302 const SDNode *N, CombineLevel Level) const {
12303 if (!Subtarget->isThumb1Only())
12306 if (Level == BeforeLegalizeTypes)
12312 bool ARMTargetLowering::preferIncOfAddToSubOfNot(EVT VT) const {
12313 if (!Subtarget->hasNEON()) {
12314 if (Subtarget->isThumb1Only())
12315 return VT.getScalarSizeInBits() <= 32;
12318 return VT.isScalarInteger();
12321 static SDValue PerformSHLSimplify(SDNode *N,
12322 TargetLowering::DAGCombinerInfo &DCI,
12323 const ARMSubtarget *ST) {
12324 // Allow the generic combiner to identify potential bswaps.
12325 if (DCI.isBeforeLegalize())
12328 // DAG combiner will fold:
12329 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
12330 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
12331 // Other code patterns that can also be modified have the following form:
12332 // b + ((a << 1) | 510)
12333 // b + ((a << 1) & 510)
12334 // b + ((a << 1) ^ 510)
12335 // b + ((a << 1) + 510)
12337 // Many instructions can perform the shift for free, but it requires both
12338 // the operands to be registers. If c1 << c2 is too large, a mov immediate
12339 // instruction will be needed. So, unfold back to the original pattern if:
12340 // - if c1 and c2 are small enough that they don't require mov imms.
12341 // - the user(s) of the node can perform a shl
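// For example (sketch): in (add b, (or (shl a, 1), 510)) the inner node is
// unfolded back to (shl (or a, 255), 1) (255 == 510 >> 1), so the outer ADD
// can fold the shift into its shifted-register operand.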
12343 // No shifted operands for 16-bit instructions.
12344 if (ST->isThumb() && ST->isThumb1Only())
12347 // Check that all the users could perform the shl themselves.
12348 for (auto U : N->uses()) {
12349 switch(U->getOpcode()) {
12359 // Check that the user isn't already using a constant because there
12360 // aren't any instructions that support an immediate operand and a
12361 // shifted operand.
12362 if (isa<ConstantSDNode>(U->getOperand(0)) ||
12363 isa<ConstantSDNode>(U->getOperand(1)))
12366 // Check that it's not already using a shift.
12367 if (U->getOperand(0).getOpcode() == ISD::SHL ||
12368 U->getOperand(1).getOpcode() == ISD::SHL)
12374 if (N->getOpcode() != ISD::ADD && N->getOpcode() != ISD::OR &&
12375 N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND)
12378 if (N->getOperand(0).getOpcode() != ISD::SHL)
12381 SDValue SHL = N->getOperand(0);
12383 auto *C1ShlC2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
12384 auto *C2 = dyn_cast<ConstantSDNode>(SHL.getOperand(1));
12385 if (!C1ShlC2 || !C2)
12388 APInt C2Int = C2->getAPIntValue();
12389 APInt C1Int = C1ShlC2->getAPIntValue();
12391 // Check that performing a lshr will not lose any information.
12392 APInt Mask = APInt::getHighBitsSet(C2Int.getBitWidth(),
12393 C2Int.getBitWidth() - C2->getZExtValue());
12394 if ((C1Int & Mask) != C1Int)
12397 // Shift the first constant.
12398 C1Int.lshrInPlace(C2Int);
12400 // The immediates are encoded as an 8-bit value that can be rotated.
12401 auto LargeImm = [](const APInt &Imm) {
12402 unsigned Zeros = Imm.countLeadingZeros() + Imm.countTrailingZeros();
12403 return Imm.getBitWidth() - Zeros > 8;
12406 if (LargeImm(C1Int) || LargeImm(C2Int))
12409 SelectionDAG &DAG = DCI.DAG;
12411 SDValue X = SHL.getOperand(0);
12412 SDValue BinOp = DAG.getNode(N->getOpcode(), dl, MVT::i32, X,
12413 DAG.getConstant(C1Int, dl, MVT::i32));
12414 // Shift left to compensate for the lshr of C1Int.
12415 SDValue Res = DAG.getNode(ISD::SHL, dl, MVT::i32, BinOp, SHL.getOperand(1));
12417 LLVM_DEBUG(dbgs() << "Simplify shl use:\n"; SHL.getOperand(0).dump();
12418 SHL.dump(); N->dump());
12419 LLVM_DEBUG(dbgs() << "Into:\n"; X.dump(); BinOp.dump(); Res.dump());
12424 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
12426 static SDValue PerformADDCombine(SDNode *N,
12427 TargetLowering::DAGCombinerInfo &DCI,
12428 const ARMSubtarget *Subtarget) {
12429 SDValue N0 = N->getOperand(0);
12430 SDValue N1 = N->getOperand(1);
12432 // Only works one way, because it needs an immediate operand.
12433 if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
12436 if (SDValue Result = PerformADDVecReduce(N, DCI, Subtarget))
12439 // First try with the default operand order.
12440 if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
12443 // If that didn't work, try again with the operands commuted.
12444 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
12447 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
12449 static SDValue PerformSUBCombine(SDNode *N,
12450 TargetLowering::DAGCombinerInfo &DCI,
12451 const ARMSubtarget *Subtarget) {
12452 SDValue N0 = N->getOperand(0);
12453 SDValue N1 = N->getOperand(1);
12455 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
12456 if (N1.getNode()->hasOneUse())
12457 if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
12460 if (!Subtarget->hasMVEIntegerOps() || !N->getValueType(0).isVector())
12463 // Fold (sub (ARMvmovImm 0), (ARMvdup x)) -> (ARMvdup (sub 0, x))
12464 // so that we can readily pattern match more mve instructions which can use
12465 // a scalar operand.
12466 SDValue VDup = N->getOperand(1);
12467 if (VDup->getOpcode() != ARMISD::VDUP)
12470 SDValue VMov = N->getOperand(0);
12471 if (VMov->getOpcode() == ISD::BITCAST)
12472 VMov = VMov->getOperand(0);
12474 if (VMov->getOpcode() != ARMISD::VMOVIMM || !isZeroVector(VMov))
12478 SDValue Negate = DCI.DAG.getNode(ISD::SUB, dl, MVT::i32,
12479 DCI.DAG.getConstant(0, dl, MVT::i32),
12480 VDup->getOperand(0));
12481 return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0), Negate);
12484 /// PerformVMULCombine
12485 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
12486 /// special multiplier accumulator forwarding.
12487 /// vmul d3, d0, d2
12488 /// vmla d3, d1, d2
12490 /// vadd d3, d0, d1
12491 /// vmul d3, d3, d2
12492 // However, for (A + B) * (A + B) the distributed form is not faster, so that case is skipped.
12499 static SDValue PerformVMULCombine(SDNode *N,
12500 TargetLowering::DAGCombinerInfo &DCI,
12501 const ARMSubtarget *Subtarget) {
12502 if (!Subtarget->hasVMLxForwarding())
12505 SelectionDAG &DAG = DCI.DAG;
12506 SDValue N0 = N->getOperand(0);
12507 SDValue N1 = N->getOperand(1);
12508 unsigned Opcode = N0.getOpcode();
12509 if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
12510 Opcode != ISD::FADD && Opcode != ISD::FSUB) {
12511 Opcode = N1.getOpcode();
12512 if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
12513 Opcode != ISD::FADD && Opcode != ISD::FSUB)
12521 EVT VT = N->getValueType(0);
12523 SDValue N00 = N0->getOperand(0);
12524 SDValue N01 = N0->getOperand(1);
12525 return DAG.getNode(Opcode, DL, VT,
12526 DAG.getNode(ISD::MUL, DL, VT, N00, N1),
12527 DAG.getNode(ISD::MUL, DL, VT, N01, N1));
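// The combine below (as far as the code shows) recognizes a v2i64 multiply whose
// 64-bit lanes really hold sign- or zero-extended 32-bit values, and rewrites it
// as an MVE long multiply (VMULLs/VMULLu) on the v4i32 view of the registers.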
12530 static SDValue PerformMVEVMULLCombine(SDNode *N, SelectionDAG &DAG,
12531 const ARMSubtarget *Subtarget) {
12532 EVT VT = N->getValueType(0);
12533 if (VT != MVT::v2i64)
12536 SDValue N0 = N->getOperand(0);
12537 SDValue N1 = N->getOperand(1);
12539 auto IsSignExt = [&](SDValue Op) {
12540 if (Op->getOpcode() != ISD::SIGN_EXTEND_INREG)
12542 EVT VT = cast<VTSDNode>(Op->getOperand(1))->getVT();
12543 if (VT.getScalarSizeInBits() == 32)
12544 return Op->getOperand(0);
12547 auto IsZeroExt = [&](SDValue Op) {
12548 // Zero extends are a little more awkward. At the point we are matching
12549 // this, we are looking for an AND with a (-1, 0, -1, 0) buildvector mask.
12550 // That might be before or after a bitcast depending on how the and is
12551 // placed. Because this has to look through bitcasts, it is currently only
12552 // supported on LE.
12553 if (!Subtarget->isLittle())
12557 if (And->getOpcode() == ISD::BITCAST)
12558 And = And->getOperand(0);
12559 if (And->getOpcode() != ISD::AND)
12561 SDValue Mask = And->getOperand(1);
12562 if (Mask->getOpcode() == ISD::BITCAST)
12563 Mask = Mask->getOperand(0);
12565 if (Mask->getOpcode() != ISD::BUILD_VECTOR ||
12566 Mask.getValueType() != MVT::v4i32)
12568 if (isAllOnesConstant(Mask->getOperand(0)) &&
12569 isNullConstant(Mask->getOperand(1)) &&
12570 isAllOnesConstant(Mask->getOperand(2)) &&
12571 isNullConstant(Mask->getOperand(3)))
12572 return And->getOperand(0);
12577 if (SDValue Op0 = IsSignExt(N0)) {
12578 if (SDValue Op1 = IsSignExt(N1)) {
12579 SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0);
12580 SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1);
12581 return DAG.getNode(ARMISD::VMULLs, dl, VT, New0a, New1a);
12584 if (SDValue Op0 = IsZeroExt(N0)) {
12585 if (SDValue Op1 = IsZeroExt(N1)) {
12586 SDValue New0a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op0);
12587 SDValue New1a = DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, MVT::v4i32, Op1);
12588 return DAG.getNode(ARMISD::VMULLu, dl, VT, New0a, New1a);
12595 static SDValue PerformMULCombine(SDNode *N,
12596 TargetLowering::DAGCombinerInfo &DCI,
12597 const ARMSubtarget *Subtarget) {
12598 SelectionDAG &DAG = DCI.DAG;
12600 EVT VT = N->getValueType(0);
12601 if (Subtarget->hasMVEIntegerOps() && VT == MVT::v2i64)
12602 return PerformMVEVMULLCombine(N, DAG, Subtarget);
12604 if (Subtarget->isThumb1Only())
12607 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
12610 if (VT.is64BitVector() || VT.is128BitVector())
12611 return PerformVMULCombine(N, DCI, Subtarget);
12612 if (VT != MVT::i32)
12615 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
12619 int64_t MulAmt = C->getSExtValue();
12620 unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
12622 ShiftAmt = ShiftAmt & (32 - 1);
12623 SDValue V = N->getOperand(0);
12627 MulAmt >>= ShiftAmt;
12630 if (isPowerOf2_32(MulAmt - 1)) {
12631 // (mul x, 2^N + 1) => (add (shl x, N), x)
12632 Res = DAG.getNode(ISD::ADD, DL, VT,
12634 DAG.getNode(ISD::SHL, DL, VT,
12636 DAG.getConstant(Log2_32(MulAmt - 1), DL,
12638 } else if (isPowerOf2_32(MulAmt + 1)) {
12639 // (mul x, 2^N - 1) => (sub (shl x, N), x)
12640 Res = DAG.getNode(ISD::SUB, DL, VT,
12641 DAG.getNode(ISD::SHL, DL, VT,
12643 DAG.getConstant(Log2_32(MulAmt + 1), DL,
12649 uint64_t MulAmtAbs = -MulAmt;
12650 if (isPowerOf2_32(MulAmtAbs + 1)) {
12651 // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
12652 Res = DAG.getNode(ISD::SUB, DL, VT,
12654 DAG.getNode(ISD::SHL, DL, VT,
12656 DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
12658 } else if (isPowerOf2_32(MulAmtAbs - 1)) {
12659 // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
12660 Res = DAG.getNode(ISD::ADD, DL, VT,
12662 DAG.getNode(ISD::SHL, DL, VT,
12664 DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
12666 Res = DAG.getNode(ISD::SUB, DL, VT,
12667 DAG.getConstant(0, DL, MVT::i32), Res);
12673 Res = DAG.getNode(ISD::SHL, DL, VT,
12674 Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));
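// For example, mul x, 20: ShiftAmt is 2, the reduced amount is 5 = 2^2 + 1, so the
// result is (shl (add (shl x, 2), x), 2).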
12676 // Do not add new nodes to DAG combiner worklist.
12677 DCI.CombineTo(N, Res, false);
12681 static SDValue CombineANDShift(SDNode *N,
12682 TargetLowering::DAGCombinerInfo &DCI,
12683 const ARMSubtarget *Subtarget) {
12684 // Allow DAGCombine to pattern-match before we touch the canonical form.
12685 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
12688 if (N->getValueType(0) != MVT::i32)
12691 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
12695 uint32_t C1 = (uint32_t)N1C->getZExtValue();
12696 // Don't transform uxtb/uxth.
12697 if (C1 == 255 || C1 == 65535)
12700 SDNode *N0 = N->getOperand(0).getNode();
12701 if (!N0->hasOneUse())
12704 if (N0->getOpcode() != ISD::SHL && N0->getOpcode() != ISD::SRL)
12707 bool LeftShift = N0->getOpcode() == ISD::SHL;
12709 ConstantSDNode *N01C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
12713 uint32_t C2 = (uint32_t)N01C->getZExtValue();
12714 if (!C2 || C2 >= 32)
12717 // Clear irrelevant bits in the mask.
12723 SelectionDAG &DAG = DCI.DAG;
12726 // We have a pattern of the form "(and (shl x, c2) c1)" or
12727 // "(and (srl x, c2) c1)", where c1 is a shifted mask. Try to
12728 // transform to a pair of shifts, to save materializing c1.
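// For example, (and (srl x, 3), 0x1ffff) can become (srl (shl x, 12), 15), which
// on Thumb1 avoids materializing the 17-bit mask 0x1ffff in a register.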
12730 // First pattern: right shift, then mask off leading bits.
12731 // FIXME: Use demanded bits?
12732 if (!LeftShift && isMask_32(C1)) {
12733 uint32_t C3 = countLeadingZeros(C1);
12735 SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
12736 DAG.getConstant(C3 - C2, DL, MVT::i32));
12737 return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
12738 DAG.getConstant(C3, DL, MVT::i32));
12742 // First pattern, reversed: left shift, then mask off trailing bits.
12743 if (LeftShift && isMask_32(~C1)) {
12744 uint32_t C3 = countTrailingZeros(C1);
12746 SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
12747 DAG.getConstant(C3 - C2, DL, MVT::i32));
12748 return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
12749 DAG.getConstant(C3, DL, MVT::i32));
12753 // Second pattern: left shift, then mask off leading bits.
12754 // FIXME: Use demanded bits?
12755 if (LeftShift && isShiftedMask_32(C1)) {
12756 uint32_t Trailing = countTrailingZeros(C1);
12757 uint32_t C3 = countLeadingZeros(C1);
12758 if (Trailing == C2 && C2 + C3 < 32) {
12759 SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
12760 DAG.getConstant(C2 + C3, DL, MVT::i32));
12761 return DAG.getNode(ISD::SRL, DL, MVT::i32, SHL,
12762 DAG.getConstant(C3, DL, MVT::i32));
12766 // Second pattern, reversed: right shift, then mask off trailing bits.
12767 // FIXME: Handle other patterns of known/demanded bits.
12768 if (!LeftShift && isShiftedMask_32(C1)) {
12769 uint32_t Leading = countLeadingZeros(C1);
12770 uint32_t C3 = countTrailingZeros(C1);
12771 if (Leading == C2 && C2 + C3 < 32) {
12772 SDValue SHL = DAG.getNode(ISD::SRL, DL, MVT::i32, N0->getOperand(0),
12773 DAG.getConstant(C2 + C3, DL, MVT::i32));
12774 return DAG.getNode(ISD::SHL, DL, MVT::i32, SHL,
12775 DAG.getConstant(C3, DL, MVT::i32));
12779 // FIXME: Transform "(and (shl x, c2) c1)" ->
12780 // "(shl (and x, c1>>c2), c2)" if "c1 >> c2" is a cheaper immediate than
12785 static SDValue PerformANDCombine(SDNode *N,
12786 TargetLowering::DAGCombinerInfo &DCI,
12787 const ARMSubtarget *Subtarget) {
12788 // Attempt to use immediate-form VBIC
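// For example, a v4i32 AND with a constant splat of 0xffffff00 can be emitted as a
// VBIC.i32 with immediate 0xff (clear the low byte of each lane) instead of
// materializing the mask in a register.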
12789 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
12791 EVT VT = N->getValueType(0);
12792 SelectionDAG &DAG = DCI.DAG;
12794 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT) || VT == MVT::v4i1 ||
12795 VT == MVT::v8i1 || VT == MVT::v16i1)
12798 APInt SplatBits, SplatUndef;
12799 unsigned SplatBitSize;
12801 if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) &&
12802 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
12803 if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 ||
12804 SplatBitSize == 64) {
12806 SDValue Val = isVMOVModifiedImm((~SplatBits).getZExtValue(),
12807 SplatUndef.getZExtValue(), SplatBitSize,
12808 DAG, dl, VbicVT, VT, OtherModImm);
12809 if (Val.getNode()) {
12811 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
12812 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
12813 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
12818 if (!Subtarget->isThumb1Only()) {
12819 // fold (and (select cc, -1, c), x) -> (select cc, x, (and x, c))
12820 if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
12823 if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
12827 if (Subtarget->isThumb1Only())
12828 if (SDValue Result = CombineANDShift(N, DCI, Subtarget))
12834 // Try combining OR nodes to SMULWB, SMULWT.
12835 static SDValue PerformORCombineToSMULWBT(SDNode *OR,
12836 TargetLowering::DAGCombinerInfo &DCI,
12837 const ARMSubtarget *Subtarget) {
12838 if (!Subtarget->hasV6Ops() ||
12839 (Subtarget->isThumb() &&
12840 (!Subtarget->hasThumb2() || !Subtarget->hasDSP())))
12843 SDValue SRL = OR->getOperand(0);
12844 SDValue SHL = OR->getOperand(1);
12846 if (SRL.getOpcode() != ISD::SRL || SHL.getOpcode() != ISD::SHL) {
12847 SRL = OR->getOperand(1);
12848 SHL = OR->getOperand(0);
12850 if (!isSRL16(SRL) || !isSHL16(SHL))
12853 // The first operands to the shifts need to be the two results from the
12854 // same smul_lohi node.
12855 if ((SRL.getOperand(0).getNode() != SHL.getOperand(0).getNode()) ||
12856 SRL.getOperand(0).getOpcode() != ISD::SMUL_LOHI)
12859 SDNode *SMULLOHI = SRL.getOperand(0).getNode();
12860 if (SRL.getOperand(0) != SDValue(SMULLOHI, 0) ||
12861 SHL.getOperand(0) != SDValue(SMULLOHI, 1))
12865 // (or (srl (smul_lohi ?, ?), 16), (shl (smul_lohi ?, ?), 16))
12866 // For SMULW[B|T], smul_lohi will take a 32-bit and a 16-bit argument.
12867 // For SMULWB the 16-bit value will be sign extended somehow.
12868 // For SMULWT only the SRA is required.
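// (SMULWB/SMULWT multiply a 32-bit register by the bottom/top halfword of another
// register and keep the top 32 bits of the 48-bit product, which is exactly the
// value that the srl/shl/or of the smul_lohi halves reconstructs.)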
12869 // Check both sides of SMUL_LOHI
12870 SDValue OpS16 = SMULLOHI->getOperand(0);
12871 SDValue OpS32 = SMULLOHI->getOperand(1);
12873 SelectionDAG &DAG = DCI.DAG;
12874 if (!isS16(OpS16, DAG) && !isSRA16(OpS16)) {
12876 OpS32 = SMULLOHI->getOperand(0);
12880 unsigned Opcode = 0;
12881 if (isS16(OpS16, DAG))
12882 Opcode = ARMISD::SMULWB;
12883 else if (isSRA16(OpS16)) {
12884 Opcode = ARMISD::SMULWT;
12885 OpS16 = OpS16->getOperand(0);
12890 SDValue Res = DAG.getNode(Opcode, dl, MVT::i32, OpS32, OpS16);
12891 DAG.ReplaceAllUsesOfValueWith(SDValue(OR, 0), Res);
12892 return SDValue(OR, 0);
12895 static SDValue PerformORCombineToBFI(SDNode *N,
12896 TargetLowering::DAGCombinerInfo &DCI,
12897 const ARMSubtarget *Subtarget) {
12898 // BFI is only available on V6T2+
12899 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
12902 EVT VT = N->getValueType(0);
12903 SDValue N0 = N->getOperand(0);
12904 SDValue N1 = N->getOperand(1);
12905 SelectionDAG &DAG = DCI.DAG;
12907 // 1) or (and A, mask), val => ARMbfi A, val, mask
12908 // iff (val & mask) == val
12910 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
12911 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
12912 // && mask == ~mask2
12913 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
12914 // && ~mask == mask2
12915 // (i.e., copy a bitfield value into another bitfield of the same width)
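// For example, (or (and A, 0xffff00ff), 0x3400) matches case (1) and becomes
// (ARMbfi A, 0x34, 0xffff00ff), i.e. the value 0x34 is inserted into bits 8..15 of A.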
12917 if (VT != MVT::i32)
12920 SDValue N00 = N0.getOperand(0);
12922 // The value and the mask need to be constants so we can verify this is
12923 // actually a bitfield set. If the mask is 0xffff, we can do better
12924 // via a movt instruction, so don't use BFI in that case.
12925 SDValue MaskOp = N0.getOperand(1);
12926 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
12929 unsigned Mask = MaskC->getZExtValue();
12930 if (Mask == 0xffff)
12933 // Case (1): or (and A, mask), val => ARMbfi A, val, mask
12934 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
12936 unsigned Val = N1C->getZExtValue();
12937 if ((Val & ~Mask) != Val)
12940 if (ARM::isBitFieldInvertedMask(Mask)) {
12941 Val >>= countTrailingZeros(~Mask);
12943 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
12944 DAG.getConstant(Val, DL, MVT::i32),
12945 DAG.getConstant(Mask, DL, MVT::i32));
12947 DCI.CombineTo(N, Res, false);
12948 // Return value from the original node to inform the combiner that N is now dead.
12950 return SDValue(N, 0);
12952 } else if (N1.getOpcode() == ISD::AND) {
12953 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
12954 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
12957 unsigned Mask2 = N11C->getZExtValue();
12959 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern to work correctly.
12961 if (ARM::isBitFieldInvertedMask(Mask) &&
12962 (Mask == ~Mask2)) {
12963 // The pack halfword instruction works better for masks that fit it,
12964 // so use that when it's available.
12965 if (Subtarget->hasDSP() &&
12966 (Mask == 0xffff || Mask == 0xffff0000))
12969 unsigned amt = countTrailingZeros(Mask2);
12970 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
12971 DAG.getConstant(amt, DL, MVT::i32));
12972 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
12973 DAG.getConstant(Mask, DL, MVT::i32));
12974 DCI.CombineTo(N, Res, false);
12975 // Return value from the original node to inform the combiner that N is now dead.
12977 return SDValue(N, 0);
12978 } else if (ARM::isBitFieldInvertedMask(~Mask) &&
12979 (~Mask == Mask2)) {
12980 // The pack halfword instruction works better for masks that fit it,
12981 // so use that when it's available.
12982 if (Subtarget->hasDSP() &&
12983 (Mask2 == 0xffff || Mask2 == 0xffff0000))
12986 unsigned lsb = countTrailingZeros(Mask);
12987 Res = DAG.getNode(ISD::SRL, DL, VT, N00,
12988 DAG.getConstant(lsb, DL, MVT::i32));
12989 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
12990 DAG.getConstant(Mask2, DL, MVT::i32));
12991 DCI.CombineTo(N, Res, false);
12992 // Return value from the original node to inform the combiner that N is now dead.
12994 return SDValue(N, 0);
12998 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
12999 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
13000 ARM::isBitFieldInvertedMask(~Mask)) {
13001 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
13002 // where lsb(mask) == #shamt and masked bits of B are known zero.
13003 SDValue ShAmt = N00.getOperand(1);
13004 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
13005 unsigned LSB = countTrailingZeros(Mask);
13009 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
13010 DAG.getConstant(~Mask, DL, MVT::i32));
13012 DCI.CombineTo(N, Res, false);
13013 // Return value from the original node to inform the combiner that N is now dead.
13015 return SDValue(N, 0);
13021 static bool isValidMVECond(unsigned CC, bool IsFloat) {
13038 static ARMCC::CondCodes getVCMPCondCode(SDValue N) {
13039 if (N->getOpcode() == ARMISD::VCMP)
13040 return (ARMCC::CondCodes)N->getConstantOperandVal(2);
13041 else if (N->getOpcode() == ARMISD::VCMPZ)
13042 return (ARMCC::CondCodes)N->getConstantOperandVal(1);
13044 llvm_unreachable("Not a VCMP/VCMPZ!");
13047 static bool CanInvertMVEVCMP(SDValue N) {
13048 ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N));
13049 return isValidMVECond(CC, N->getOperand(0).getValueType().isFloatingPoint());
13052 static SDValue PerformORCombine_i1(SDNode *N,
13053 TargetLowering::DAGCombinerInfo &DCI,
13054 const ARMSubtarget *Subtarget) {
13055 // Try to invert "or A, B" -> "and ~A, ~B", as the "and" is easier to chain
13056 // together with predicates
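// (Both operands and the final AND are inverted, so the value is unchanged; this is
// just De Morgan's law, applied because the AND form chains better.)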
13057 EVT VT = N->getValueType(0);
13059 SDValue N0 = N->getOperand(0);
13060 SDValue N1 = N->getOperand(1);
13062 auto IsFreelyInvertable = [&](SDValue V) {
13063 if (V->getOpcode() == ARMISD::VCMP || V->getOpcode() == ARMISD::VCMPZ)
13064 return CanInvertMVEVCMP(V);
13068 // At least one operand must be freely invertable.
13069 if (!(IsFreelyInvertable(N0) || IsFreelyInvertable(N1)))
13072 SDValue NewN0 = DCI.DAG.getLogicalNOT(DL, N0, VT);
13073 SDValue NewN1 = DCI.DAG.getLogicalNOT(DL, N1, VT);
13074 SDValue And = DCI.DAG.getNode(ISD::AND, DL, VT, NewN0, NewN1);
13075 return DCI.DAG.getLogicalNOT(DL, And, VT);
13078 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR
13079 static SDValue PerformORCombine(SDNode *N,
13080 TargetLowering::DAGCombinerInfo &DCI,
13081 const ARMSubtarget *Subtarget) {
13082 // Attempt to use immediate-form VORR
13083 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
13085 EVT VT = N->getValueType(0);
13086 SelectionDAG &DAG = DCI.DAG;
13088 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
13091 if (Subtarget->hasMVEIntegerOps() &&
13092 (VT == MVT::v4i1 || VT == MVT::v8i1 || VT == MVT::v16i1))
13093 return PerformORCombine_i1(N, DCI, Subtarget);
13095 APInt SplatBits, SplatUndef;
13096 unsigned SplatBitSize;
13098 if (BVN && (Subtarget->hasNEON() || Subtarget->hasMVEIntegerOps()) &&
13099 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
13100 if (SplatBitSize == 8 || SplatBitSize == 16 || SplatBitSize == 32 ||
13101 SplatBitSize == 64) {
13104 isVMOVModifiedImm(SplatBits.getZExtValue(), SplatUndef.getZExtValue(),
13105 SplatBitSize, DAG, dl, VorrVT, VT, OtherModImm);
13106 if (Val.getNode()) {
13108 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
13109 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
13110 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
13115 if (!Subtarget->isThumb1Only()) {
13116 // fold (or (select cc, 0, c), x) -> (select cc, x, (or x, c))
13117 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
13119 if (SDValue Result = PerformORCombineToSMULWBT(N, DCI, Subtarget))
13123 SDValue N0 = N->getOperand(0);
13124 SDValue N1 = N->getOperand(1);
13126 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
13127 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
13128 DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
13130 // The code below optimizes (or (and X, Y), Z).
13131 // The AND operand needs to have a single user to make these optimizations profitable.
13133 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
13137 unsigned SplatBitSize;
13140 APInt SplatBits0, SplatBits1;
13141 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
13142 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
13143 // Ensure that the second operands of both ands are constants
13144 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
13145 HasAnyUndefs) && !HasAnyUndefs) {
13146 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
13147 HasAnyUndefs) && !HasAnyUndefs) {
13148 // Ensure that the bit width of the constants are the same and that
13149 // the splat arguments are logical inverses as per the pattern we
13150 // are trying to simplify.
13151 if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
13152 SplatBits0 == ~SplatBits1) {
13153 // Canonicalize the vector type to make instruction selection simpler.
13155 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
13156 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
13159 N1->getOperand(0));
13160 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
13166 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when reasonable.
13168 if (N0.getOpcode() == ISD::AND && N0.hasOneUse()) {
13169 if (SDValue Res = PerformORCombineToBFI(N, DCI, Subtarget))
13173 if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
13179 static SDValue PerformXORCombine(SDNode *N,
13180 TargetLowering::DAGCombinerInfo &DCI,
13181 const ARMSubtarget *Subtarget) {
13182 EVT VT = N->getValueType(0);
13183 SelectionDAG &DAG = DCI.DAG;
13185 if(!DAG.getTargetLoweringInfo().isTypeLegal(VT))
13188 if (!Subtarget->isThumb1Only()) {
13189 // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
13190 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
13193 if (SDValue Result = PerformSHLSimplify(N, DCI, Subtarget))
13197 if (Subtarget->hasMVEIntegerOps()) {
13198 // fold (xor(vcmp/z, 1)) into a vcmp with the opposite condition.
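// For example, (xor (vcmp ne, a, b), <all-true predicate>) becomes (vcmp eq, a, b).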
13199 SDValue N0 = N->getOperand(0);
13200 SDValue N1 = N->getOperand(1);
13201 const TargetLowering *TLI = Subtarget->getTargetLowering();
13202 if (TLI->isConstTrueVal(N1.getNode()) &&
13203 (N0->getOpcode() == ARMISD::VCMP || N0->getOpcode() == ARMISD::VCMPZ)) {
13204 if (CanInvertMVEVCMP(N0)) {
13206 ARMCC::CondCodes CC = ARMCC::getOppositeCondition(getVCMPCondCode(N0));
13208 SmallVector<SDValue, 4> Ops;
13209 Ops.push_back(N0->getOperand(0));
13210 if (N0->getOpcode() == ARMISD::VCMP)
13211 Ops.push_back(N0->getOperand(1));
13212 Ops.push_back(DCI.DAG.getConstant(CC, DL, MVT::i32));
13213 return DCI.DAG.getNode(N0->getOpcode(), DL, N0->getValueType(0), Ops);
13221 // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it,
13222 // and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and
13223 // their position in "to" (Rd).
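// For example, for a BFI whose mask operand is 0xffff00ff and whose inserted value
// is (srl Rn, 7), ToMask becomes 0x0000ff00 (the destination bits written) and
// FromMask becomes 0x00007f80 (the source bits consumed, accounting for the shift).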
13224 static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
13225 assert(N->getOpcode() == ARMISD::BFI);
13227 SDValue From = N->getOperand(1);
13228 ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
13229 FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());
13231 // If the Base came from a SHR #C, we can deduce that it is really testing bit
13232 // #C in the base of the SHR.
13233 if (From->getOpcode() == ISD::SRL &&
13234 isa<ConstantSDNode>(From->getOperand(1))) {
13235 APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
13236 assert(Shift.getLimitedValue() < 32 && "Shift too large!");
13237 FromMask <<= Shift.getLimitedValue(31);
13238 From = From->getOperand(0);
13244 // If A and B contain one contiguous set of bits, does A | B equal their concatenation A . B (A's bits sitting directly above B's)?
13246 // Neither A nor B may be zero.
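// For example, A = 0b111000 and B = 0b000111 concatenate properly: the lowest set
// bit of A (bit 3) sits exactly one position above the highest set bit of B (bit 2).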
13247 static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
13248 unsigned LastActiveBitInA = A.countTrailingZeros();
13249 unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
13250 return LastActiveBitInA - 1 == FirstActiveBitInB;
13253 static SDValue FindBFIToCombineWith(SDNode *N) {
13254 // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can combine with, if one exists.
13256 APInt ToMask, FromMask;
13257 SDValue From = ParseBFI(N, ToMask, FromMask);
13258 SDValue To = N->getOperand(0);
13260 // Now check for a compatible BFI to merge with. We can pass through BFIs that
13261 // aren't compatible, but not if they set the same bit in their destination as
13262 // we do (or that of any BFI we're going to combine with).
13264 APInt CombinedToMask = ToMask;
13265 while (V.getOpcode() == ARMISD::BFI) {
13266 APInt NewToMask, NewFromMask;
13267 SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
13268 if (NewFrom != From) {
13269 // This BFI has a different base. Keep going.
13270 CombinedToMask |= NewToMask;
13271 V = V.getOperand(0);
13275 // Do the written bits conflict with any we've seen so far?
13276 if ((NewToMask & CombinedToMask).getBoolValue())
13277 // Conflicting bits - bail out because going further is unsafe.
13280 // Are the new bits contiguous when combined with the old bits?
13281 if (BitsProperlyConcatenate(ToMask, NewToMask) &&
13282 BitsProperlyConcatenate(FromMask, NewFromMask))
13284 if (BitsProperlyConcatenate(NewToMask, ToMask) &&
13285 BitsProperlyConcatenate(NewFromMask, FromMask))
13288 // We've seen a write to some bits, so track it.
13289 CombinedToMask |= NewToMask;
13291 V = V.getOperand(0);
13297 static SDValue PerformBFICombine(SDNode *N,
13298 TargetLowering::DAGCombinerInfo &DCI) {
13299 SDValue N1 = N->getOperand(1);
13300 if (N1.getOpcode() == ISD::AND) {
13301 // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
13302 // the bits being cleared by the AND are not demanded by the BFI.
13303 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
13306 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
13307 unsigned LSB = countTrailingZeros(~InvMask);
13308 unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
13310 static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
13311 "undefined behavior");
13312 unsigned Mask = (1u << Width) - 1;
13313 unsigned Mask2 = N11C->getZExtValue();
13314 if ((Mask & (~Mask2)) == 0)
13315 return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
13316 N->getOperand(0), N1.getOperand(0),
13318 } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
13319 // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
13320 // Keep track of any consecutive bits set that all come from the same base
13321 // value. We can combine these together into a single BFI.
13322 SDValue CombineBFI = FindBFIToCombineWith(N);
13323 if (CombineBFI == SDValue())
13326 // We've found a BFI.
13327 APInt ToMask1, FromMask1;
13328 SDValue From1 = ParseBFI(N, ToMask1, FromMask1);
13330 APInt ToMask2, FromMask2;
13331 SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
13332 assert(From1 == From2);
13335 // First, unlink CombineBFI.
13336 DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
13337 // Then create a new BFI, combining the two together.
13338 APInt NewFromMask = FromMask1 | FromMask2;
13339 APInt NewToMask = ToMask1 | ToMask2;
13341 EVT VT = N->getValueType(0);
13344 if (NewFromMask[0] == 0)
13345 From1 = DCI.DAG.getNode(
13346 ISD::SRL, dl, VT, From1,
13347 DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
13348 return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
13349 DCI.DAG.getConstant(~NewToMask, dl, VT));
13354 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for
13355 /// ARMISD::VMOVRRD.
13356 static SDValue PerformVMOVRRDCombine(SDNode *N,
13357 TargetLowering::DAGCombinerInfo &DCI,
13358 const ARMSubtarget *Subtarget) {
13359 // vmovrrd(vmovdrr x, y) -> x,y
13360 SDValue InDouble = N->getOperand(0);
13361 if (InDouble.getOpcode() == ARMISD::VMOVDRR && Subtarget->hasFP64())
13362 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
13364 // vmovrrd(load f64) -> (load i32), (load i32)
13365 SDNode *InNode = InDouble.getNode();
13366 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
13367 InNode->getValueType(0) == MVT::f64 &&
13368 InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
13369 !cast<LoadSDNode>(InNode)->isVolatile()) {
13370 // TODO: Should this be done for non-FrameIndex operands?
13371 LoadSDNode *LD = cast<LoadSDNode>(InNode);
13373 SelectionDAG &DAG = DCI.DAG;
13375 SDValue BasePtr = LD->getBasePtr();
13377 DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
13378 LD->getAlignment(), LD->getMemOperand()->getFlags());
13380 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
13381 DAG.getConstant(4, DL, MVT::i32));
13383 SDValue NewLD2 = DAG.getLoad(MVT::i32, DL, LD->getChain(), OffsetPtr,
13384 LD->getPointerInfo().getWithOffset(4),
13385 std::min(4U, LD->getAlignment()),
13386 LD->getMemOperand()->getFlags());
13388 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
13389 if (DCI.DAG.getDataLayout().isBigEndian())
13390 std::swap (NewLD1, NewLD2);
13391 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
13398 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for
13399 /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands.
13400 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
13401 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
13402 SDValue Op0 = N->getOperand(0);
13403 SDValue Op1 = N->getOperand(1);
13404 if (Op0.getOpcode() == ISD::BITCAST)
13405 Op0 = Op0.getOperand(0);
13406 if (Op1.getOpcode() == ISD::BITCAST)
13407 Op1 = Op1.getOperand(0);
13408 if (Op0.getOpcode() == ARMISD::VMOVRRD &&
13409 Op0.getNode() == Op1.getNode() &&
13410 Op0.getResNo() == 0 && Op1.getResNo() == 1)
13411 return DAG.getNode(ISD::BITCAST, SDLoc(N),
13412 N->getValueType(0), Op0.getOperand(0));
13416 static SDValue PerformVMOVhrCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
13417 SDValue Op0 = N->getOperand(0);
13419 // VMOVhr (VMOVrh (X)) -> X
13420 if (Op0->getOpcode() == ARMISD::VMOVrh)
13421 return Op0->getOperand(0);
13423 // FullFP16: half values are passed in S-registers, and we don't
13424 // need any of the bitcasts and moves:
13426 // t2: f32,ch = CopyFromReg t0, Register:f32 %0
13427 // t5: i32 = bitcast t2
13428 // t18: f16 = ARMISD::VMOVhr t5
13429 if (Op0->getOpcode() == ISD::BITCAST) {
13430 SDValue Copy = Op0->getOperand(0);
13431 if (Copy.getValueType() == MVT::f32 &&
13432 Copy->getOpcode() == ISD::CopyFromReg) {
13433 SDValue Ops[] = {Copy->getOperand(0), Copy->getOperand(1)};
13435 DCI.DAG.getNode(ISD::CopyFromReg, SDLoc(N), N->getValueType(0), Ops);
13440 // fold (VMOVhr (load x)) -> (load (f16*)x)
13441 if (LoadSDNode *LN0 = dyn_cast<LoadSDNode>(Op0)) {
13442 if (LN0->hasOneUse() && LN0->isUnindexed() &&
13443 LN0->getMemoryVT() == MVT::i16) {
13445 DCI.DAG.getLoad(N->getValueType(0), SDLoc(N), LN0->getChain(),
13446 LN0->getBasePtr(), LN0->getMemOperand());
13447 DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0));
13448 DCI.DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Load.getValue(1));
13453 // Only the bottom 16 bits of the source register are used.
13454 APInt DemandedMask = APInt::getLowBitsSet(32, 16);
13455 const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
13456 if (TLI.SimplifyDemandedBits(Op0, DemandedMask, DCI))
13457 return SDValue(N, 0);
13462 static SDValue PerformVMOVrhCombine(SDNode *N,
13463 TargetLowering::DAGCombinerInfo &DCI) {
13464 SDValue N0 = N->getOperand(0);
13465 EVT VT = N->getValueType(0);
13467 // fold (VMOVrh (load x)) -> (zextload (i16*)x)
13468 if (ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse()) {
13469 LoadSDNode *LN0 = cast<LoadSDNode>(N0);
13472 DCI.DAG.getExtLoad(ISD::ZEXTLOAD, SDLoc(N), VT, LN0->getChain(),
13473 LN0->getBasePtr(), MVT::i16, LN0->getMemOperand());
13474 DCI.DAG.ReplaceAllUsesOfValueWith(SDValue(N, 0), Load.getValue(0));
13475 DCI.DAG.ReplaceAllUsesOfValueWith(N0.getValue(1), Load.getValue(1));
13479 // Fold VMOVrh(extract(x, n)) -> vgetlaneu(x, n)
13480 if (N0->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
13481 isa<ConstantSDNode>(N0->getOperand(1)))
13482 return DCI.DAG.getNode(ARMISD::VGETLANEu, SDLoc(N), VT, N0->getOperand(0),
13483 N0->getOperand(1));
13488 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
13489 /// are normal, non-volatile loads. If so, it is profitable to bitcast an
13490 /// i64 vector to have f64 elements, since the value can then be loaded
13491 /// directly into a VFP register.
13492 static bool hasNormalLoadOperand(SDNode *N) {
13493 unsigned NumElts = N->getValueType(0).getVectorNumElements();
13494 for (unsigned i = 0; i < NumElts; ++i) {
13495 SDNode *Elt = N->getOperand(i).getNode();
13496 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
13502 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
13503 /// ISD::BUILD_VECTOR.
13504 static SDValue PerformBUILD_VECTORCombine(SDNode *N,
13505 TargetLowering::DAGCombinerInfo &DCI,
13506 const ARMSubtarget *Subtarget) {
13507 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
13508 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value
13509 // into a pair of GPRs, which is fine when the value is used as a scalar,
13510 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
13511 SelectionDAG &DAG = DCI.DAG;
13512 if (N->getNumOperands() == 2)
13513 if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
13516 // Load i64 elements as f64 values so that type legalization does not split
13517 // them up into i32 values.
13518 EVT VT = N->getValueType(0);
13519 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
13522 SmallVector<SDValue, 8> Ops;
13523 unsigned NumElts = VT.getVectorNumElements();
13524 for (unsigned i = 0; i < NumElts; ++i) {
13525 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
13527 // Make the DAGCombiner fold the bitcast.
13528 DCI.AddToWorklist(V.getNode());
13530 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
13531 SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
13532 return DAG.getNode(ISD::BITCAST, dl, VT, BV);
13535 /// Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
13537 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
13538 // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
13539 // At that time, we may have inserted bitcasts from integer to float.
13540 // If these bitcasts have survived DAGCombine, change the lowering of this
13541 // BUILD_VECTOR into something more vector friendly, i.e., something that does not
13542 // force the use of floating point types.
13544 // Make sure we can change the type of the vector.
13545 // This is possible iff:
13546 // 1. The vector is only used in a bitcast to an integer type. I.e.,
13547 // 1.1. Vector is used only once.
13548 // 1.2. Use is a bit convert to an integer type.
13550 // 2. The size of its operands is 32 bits (64-bit operands are not legal).
13550 EVT VT = N->getValueType(0);
13551 EVT EltVT = VT.getVectorElementType();
13553 // Check 1.1. and 2.
13554 if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
13557 // By construction, the input type must be float.
13558 assert(EltVT == MVT::f32 && "Unexpected type!");
13561 SDNode *Use = *N->use_begin();
13562 if (Use->getOpcode() != ISD::BITCAST ||
13563 Use->getValueType(0).isFloatingPoint())
13566 // Check profitability.
13567 // The model is: if more than half of the relevant operands are bitcast from
13568 // i32, turn the build_vector into a sequence of insert_vector_elt.
13569 // Relevant operands are everything that is not statically
13570 // (i.e., at compile time) bitcast.
13571 unsigned NumOfBitCastedElts = 0;
13572 unsigned NumElts = VT.getVectorNumElements();
13573 unsigned NumOfRelevantElts = NumElts;
13574 for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
13575 SDValue Elt = N->getOperand(Idx);
13576 if (Elt->getOpcode() == ISD::BITCAST) {
13577 // Assume only bit cast to i32 will go away.
13578 if (Elt->getOperand(0).getValueType() == MVT::i32)
13579 ++NumOfBitCastedElts;
13580 } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
13581 // Constants are statically cast, thus do not count them as
13582 // relevant operands.
13583 --NumOfRelevantElts;
13586 // Check if more than half of the elements require a non-free bitcast.
13587 if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
13590 SelectionDAG &DAG = DCI.DAG;
13591 // Create the new vector type.
13592 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
13593 // Check if the type is legal.
13594 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13595 if (!TLI.isTypeLegal(VecVT))
13599 // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
13600 // => BITCAST INSERT_VECTOR_ELT
13601 // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
13602 // (BITCAST EN), N.
13603 SDValue Vec = DAG.getUNDEF(VecVT);
13605 for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
13606 SDValue V = N->getOperand(Idx);
13609 if (V.getOpcode() == ISD::BITCAST &&
13610 V->getOperand(0).getValueType() == MVT::i32)
13611 // Fold obvious case.
13612 V = V.getOperand(0);
13614 V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
13615 // Make the DAGCombiner fold the bitcasts.
13616 DCI.AddToWorklist(V.getNode());
13618 SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
13619 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
13621 Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
13622 // Make the DAGCombiner fold the bitcasts.
13623 DCI.AddToWorklist(Vec.getNode());
13628 PerformPREDICATE_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
13629 EVT VT = N->getValueType(0);
13630 SDValue Op = N->getOperand(0);
13633 // PREDICATE_CAST(PREDICATE_CAST(x)) == PREDICATE_CAST(x)
13634 if (Op->getOpcode() == ARMISD::PREDICATE_CAST) {
13635 // If the valuetypes are the same, we can remove the cast entirely.
13636 if (Op->getOperand(0).getValueType() == VT)
13637 return Op->getOperand(0);
13638 return DCI.DAG.getNode(ARMISD::PREDICATE_CAST, dl, VT, Op->getOperand(0));
13645 PerformVECTOR_REG_CASTCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
13646 const ARMSubtarget *ST) {
13647 EVT VT = N->getValueType(0);
13648 SDValue Op = N->getOperand(0);
13651 // Under Little endian, a VECTOR_REG_CAST is equivalent to a BITCAST
13652 if (ST->isLittle())
13653 return DCI.DAG.getNode(ISD::BITCAST, dl, VT, Op);
13655 // VECTOR_REG_CAST(VECTOR_REG_CAST(x)) == VECTOR_REG_CAST(x)
13656 if (Op->getOpcode() == ARMISD::VECTOR_REG_CAST) {
13657 // If the valuetypes are the same, we can remove the cast entirely.
13658 if (Op->getOperand(0).getValueType() == VT)
13659 return Op->getOperand(0);
13660 return DCI.DAG.getNode(ARMISD::VECTOR_REG_CAST, dl, VT, Op->getOperand(0));
13666 static SDValue PerformVCMPCombine(SDNode *N,
13667 TargetLowering::DAGCombinerInfo &DCI,
13668 const ARMSubtarget *Subtarget) {
13669 if (!Subtarget->hasMVEIntegerOps())
13672 EVT VT = N->getValueType(0);
13673 SDValue Op0 = N->getOperand(0);
13674 SDValue Op1 = N->getOperand(1);
13675 ARMCC::CondCodes Cond =
13676 (ARMCC::CondCodes)cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
13679 // vcmp X, 0, cc -> vcmpz X, cc
13680 if (isZeroVector(Op1))
13681 return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op0,
13684 unsigned SwappedCond = getSwappedCondition(Cond);
13685 if (isValidMVECond(SwappedCond, VT.isFloatingPoint())) {
13686 // vcmp 0, X, cc -> vcmpz X, reversed(cc)
13687 if (isZeroVector(Op0))
13688 return DCI.DAG.getNode(ARMISD::VCMPZ, dl, VT, Op1,
13689 DCI.DAG.getConstant(SwappedCond, dl, MVT::i32));
13690 // vcmp vdup(Y), X, cc -> vcmp X, vdup(Y), reversed(cc)
13691 if (Op0->getOpcode() == ARMISD::VDUP && Op1->getOpcode() != ARMISD::VDUP)
13692 return DCI.DAG.getNode(ARMISD::VCMP, dl, VT, Op1, Op0,
13693 DCI.DAG.getConstant(SwappedCond, dl, MVT::i32));
13699 /// PerformInsertEltCombine - Target-specific dag combine xforms for
13700 /// ISD::INSERT_VECTOR_ELT.
13701 static SDValue PerformInsertEltCombine(SDNode *N,
13702 TargetLowering::DAGCombinerInfo &DCI) {
13703 // Bitcast an i64 load inserted into a vector to f64.
13704 // Otherwise, the i64 value will be legalized to a pair of i32 values.
13705 EVT VT = N->getValueType(0);
13706 SDNode *Elt = N->getOperand(1).getNode();
13707 if (VT.getVectorElementType() != MVT::i64 ||
13708 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
13711 SelectionDAG &DAG = DCI.DAG;
13713 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
13714 VT.getVectorNumElements());
13715 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
13716 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
13717 // Make the DAGCombiner fold the bitcasts.
13718 DCI.AddToWorklist(Vec.getNode());
13719 DCI.AddToWorklist(V.getNode());
13720 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
13721 Vec, V, N->getOperand(2));
13722 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
13725 static SDValue PerformExtractEltCombine(SDNode *N,
13726 TargetLowering::DAGCombinerInfo &DCI) {
13727 SDValue Op0 = N->getOperand(0);
13728 EVT VT = N->getValueType(0);
13731 // extract (vdup x) -> x
13732 if (Op0->getOpcode() == ARMISD::VDUP) {
13733 SDValue X = Op0->getOperand(0);
13734 if (VT == MVT::f16 && X.getValueType() == MVT::i32)
13735 return DCI.DAG.getNode(ARMISD::VMOVhr, dl, VT, X);
13736 if (VT == MVT::i32 && X.getValueType() == MVT::f16)
13737 return DCI.DAG.getNode(ARMISD::VMOVrh, dl, VT, X);
13739 while (X.getValueType() != VT && X->getOpcode() == ISD::BITCAST)
13740 X = X->getOperand(0);
13741 if (X.getValueType() == VT)
13748 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
13749 /// ISD::VECTOR_SHUFFLE.
13750 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
13751 // The LLVM shufflevector instruction does not require the shuffle mask
13752 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
13753 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the
13754 // operands do not match the mask length, they are extended by concatenating
13755 // them with undef vectors. That is probably the right thing for other
13756 // targets, but for NEON it is better to concatenate two double-register
13757 // size vector operands into a single quad-register size vector. Do that
13758 // transformation here:
13759 // shuffle(concat(v1, undef), concat(v2, undef)) ->
13760 // shuffle(concat(v1, v2), undef)
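// For example, with v8i16 operands built from v4i16 halves, mask index 8 (lane 0 of
// the second concat) is remapped to index 4, where v2's lane 0 lives in concat(v1, v2).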
13761 SDValue Op0 = N->getOperand(0);
13762 SDValue Op1 = N->getOperand(1);
13763 if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
13764 Op1.getOpcode() != ISD::CONCAT_VECTORS ||
13765 Op0.getNumOperands() != 2 ||
13766 Op1.getNumOperands() != 2)
13768 SDValue Concat0Op1 = Op0.getOperand(1);
13769 SDValue Concat1Op1 = Op1.getOperand(1);
13770 if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
13772 // Skip the transformation if any of the types are illegal.
13773 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
13774 EVT VT = N->getValueType(0);
13775 if (!TLI.isTypeLegal(VT) ||
13776 !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
13777 !TLI.isTypeLegal(Concat1Op1.getValueType()))
13780 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
13781 Op0.getOperand(0), Op1.getOperand(0));
13782 // Translate the shuffle mask.
13783 SmallVector<int, 16> NewMask;
13784 unsigned NumElts = VT.getVectorNumElements();
13785 unsigned HalfElts = NumElts/2;
13786 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
13787 for (unsigned n = 0; n < NumElts; ++n) {
13788 int MaskElt = SVN->getMaskElt(n);
13790 if (MaskElt < (int)HalfElts)
13792 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
13793 NewElt = HalfElts + MaskElt - NumElts;
13794 NewMask.push_back(NewElt);
13796 return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
13797 DAG.getUNDEF(VT), NewMask);
13800 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
13801 /// NEON load/store intrinsics, and generic vector load/stores, to merge
13802 /// base address updates.
13803 /// For generic load/stores, the memory type is assumed to be a vector.
13804 /// The caller is assumed to have checked legality.
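// For example, a vld1.32 {d16}, [r0] followed by add r0, r0, #8 can instead be
// selected as the post-incrementing vld1.32 {d16}, [r0]!.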
13805 static SDValue CombineBaseUpdate(SDNode *N,
13806 TargetLowering::DAGCombinerInfo &DCI) {
13807 SelectionDAG &DAG = DCI.DAG;
13808 const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
13809 N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
13810 const bool isStore = N->getOpcode() == ISD::STORE;
13811 const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
13812 SDValue Addr = N->getOperand(AddrOpIdx);
13813 MemSDNode *MemN = cast<MemSDNode>(N);
13816 // Search for a use of the address operand that is an increment.
13817 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
13818 UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
13819 SDNode *User = *UI;
13820 if (User->getOpcode() != ISD::ADD ||
13821 UI.getUse().getResNo() != Addr.getResNo())
13824 // Check that the add is independent of the load/store. Otherwise, folding
13825 // it would create a cycle. We can avoid searching through Addr as it's a
13826 // predecessor to both.
13827 SmallPtrSet<const SDNode *, 32> Visited;
13828 SmallVector<const SDNode *, 16> Worklist;
13829 Visited.insert(Addr.getNode());
13830 Worklist.push_back(N);
13831 Worklist.push_back(User);
13832 if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
13833 SDNode::hasPredecessorHelper(User, Visited, Worklist))
13836 // Find the new opcode for the updating load/store.
13837 bool isLoadOp = true;
13838 bool isLaneOp = false;
13839 unsigned NewOpc = 0;
13840 unsigned NumVecs = 0;
13842 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
13844 default: llvm_unreachable("unexpected intrinsic for Neon base update");
13845 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD;
13846 NumVecs = 1; break;
13847 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD;
13848 NumVecs = 2; break;
13849 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD;
13850 NumVecs = 3; break;
13851 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD;
13852 NumVecs = 4; break;
13853 case Intrinsic::arm_neon_vld2dup:
13854 case Intrinsic::arm_neon_vld3dup:
13855 case Intrinsic::arm_neon_vld4dup:
13856 // TODO: Support updating VLDxDUP nodes. For now, we just skip
13857 // combining base updates for such intrinsics.
13859 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
13860 NumVecs = 2; isLaneOp = true; break;
13861 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
13862 NumVecs = 3; isLaneOp = true; break;
13863 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
13864 NumVecs = 4; isLaneOp = true; break;
13865 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD;
13866 NumVecs = 1; isLoadOp = false; break;
13867 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD;
13868 NumVecs = 2; isLoadOp = false; break;
13869 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD;
13870 NumVecs = 3; isLoadOp = false; break;
13871 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD;
13872 NumVecs = 4; isLoadOp = false; break;
13873 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
13874 NumVecs = 2; isLoadOp = false; isLaneOp = true; break;
13875 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
13876 NumVecs = 3; isLoadOp = false; isLaneOp = true; break;
13877 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
13878 NumVecs = 4; isLoadOp = false; isLaneOp = true; break;
13882 switch (N->getOpcode()) {
13883 default: llvm_unreachable("unexpected opcode for Neon base update");
13884 case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break;
13885 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
13886 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
13887 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
13888 case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD;
13889 NumVecs = 1; isLaneOp = false; break;
13890 case ISD::STORE: NewOpc = ARMISD::VST1_UPD;
13891 NumVecs = 1; isLaneOp = false; isLoadOp = false; break;
13895 // Find the size of memory referenced by the load/store.
13898 VecTy = N->getValueType(0);
13899 } else if (isIntrinsic) {
13900 VecTy = N->getOperand(AddrOpIdx+1).getValueType();
13902 assert(isStore && "Node has to be a load, a store, or an intrinsic!");
13903 VecTy = N->getOperand(1).getValueType();
13906 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
13908 NumBytes /= VecTy.getVectorNumElements();
13910 // If the increment is a constant, it must match the memory ref size.
13911 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
13912 ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
13913 if (NumBytes >= 3 * 16 && (!CInc || CInc->getZExtValue() != NumBytes)) {
13914 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
13915 // separate instructions that make it harder to use a non-constant update.
13919 // OK, we found an ADD we can fold into the base update.
13920 // Now, create a _UPD node, taking care of not breaking alignment.
13922 EVT AlignedVecTy = VecTy;
13923 unsigned Alignment = MemN->getAlignment();
13925 // If this is a less-than-standard-aligned load/store, change the type to
13926 // match the standard alignment.
13927 // The alignment is overlooked when selecting _UPD variants, and it's
13928 // easier to introduce bitcasts here than fix that.
13929 // There are 3 ways to get to this base-update combine:
13930 // - intrinsics: they are assumed to be properly aligned (to the standard
13931 // alignment of the memory type), so we don't need to do anything.
13932 // - ARMISD::VLDx nodes: they are only generated from the aforementioned
13933 // intrinsics, so, likewise, there's nothing to do.
13934 // - generic load/store instructions: the alignment is specified as an
13935 // explicit operand, rather than implicitly as the standard alignment
13936 // of the memory type (like the intrinsics). We need to change the
13937 // memory type to match the explicit alignment. That way, we don't
13938 // generate non-standard-aligned ARMISD::VLDx nodes.
13939 if (isa<LSBaseSDNode>(N)) {
13940 if (Alignment == 0)
13942 if (Alignment < VecTy.getScalarSizeInBits() / 8) {
13943 MVT EltTy = MVT::getIntegerVT(Alignment * 8);
13944 assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
13945 assert(!isLaneOp && "Unexpected generic load/store lane.");
13946 unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
13947 AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
13949 // Don't set an explicit alignment on regular load/stores that we want
13950 // to transform to VLD/VST 1_UPD nodes.
13951 // This matches the behavior of regular load/stores, which only get an
13952 // explicit alignment if the MMO alignment is larger than the standard
13953 // alignment of the memory type.
13954 // Intrinsics, however, always get an explicit alignment, set to the
13955 // alignment of the MMO.
13959 // Create the new updating load/store node.
13960 // First, create an SDVTList for the new updating node's results.
13962 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
13964 for (n = 0; n < NumResultVecs; ++n)
13965 Tys[n] = AlignedVecTy;
13966 Tys[n++] = MVT::i32;
13967 Tys[n] = MVT::Other;
13968 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));
13970 // Then, gather the new node's operands.
13971 SmallVector<SDValue, 8> Ops;
13972 Ops.push_back(N->getOperand(0)); // incoming chain
13973 Ops.push_back(N->getOperand(AddrOpIdx));
13974 Ops.push_back(Inc);
13976 if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
13977 // Try to match the intrinsic's signature
13978 Ops.push_back(StN->getValue());
13980 // Loads (and of course intrinsics) match the intrinsics' signature,
13981 // so just add all but the alignment operand.
13982 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i)
13983 Ops.push_back(N->getOperand(i));
13986 // For all node types, the alignment operand is always the last one.
13987 Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32));
13989 // If this is a non-standard-aligned STORE, the penultimate operand is the
13990 // stored value. Bitcast it to the aligned type.
13991 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
13992 SDValue &StVal = Ops[Ops.size()-2];
13993 StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal);
13996 EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy;
13997 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT,
13998 MemN->getMemOperand());
14000 // Update the uses.
14001 SmallVector<SDValue, 5> NewResults;
14002 for (unsigned i = 0; i < NumResultVecs; ++i)
14003 NewResults.push_back(SDValue(UpdN.getNode(), i));
14005 // If this is a non-standard-aligned LOAD, the first result is the loaded
14006 // value. Bitcast it to the expected result type.
14007 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
14008 SDValue &LdVal = NewResults[0];
14009 LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal);
14012 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
14013 DCI.CombineTo(N, NewResults);
14014 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
14021 static SDValue PerformVLDCombine(SDNode *N,
14022 TargetLowering::DAGCombinerInfo &DCI) {
14023 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
14026 return CombineBaseUpdate(N, DCI);
14029 static SDValue PerformMVEVLDCombine(SDNode *N,
14030 TargetLowering::DAGCombinerInfo &DCI) {
14031 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
14034 SelectionDAG &DAG = DCI.DAG;
14035 SDValue Addr = N->getOperand(2);
14036 MemSDNode *MemN = cast<MemSDNode>(N);
14039 // For the stores, where there are multiple intrinsic calls, we only actually want
14040 // to post-inc the last of them.
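// (An MVE vst4q, for instance, is expanded into four arm_mve_vst4q intrinsic calls
// with stage operands 0..3, which become VST40..VST43; only the stage-3 call should
// absorb the following pointer add.)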
14041 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
14042 if (IntNo == Intrinsic::arm_mve_vst2q &&
14043 cast<ConstantSDNode>(N->getOperand(5))->getZExtValue() != 1)
14045 if (IntNo == Intrinsic::arm_mve_vst4q &&
14046 cast<ConstantSDNode>(N->getOperand(7))->getZExtValue() != 3)
14049 // Search for a use of the address operand that is an increment.
14050 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
14051 UE = Addr.getNode()->use_end();
14053 SDNode *User = *UI;
14054 if (User->getOpcode() != ISD::ADD ||
14055 UI.getUse().getResNo() != Addr.getResNo())
14058 // Check that the add is independent of the load/store. Otherwise, folding
14059 // it would create a cycle. We can avoid searching through Addr as it's a
14060 // predecessor to both.
14061 SmallPtrSet<const SDNode *, 32> Visited;
14062 SmallVector<const SDNode *, 16> Worklist;
14063 Visited.insert(Addr.getNode());
14064 Worklist.push_back(N);
14065 Worklist.push_back(User);
14066 if (SDNode::hasPredecessorHelper(N, Visited, Worklist) ||
14067 SDNode::hasPredecessorHelper(User, Visited, Worklist))
14070 // Find the new opcode for the updating load/store.
14071 bool isLoadOp = true;
14072 unsigned NewOpc = 0;
14073 unsigned NumVecs = 0;
14076 llvm_unreachable("unexpected intrinsic for MVE VLDn combine");
14077 case Intrinsic::arm_mve_vld2q:
14078 NewOpc = ARMISD::VLD2_UPD;
14081 case Intrinsic::arm_mve_vld4q:
14082 NewOpc = ARMISD::VLD4_UPD;
14085 case Intrinsic::arm_mve_vst2q:
14086 NewOpc = ARMISD::VST2_UPD;
14090 case Intrinsic::arm_mve_vst4q:
14091 NewOpc = ARMISD::VST4_UPD;
14097 // Find the size of memory referenced by the load/store.
14100 VecTy = N->getValueType(0);
14102 VecTy = N->getOperand(3).getValueType();
14105 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
14107 // If the increment is a constant, it must match the memory ref size.
14108 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
14109 ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode());
14110 if (!CInc || CInc->getZExtValue() != NumBytes)
14113 // Create the new updating load/store node.
14114 // First, create an SDVTList for the new updating node's results.
14116 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
14118 for (n = 0; n < NumResultVecs; ++n)
14120 Tys[n++] = MVT::i32;
14121 Tys[n] = MVT::Other;
14122 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2));
14124 // Then, gather the new node's operands.
14125 SmallVector<SDValue, 8> Ops;
14126 Ops.push_back(N->getOperand(0)); // incoming chain
14127 Ops.push_back(N->getOperand(2)); // ptr
14128 Ops.push_back(Inc);
14130 for (unsigned i = 3; i < N->getNumOperands(); ++i)
14131 Ops.push_back(N->getOperand(i));
14133 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, VecTy,
14134 MemN->getMemOperand());
14136 // Update the uses.
14137 SmallVector<SDValue, 5> NewResults;
14138 for (unsigned i = 0; i < NumResultVecs; ++i)
14139 NewResults.push_back(SDValue(UpdN.getNode(), i));
    NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); // chain
    DCI.CombineTo(N, NewResults);
    DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
    break;
  }

  return SDValue();
}
/// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
/// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
/// are also VDUPLANEs. If so, combine them to a vldN-dup operation and
/// return true.
static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);
  // vldN-dup instructions only support 64-bit vectors for N > 1.
  if (!VT.is64BitVector())
    return false;

  // Check if the VDUPLANE operand is a vldN-dup intrinsic.
  SDNode *VLD = N->getOperand(0).getNode();
  if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
    return false;
  unsigned NumVecs = 0;
  unsigned NewOpc = 0;
  unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
  if (IntNo == Intrinsic::arm_neon_vld2lane) {
    NumVecs = 2;
    NewOpc = ARMISD::VLD2DUP;
  } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
    NumVecs = 3;
    NewOpc = ARMISD::VLD3DUP;
  } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
    NumVecs = 4;
    NewOpc = ARMISD::VLD4DUP;
  } else {
    return false;
  }
14182 // First check that all the vldN-lane uses are VDUPLANEs and that the lane
14183 // numbers match the load.
14184 unsigned VLDLaneNo =
14185 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
14186 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
14188 // Ignore uses of the chain result.
14189 if (UI.getUse().getResNo() == NumVecs)
14191 SDNode *User = *UI;
14192 if (User->getOpcode() != ARMISD::VDUPLANE ||
14193 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
  // Create the vldN-dup node.
  EVT Tys[5];
  unsigned n;
  for (n = 0; n < NumVecs; ++n)
    Tys[n] = VT;
  Tys[n] = MVT::Other;
  SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs + 1));
14204 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
14205 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
14206 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
14207 Ops, VLDMemInt->getMemoryVT(),
14208 VLDMemInt->getMemOperand());
14210 // Update the uses.
14211 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
14213 unsigned ResNo = UI.getUse().getResNo();
14214 // Ignore uses of the chain result.
14215 if (ResNo == NumVecs)
14217 SDNode *User = *UI;
14218 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
14221 // Now the vldN-lane intrinsic is dead except for its chain result.
14222 // Update uses of the chain.
14223 std::vector<SDValue> VLDDupResults;
14224 for (unsigned n = 0; n < NumVecs; ++n)
14225 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
14226 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
14227 DCI.CombineTo(VLD, VLDDupResults);
14232 /// PerformVDUPLANECombine - Target-specific dag combine xforms for
14233 /// ARMISD::VDUPLANE.
14234 static SDValue PerformVDUPLANECombine(SDNode *N,
14235 TargetLowering::DAGCombinerInfo &DCI,
14236 const ARMSubtarget *Subtarget) {
14237 SDValue Op = N->getOperand(0);
14238 EVT VT = N->getValueType(0);
14240 // On MVE, we just convert the VDUPLANE to a VDUP with an extract.
14241 if (Subtarget->hasMVEIntegerOps()) {
14242 EVT ExtractVT = VT.getVectorElementType();
14243 // We need to ensure we are creating a legal type.
14244 if (!DCI.DAG.getTargetLoweringInfo().isTypeLegal(ExtractVT))
14245 ExtractVT = MVT::i32;
14246 SDValue Extract = DCI.DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), ExtractVT,
14247 N->getOperand(0), N->getOperand(1));
14248 return DCI.DAG.getNode(ARMISD::VDUP, SDLoc(N), VT, Extract);
14251 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
14252 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
14253 if (CombineVLDDUP(N, DCI))
14254 return SDValue(N, 0);
14256 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
14257 // redundant. Ignore bit_converts for now; element sizes are checked below.
14258 while (Op.getOpcode() == ISD::BITCAST)
14259 Op = Op.getOperand(0);
14260 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
14263 // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
14264 unsigned EltSize = Op.getScalarValueSizeInBits();
14265 // The canonical VMOV for a zero vector uses a 32-bit element size.
  unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  unsigned EltBits;
  if (ARM_AM::decodeVMOVModImm(Imm, EltBits) == 0)
    EltSize = 8;
  if (EltSize > VT.getScalarSizeInBits())
    return SDValue();

  return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
}
14276 /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
14277 static SDValue PerformVDUPCombine(SDNode *N,
14278 TargetLowering::DAGCombinerInfo &DCI,
14279 const ARMSubtarget *Subtarget) {
14280 SelectionDAG &DAG = DCI.DAG;
  SDValue Op = N->getOperand(0);
  SDLoc dl(N);
14284 if (Subtarget->hasMVEIntegerOps()) {
14285 // Convert VDUP f32 -> VDUP BITCAST i32 under MVE, as we know the value will
14286 // need to come from a GPR.
14287 if (Op.getValueType() == MVT::f32)
14288 return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0),
14289 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Op));
14290 else if (Op.getValueType() == MVT::f16)
14291 return DCI.DAG.getNode(ARMISD::VDUP, dl, N->getValueType(0),
14292 DAG.getNode(ARMISD::VMOVrh, dl, MVT::i32, Op));
14295 if (!Subtarget->hasNEON())
14298 // Match VDUP(LOAD) -> VLD1DUP.
14299 // We match this pattern here rather than waiting for isel because the
14300 // transform is only legal for unindexed loads.
14301 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
14302 if (LD && Op.hasOneUse() && LD->isUnindexed() &&
14303 LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
14304 SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1),
14305 DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) };
14306 SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
14307 SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys,
14308 Ops, LD->getMemoryVT(),
14309 LD->getMemOperand());
14310 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1));
14317 static SDValue PerformLOADCombine(SDNode *N,
14318 TargetLowering::DAGCombinerInfo &DCI) {
14319 EVT VT = N->getValueType(0);
14321 // If this is a legal vector load, try to combine it into a VLD1_UPD.
14322 if (ISD::isNormalLoad(N) && VT.isVector() &&
14323 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
14324 return CombineBaseUpdate(N, DCI);
14329 // Optimize trunc store (of multiple scalars) to shuffle and store. First,
14330 // pack all of the elements in one place. Next, store to memory in fewer
14332 static SDValue PerformTruncatingStoreCombine(StoreSDNode *St,
14333 SelectionDAG &DAG) {
14334 SDValue StVal = St->getValue();
14335 EVT VT = StVal.getValueType();
14336 if (!St->isTruncatingStore() || !VT.isVector())
14338 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14339 EVT StVT = St->getMemoryVT();
14340 unsigned NumElems = VT.getVectorNumElements();
14341 assert(StVT != VT && "Cannot truncate to the same type");
14342 unsigned FromEltSz = VT.getScalarSizeInBits();
14343 unsigned ToEltSz = StVT.getScalarSizeInBits();
14345 // From, To sizes and ElemCount must be pow of two
14346 if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz))
14349 // We are going to use the original vector elt for storing.
14350 // Accumulated smaller vector elements must be a multiple of the store size.
14351 if (0 != (NumElems * FromEltSz) % ToEltSz)
14354 unsigned SizeRatio = FromEltSz / ToEltSz;
14355 assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
14357 // Create a type on which we perform the shuffle.
14358 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
14359 NumElems * SizeRatio);
  assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

  SDLoc DL(St);
14363 SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
14364 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
14365 for (unsigned i = 0; i < NumElems; ++i)
    ShuffleVec[i] = DAG.getDataLayout().isBigEndian() ? (i + 1) * SizeRatio - 1
                                                      : i * SizeRatio;
14369 // Can't shuffle using an illegal type.
14370 if (!TLI.isTypeLegal(WideVecVT))
14373 SDValue Shuff = DAG.getVectorShuffle(
14374 WideVecVT, DL, WideVec, DAG.getUNDEF(WideVec.getValueType()), ShuffleVec);
14375 // At this point all of the data is stored at the bottom of the
14376 // register. We now need to save it to mem.
14378 // Find the largest store unit
14379 MVT StoreType = MVT::i8;
14380 for (MVT Tp : MVT::integer_valuetypes()) {
    if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
      StoreType = Tp;
  }
14384 // Didn't find a legal store type.
14385 if (!TLI.isTypeLegal(StoreType))
14388 // Bitcast the original vector into a vector of store-size units
14390 EVT::getVectorVT(*DAG.getContext(), StoreType,
14391 VT.getSizeInBits() / EVT(StoreType).getSizeInBits());
14392 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
14393 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
14394 SmallVector<SDValue, 8> Chains;
14395 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
14396 TLI.getPointerTy(DAG.getDataLayout()));
14397 SDValue BasePtr = St->getBasePtr();
14399 // Perform one or more big stores into memory.
14400 unsigned E = (ToEltSz * NumElems) / StoreType.getSizeInBits();
14401 for (unsigned I = 0; I < E; I++) {
14402 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreType,
14403 ShuffWide, DAG.getIntPtrConstant(I, DL));
14405 DAG.getStore(St->getChain(), DL, SubVec, BasePtr, St->getPointerInfo(),
14406 St->getAlignment(), St->getMemOperand()->getFlags());
14408 DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr, Increment);
14409 Chains.push_back(Ch);
14411 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
// Try taking a single vector store from a truncate (which would otherwise turn
// into an expensive buildvector) and splitting it into a series of narrowing
// stores.
14417 static SDValue PerformSplittingToNarrowingStores(StoreSDNode *St,
14418 SelectionDAG &DAG) {
14419 if (!St->isSimple() || St->isTruncatingStore() || !St->isUnindexed())
14421 SDValue Trunc = St->getValue();
14422 if (Trunc->getOpcode() != ISD::TRUNCATE && Trunc->getOpcode() != ISD::FP_ROUND)
14424 EVT FromVT = Trunc->getOperand(0).getValueType();
14425 EVT ToVT = Trunc.getValueType();
14426 if (!ToVT.isVector())
14428 assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
14429 EVT ToEltVT = ToVT.getVectorElementType();
14430 EVT FromEltVT = FromVT.getVectorElementType();
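  // Pick how many lanes go into each narrowed slice so that the source
  // subvector extracted for each split store is a full 128-bit vector
  // (4 x 32-bit or 8 x 16-bit lanes).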
14432 unsigned NumElements = 0;
14433 if (FromEltVT == MVT::i32 && (ToEltVT == MVT::i16 || ToEltVT == MVT::i8))
14435 if (FromEltVT == MVT::i16 && ToEltVT == MVT::i8)
14437 if (FromEltVT == MVT::f32 && ToEltVT == MVT::f16)
14439 if (NumElements == 0 ||
14440 (FromEltVT != MVT::f32 && FromVT.getVectorNumElements() == NumElements) ||
14441 FromVT.getVectorNumElements() % NumElements != 0)
  // Test if the Trunc will be convertible to a VMOVN with a shuffle, and if so
14445 // use the VMOVN over splitting the store. We are looking for patterns of:
14446 // !rev: 0 N 1 N+1 2 N+2 ...
14447 // rev: N 0 N+1 1 N+2 2 ...
14448 auto isVMOVNOriginalMask = [&](ArrayRef<int> M, bool rev) {
14449 unsigned NumElts = ToVT.getVectorNumElements();
14450 if (NumElts != M.size())
14453 unsigned Off0 = rev ? NumElts : 0;
14454 unsigned Off1 = rev ? 0 : NumElts;
14456 for (unsigned i = 0; i < NumElts; i += 2) {
14457 if (M[i] >= 0 && M[i] != (int)(Off0 + i / 2))
14459 if (M[i + 1] >= 0 && M[i + 1] != (int)(Off1 + i / 2))
14466 if (auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Trunc->getOperand(0)))
14467 if (isVMOVNOriginalMask(Shuffle->getMask(), false) ||
14468 isVMOVNOriginalMask(Shuffle->getMask(), true))
  LLVMContext &C = *DAG.getContext();
  SDLoc DL(St);
14473 // Details about the old store
14474 SDValue Ch = St->getChain();
14475 SDValue BasePtr = St->getBasePtr();
14476 Align Alignment = St->getOriginalAlign();
14477 MachineMemOperand::Flags MMOFlags = St->getMemOperand()->getFlags();
14478 AAMDNodes AAInfo = St->getAAInfo();
14480 // We split the store into slices of NumElements. fp16 trunc stores are vcvt
14481 // and then stored as truncating integer stores.
14482 EVT NewFromVT = EVT::getVectorVT(C, FromEltVT, NumElements);
14483 EVT NewToVT = EVT::getVectorVT(
14484 C, EVT::getIntegerVT(C, ToEltVT.getSizeInBits()), NumElements);
14486 SmallVector<SDValue, 4> Stores;
14487 for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
14488 unsigned NewOffset = i * NumElements * ToEltVT.getSizeInBits() / 8;
14489 SDValue NewPtr = DAG.getObjectPtrOffset(DL, BasePtr, NewOffset);
14492 DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, NewFromVT, Trunc.getOperand(0),
14493 DAG.getConstant(i * NumElements, DL, MVT::i32));
    if (ToEltVT == MVT::f16) {
      SDValue FPTrunc =
          DAG.getNode(ARMISD::VCVTN, DL, MVT::v8f16, DAG.getUNDEF(MVT::v8f16),
                      Extract, DAG.getConstant(0, DL, MVT::i32));
      Extract = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v4i32, FPTrunc);
    }
14502 SDValue Store = DAG.getTruncStore(
14503 Ch, DL, Extract, NewPtr, St->getPointerInfo().getWithOffset(NewOffset),
14504 NewToVT, Alignment.value(), MMOFlags, AAInfo);
14505 Stores.push_back(Store);
14507 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
14510 /// PerformSTORECombine - Target-specific dag combine xforms for
14512 static SDValue PerformSTORECombine(SDNode *N,
14513 TargetLowering::DAGCombinerInfo &DCI,
14514 const ARMSubtarget *Subtarget) {
14515 StoreSDNode *St = cast<StoreSDNode>(N);
14516 if (St->isVolatile())
14518 SDValue StVal = St->getValue();
14519 EVT VT = StVal.getValueType();
14521 if (Subtarget->hasNEON())
14522 if (SDValue Store = PerformTruncatingStoreCombine(St, DCI.DAG))
14525 if (Subtarget->hasMVEIntegerOps())
14526 if (SDValue NewToken = PerformSplittingToNarrowingStores(St, DCI.DAG))
14529 if (!ISD::isNormalStore(St))
14532 // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
14533 // ARM stores of arguments in the same cache line.
14534 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
14535 StVal.getNode()->hasOneUse()) {
14536 SelectionDAG &DAG = DCI.DAG;
    bool isBigEndian = DAG.getDataLayout().isBigEndian();
    SDLoc DL(St);
14539 SDValue BasePtr = St->getBasePtr();
14540 SDValue NewST1 = DAG.getStore(
14541 St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
14542 BasePtr, St->getPointerInfo(), St->getAlignment(),
14543 St->getMemOperand()->getFlags());
14545 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
14546 DAG.getConstant(4, DL, MVT::i32));
14547 return DAG.getStore(NewST1.getValue(0), DL,
14548 StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
14549 OffsetPtr, St->getPointerInfo(),
14550 std::min(4U, St->getAlignment() / 2),
14551 St->getMemOperand()->getFlags());
14554 if (StVal.getValueType() == MVT::i64 &&
14555 StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
14557 // Bitcast an i64 store extracted from a vector to f64.
14558 // Otherwise, the i64 value will be legalized to a pair of i32 values.
14559 SelectionDAG &DAG = DCI.DAG;
14561 SDValue IntVec = StVal.getOperand(0);
14562 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
14563 IntVec.getValueType().getVectorNumElements());
14564 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
14565 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14566 Vec, StVal.getOperand(1));
14568 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
14569 // Make the DAGCombiner fold the bitcasts.
14570 DCI.AddToWorklist(Vec.getNode());
14571 DCI.AddToWorklist(ExtElt.getNode());
14572 DCI.AddToWorklist(V.getNode());
14573 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
14574 St->getPointerInfo(), St->getAlignment(),
14575 St->getMemOperand()->getFlags(), St->getAAInfo());
14578 // If this is a legal vector store, try to combine it into a VST1_UPD.
14579 if (Subtarget->hasNEON() && ISD::isNormalStore(N) && VT.isVector() &&
14580 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
14581 return CombineBaseUpdate(N, DCI);
14586 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
14587 /// can replace combinations of VMUL and VCVT (floating-point to integer)
14588 /// when the VMUL has a constant operand that is a power of 2.
14590 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
14591 /// vmul.f32 d16, d17, d16
14592 /// vcvt.s32.f32 d16, d16
14594 /// vcvt.s32.f32 d16, d16, #3
14595 static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
14596 const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();
14600 SDValue Op = N->getOperand(0);
14601 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
14602 Op.getOpcode() != ISD::FMUL)
14605 SDValue ConstVec = Op->getOperand(1);
14606 if (!isa<BuildVectorSDNode>(ConstVec))
14609 MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
14610 uint32_t FloatBits = FloatTy.getSizeInBits();
14611 MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
14612 uint32_t IntBits = IntTy.getSizeInBits();
14613 unsigned NumLanes = Op.getValueType().getVectorNumElements();
14614 if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
14615 // These instructions only exist converting from f32 to i32. We can handle
14616 // smaller integers by generating an extra truncate, but larger ones would
14617 // be lossy. We also can't handle anything other than 2 or 4 lanes, since
    // these instructions only support v2i32/v4i32 types.
    return SDValue();
  }
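  // The multiplier must be a power-of-2 splat, 2^C with 0 < C <= 32, so that
  // the multiply can be folded into the fixed-point conversion's #fbits
  // operand.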
14622 BitVector UndefElements;
14623 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
14624 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
  if (C == -1 || C == 0 || C > 32)
    return SDValue();

  SDLoc dl(N);
14629 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
14630 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
14631 Intrinsic::arm_neon_vcvtfp2fxu;
14632 SDValue FixConv = DAG.getNode(
14633 ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
14634 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
14635 DAG.getConstant(C, dl, MVT::i32));
14637 if (IntBits < FloatBits)
14638 FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);
14643 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
14644 /// can replace combinations of VCVT (integer to floating-point) and VDIV
14645 /// when the VDIV has a constant operand that is a power of 2.
14647 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
14648 /// vcvt.f32.s32 d16, d16
14649 /// vdiv.f32 d16, d17, d16
14651 /// vcvt.f32.s32 d16, d16, #3
14652 static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
14653 const ARMSubtarget *Subtarget) {
  if (!Subtarget->hasNEON())
    return SDValue();
14657 SDValue Op = N->getOperand(0);
14658 unsigned OpOpcode = Op.getNode()->getOpcode();
14659 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
14660 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
14663 SDValue ConstVec = N->getOperand(1);
14664 if (!isa<BuildVectorSDNode>(ConstVec))
14667 MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
14668 uint32_t FloatBits = FloatTy.getSizeInBits();
14669 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
14670 uint32_t IntBits = IntTy.getSizeInBits();
14671 unsigned NumLanes = Op.getValueType().getVectorNumElements();
14672 if (FloatBits != 32 || IntBits > 32 || (NumLanes != 4 && NumLanes != 2)) {
14673 // These instructions only exist converting from i32 to f32. We can handle
14674 // smaller integers by generating an extra extend, but larger ones would
14675 // be lossy. We also can't handle anything other than 2 or 4 lanes, since
    // these instructions only support v2i32/v4i32 types.
    return SDValue();
  }
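  // The divisor must be a power-of-2 splat, 2^C with 0 < C <= 32, so that the
  // divide can be folded into the fixed-point conversion's #fbits operand.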
14680 BitVector UndefElements;
14681 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
14682 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
  if (C == -1 || C == 0 || C > 32)
    return SDValue();

  SDLoc dl(N);
14687 bool isSigned = OpOpcode == ISD::SINT_TO_FP;
14688 SDValue ConvInput = Op.getOperand(0);
14689 if (IntBits < FloatBits)
14690 ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
14691 dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
14694 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
14695 Intrinsic::arm_neon_vcvtfxu2fp;
14696 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
14698 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
14699 ConvInput, DAG.getConstant(C, dl, MVT::i32));
14702 static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG,
14703 const ARMSubtarget *ST) {
  if (!ST->hasMVEIntegerOps())
    return SDValue();
14707 assert(N->getOpcode() == ISD::VECREDUCE_ADD);
14708 EVT ResVT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDLoc dl(N);
14712 // We are looking for something that will have illegal types if left alone,
  // but that we can convert to a single instruction under MVE. For example
14714 // vecreduce_add(sext(A, v8i32)) => VADDV.s16 A
14716 // vecreduce_add(mul(zext(A, v16i32), zext(B, v16i32))) => VMLADAV.u8 A, B
14719 // VADDV u/s 8/16/32
14720 // VMLAV u/s 8/16/32
14722 // VMLALV u/s 16/32
  auto IsVADDV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes) {
    if (ResVT != RetTy || N0->getOpcode() != ExtendCode)
      return SDValue();
    SDValue A = N0->getOperand(0);
    if (llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
      return A;
    return SDValue();
  };
  auto IsVMLAV = [&](MVT RetTy, unsigned ExtendCode, ArrayRef<MVT> ExtTypes,
                     SDValue &A, SDValue &B) {
    if (ResVT != RetTy || N0->getOpcode() != ISD::MUL)
      return false;
    SDValue ExtA = N0->getOperand(0);
    SDValue ExtB = N0->getOperand(1);
    if (ExtA->getOpcode() != ExtendCode && ExtB->getOpcode() != ExtendCode)
      return false;
    A = ExtA->getOperand(0);
    B = ExtB->getOperand(0);
    if (A.getValueType() == B.getValueType() &&
        llvm::any_of(ExtTypes, [&A](MVT Ty) { return A.getValueType() == Ty; }))
      return true;
    return false;
  };
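  // The 64-bit reductions produce their result as a pair of i32 values, so
  // glue the two halves back together into an i64 with a BUILD_PAIR.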
  auto Create64bitNode = [&](unsigned Opcode, ArrayRef<SDValue> Ops) {
    SDValue Node = DAG.getNode(Opcode, dl, {MVT::i32, MVT::i32}, Ops);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Node,
                       SDValue(Node.getNode(), 1));
  };
14753 if (SDValue A = IsVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}))
14754 return DAG.getNode(ARMISD::VADDVs, dl, ResVT, A);
14755 if (SDValue A = IsVADDV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}))
14756 return DAG.getNode(ARMISD::VADDVu, dl, ResVT, A);
14757 if (SDValue A = IsVADDV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v4i32}))
14758 return Create64bitNode(ARMISD::VADDLVs, {A});
14759 if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}))
14760 return Create64bitNode(ARMISD::VADDLVu, {A});
  SDValue A, B;
  if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B))
14764 return DAG.getNode(ARMISD::VMLAVs, dl, ResVT, A, B);
14765 if (IsVMLAV(MVT::i32, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B))
14766 return DAG.getNode(ARMISD::VMLAVu, dl, ResVT, A, B);
14767 if (IsVMLAV(MVT::i64, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B))
14768 return Create64bitNode(ARMISD::VMLALVs, {A, B});
14769 if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B))
    return Create64bitNode(ARMISD::VMLALVu, {A, B});

  return SDValue();
}
14774 static SDValue PerformVMOVNCombine(SDNode *N,
14775 TargetLowering::DAGCombinerInfo &DCI) {
14776 SDValue Op0 = N->getOperand(0);
14777 SDValue Op1 = N->getOperand(1);
14778 unsigned IsTop = N->getConstantOperandVal(2);
14780 // VMOVNt(c, VQMOVNb(a, b)) => VQMOVNt(c, b)
14781 // VMOVNb(c, VQMOVNb(a, b)) => VQMOVNb(c, b)
14782 if ((Op1->getOpcode() == ARMISD::VQMOVNs ||
14783 Op1->getOpcode() == ARMISD::VQMOVNu) &&
14784 Op1->getConstantOperandVal(2) == 0)
14785 return DCI.DAG.getNode(Op1->getOpcode(), SDLoc(Op1), N->getValueType(0),
14786 Op0, Op1->getOperand(1), N->getOperand(2));
14788 // Only the bottom lanes from Qm (Op1) and either the top or bottom lanes from
14789 // Qd (Op0) are demanded from a VMOVN, depending on whether we are inserting
14790 // into the top or bottom lanes.
14791 unsigned NumElts = N->getValueType(0).getVectorNumElements();
14792 APInt Op1DemandedElts = APInt::getSplat(NumElts, APInt::getLowBitsSet(2, 1));
14793 APInt Op0DemandedElts =
14794 IsTop ? Op1DemandedElts
14795 : APInt::getSplat(NumElts, APInt::getHighBitsSet(2, 1));
14797 APInt KnownUndef, KnownZero;
14798 const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
14799 if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, KnownUndef,
14801 return SDValue(N, 0);
14802 if (TLI.SimplifyDemandedVectorElts(Op1, Op1DemandedElts, KnownUndef,
14804 return SDValue(N, 0);
14809 static SDValue PerformVQMOVNCombine(SDNode *N,
14810 TargetLowering::DAGCombinerInfo &DCI) {
14811 SDValue Op0 = N->getOperand(0);
14812 unsigned IsTop = N->getConstantOperandVal(2);
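  // A VQMOVN only keeps the lanes of Qd (Op0) that it does not overwrite: the
  // bottom lanes when inserting into the top half, and the top lanes when
  // inserting into the bottom half.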
14814 unsigned NumElts = N->getValueType(0).getVectorNumElements();
14815 APInt Op0DemandedElts =
14816 APInt::getSplat(NumElts, IsTop ? APInt::getLowBitsSet(2, 1)
14817 : APInt::getHighBitsSet(2, 1));
14819 APInt KnownUndef, KnownZero;
14820 const TargetLowering &TLI = DCI.DAG.getTargetLoweringInfo();
14821 if (TLI.SimplifyDemandedVectorElts(Op0, Op0DemandedElts, KnownUndef,
14823 return SDValue(N, 0);
static SDValue PerformLongShiftCombine(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue Op0 = N->getOperand(0);
14830 SDValue Op1 = N->getOperand(1);
  // Turn X << -C -> X >> C and vice versa. The negative shifts can come up
14833 // uses of the intrinsics.
14834 if (auto C = dyn_cast<ConstantSDNode>(N->getOperand(2))) {
14835 int ShiftAmt = C->getSExtValue();
    if (ShiftAmt == 0) {
      SDValue Merge = DAG.getMergeValues({Op0, Op1}, DL);
      DAG.ReplaceAllUsesWith(N, Merge.getNode());
      return SDValue();
    }

    if (ShiftAmt >= -32 && ShiftAmt < 0) {
      unsigned NewOpcode =
          N->getOpcode() == ARMISD::LSLL ? ARMISD::LSRL : ARMISD::LSLL;
      SDValue NewShift = DAG.getNode(NewOpcode, DL, N->getVTList(), Op0, Op1,
                                     DAG.getConstant(-ShiftAmt, DL, MVT::i32));
      DAG.ReplaceAllUsesWith(N, NewShift.getNode());
      return NewShift;
    }
  }

  return SDValue();
}
14855 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
14856 SDValue ARMTargetLowering::PerformIntrinsicCombine(SDNode *N,
14857 DAGCombinerInfo &DCI) const {
14858 SelectionDAG &DAG = DCI.DAG;
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default:
    // Don't do anything for most intrinsics.
    break;
14865 // Vector shifts: check for immediate versions and lower them.
14866 // Note: This is done during DAG combining instead of DAG legalizing because
14867 // the build_vectors for 64-bit vector element shift counts are generally
14868 // not legal, and it is hard to see their values after they get legalized to
14869 // loads from a constant pool.
14870 case Intrinsic::arm_neon_vshifts:
14871 case Intrinsic::arm_neon_vshiftu:
14872 case Intrinsic::arm_neon_vrshifts:
14873 case Intrinsic::arm_neon_vrshiftu:
14874 case Intrinsic::arm_neon_vrshiftn:
14875 case Intrinsic::arm_neon_vqshifts:
14876 case Intrinsic::arm_neon_vqshiftu:
14877 case Intrinsic::arm_neon_vqshiftsu:
14878 case Intrinsic::arm_neon_vqshiftns:
14879 case Intrinsic::arm_neon_vqshiftnu:
14880 case Intrinsic::arm_neon_vqshiftnsu:
14881 case Intrinsic::arm_neon_vqrshiftns:
14882 case Intrinsic::arm_neon_vqrshiftnu:
14883 case Intrinsic::arm_neon_vqrshiftnsu: {
14884 EVT VT = N->getOperand(1).getValueType();
14886 unsigned VShiftOpc = 0;
14889 case Intrinsic::arm_neon_vshifts:
14890 case Intrinsic::arm_neon_vshiftu:
14891 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
14892 VShiftOpc = ARMISD::VSHLIMM;
14895 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
14896 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ? ARMISD::VSHRsIMM
14897 : ARMISD::VSHRuIMM);
14902 case Intrinsic::arm_neon_vrshifts:
14903 case Intrinsic::arm_neon_vrshiftu:
14904 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
14908 case Intrinsic::arm_neon_vqshifts:
14909 case Intrinsic::arm_neon_vqshiftu:
14910 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
14914 case Intrinsic::arm_neon_vqshiftsu:
14915 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
14917 llvm_unreachable("invalid shift count for vqshlu intrinsic");
14919 case Intrinsic::arm_neon_vrshiftn:
14920 case Intrinsic::arm_neon_vqshiftns:
14921 case Intrinsic::arm_neon_vqshiftnu:
14922 case Intrinsic::arm_neon_vqshiftnsu:
14923 case Intrinsic::arm_neon_vqrshiftns:
14924 case Intrinsic::arm_neon_vqrshiftnu:
14925 case Intrinsic::arm_neon_vqrshiftnsu:
14926 // Narrowing shifts require an immediate right shift.
14927 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
14929 llvm_unreachable("invalid shift count for narrowing vector shift "
14933 llvm_unreachable("unhandled vector shift");
14937 case Intrinsic::arm_neon_vshifts:
14938 case Intrinsic::arm_neon_vshiftu:
14939 // Opcode already set above.
14941 case Intrinsic::arm_neon_vrshifts:
14942 VShiftOpc = ARMISD::VRSHRsIMM;
14944 case Intrinsic::arm_neon_vrshiftu:
14945 VShiftOpc = ARMISD::VRSHRuIMM;
14947 case Intrinsic::arm_neon_vrshiftn:
14948 VShiftOpc = ARMISD::VRSHRNIMM;
14950 case Intrinsic::arm_neon_vqshifts:
14951 VShiftOpc = ARMISD::VQSHLsIMM;
14953 case Intrinsic::arm_neon_vqshiftu:
14954 VShiftOpc = ARMISD::VQSHLuIMM;
14956 case Intrinsic::arm_neon_vqshiftsu:
14957 VShiftOpc = ARMISD::VQSHLsuIMM;
14959 case Intrinsic::arm_neon_vqshiftns:
14960 VShiftOpc = ARMISD::VQSHRNsIMM;
14962 case Intrinsic::arm_neon_vqshiftnu:
14963 VShiftOpc = ARMISD::VQSHRNuIMM;
14965 case Intrinsic::arm_neon_vqshiftnsu:
14966 VShiftOpc = ARMISD::VQSHRNsuIMM;
14968 case Intrinsic::arm_neon_vqrshiftns:
14969 VShiftOpc = ARMISD::VQRSHRNsIMM;
14971 case Intrinsic::arm_neon_vqrshiftnu:
14972 VShiftOpc = ARMISD::VQRSHRNuIMM;
14974 case Intrinsic::arm_neon_vqrshiftnsu:
14975 VShiftOpc = ARMISD::VQRSHRNsuIMM;
14980 return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
14981 N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
14984 case Intrinsic::arm_neon_vshiftins: {
14985 EVT VT = N->getOperand(1).getValueType();
14987 unsigned VShiftOpc = 0;
14989 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
14990 VShiftOpc = ARMISD::VSLIIMM;
14991 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
14992 VShiftOpc = ARMISD::VSRIIMM;
14994 llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
14998 return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
14999 N->getOperand(1), N->getOperand(2),
15000 DAG.getConstant(Cnt, dl, MVT::i32));
15003 case Intrinsic::arm_neon_vqrshifts:
15004 case Intrinsic::arm_neon_vqrshiftu:
15005 // No immediate versions of these to check for.
15008 case Intrinsic::arm_mve_vqdmlah:
15009 case Intrinsic::arm_mve_vqdmlash:
15010 case Intrinsic::arm_mve_vqrdmlah:
15011 case Intrinsic::arm_mve_vqrdmlash:
15012 case Intrinsic::arm_mve_vmla_n_predicated:
15013 case Intrinsic::arm_mve_vmlas_n_predicated:
15014 case Intrinsic::arm_mve_vqdmlah_predicated:
15015 case Intrinsic::arm_mve_vqdmlash_predicated:
15016 case Intrinsic::arm_mve_vqrdmlah_predicated:
15017 case Intrinsic::arm_mve_vqrdmlash_predicated: {
15018 // These intrinsics all take an i32 scalar operand which is narrowed to the
15019 // size of a single lane of the vector type they return. So we don't need
15020 // any bits of that operand above that point, which allows us to eliminate
15022 unsigned BitWidth = N->getValueType(0).getScalarSizeInBits();
15023 APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth);
15024 if (SimplifyDemandedBits(N->getOperand(3), DemandedMask, DCI))
15029 case Intrinsic::arm_mve_minv:
15030 case Intrinsic::arm_mve_maxv:
15031 case Intrinsic::arm_mve_minav:
15032 case Intrinsic::arm_mve_maxav:
15033 case Intrinsic::arm_mve_minv_predicated:
15034 case Intrinsic::arm_mve_maxv_predicated:
15035 case Intrinsic::arm_mve_minav_predicated:
15036 case Intrinsic::arm_mve_maxav_predicated: {
15037 // These intrinsics all take an i32 scalar operand which is narrowed to the
15038 // size of a single lane of the vector type they take as the other input.
15039 unsigned BitWidth = N->getOperand(2)->getValueType(0).getScalarSizeInBits();
15040 APInt DemandedMask = APInt::getLowBitsSet(32, BitWidth);
15041 if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
15046 case Intrinsic::arm_mve_addv: {
15047 // Turn this intrinsic straight into the appropriate ARMISD::VADDV node,
    // which allows PerformADDVecReduce to turn it into VADDLV when possible.
15049 bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
15050 unsigned Opc = Unsigned ? ARMISD::VADDVu : ARMISD::VADDVs;
15051 return DAG.getNode(Opc, SDLoc(N), N->getVTList(), N->getOperand(1));
15054 case Intrinsic::arm_mve_addlv:
15055 case Intrinsic::arm_mve_addlv_predicated: {
15056 // Same for these, but ARMISD::VADDLV has to be followed by a BUILD_PAIR
15057 // which recombines the two outputs into an i64
15058 bool Unsigned = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
15059 unsigned Opc = IntNo == Intrinsic::arm_mve_addlv ?
15060 (Unsigned ? ARMISD::VADDLVu : ARMISD::VADDLVs) :
15061 (Unsigned ? ARMISD::VADDLVpu : ARMISD::VADDLVps);
15063 SmallVector<SDValue, 4> Ops;
15064 for (unsigned i = 1, e = N->getNumOperands(); i < e; i++)
15065 if (i != 2) // skip the unsigned flag
15066 Ops.push_back(N->getOperand(i));
15069 SDValue val = DAG.getNode(Opc, dl, {MVT::i32, MVT::i32}, Ops);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, val.getValue(0),
                       val.getValue(1));
  }
  }

  return SDValue();
}
15078 /// PerformShiftCombine - Checks for immediate versions of vector shifts and
15079 /// lowers them. As with the vector shift intrinsics, this is done during DAG
15080 /// combining instead of DAG legalizing because the build_vectors for 64-bit
15081 /// vector element shift counts are generally not legal, and it is hard to see
15082 /// their values after they get legalized to loads from a constant pool.
15083 static SDValue PerformShiftCombine(SDNode *N,
15084 TargetLowering::DAGCombinerInfo &DCI,
15085 const ARMSubtarget *ST) {
15086 SelectionDAG &DAG = DCI.DAG;
15087 EVT VT = N->getValueType(0);
15088 if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
15089 // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
15090 // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
15091 SDValue N1 = N->getOperand(1);
15092 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
15093 SDValue N0 = N->getOperand(0);
15094 if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
15095 DAG.MaskedValueIsZero(N0.getOperand(0),
15096 APInt::getHighBitsSet(32, 16)))
15097 return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
15101 if (ST->isThumb1Only() && N->getOpcode() == ISD::SHL && VT == MVT::i32 &&
15102 N->getOperand(0)->getOpcode() == ISD::AND &&
15103 N->getOperand(0)->hasOneUse()) {
15104 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
15106 // Look for the pattern (shl (and x, AndMask), ShiftAmt). This doesn't
15107 // usually show up because instcombine prefers to canonicalize it to
15108 // (and (shl x, ShiftAmt) (shl AndMask, ShiftAmt)), but the shift can come
15109 // out of GEP lowering in some cases.
15110 SDValue N0 = N->getOperand(0);
15111 ConstantSDNode *ShiftAmtNode = dyn_cast<ConstantSDNode>(N->getOperand(1));
15114 uint32_t ShiftAmt = static_cast<uint32_t>(ShiftAmtNode->getZExtValue());
15115 ConstantSDNode *AndMaskNode = dyn_cast<ConstantSDNode>(N0->getOperand(1));
15118 uint32_t AndMask = static_cast<uint32_t>(AndMaskNode->getZExtValue());
15119 // Don't transform uxtb/uxth.
15120 if (AndMask == 255 || AndMask == 65535)
15122 if (isMask_32(AndMask)) {
15123 uint32_t MaskedBits = countLeadingZeros(AndMask);
15124 if (MaskedBits > ShiftAmt) {
15126 SDValue SHL = DAG.getNode(ISD::SHL, DL, MVT::i32, N0->getOperand(0),
15127 DAG.getConstant(MaskedBits, DL, MVT::i32));
15128 return DAG.getNode(
15129 ISD::SRL, DL, MVT::i32, SHL,
15130 DAG.getConstant(MaskedBits - ShiftAmt, DL, MVT::i32));
15135 // Nothing to be done for scalar shifts.
15136 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15137 if (!VT.isVector() || !TLI.isTypeLegal(VT))
15139 if (ST->hasMVEIntegerOps() && VT == MVT::v2i64)
15144 switch (N->getOpcode()) {
15145 default: llvm_unreachable("unexpected shift opcode");
15148 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
15150 return DAG.getNode(ARMISD::VSHLIMM, dl, VT, N->getOperand(0),
15151 DAG.getConstant(Cnt, dl, MVT::i32));
15157 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
15158 unsigned VShiftOpc =
15159 (N->getOpcode() == ISD::SRA ? ARMISD::VSHRsIMM : ARMISD::VSHRuIMM);
15161 return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
15162 DAG.getConstant(Cnt, dl, MVT::i32));
15168 // Look for a sign/zero/fpextend extend of a larger than legal load. This can be
15169 // split into multiple extending loads, which are simpler to deal with than an
15170 // arbitrary extend. For fp extends we use an integer extending load and a VCVTL
15171 // to convert the type to an f32.
15172 static SDValue PerformSplittingToWideningLoad(SDNode *N, SelectionDAG &DAG) {
15173 SDValue N0 = N->getOperand(0);
15174 if (N0.getOpcode() != ISD::LOAD)
15176 LoadSDNode *LD = cast<LoadSDNode>(N0.getNode());
15177 if (!LD->isSimple() || !N0.hasOneUse() || LD->isIndexed() ||
15178 LD->getExtensionType() != ISD::NON_EXTLOAD)
15180 EVT FromVT = LD->getValueType(0);
15181 EVT ToVT = N->getValueType(0);
15182 if (!ToVT.isVector())
15184 assert(FromVT.getVectorNumElements() == ToVT.getVectorNumElements());
15185 EVT ToEltVT = ToVT.getVectorElementType();
15186 EVT FromEltVT = FromVT.getVectorElementType();
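  // Choose the number of lanes per split so that each widened partial load
  // produces a single 128-bit result (4 x 32-bit or 8 x 16-bit lanes).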
15188 unsigned NumElements = 0;
15189 if (ToEltVT == MVT::i32 && (FromEltVT == MVT::i16 || FromEltVT == MVT::i8))
15191 if (ToEltVT == MVT::i16 && FromEltVT == MVT::i8)
15193 if (ToEltVT == MVT::f32 && FromEltVT == MVT::f16)
15195 if (NumElements == 0 ||
15196 (FromEltVT != MVT::f16 && FromVT.getVectorNumElements() == NumElements) ||
15197 FromVT.getVectorNumElements() % NumElements != 0 ||
15198 !isPowerOf2_32(NumElements))
  LLVMContext &C = *DAG.getContext();
  SDLoc DL(LD);
15203 // Details about the old load
15204 SDValue Ch = LD->getChain();
15205 SDValue BasePtr = LD->getBasePtr();
15206 Align Alignment = LD->getOriginalAlign();
15207 MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
15208 AAMDNodes AAInfo = LD->getAAInfo();
15210 ISD::LoadExtType NewExtType =
15211 N->getOpcode() == ISD::SIGN_EXTEND ? ISD::SEXTLOAD : ISD::ZEXTLOAD;
15212 SDValue Offset = DAG.getUNDEF(BasePtr.getValueType());
15213 EVT NewFromVT = EVT::getVectorVT(
15214 C, EVT::getIntegerVT(C, FromEltVT.getScalarSizeInBits()), NumElements);
15215 EVT NewToVT = EVT::getVectorVT(
15216 C, EVT::getIntegerVT(C, ToEltVT.getScalarSizeInBits()), NumElements);
15218 SmallVector<SDValue, 4> Loads;
15219 SmallVector<SDValue, 4> Chains;
15220 for (unsigned i = 0; i < FromVT.getVectorNumElements() / NumElements; i++) {
15221 unsigned NewOffset = (i * NewFromVT.getSizeInBits()) / 8;
15222 SDValue NewPtr = DAG.getObjectPtrOffset(DL, BasePtr, NewOffset);
15225 DAG.getLoad(ISD::UNINDEXED, NewExtType, NewToVT, DL, Ch, NewPtr, Offset,
15226 LD->getPointerInfo().getWithOffset(NewOffset), NewFromVT,
15227 Alignment.value(), MMOFlags, AAInfo);
15228 Loads.push_back(NewLoad);
15229 Chains.push_back(SDValue(NewLoad.getNode(), 1));
  // The loaded f16 values need to be extended into their floating point type
  // (f32) with VCVTL nodes.
15233 if (FromEltVT == MVT::f16) {
15234 SmallVector<SDValue, 4> Extends;
15236 for (unsigned i = 0; i < Loads.size(); i++) {
15238 DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, MVT::v8f16, Loads[i]);
15239 SDValue FPExt = DAG.getNode(ARMISD::VCVTL, DL, MVT::v4f32, LoadBC,
15240 DAG.getConstant(0, DL, MVT::i32));
15241 Extends.push_back(FPExt);
15247 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
15248 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewChain);
15249 return DAG.getNode(ISD::CONCAT_VECTORS, DL, ToVT, Loads);
15252 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
15253 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
15254 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
15255 const ARMSubtarget *ST) {
15256 SDValue N0 = N->getOperand(0);
15258 // Check for sign- and zero-extensions of vector extract operations of 8- and
15259 // 16-bit vector elements. NEON and MVE support these directly. They are
15260 // handled during DAG combining because type legalization will promote them
15261 // to 32-bit types and it is messy to recognize the operations after that.
15262 if ((ST->hasNEON() || ST->hasMVEIntegerOps()) &&
15263 N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
15264 SDValue Vec = N0.getOperand(0);
15265 SDValue Lane = N0.getOperand(1);
15266 EVT VT = N->getValueType(0);
15267 EVT EltVT = N0.getValueType();
15268 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15270 if (VT == MVT::i32 &&
15271 (EltVT == MVT::i8 || EltVT == MVT::i16) &&
15272 TLI.isTypeLegal(Vec.getValueType()) &&
15273 isa<ConstantSDNode>(Lane)) {
15276 switch (N->getOpcode()) {
15277 default: llvm_unreachable("unexpected opcode");
15278 case ISD::SIGN_EXTEND:
15279 Opc = ARMISD::VGETLANEs;
15281 case ISD::ZERO_EXTEND:
15282 case ISD::ANY_EXTEND:
15283 Opc = ARMISD::VGETLANEu;
15286 return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
15290 if (ST->hasMVEIntegerOps())
15291 if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG))
15297 static SDValue PerformFPExtendCombine(SDNode *N, SelectionDAG &DAG,
15298 const ARMSubtarget *ST) {
15299 if (ST->hasMVEFloatOps())
15300 if (SDValue NewLoad = PerformSplittingToWideningLoad(N, DAG))
15306 /// PerformMinMaxCombine - Target-specific DAG combining for creating truncating
15308 static SDValue PerformMinMaxCombine(SDNode *N, SelectionDAG &DAG,
15309 const ARMSubtarget *ST) {
15310 EVT VT = N->getValueType(0);
15311 SDValue N0 = N->getOperand(0);
  if (!ST->hasMVEIntegerOps())
    return SDValue();

  if (VT != MVT::v4i32 && VT != MVT::v8i16)
    return SDValue();
15318 auto IsSignedSaturate = [&](SDNode *Min, SDNode *Max) {
15319 // Check one is a smin and the other is a smax
15320 if (Min->getOpcode() != ISD::SMIN)
15321 std::swap(Min, Max);
15322 if (Min->getOpcode() != ISD::SMIN || Max->getOpcode() != ISD::SMAX)
15326 if (VT == MVT::v4i32)
15327 SaturateC = APInt(32, (1 << 15) - 1, true);
15328 else //if (VT == MVT::v8i16)
15329 SaturateC = APInt(16, (1 << 7) - 1, true);
15332 if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) ||
15335 if (!ISD::isConstantSplatVector(Max->getOperand(1).getNode(), MaxC) ||
15336 MaxC != ~SaturateC)
15341 if (IsSignedSaturate(N, N0.getNode())) {
15344 if (VT == MVT::v4i32) {
15345 HalfVT = MVT::v8i16;
15346 ExtVT = MVT::v4i16;
15347 } else { // if (VT == MVT::v8i16)
15348 HalfVT = MVT::v16i8;
    // Create a VQMOVNB with undef top lanes, then sign-extend into the top
    // half. That extend will hopefully be removed if only the bottom bits are
    // demanded (through a truncating store, for example).
15356 DAG.getNode(ARMISD::VQMOVNs, DL, HalfVT, DAG.getUNDEF(HalfVT),
15357 N0->getOperand(0), DAG.getConstant(0, DL, MVT::i32));
15358 SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN);
15359 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, Bitcast,
15360 DAG.getValueType(ExtVT));
15363 auto IsUnsignedSaturate = [&](SDNode *Min) {
15364 // For unsigned, we just need to check for <= 0xffff
15365 if (Min->getOpcode() != ISD::UMIN)
15369 if (VT == MVT::v4i32)
15370 SaturateC = APInt(32, (1 << 16) - 1, true);
15371 else //if (VT == MVT::v8i16)
15372 SaturateC = APInt(16, (1 << 8) - 1, true);
15375 if (!ISD::isConstantSplatVector(Min->getOperand(1).getNode(), MinC) ||
15381 if (IsUnsignedSaturate(N)) {
15385 if (VT == MVT::v4i32) {
15386 HalfVT = MVT::v8i16;
15387 ExtConst = 0x0000FFFF;
15388 } else { //if (VT == MVT::v8i16)
15389 HalfVT = MVT::v16i8;
    // Create a VQMOVNB with undef top lanes, then zero-extend into the top
    // half with an AND. That extend will hopefully be removed if only the
    // bottom bits are demanded (through a truncating store, for example).
15397 DAG.getNode(ARMISD::VQMOVNu, DL, HalfVT, DAG.getUNDEF(HalfVT), N0,
15398 DAG.getConstant(0, DL, MVT::i32));
15399 SDValue Bitcast = DAG.getNode(ARMISD::VECTOR_REG_CAST, DL, VT, VQMOVN);
15400 return DAG.getNode(ISD::AND, DL, VT, Bitcast,
15401 DAG.getConstant(ExtConst, DL, VT));
15407 static const APInt *isPowerOf2Constant(SDValue V) {
15408 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15411 const APInt *CV = &C->getAPIntValue();
15412 return CV->isPowerOf2() ? CV : nullptr;
15415 SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const {
15416 // If we have a CMOV, OR and AND combination such as:
15421 // * CN is a single bit;
15422 // * All bits covered by CM are known zero in y
15424 // Then we can convert this into a sequence of BFI instructions. This will
15425 // always be a win if CM is a single bit, will always be no worse than the
15426 // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
15427 // three bits (due to the extra IT instruction).
15429 SDValue Op0 = CMOV->getOperand(0);
15430 SDValue Op1 = CMOV->getOperand(1);
15431 auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
15432 auto CC = CCNode->getAPIntValue().getLimitedValue();
15433 SDValue CmpZ = CMOV->getOperand(4);
15435 // The compare must be against zero.
15436 if (!isNullConstant(CmpZ->getOperand(1)))
15439 assert(CmpZ->getOpcode() == ARMISD::CMPZ);
15440 SDValue And = CmpZ->getOperand(0);
15441 if (And->getOpcode() != ISD::AND)
15443 const APInt *AndC = isPowerOf2Constant(And->getOperand(1));
15446 SDValue X = And->getOperand(0);
15448 if (CC == ARMCC::EQ) {
15449 // We're performing an "equal to zero" compare. Swap the operands so we
15450 // canonicalize on a "not equal to zero" compare.
15451 std::swap(Op0, Op1);
15453 assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
15456 if (Op1->getOpcode() != ISD::OR)
15459 ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
15462 SDValue Y = Op1->getOperand(0);
15467 // Now, is it profitable to continue?
15468 APInt OrCI = OrC->getAPIntValue();
15469 unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
15470 if (OrCI.countPopulation() > Heuristic)
15473 // Lastly, can we determine that the bits defined by OrCI
15475 KnownBits Known = DAG.computeKnownBits(Y);
15476 if ((OrCI & Known.Zero) != OrCI)
15479 // OK, we can do the combine.
15482 EVT VT = X.getValueType();
15483 unsigned BitInX = AndC->logBase2();
15486 // We must shift X first.
15487 X = DAG.getNode(ISD::SRL, dl, VT, X,
15488 DAG.getConstant(BitInX, dl, VT));
15491 for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
15492 BitInY < NumActiveBits; ++BitInY) {
15493 if (OrCI[BitInY] == 0)
15495 APInt Mask(VT.getSizeInBits(), 0);
15496 Mask.setBit(BitInY);
15497 V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
15498 // Confusingly, the operand is an *inverted* mask.
15499 DAG.getConstant(~Mask, dl, VT));
15505 // Given N, the value controlling the conditional branch, search for the loop
15506 // intrinsic, returning it, along with how the value is used. We need to handle
15507 // patterns such as the following:
15508 // (brcond (xor (setcc (loop.decrement), 0, ne), 1), exit)
15509 // (brcond (setcc (loop.decrement), 0, eq), exit)
15510 // (brcond (setcc (loop.decrement), 0, ne), header)
15511 static SDValue SearchLoopIntrinsic(SDValue N, ISD::CondCode &CC, int &Imm,
15513 switch (N->getOpcode()) {
15517 if (!isa<ConstantSDNode>(N.getOperand(1)))
15519 if (!cast<ConstantSDNode>(N.getOperand(1))->isOne())
15522 return SearchLoopIntrinsic(N.getOperand(0), CC, Imm, Negate);
15525 auto *Const = dyn_cast<ConstantSDNode>(N.getOperand(1));
15528 if (Const->isNullValue())
15530 else if (Const->isOne())
15534 CC = cast<CondCodeSDNode>(N.getOperand(2))->get();
15535 return SearchLoopIntrinsic(N->getOperand(0), CC, Imm, Negate);
15537 case ISD::INTRINSIC_W_CHAIN: {
15538 unsigned IntOp = cast<ConstantSDNode>(N.getOperand(1))->getZExtValue();
15539 if (IntOp != Intrinsic::test_set_loop_iterations &&
15540 IntOp != Intrinsic::loop_decrement_reg)
15548 static SDValue PerformHWLoopCombine(SDNode *N,
15549 TargetLowering::DAGCombinerInfo &DCI,
15550 const ARMSubtarget *ST) {
  // The hwloop intrinsics that we're interested in are used for control flow,
  // either for entering or exiting the loop:
  // - test.set.loop.iterations will test whether its operand is zero. If it
  //   is zero, the following branch should not enter the loop.
  // - loop.decrement.reg also tests whether its operand is zero. If it is
  //   zero, the following branch should not branch back to the beginning of
  //   the loop.
  // So here, we need to check how the brcond is using the result of each of
  // the intrinsics to ensure that we're branching to the right place at the
  // right time.

  ISD::CondCode CC;
  SDValue Cond;
  int Imm = 1;
  bool Negate = false;
  SDValue Chain = N->getOperand(0);
  SDValue Dest;
15570 if (N->getOpcode() == ISD::BRCOND) {
15572 Cond = N->getOperand(1);
15573 Dest = N->getOperand(2);
15575 assert(N->getOpcode() == ISD::BR_CC && "Expected BRCOND or BR_CC!");
15576 CC = cast<CondCodeSDNode>(N->getOperand(1))->get();
15577 Cond = N->getOperand(2);
15578 Dest = N->getOperand(4);
15579 if (auto *Const = dyn_cast<ConstantSDNode>(N->getOperand(3))) {
15580 if (!Const->isOne() && !Const->isNullValue())
15582 Imm = Const->getZExtValue();
15587 SDValue Int = SearchLoopIntrinsic(Cond, CC, Imm, Negate);
15592 CC = ISD::getSetCCInverse(CC, /* Integer inverse */ MVT::i32);
15594 auto IsTrueIfZero = [](ISD::CondCode CC, int Imm) {
15595 return (CC == ISD::SETEQ && Imm == 0) ||
15596 (CC == ISD::SETNE && Imm == 1) ||
15597 (CC == ISD::SETLT && Imm == 1) ||
15598 (CC == ISD::SETULT && Imm == 1);
15601 auto IsFalseIfZero = [](ISD::CondCode CC, int Imm) {
15602 return (CC == ISD::SETEQ && Imm == 1) ||
15603 (CC == ISD::SETNE && Imm == 0) ||
15604 (CC == ISD::SETGT && Imm == 0) ||
15605 (CC == ISD::SETUGT && Imm == 0) ||
15606 (CC == ISD::SETGE && Imm == 1) ||
15607 (CC == ISD::SETUGE && Imm == 1);
15610 assert((IsTrueIfZero(CC, Imm) || IsFalseIfZero(CC, Imm)) &&
15611 "unsupported condition");
15614 SelectionDAG &DAG = DCI.DAG;
15615 SDValue Elements = Int.getOperand(2);
15616 unsigned IntOp = cast<ConstantSDNode>(Int->getOperand(1))->getZExtValue();
15617 assert((N->hasOneUse() && N->use_begin()->getOpcode() == ISD::BR)
15618 && "expected single br user");
15619 SDNode *Br = *N->use_begin();
15620 SDValue OtherTarget = Br->getOperand(1);
15622 // Update the unconditional branch to branch to the given Dest.
15623 auto UpdateUncondBr = [](SDNode *Br, SDValue Dest, SelectionDAG &DAG) {
15624 SDValue NewBrOps[] = { Br->getOperand(0), Dest };
15625 SDValue NewBr = DAG.getNode(ISD::BR, SDLoc(Br), MVT::Other, NewBrOps);
15626 DAG.ReplaceAllUsesOfValueWith(SDValue(Br, 0), NewBr);
15629 if (IntOp == Intrinsic::test_set_loop_iterations) {
15631 // We expect this 'instruction' to branch when the counter is zero.
15632 if (IsTrueIfZero(CC, Imm)) {
15633 SDValue Ops[] = { Chain, Elements, Dest };
15634 Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
15636 // The logic is the reverse of what we need for WLS, so find the other
15637 // basic block target: the target of the proceeding br.
15638 UpdateUncondBr(Br, Dest, DAG);
15640 SDValue Ops[] = { Chain, Elements, OtherTarget };
15641 Res = DAG.getNode(ARMISD::WLS, dl, MVT::Other, Ops);
15643 DAG.ReplaceAllUsesOfValueWith(Int.getValue(1), Int.getOperand(0));
15646 SDValue Size = DAG.getTargetConstant(
15647 cast<ConstantSDNode>(Int.getOperand(3))->getZExtValue(), dl, MVT::i32);
15648 SDValue Args[] = { Int.getOperand(0), Elements, Size, };
15649 SDValue LoopDec = DAG.getNode(ARMISD::LOOP_DEC, dl,
15650 DAG.getVTList(MVT::i32, MVT::Other), Args);
15651 DAG.ReplaceAllUsesWith(Int.getNode(), LoopDec.getNode());
15653 // We expect this instruction to branch when the count is not zero.
15654 SDValue Target = IsFalseIfZero(CC, Imm) ? Dest : OtherTarget;
15656 // Update the unconditional branch to target the loop preheader if we've
15657 // found the condition has been reversed.
15658 if (Target == OtherTarget)
15659 UpdateUncondBr(Br, Dest, DAG);
15661 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
15662 SDValue(LoopDec.getNode(), 1), Chain);
15664 SDValue EndArgs[] = { Chain, SDValue(LoopDec.getNode(), 0), Target };
15665 return DAG.getNode(ARMISD::LE, dl, MVT::Other, EndArgs);
15670 /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
15672 ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
15673 SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at NE cases.
    return SDValue();
15678 EVT VT = N->getValueType(0);
15680 SDValue LHS = Cmp.getOperand(0);
15681 SDValue RHS = Cmp.getOperand(1);
15682 SDValue Chain = N->getOperand(0);
15683 SDValue BB = N->getOperand(1);
15684 SDValue ARMcc = N->getOperand(2);
15685 ARMCC::CondCodes CC =
15686 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
15688 // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
15689 // -> (brcond Chain BB CC CPSR Cmp)
15690 if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
15691 LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
15692 LHS->getOperand(0)->hasOneUse()) {
15693 auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
15694 auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
15695 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
15696 auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
15697 if ((LHS00C && LHS00C->getZExtValue() == 0) &&
15698 (LHS01C && LHS01C->getZExtValue() == 1) &&
15699 (LHS1C && LHS1C->getZExtValue() == 1) &&
15700 (RHSC && RHSC->getZExtValue() == 0)) {
15701 return DAG.getNode(
15702 ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
15703 LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
15710 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
15712 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
15713 SDValue Cmp = N->getOperand(4);
  if (Cmp.getOpcode() != ARMISD::CMPZ)
    // Only looking at EQ and NE cases.
    return SDValue();
15718 EVT VT = N->getValueType(0);
15720 SDValue LHS = Cmp.getOperand(0);
15721 SDValue RHS = Cmp.getOperand(1);
15722 SDValue FalseVal = N->getOperand(0);
15723 SDValue TrueVal = N->getOperand(1);
15724 SDValue ARMcc = N->getOperand(2);
15725 ARMCC::CondCodes CC =
15726 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
15728 // BFI is only available on V6T2+.
  if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
    SDValue R = PerformCMOVToBFICombine(N, DAG);
    if (R)
      return R;
  }
  /// FIXME: Turn this into a target neutral optimization?
  SDValue Res;
  if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
15754 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
15755 N->getOperand(3), Cmp);
15756 } else if (CC == ARMCC::EQ && TrueVal == RHS) {
15758 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
15759 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
15760 N->getOperand(3), NewCmp);
15763 // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
15764 // -> (cmov F T CC CPSR Cmp)
15765 if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
15766 auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
15767 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
15768 auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
15769 if ((LHS0C && LHS0C->getZExtValue() == 0) &&
15770 (LHS1C && LHS1C->getZExtValue() == 1) &&
15771 (RHSC && RHSC->getZExtValue() == 0)) {
15772 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
15773 LHS->getOperand(2), LHS->getOperand(3),
15774 LHS->getOperand(4));
15778 if (!VT.isInteger())
15781 // Materialize a boolean comparison for integers so we can avoid branching.
15782 if (isNullConstant(FalseVal)) {
15783 if (CC == ARMCC::EQ && isOneConstant(TrueVal)) {
15784 if (!Subtarget->isThumb1Only() && Subtarget->hasV5TOps()) {
15785 // If x == y then x - y == 0 and ARM's CLZ will return 32, shifting it
15786 // right 5 bits will make that 32 be 1, otherwise it will be 0.
15787 // CMOV 0, 1, ==, (CMPZ x, y) -> SRL (CTLZ (SUB x, y)), 5
15788 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
15789 Res = DAG.getNode(ISD::SRL, dl, VT, DAG.getNode(ISD::CTLZ, dl, VT, Sub),
15790 DAG.getConstant(5, dl, MVT::i32));
15791 } else {
15792 // CMOV 0, 1, ==, (CMPZ x, y) ->
15793 // (ADDCARRY (SUB x, y), t:0, t:1)
15794 // where t = (SUBCARRY 0, (SUB x, y), 0)
15796 // The SUBCARRY computes 0 - (x - y) and this will give a borrow when
15797 // x != y. In other words, a carry C == 1 when x == y, C == 0
15798 // otherwise.
15799 // The final ADDCARRY computes
15800 // x - y + (0 - (x - y)) + C == C
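// Hedged worked example (added, illustrative only): for x == y, Sub == 0, the
// USUBO computes 0 - 0 with no borrow so C == 1, and ADDCARRY returns
// 0 + 0 + 1 == 1. For x = 5, y = 3, Sub == 2, the USUBO of 0 - 2 borrows so
// C == 0, and ADDCARRY returns 2 + 0xFFFFFFFE + 0 == 0 (mod 2^32).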
15801 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, LHS, RHS);
15802 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
15803 SDValue Neg = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, Sub);
15804 // ISD::SUBCARRY returns a borrow but we want the carry here
15805 // actually.
15806 SDValue Carry =
15807 DAG.getNode(ISD::SUB, dl, MVT::i32,
15808 DAG.getConstant(1, dl, MVT::i32), Neg.getValue(1));
15809 Res = DAG.getNode(ISD::ADDCARRY, dl, VTs, Sub, Neg, Carry);
15810 }
15811 } else if (CC == ARMCC::NE && !isNullConstant(RHS) &&
15812 (!Subtarget->isThumb1Only() || isPowerOf2Constant(TrueVal))) {
15813 // This seems pointless but will allow us to combine it further below.
15814 // CMOV 0, z, !=, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
15815 SDValue Sub =
15816 DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
15817 SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
15818 Sub.getValue(1), SDValue());
15819 Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, TrueVal, ARMcc,
15820 N->getOperand(3), CPSRGlue.getValue(1));
15821 }
15823 } else if (isNullConstant(TrueVal)) {
15824 if (CC == ARMCC::EQ && !isNullConstant(RHS) &&
15825 (!Subtarget->isThumb1Only() || isPowerOf2Constant(FalseVal))) {
15826 // This seems pointless but will allow us to combine it further below
15827 // Note that we change == for != as this is the dual for the case above.
15828 // CMOV z, 0, ==, (CMPZ x, y) -> CMOV (SUBS x, y), z, !=, (SUBS x, y):1
15829 SDValue Sub =
15830 DAG.getNode(ARMISD::SUBS, dl, DAG.getVTList(VT, MVT::i32), LHS, RHS);
15831 SDValue CPSRGlue = DAG.getCopyToReg(DAG.getEntryNode(), dl, ARM::CPSR,
15832 Sub.getValue(1), SDValue());
15833 Res = DAG.getNode(ARMISD::CMOV, dl, VT, Sub, FalseVal,
15834 DAG.getConstant(ARMCC::NE, dl, MVT::i32),
15835 N->getOperand(3), CPSRGlue.getValue(1));
15836 }
15837 }
15840 // On Thumb1, the DAG above may be further combined if z is a power of 2
15841 // (z == 1 << K).
15842 // CMOV (SUBS x, y), z, !=, (SUBS x, y):1 ->
15843 // t1 = (USUBO (SUB x, y), 1)
15844 // t2 = (SUBCARRY (SUB x, y), t1:0, t1:1)
15845 // Result = if K != 0 then (SHL t2:0, K) else t2:0
15847 // This also handles the special case of comparing against zero; it's
15848 // essentially the same pattern, except there's no SUBS:
15849 // CMOV x, z, !=, (CMPZ x, 0) ->
15850 // t1 = (USUBO x, 1)
15851 // t2 = (SUBCARRY x, t1:0, t1:1)
15852 // Result = if K != 0 then (SHL t2:0, K) else t2:0
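// Hedged example (added, not in the original comment): with z == 4 (K == 2)
// and FalseVal == x - y, the USUBO borrows only when x - y == 0, so the
// SUBCARRY produces 1 when x != y and 0 otherwise; the final SHL by K == 2
// then yields 4 or 0, matching the original select.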
15853 const APInt *TrueConst;
15854 if (Subtarget->isThumb1Only() && CC == ARMCC::NE &&
15855 ((FalseVal.getOpcode() == ARMISD::SUBS &&
15856 FalseVal.getOperand(0) == LHS && FalseVal.getOperand(1) == RHS) ||
15857 (FalseVal == LHS && isNullConstant(RHS))) &&
15858 (TrueConst = isPowerOf2Constant(TrueVal))) {
15859 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
15860 unsigned ShiftAmount = TrueConst->logBase2();
15861 if (ShiftAmount)
15862 TrueVal = DAG.getConstant(1, dl, VT);
15863 SDValue Subc = DAG.getNode(ISD::USUBO, dl, VTs, FalseVal, TrueVal);
15864 Res = DAG.getNode(ISD::SUBCARRY, dl, VTs, FalseVal, Subc, Subc.getValue(1));
15866 if (ShiftAmount)
15867 Res = DAG.getNode(ISD::SHL, dl, VT, Res,
15868 DAG.getConstant(ShiftAmount, dl, MVT::i32));
15869 }
15871 if (Res.getNode()) {
15872 KnownBits Known = DAG.computeKnownBits(SDValue(N,0));
15873 // Capture demanded bits information that would be otherwise lost.
15874 if (Known.Zero == 0xfffffffe)
15875 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
15876 DAG.getValueType(MVT::i1));
15877 else if (Known.Zero == 0xffffff00)
15878 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
15879 DAG.getValueType(MVT::i8));
15880 else if (Known.Zero == 0xffff0000)
15881 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
15882 DAG.getValueType(MVT::i16));
15883 }
15885 return Res;
15886 }
15888 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG,
15889 const ARMSubtarget *ST) {
15890 SDValue Src = N->getOperand(0);
15891 EVT DstVT = N->getValueType(0);
15893 // Convert v4f32 bitcast (v4i32 vdup (i32)) -> v4f32 vdup (i32) under MVE.
15894 if (ST->hasMVEIntegerOps() && Src.getOpcode() == ARMISD::VDUP) {
15895 EVT SrcVT = Src.getValueType();
15896 if (SrcVT.getScalarSizeInBits() == DstVT.getScalarSizeInBits())
15897 return DAG.getNode(ARMISD::VDUP, SDLoc(N), DstVT, Src.getOperand(0));
15898 }
15900 // We may have a bitcast of something that has already had this bitcast
15901 // combine performed on it, so skip past any VECTOR_REG_CASTs.
15902 while (Src.getOpcode() == ARMISD::VECTOR_REG_CAST)
15903 Src = Src.getOperand(0);
15905 // Bitcast from element-wise VMOV or VMVN doesn't need VREV if the VREV that
15906 // would be generated is at least the width of the element type.
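// Hedged example (added for illustration): a v4i32 VMOVIMM bitcast to v2i64
// satisfies the 32 <= 64 scalar-size check below, so on a big-endian target
// it is rewritten to a VECTOR_REG_CAST instead of going through a VREV.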
15907 EVT SrcVT = Src.getValueType();
15908 if ((Src.getOpcode() == ARMISD::VMOVIMM ||
15909 Src.getOpcode() == ARMISD::VMVNIMM ||
15910 Src.getOpcode() == ARMISD::VMOVFPIMM) &&
15911 SrcVT.getScalarSizeInBits() <= DstVT.getScalarSizeInBits() &&
15912 DAG.getDataLayout().isBigEndian())
15913 return DAG.getNode(ARMISD::VECTOR_REG_CAST, SDLoc(N), DstVT, Src);
15915 return SDValue();
15916 }
15918 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
15919 DAGCombinerInfo &DCI) const {
15920 switch (N->getOpcode()) {
15921 default: break;
15922 case ISD::VSELECT: return PerformVSELECTCombine(N, DCI, Subtarget);
15923 case ISD::ABS: return PerformABSCombine(N, DCI, Subtarget);
15924 case ARMISD::ADDE: return PerformADDECombine(N, DCI, Subtarget);
15925 case ARMISD::UMLAL: return PerformUMLALCombine(N, DCI.DAG, Subtarget);
15926 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget);
15927 case ISD::SUB: return PerformSUBCombine(N, DCI, Subtarget);
15928 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget);
15929 case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
15930 case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget);
15931 case ISD::AND: return PerformANDCombine(N, DCI, Subtarget);
15932 case ISD::BRCOND:
15933 case ISD::BR_CC: return PerformHWLoopCombine(N, DCI, Subtarget);
15934 case ARMISD::ADDC:
15935 case ARMISD::SUBC: return PerformAddcSubcCombine(N, DCI, Subtarget);
15936 case ARMISD::SUBE: return PerformAddeSubeCombine(N, DCI, Subtarget);
15937 case ARMISD::BFI: return PerformBFICombine(N, DCI);
15938 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
15939 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
15940 case ARMISD::VMOVhr: return PerformVMOVhrCombine(N, DCI);
15941 case ARMISD::VMOVrh: return PerformVMOVrhCombine(N, DCI);
15942 case ISD::STORE: return PerformSTORECombine(N, DCI, Subtarget);
15943 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
15944 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
15945 case ISD::EXTRACT_VECTOR_ELT: return PerformExtractEltCombine(N, DCI);
15946 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
15947 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI, Subtarget);
15948 case ARMISD::VDUP: return PerformVDUPCombine(N, DCI, Subtarget);
15949 case ISD::FP_TO_SINT:
15950 case ISD::FP_TO_UINT:
15951 return PerformVCVTCombine(N, DCI.DAG, Subtarget);
15952 case ISD::FDIV:
15953 return PerformVDIVCombine(N, DCI.DAG, Subtarget);
15954 case ISD::INTRINSIC_WO_CHAIN:
15955 return PerformIntrinsicCombine(N, DCI);
15956 case ISD::SHL:
15957 case ISD::SRA:
15958 case ISD::SRL:
15959 return PerformShiftCombine(N, DCI, Subtarget);
15960 case ISD::SIGN_EXTEND:
15961 case ISD::ZERO_EXTEND:
15962 case ISD::ANY_EXTEND:
15963 return PerformExtendCombine(N, DCI.DAG, Subtarget);
15964 case ISD::FP_EXTEND:
15965 return PerformFPExtendCombine(N, DCI.DAG, Subtarget);
15966 case ISD::SMIN:
15967 case ISD::UMIN:
15968 case ISD::SMAX:
15969 case ISD::UMAX:
15970 return PerformMinMaxCombine(N, DCI.DAG, Subtarget);
15971 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
15972 case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
15973 case ISD::LOAD: return PerformLOADCombine(N, DCI);
15974 case ARMISD::VLD1DUP:
15975 case ARMISD::VLD2DUP:
15976 case ARMISD::VLD3DUP:
15977 case ARMISD::VLD4DUP:
15978 return PerformVLDCombine(N, DCI);
15979 case ARMISD::BUILD_VECTOR:
15980 return PerformARMBUILD_VECTORCombine(N, DCI);
15981 case ISD::BITCAST:
15982 return PerformBITCASTCombine(N, DCI.DAG, Subtarget);
15983 case ARMISD::PREDICATE_CAST:
15984 return PerformPREDICATE_CASTCombine(N, DCI);
15985 case ARMISD::VECTOR_REG_CAST:
15986 return PerformVECTOR_REG_CASTCombine(N, DCI, Subtarget);
15987 case ARMISD::VCMP:
15988 return PerformVCMPCombine(N, DCI, Subtarget);
15989 case ISD::VECREDUCE_ADD:
15990 return PerformVECREDUCE_ADDCombine(N, DCI.DAG, Subtarget);
15991 case ARMISD::VMOVN:
15992 return PerformVMOVNCombine(N, DCI);
15993 case ARMISD::VQMOVNs:
15994 case ARMISD::VQMOVNu:
15995 return PerformVQMOVNCombine(N, DCI);
15996 case ARMISD::ASRL:
15997 case ARMISD::LSRL:
15998 case ARMISD::LSLL:
15999 return PerformLongShiftCombine(N, DCI.DAG);
16000 case ARMISD::SMULWB: {
16001 unsigned BitWidth = N->getValueType(0).getSizeInBits();
16002 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
16003 if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
16004 return SDValue();
16005 break;
16006 }
16007 case ARMISD::SMULWT: {
16008 unsigned BitWidth = N->getValueType(0).getSizeInBits();
16009 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
16010 if (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI))
16011 return SDValue();
16012 break;
16013 }
16014 case ARMISD::SMLALBB:
16015 case ARMISD::QADD16b:
16016 case ARMISD::QSUB16b: {
16017 unsigned BitWidth = N->getValueType(0).getSizeInBits();
16018 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 16);
16019 if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
16020 (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
16021 return SDValue();
16022 break;
16023 }
16024 case ARMISD::SMLALBT: {
16025 unsigned LowWidth = N->getOperand(0).getValueType().getSizeInBits();
16026 APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
16027 unsigned HighWidth = N->getOperand(1).getValueType().getSizeInBits();
16028 APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
16029 if ((SimplifyDemandedBits(N->getOperand(0), LowMask, DCI)) ||
16030 (SimplifyDemandedBits(N->getOperand(1), HighMask, DCI)))
16031 return SDValue();
16032 break;
16033 }
16034 case ARMISD::SMLALTB: {
16035 unsigned HighWidth = N->getOperand(0).getValueType().getSizeInBits();
16036 APInt HighMask = APInt::getHighBitsSet(HighWidth, 16);
16037 unsigned LowWidth = N->getOperand(1).getValueType().getSizeInBits();
16038 APInt LowMask = APInt::getLowBitsSet(LowWidth, 16);
16039 if ((SimplifyDemandedBits(N->getOperand(0), HighMask, DCI)) ||
16040 (SimplifyDemandedBits(N->getOperand(1), LowMask, DCI)))
16041 return SDValue();
16042 break;
16043 }
16044 case ARMISD::SMLALTT: {
16045 unsigned BitWidth = N->getValueType(0).getSizeInBits();
16046 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 16);
16047 if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
16048 (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
16049 return SDValue();
16050 break;
16051 }
16052 case ARMISD::QADD8b:
16053 case ARMISD::QSUB8b: {
16054 unsigned BitWidth = N->getValueType(0).getSizeInBits();
16055 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, 8);
16056 if ((SimplifyDemandedBits(N->getOperand(0), DemandedMask, DCI)) ||
16057 (SimplifyDemandedBits(N->getOperand(1), DemandedMask, DCI)))
16058 return SDValue();
16059 break;
16060 }
16061 case ISD::INTRINSIC_VOID:
16062 case ISD::INTRINSIC_W_CHAIN:
16063 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
16064 case Intrinsic::arm_neon_vld1:
16065 case Intrinsic::arm_neon_vld1x2:
16066 case Intrinsic::arm_neon_vld1x3:
16067 case Intrinsic::arm_neon_vld1x4:
16068 case Intrinsic::arm_neon_vld2:
16069 case Intrinsic::arm_neon_vld3:
16070 case Intrinsic::arm_neon_vld4:
16071 case Intrinsic::arm_neon_vld2lane:
16072 case Intrinsic::arm_neon_vld3lane:
16073 case Intrinsic::arm_neon_vld4lane:
16074 case Intrinsic::arm_neon_vld2dup:
16075 case Intrinsic::arm_neon_vld3dup:
16076 case Intrinsic::arm_neon_vld4dup:
16077 case Intrinsic::arm_neon_vst1:
16078 case Intrinsic::arm_neon_vst1x2:
16079 case Intrinsic::arm_neon_vst1x3:
16080 case Intrinsic::arm_neon_vst1x4:
16081 case Intrinsic::arm_neon_vst2:
16082 case Intrinsic::arm_neon_vst3:
16083 case Intrinsic::arm_neon_vst4:
16084 case Intrinsic::arm_neon_vst2lane:
16085 case Intrinsic::arm_neon_vst3lane:
16086 case Intrinsic::arm_neon_vst4lane:
16087 return PerformVLDCombine(N, DCI);
16088 case Intrinsic::arm_mve_vld2q:
16089 case Intrinsic::arm_mve_vld4q:
16090 case Intrinsic::arm_mve_vst2q:
16091 case Intrinsic::arm_mve_vst4q:
16092 return PerformMVEVLDCombine(N, DCI);
16093 default: break;
16094 }
16095 break;
16096 }
16097 return SDValue();
16098 }
16100 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
16101 EVT VT) const {
16102 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
16103 }
16105 bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
16106 unsigned Alignment,
16107 MachineMemOperand::Flags,
16108 bool *Fast) const {
16109 // Depends what it gets converted into if the type is weird.
16110 if (!VT.isSimple())
16111 return false;
16113 // The AllowsUnaligned flag models the SCTLR.A setting in ARM cpus
16114 bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
16115 auto Ty = VT.getSimpleVT().SimpleTy;
16117 if (Ty == MVT::i8 || Ty == MVT::i16 || Ty == MVT::i32) {
16118 // Unaligned access can use (for example) LDRB, LDRH, LDR
16119 if (AllowsUnaligned) {
16120 if (Fast)
16121 *Fast = Subtarget->hasV7Ops();
16122 return true;
16123 }
16124 }
16126 if (Ty == MVT::f64 || Ty == MVT::v2f64) {
16127 // For any little-endian targets with neon, we can support unaligned ld/st
16128 // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
16129 // A big-endian target may also explicitly support unaligned accesses
16130 if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
16131 if (Fast)
16132 *Fast = true;
16133 return true;
16134 }
16135 }
16137 if (!Subtarget->hasMVEIntegerOps())
16138 return false;
16140 // These are for predicates
16141 if ((Ty == MVT::v16i1 || Ty == MVT::v8i1 || Ty == MVT::v4i1)) {
16142 if (Fast)
16143 *Fast = true;
16144 return true;
16145 }
16147 // These are for truncated stores/narrowing loads. They are fine so long as
16148 // the alignment is at least the size of the item being loaded
16149 if ((Ty == MVT::v4i8 || Ty == MVT::v8i8 || Ty == MVT::v4i16) &&
16150 Alignment >= VT.getScalarSizeInBits() / 8) {
16151 if (Fast)
16152 *Fast = true;
16153 return true;
16154 }
16156 // In little-endian MVE, the store instructions VSTRB.U8, VSTRH.U16 and
16157 // VSTRW.U32 all store the vector register in exactly the same format, and
16158 // differ only in the range of their immediate offset field and the required
16159 // alignment. So there is always a store that can be used, regardless of
16160 // the actual type.
16162 // For big endian, that is not the case. But we can still emit a (VSTRB.U8;
16163 // VREV64.8) pair and get the same effect. This will likely be better than
16164 // aligning the vector through the stack.
16165 if (Ty == MVT::v16i8 || Ty == MVT::v8i16 || Ty == MVT::v8f16 ||
16166 Ty == MVT::v4i32 || Ty == MVT::v4f32 || Ty == MVT::v2i64 ||
16167 Ty == MVT::v2f64) {
16168 if (Fast)
16169 *Fast = true;
16170 return true;
16171 }
16173 return false;
16174 }
16177 EVT ARMTargetLowering::getOptimalMemOpType(
16178 const MemOp &Op, const AttributeList &FuncAttributes) const {
16179 // See if we can use NEON instructions for this...
16180 if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() &&
16181 !FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
16182 bool Fast;
16183 if (Op.size() >= 16 &&
16184 (Op.isAligned(Align(16)) ||
16185 (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1,
16186 MachineMemOperand::MONone, &Fast) &&
16187 Fast))) {
16188 return MVT::v2f64;
16189 } else if (Op.size() >= 8 &&
16190 (Op.isAligned(Align(8)) ||
16191 (allowsMisalignedMemoryAccesses(
16192 MVT::f64, 0, 1, MachineMemOperand::MONone, &Fast) &&
16193 Fast))) {
16194 return MVT::f64;
16195 }
16196 }
16198 // Let the target-independent logic figure it out.
16199 return MVT::Other;
16200 }
16202 // 64-bit integers are split into their high and low parts and held in two
16203 // different registers, so the trunc is free since the low register can just
16204 // be used.
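// Hedged illustration (added, not from the source): for IR such as
//   %t = trunc i64 %x to i32
// the i64 value already occupies a pair of 32-bit registers, so the
// truncation simply keeps using the low register and costs no instruction.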
16205 bool ARMTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
16206 if (!SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
16207 return false;
16208 unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
16209 unsigned DestBits = DstTy->getPrimitiveSizeInBits();
16210 return (SrcBits == 64 && DestBits == 32);
16211 }
16213 bool ARMTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
16214 if (SrcVT.isVector() || DstVT.isVector() || !SrcVT.isInteger() ||
16215 !DstVT.isInteger())
16216 return false;
16217 unsigned SrcBits = SrcVT.getSizeInBits();
16218 unsigned DestBits = DstVT.getSizeInBits();
16219 return (SrcBits == 64 && DestBits == 32);
16220 }
16222 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
16223 if (Val.getOpcode() != ISD::LOAD)
16224 return false;
16226 EVT VT1 = Val.getValueType();
16227 if (!VT1.isSimple() || !VT1.isInteger() ||
16228 !VT2.isSimple() || !VT2.isInteger())
16229 return false;
16231 switch (VT1.getSimpleVT().SimpleTy) {
16232 default: break;
16233 case MVT::i1:
16234 case MVT::i8:
16235 case MVT::i16:
16236 // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
16237 return true;
16238 }
16240 return false;
16241 }
16243 bool ARMTargetLowering::isFNegFree(EVT VT) const {
16244 if (!VT.isSimple())
16245 return false;
16247 // There are quite a few FP16 instructions (e.g. VNMLA, VNMLS, etc.) that
16248 // negate values directly (fneg is free). So, we don't want to let the DAG
16249 // combiner rewrite fneg into xors and some other instructions. For f16 and
16250 // FullFP16 argument passing, some bitcast nodes may be introduced,
16251 // triggering this DAG combine rewrite, so we are avoiding that with this.
16252 switch (VT.getSimpleVT().SimpleTy) {
16253 case MVT::v8f16:
16254 case MVT::v4f16:
16255 return Subtarget->hasFullFP16();
16256 default:
16257 break;
16258 }
16259 return false;
16260 }
16261 /// Check if Ext1 and Ext2 are extends of the same type, doubling the bitwidth
16262 /// of the vector elements.
16263 static bool areExtractExts(Value *Ext1, Value *Ext2) {
16264 auto areExtDoubled = [](Instruction *Ext) {
16265 return Ext->getType()->getScalarSizeInBits() ==
16266 2 * Ext->getOperand(0)->getType()->getScalarSizeInBits();
16267 };
16269 if (!match(Ext1, m_ZExtOrSExt(m_Value())) ||
16270 !match(Ext2, m_ZExtOrSExt(m_Value())) ||
16271 !areExtDoubled(cast<Instruction>(Ext1)) ||
16272 !areExtDoubled(cast<Instruction>(Ext2)))
16273 return false;
16275 return true;
16276 }
16278 /// Check if sinking \p I's operands to I's basic block is profitable, because
16279 /// the operands can be folded into a target instruction, e.g.
16280 /// sext/zext can be folded into vsubl.
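// A hedged illustration (added, not from the source): for MVE, given IR like
//   %i = insertelement <4 x i32> undef, i32 %x, i32 0
//   %s = shufflevector <4 x i32> %i, <4 x i32> undef, <4 x i32> zeroinitializer
//   %a = add <4 x i32> %v, %s
// sinking %i/%s next to the add lets selection fold the splat into a
// vector-by-scalar form such as vadd.i32 q0, q1, r0 instead of materialising
// the splat in a vector register.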
16281 bool ARMTargetLowering::shouldSinkOperands(Instruction *I,
16282 SmallVectorImpl<Use *> &Ops) const {
16283 if (!I->getType()->isVectorTy())
16284 return false;
16286 if (Subtarget->hasNEON()) {
16287 switch (I->getOpcode()) {
16288 case Instruction::Sub:
16289 case Instruction::Add: {
16290 if (!areExtractExts(I->getOperand(0), I->getOperand(1)))
16291 return false;
16292 Ops.push_back(&I->getOperandUse(0));
16293 Ops.push_back(&I->getOperandUse(1));
16294 return true;
16295 }
16296 default:
16297 return false;
16298 }
16299 }
16301 if (!Subtarget->hasMVEIntegerOps())
16302 return false;
16304 auto IsFMSMul = [&](Instruction *I) {
16305 if (!I->hasOneUse())
16306 return false;
16307 auto *Sub = cast<Instruction>(*I->users().begin());
16308 return Sub->getOpcode() == Instruction::FSub && Sub->getOperand(1) == I;
16309 };
16310 auto IsFMS = [&](Instruction *I) {
16311 if (match(I->getOperand(0), m_FNeg(m_Value())) ||
16312 match(I->getOperand(1), m_FNeg(m_Value())))
16313 return true;
16314 return false;
16315 };
16317 auto IsSinker = [&](Instruction *I, int Operand) {
16318 switch (I->getOpcode()) {
16319 case Instruction::Add:
16320 case Instruction::Mul:
16321 case Instruction::FAdd:
16322 case Instruction::ICmp:
16323 case Instruction::FCmp:
16324 return true;
16325 case Instruction::FMul:
16326 return !IsFMSMul(I);
16327 case Instruction::Sub:
16328 case Instruction::FSub:
16329 case Instruction::Shl:
16330 case Instruction::LShr:
16331 case Instruction::AShr:
16332 return Operand == 1;
16333 case Instruction::Call:
16334 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
16335 switch (II->getIntrinsicID()) {
16336 case Intrinsic::fma:
16348 for (auto OpIdx : enumerate(I->operands())) {
16349 Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
16350 // Make sure we are not already sinking this operand
16351 if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
16352 continue;
16354 Instruction *Shuffle = Op;
16355 if (Shuffle->getOpcode() == Instruction::BitCast)
16356 Shuffle = dyn_cast<Instruction>(Shuffle->getOperand(0));
16357 // We are looking for a splat that can be sunk.
16358 if (!Shuffle ||
16359 !match(Shuffle, m_Shuffle(
16360 m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
16361 m_Undef(), m_ZeroMask())))
16362 continue;
16363 if (!IsSinker(I, OpIdx.index()))
16364 continue;
16366 // All uses of the shuffle should be sunk to avoid duplicating it across gpr
16367 // and vector registers
16368 for (Use &U : Op->uses()) {
16369 Instruction *Insn = cast<Instruction>(U.getUser());
16370 if (!IsSinker(Insn, U.getOperandNo()))
16371 return false;
16372 }
16374 Ops.push_back(&Shuffle->getOperandUse(0));
16375 if (Shuffle != Op)
16376 Ops.push_back(&Op->getOperandUse(0));
16377 Ops.push_back(&OpIdx.value());
16378 }
16379 return true;
16380 }
16382 Type *ARMTargetLowering::shouldConvertSplatType(ShuffleVectorInst *SVI) const {
16383 if (!Subtarget->hasMVEIntegerOps())
16384 return nullptr;
16385 Type *SVIType = SVI->getType();
16386 Type *ScalarType = SVIType->getScalarType();
16388 if (ScalarType->isFloatTy())
16389 return Type::getInt32Ty(SVIType->getContext());
16390 if (ScalarType->isHalfTy())
16391 return Type::getInt16Ty(SVIType->getContext());
16392 return nullptr;
16393 }
16395 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
16396 EVT VT = ExtVal.getValueType();
16398 if (!isTypeLegal(VT))
16399 return false;
16401 if (auto *Ld = dyn_cast<MaskedLoadSDNode>(ExtVal.getOperand(0))) {
16402 if (Ld->isExpandingLoad())
16403 return false;
16404 }
16406 if (Subtarget->hasMVEIntegerOps())
16407 return true;
16409 // Don't create a loadext if we can fold the extension into a wide/long
16410 // instruction.
16411 // If there's more than one user instruction, the loadext is desirable no
16412 // matter what. There can be two uses by the same instruction.
16413 if (ExtVal->use_empty() ||
16414 !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
16415 return true;
16417 SDNode *U = *ExtVal->use_begin();
16418 if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
16419 U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHLIMM))
16420 return false;
16422 return true;
16423 }
16425 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
16426 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
16427 return false;
16429 if (!isTypeLegal(EVT::getEVT(Ty1)))
16430 return false;
16432 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
16434 // Assuming the caller doesn't have a zeroext or signext return parameter,
16435 // truncation all the way down to i1 is valid.
16436 return true;
16437 }
16439 int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL,
16440 const AddrMode &AM, Type *Ty,
16441 unsigned AS) const {
16442 if (isLegalAddressingMode(DL, AM, Ty, AS)) {
16443 if (Subtarget->hasFPAO())
16444 return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
16445 return 0;
16446 }
16447 return -1;
16448 }
16450 /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
16451 /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
16452 /// expanded to FMAs when this method returns true, otherwise fmuladd is
16453 /// expanded to fmul + fadd.
16455 /// ARM supports both fused and unfused multiply-add operations; we already
16456 /// lower a pair of fmul and fadd to the latter so it's not clear that there
16457 /// would be a gain or that the gain would be worthwhile enough to risk
16458 /// correctness bugs.
16460 /// For MVE, we set this to true as it helps simplify the need for some
16461 /// patterns (and we don't have the non-fused floating point instruction).
16462 bool ARMTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
16463 EVT VT) const {
16464 if (!VT.isSimple())
16465 return false;
16467 switch (VT.getSimpleVT().SimpleTy) {
16468 case MVT::v4f32:
16469 case MVT::v8f16:
16470 return Subtarget->hasMVEFloatOps();
16471 case MVT::f16:
16472 return Subtarget->useFPVFMx16();
16473 case MVT::f32:
16474 return Subtarget->useFPVFMx();
16475 case MVT::f64:
16476 return Subtarget->useFPVFMx64();
16477 default:
16478 break;
16479 }
16481 return false;
16482 }
16484 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
16485 if (V < 0)
16486 return false;
16488 unsigned Scale = 1;
16489 switch (VT.getSimpleVT().SimpleTy) {
16490 case MVT::i1:
16491 case MVT::i8:
16492 // Scale == 1;
16493 break;
16494 case MVT::i16:
16495 // Scale == 2;
16496 Scale = 2;
16497 break;
16498 default:
16499 // On thumb1 we load most things (i32, i64, floats, etc) with a LDR
16500 // which supports Scale == 4;
16501 Scale = 4;
16502 break;
16503 }
16505 if ((V & (Scale - 1)) != 0)
16506 return false;
16507 return isUInt<5>(V / Scale);
16508 }
16510 static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
16511 const ARMSubtarget *Subtarget) {
16512 if (!VT.isInteger() && !VT.isFloatingPoint())
16513 return false;
16514 if (VT.isVector() && Subtarget->hasNEON())
16515 return true;
16516 if (VT.isVector() && VT.isFloatingPoint() && Subtarget->hasMVEIntegerOps() &&
16517 !Subtarget->hasMVEFloatOps())
16518 return false;
16520 bool IsNeg = false;
16521 if (V < 0) {
16522 IsNeg = true;
16523 V = -V;
16524 }
16526 unsigned NumBytes = std::max((unsigned)VT.getSizeInBits() / 8, 1U);
16528 // MVE: size * imm7
16529 if (VT.isVector() && Subtarget->hasMVEIntegerOps()) {
16530 switch (VT.getSimpleVT().getVectorElementType().SimpleTy) {
16531 case MVT::i32:
16532 case MVT::f32:
16533 return isShiftedUInt<7,2>(V);
16534 case MVT::i16:
16535 case MVT::f16:
16536 return isShiftedUInt<7,1>(V);
16537 case MVT::i8:
16538 return isUInt<7>(V);
16539 default:
16540 return false;
16541 }
16542 }
16544 // half VLDR: 2 * imm8
16545 if (VT.isFloatingPoint() && NumBytes == 2 && Subtarget->hasFPRegs16())
16546 return isShiftedUInt<8, 1>(V);
16547 // VLDR and LDRD: 4 * imm8
16548 if ((VT.isFloatingPoint() && Subtarget->hasVFP2Base()) || NumBytes == 8)
16549 return isShiftedUInt<8, 2>(V);
16551 if (NumBytes == 1 || NumBytes == 2 || NumBytes == 4) {
16552 // + imm12 or - imm8
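// Hedged example (added): for a 4-byte scalar access in Thumb-2, +4092 fits
// the imm12 form and -200 fits the negative imm8 form, but -300 does not fit
// either encoding and is rejected below.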
16553 if (IsNeg)
16554 return isUInt<8>(V);
16555 return isUInt<12>(V);
16556 }
16558 return false;
16559 }
16561 /// isLegalAddressImmediate - Return true if the integer value can be used
16562 /// as the offset of the target addressing mode for load / store of the
16564 static bool isLegalAddressImmediate(int64_t V, EVT VT,
16565 const ARMSubtarget *Subtarget) {
16566 if (V == 0)
16567 return true;
16569 if (!VT.isSimple())
16570 return false;
16572 if (Subtarget->isThumb1Only())
16573 return isLegalT1AddressImmediate(V, VT);
16574 else if (Subtarget->isThumb2())
16575 return isLegalT2AddressImmediate(V, VT, Subtarget);
16577 // ARM mode.
16578 if (V < 0)
16579 V = - V;
16580 switch (VT.getSimpleVT().SimpleTy) {
16581 default: return false;
16582 case MVT::i1:
16583 case MVT::i8:
16584 case MVT::i32:
16585 // +- imm12
16586 return isUInt<12>(V);
16587 case MVT::i16:
16588 // +- imm8
16589 return isUInt<8>(V);
16590 case MVT::f32:
16591 case MVT::f64:
16592 if (!Subtarget->hasVFP2Base()) // FIXME: NEON?
16593 return false;
16594 return isShiftedUInt<8, 2>(V);
16595 }
16596 }
16598 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
16599 EVT VT) const {
16600 int Scale = AM.Scale;
16604 switch (VT.getSimpleVT().SimpleTy) {
16605 default: return false;
16613 Scale = Scale & ~1;
16614 return Scale == 2 || Scale == 4 || Scale == 8;
16616 // FIXME: What are we trying to model here? ldrd doesn't have an r + r
16617 // version in Thumb mode.
16621 // r * 2 (this can be lowered to r + r).
16622 if (!AM.HasBaseReg && Scale == 2)
16626 // Note, we allow "void" uses (basically, uses that aren't loads or
16627 // stores), because arm allows folding a scale into many arithmetic
16628 // operations. This should be made more precise and revisited later.
16630 // Allow r << imm, but the imm has to be a multiple of two.
16631 if (Scale & 1) return false;
16632 return isPowerOf2_32(Scale);
16633 }
16634 }
16636 bool ARMTargetLowering::isLegalT1ScaledAddressingMode(const AddrMode &AM,
16637 EVT VT) const {
16638 const int Scale = AM.Scale;
16640 // Negative scales are not supported in Thumb1.
16641 if (Scale < 0)
16642 return false;
16644 // Thumb1 addressing modes do not support register scaling excepting the
16645 // following cases:
16646 // 1. Scale == 1 means no scaling.
16647 // 2. Scale == 2 this can be lowered to r + r if there is no base register.
16648 return (Scale == 1) || (!AM.HasBaseReg && Scale == 2);
16649 }
16651 /// isLegalAddressingMode - Return true if the addressing mode represented
16652 /// by AM is legal for this target, for a load/store of the specified type.
16653 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
16654 const AddrMode &AM, Type *Ty,
16655 unsigned AS, Instruction *I) const {
16656 EVT VT = getValueType(DL, Ty, true);
16657 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
16658 return false;
16660 // Can never fold addr of global into load/store.
16661 if (AM.BaseGV)
16662 return false;
16664 switch (AM.Scale) {
16665 case 0: // no scale reg, must be "r+i" or "r", or "i".
16666 break;
16667 default:
16668 // ARM doesn't support any R+R*scale+imm addr modes.
16669 if (AM.BaseOffs)
16670 return false;
16672 if (!VT.isSimple())
16673 return false;
16675 if (Subtarget->isThumb1Only())
16676 return isLegalT1ScaledAddressingMode(AM, VT);
16678 if (Subtarget->isThumb2())
16679 return isLegalT2ScaledAddressingMode(AM, VT);
16681 int Scale = AM.Scale;
16682 switch (VT.getSimpleVT().SimpleTy) {
16683 default: return false;
16687 if (Scale < 0) Scale = -Scale;
16691 return isPowerOf2_32(Scale & ~1);
16695 if (Scale == 1 || (AM.HasBaseReg && Scale == -1))
16697 // r * 2 (this can be lowered to r + r).
16698 if (!AM.HasBaseReg && Scale == 2)
16703 // Note, we allow "void" uses (basically, uses that aren't loads or
16704 // stores), because arm allows folding a scale into many arithmetic
16705 // operations. This should be made more precise and revisited later.
16707 // Allow r << imm, but the imm has to be a multiple of two.
16708 if (Scale & 1) return false;
16709 return isPowerOf2_32(Scale);
16710 }
16711 }
16712 return true;
16713 }
16715 /// isLegalICmpImmediate - Return true if the specified immediate is legal
16716 /// icmp immediate, that is the target has icmp instructions which can compare
16717 /// a register against the immediate without having to materialize the
16718 /// immediate into a register.
16719 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
16720 // Thumb2 and ARM modes can use cmn for negative immediates.
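// Hedged example (added): -10 is reported as legal on ARM and Thumb-2 because
// although cmp r0, #-10 is not encodable, the negated form cmn r0, #10 is;
// Thumb-1 has no cmn and only 8-bit immediates, so just 0..255 qualify there.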
16721 if (!Subtarget->isThumb())
16722 return ARM_AM::getSOImmVal((uint32_t)Imm) != -1 ||
16723 ARM_AM::getSOImmVal(-(uint32_t)Imm) != -1;
16724 if (Subtarget->isThumb2())
16725 return ARM_AM::getT2SOImmVal((uint32_t)Imm) != -1 ||
16726 ARM_AM::getT2SOImmVal(-(uint32_t)Imm) != -1;
16727 // Thumb1 doesn't have cmn, and only 8-bit immediates.
16728 return Imm >= 0 && Imm <= 255;
16729 }
16731 /// isLegalAddImmediate - Return true if the specified immediate is a legal add
16732 /// *or sub* immediate, that is the target has add or sub instructions which can
16733 /// add a register with the immediate without having to materialize the
16734 /// immediate into a register.
16735 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
16736 // Same encoding for add/sub, just flip the sign.
16737 int64_t AbsImm = std::abs(Imm);
16738 if (!Subtarget->isThumb())
16739 return ARM_AM::getSOImmVal(AbsImm) != -1;
16740 if (Subtarget->isThumb2())
16741 return ARM_AM::getT2SOImmVal(AbsImm) != -1;
16742 // Thumb1 only has 8-bit unsigned immediate.
16743 return AbsImm >= 0 && AbsImm <= 255;
16744 }
16746 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
16747 bool isSEXTLoad, SDValue &Base,
16748 SDValue &Offset, bool &isInc,
16749 SelectionDAG &DAG) {
16750 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
16753 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
16754 // AddressingMode 3
16755 Base = Ptr->getOperand(0);
16756 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
16757 int RHSC = (int)RHS->getZExtValue();
16758 if (RHSC < 0 && RHSC > -256) {
16759 assert(Ptr->getOpcode() == ISD::ADD);
16761 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
16765 isInc = (Ptr->getOpcode() == ISD::ADD);
16766 Offset = Ptr->getOperand(1);
16768 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
16769 // AddressingMode 2
16770 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
16771 int RHSC = (int)RHS->getZExtValue();
16772 if (RHSC < 0 && RHSC > -0x1000) {
16773 assert(Ptr->getOpcode() == ISD::ADD);
16775 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
16776 Base = Ptr->getOperand(0);
16781 if (Ptr->getOpcode() == ISD::ADD) {
16783 ARM_AM::ShiftOpc ShOpcVal=
16784 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
16785 if (ShOpcVal != ARM_AM::no_shift) {
16786 Base = Ptr->getOperand(1);
16787 Offset = Ptr->getOperand(0);
16789 Base = Ptr->getOperand(0);
16790 Offset = Ptr->getOperand(1);
16795 isInc = (Ptr->getOpcode() == ISD::ADD);
16796 Base = Ptr->getOperand(0);
16797 Offset = Ptr->getOperand(1);
16801 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
16805 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
16806 bool isSEXTLoad, SDValue &Base,
16807 SDValue &Offset, bool &isInc,
16808 SelectionDAG &DAG) {
16809 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
16812 Base = Ptr->getOperand(0);
16813 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
16814 int RHSC = (int)RHS->getZExtValue();
16815 if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
16816 assert(Ptr->getOpcode() == ISD::ADD);
16818 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
16820 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
16821 isInc = Ptr->getOpcode() == ISD::ADD;
16822 Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
16830 static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment,
16831 bool isSEXTLoad, bool IsMasked, bool isLE,
16832 SDValue &Base, SDValue &Offset,
16833 bool &isInc, SelectionDAG &DAG) {
16834 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
16835 return false;
16836 if (!isa<ConstantSDNode>(Ptr->getOperand(1)))
16837 return false;
16839 // We allow LE non-masked loads to change the type (for example use a vldrb.8
16840 // as opposed to a vldrw.32). This can allow extra addressing modes or
16841 // alignments for what is otherwise an equivalent instruction.
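// Hedged example (added): an unmasked little-endian v4i32 load at offset +3
// cannot use the 32-bit form (its imm7 offset is scaled by 4), but because
// the type may be changed it can be selected as a byte access, which does
// accept an offset of +3.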
16842 bool CanChangeType = isLE && !IsMasked;
16844 ConstantSDNode *RHS = cast<ConstantSDNode>(Ptr->getOperand(1));
16845 int RHSC = (int)RHS->getZExtValue();
16847 auto IsInRange = [&](int RHSC, int Limit, int Scale) {
16848 if (RHSC < 0 && RHSC > -Limit * Scale && RHSC % Scale == 0) {
16849 assert(Ptr->getOpcode() == ISD::ADD);
16851 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
16853 } else if (RHSC > 0 && RHSC < Limit * Scale && RHSC % Scale == 0) {
16854 isInc = Ptr->getOpcode() == ISD::ADD;
16855 Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
16861 // Try to find a matching instruction based on s/zext, Alignment, Offset and
16862 // (in BE/masked) type.
16863 Base = Ptr->getOperand(0);
16864 if (VT == MVT::v4i16) {
16865 if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2))
16867 } else if (VT == MVT::v4i8 || VT == MVT::v8i8) {
16868 if (IsInRange(RHSC, 0x80, 1))
16870 } else if (Alignment >= 4 &&
16871 (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) &&
16872 IsInRange(RHSC, 0x80, 4))
16874 else if (Alignment >= 2 &&
16875 (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) &&
16876 IsInRange(RHSC, 0x80, 2))
16878 else if ((CanChangeType || VT == MVT::v16i8) && IsInRange(RHSC, 0x80, 1))
16883 /// getPreIndexedAddressParts - returns true by value, base pointer and
16884 /// offset pointer and addressing mode by reference if the node's address
16885 /// can be legally represented as pre-indexed load / store address.
16887 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
16889 ISD::MemIndexedMode &AM,
16890 SelectionDAG &DAG) const {
16891 if (Subtarget->isThumb1Only())
16897 bool isSEXTLoad = false;
16898 bool IsMasked = false;
16899 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
16900 Ptr = LD->getBasePtr();
16901 VT = LD->getMemoryVT();
16902 Alignment = LD->getAlign();
16903 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
16904 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
16905 Ptr = ST->getBasePtr();
16906 VT = ST->getMemoryVT();
16907 Alignment = ST->getAlign();
16908 } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
16909 Ptr = LD->getBasePtr();
16910 VT = LD->getMemoryVT();
16911 Alignment = LD->getAlign();
16912 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
16914 } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
16915 Ptr = ST->getBasePtr();
16916 VT = ST->getMemoryVT();
16917 Alignment = ST->getAlign();
16923 bool isLegal = false;
16925 isLegal = Subtarget->hasMVEIntegerOps() &&
16926 getMVEIndexedAddressParts(
16927 Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked,
16928 Subtarget->isLittle(), Base, Offset, isInc, DAG);
16930 if (Subtarget->isThumb2())
16931 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
16932 Offset, isInc, DAG);
16934 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
16935 Offset, isInc, DAG);
16940 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
16944 /// getPostIndexedAddressParts - returns true by value, base pointer and
16945 /// offset pointer and addressing mode by reference if this node can be
16946 /// combined with a load / store to form a post-indexed load / store.
16947 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
16950 ISD::MemIndexedMode &AM,
16951 SelectionDAG &DAG) const {
16955 bool isSEXTLoad = false, isNonExt;
16956 bool IsMasked = false;
16957 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
16958 VT = LD->getMemoryVT();
16959 Ptr = LD->getBasePtr();
16960 Alignment = LD->getAlign();
16961 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
16962 isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
16963 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
16964 VT = ST->getMemoryVT();
16965 Ptr = ST->getBasePtr();
16966 Alignment = ST->getAlign();
16967 isNonExt = !ST->isTruncatingStore();
16968 } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
16969 VT = LD->getMemoryVT();
16970 Ptr = LD->getBasePtr();
16971 Alignment = LD->getAlign();
16972 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
16973 isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
16975 } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
16976 VT = ST->getMemoryVT();
16977 Ptr = ST->getBasePtr();
16978 Alignment = ST->getAlign();
16979 isNonExt = !ST->isTruncatingStore();
16984 if (Subtarget->isThumb1Only()) {
16985 // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
16986 // must be non-extending/truncating, i32, with an offset of 4.
16987 assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
16988 if (Op->getOpcode() != ISD::ADD || !isNonExt)
16990 auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
16991 if (!RHS || RHS->getZExtValue() != 4)
16994 Offset = Op->getOperand(1);
16995 Base = Op->getOperand(0);
16996 AM = ISD::POST_INC;
17001 bool isLegal = false;
17003 isLegal = Subtarget->hasMVEIntegerOps() &&
17004 getMVEIndexedAddressParts(Op, VT, Alignment, isSEXTLoad, IsMasked,
17005 Subtarget->isLittle(), Base, Offset,
17008 if (Subtarget->isThumb2())
17009 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
17012 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
17019 // Swap base ptr and offset to catch more post-index load / store when
17020 // it's legal. In Thumb2 mode, offset must be an immediate.
17021 if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
17022 !Subtarget->isThumb2())
17023 std::swap(Base, Offset);
17025 // Post-indexed load / store update the base pointer.
17030 AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
17034 void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
17036 const APInt &DemandedElts,
17037 const SelectionDAG &DAG,
17038 unsigned Depth) const {
17039 unsigned BitWidth = Known.getBitWidth();
17041 switch (Op.getOpcode()) {
17047 // Special cases when we convert a carry to a boolean.
17048 if (Op.getResNo() == 0) {
17049 SDValue LHS = Op.getOperand(0);
17050 SDValue RHS = Op.getOperand(1);
17051 // (ADDE 0, 0, C) will give us a single bit.
17052 if (Op->getOpcode() == ARMISD::ADDE && isNullConstant(LHS) &&
17053 isNullConstant(RHS)) {
17054 Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
17059 case ARMISD::CMOV: {
17060 // Bits are known zero/one if known on the LHS and RHS.
17061 Known = DAG.computeKnownBits(Op.getOperand(0), Depth+1);
17062 if (Known.isUnknown())
17065 KnownBits KnownRHS = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
17066 Known.Zero &= KnownRHS.Zero;
17067 Known.One &= KnownRHS.One;
17070 case ISD::INTRINSIC_W_CHAIN: {
17071 ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
17072 Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
17075 case Intrinsic::arm_ldaex:
17076 case Intrinsic::arm_ldrex: {
17077 EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
17078 unsigned MemBits = VT.getScalarSizeInBits();
17079 Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
17084 case ARMISD::BFI: {
17085 // Conservatively, we can recurse down the first operand
17086 // and just mask out all affected bits.
17087 Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
17089 // The operand to BFI is already a mask suitable for removing the bits it
17091 ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
17092 const APInt &Mask = CI->getAPIntValue();
17093 Known.Zero &= Mask;
17097 case ARMISD::VGETLANEs:
17098 case ARMISD::VGETLANEu: {
17099 const SDValue &SrcSV = Op.getOperand(0);
17100 EVT VecVT = SrcSV.getValueType();
17101 assert(VecVT.isVector() && "VGETLANE expected a vector type");
17102 const unsigned NumSrcElts = VecVT.getVectorNumElements();
17103 ConstantSDNode *Pos = cast<ConstantSDNode>(Op.getOperand(1).getNode());
17104 assert(Pos->getAPIntValue().ult(NumSrcElts) &&
17105 "VGETLANE index out of bounds");
17106 unsigned Idx = Pos->getZExtValue();
17107 APInt DemandedElt = APInt::getOneBitSet(NumSrcElts, Idx);
17108 Known = DAG.computeKnownBits(SrcSV, DemandedElt, Depth + 1);
17110 EVT VT = Op.getValueType();
17111 const unsigned DstSz = VT.getScalarSizeInBits();
17112 const unsigned SrcSz = VecVT.getVectorElementType().getSizeInBits();
17114 assert(SrcSz == Known.getBitWidth());
17115 assert(DstSz > SrcSz);
17116 if (Op.getOpcode() == ARMISD::VGETLANEs)
17117 Known = Known.sext(DstSz);
17119 Known = Known.zext(DstSz);
17121 assert(DstSz == Known.getBitWidth());
17124 case ARMISD::VMOVrh: {
17125 KnownBits KnownOp = DAG.computeKnownBits(Op->getOperand(0), Depth + 1);
17126 assert(KnownOp.getBitWidth() == 16);
17127 Known = KnownOp.zext(32);
17133 bool ARMTargetLowering::targetShrinkDemandedConstant(
17134 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
17135 TargetLoweringOpt &TLO) const {
17136 // Delay optimization, so we don't have to deal with illegal types, or block
17137 // optimizations.
17138 if (!TLO.LegalOps)
17139 return false;
17141 // Only optimize AND for now.
17142 if (Op.getOpcode() != ISD::AND)
17143 return false;
17145 EVT VT = Op.getValueType();
17147 // Ignore vectors.
17148 if (VT.isVector())
17149 return false;
17151 assert(VT == MVT::i32 && "Unexpected integer type");
17153 // Make sure the RHS really is a constant.
17154 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
17155 if (!C)
17156 return false;
17158 unsigned Mask = C->getZExtValue();
17160 unsigned Demanded = DemandedBits.getZExtValue();
17161 unsigned ShrunkMask = Mask & Demanded;
17162 unsigned ExpandedMask = Mask | ~Demanded;
17164 // If the mask is all zeros, let the target-independent code replace the
17165 // result with zero.
17166 if (ShrunkMask == 0)
17169 // If the mask is all ones, erase the AND. (Currently, the target-independent
17170 // code won't do this, so we have to do it explicitly to avoid an infinite
17171 // loop in obscure cases.)
17172 if (ExpandedMask == ~0U)
17173 return TLO.CombineTo(Op, Op.getOperand(0));
17175 auto IsLegalMask = [ShrunkMask, ExpandedMask](unsigned Mask) -> bool {
17176 return (ShrunkMask & Mask) == ShrunkMask && (~ExpandedMask & Mask) == 0;
17178 auto UseMask = [Mask, Op, VT, &TLO](unsigned NewMask) -> bool {
17179 if (NewMask == Mask)
17180 return true;
17181 SDLoc DL(Op);
17182 SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
17183 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
17184 return TLO.CombineTo(Op, NewOp);
17185 };
17187 // Prefer uxtb mask.
17188 if (IsLegalMask(0xFF))
17189 return UseMask(0xFF);
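// Hedged worked example (added): for (and x, 0x1ff) where only the low byte
// is demanded, ShrunkMask == 0xFF and ExpandedMask == 0xFFFFFFFF, so
// IsLegalMask(0xFF) holds and the constant is shrunk to 0xFF, which can then
// be selected as a uxtb.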
17191 // Prefer uxth mask.
17192 if (IsLegalMask(0xFFFF))
17193 return UseMask(0xFFFF);
17195 // [1, 255] is Thumb1 movs+ands, legal immediate for ARM/Thumb2.
17196 // FIXME: Prefer a contiguous sequence of bits for other optimizations.
17197 if (ShrunkMask < 256)
17198 return UseMask(ShrunkMask);
17200 // [-256, -2] is Thumb1 movs+bics, legal immediate for ARM/Thumb2.
17201 // FIXME: Prefer a contiguous sequence of bits for other optimizations.
17202 if ((int)ExpandedMask <= -2 && (int)ExpandedMask >= -256)
17203 return UseMask(ExpandedMask);
17205 // Potential improvements:
17207 // We could try to recognize lsls+lsrs or lsrs+lsls pairs here.
17208 // We could try to prefer Thumb1 immediates which can be lowered to a
17209 // two-instruction sequence.
17210 // We could try to recognize more legal ARM/Thumb2 immediates here.
17212 return false;
17213 }
17215 bool ARMTargetLowering::SimplifyDemandedBitsForTargetNode(
17216 SDValue Op, const APInt &OriginalDemandedBits,
17217 const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
17218 unsigned Depth) const {
17219 unsigned Opc = Op.getOpcode();
17221 switch (Opc) {
17222 case ARMISD::ASRL:
17223 case ARMISD::LSRL: {
17224 // If this is result 0 and the other result is unused, see if the demand
17225 // bits allow us to shrink this long shift into a standard small shift in
17226 // the opposite direction.
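// Hedged example (added): for (LSRL lo, hi, 8) where only the top 8 bits of
// result 0 are demanded, those bits come entirely from the other half of the
// 64-bit input (operand 1), so the node can be replaced with a plain
// (SHL hi, 24).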
17227 if (Op.getResNo() == 0 && !Op->hasAnyUseOfValue(1) &&
17228 isa<ConstantSDNode>(Op->getOperand(2))) {
17229 unsigned ShAmt = Op->getConstantOperandVal(2);
17230 if (ShAmt < 32 && OriginalDemandedBits.isSubsetOf(
17231 APInt::getAllOnesValue(32) << (32 - ShAmt)))
17232 return TLO.CombineTo(
17233 Op, TLO.DAG.getNode(
17234 ISD::SHL, SDLoc(Op), MVT::i32, Op.getOperand(1),
17235 TLO.DAG.getConstant(32 - ShAmt, SDLoc(Op), MVT::i32)));
17236 }
17237 break;
17238 }
17239 }
17241 return TargetLowering::SimplifyDemandedBitsForTargetNode(
17242 Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
17245 //===----------------------------------------------------------------------===//
17246 // ARM Inline Assembly Support
17247 //===----------------------------------------------------------------------===//
17249 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
17250 // Looking for "rev" which is V6+.
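// Hedged illustration (added, an assumption about typical usage): inline
// assembly such as
//   asm("rev $0, $1" : "=l"(out) : "l"(in));
// on a 32-bit integer is recognised below and rewritten into a call to the
// llvm.bswap.i32 intrinsic via IntrinsicLowering::LowerToByteSwap.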
17251 if (!Subtarget->hasV6Ops())
17252 return false;
17254 InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
17255 std::string AsmStr = IA->getAsmString();
17256 SmallVector<StringRef, 4> AsmPieces;
17257 SplitString(AsmStr, AsmPieces, ";\n");
17259 switch (AsmPieces.size()) {
17260 default: return false;
17262 AsmStr = std::string(AsmPieces[0]);
17264 SplitString(AsmStr, AsmPieces, " \t,");
17267 if (AsmPieces.size() == 3 &&
17268 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
17269 IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
17270 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
17271 if (Ty && Ty->getBitWidth() == 32)
17272 return IntrinsicLowering::LowerToByteSwap(CI);
17273 }
17274 break;
17275 }
17277 return false;
17278 }
17280 const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
17281 // At this point, we have to lower this constraint to something else, so we
17282 // lower it to an "r" or "w". However, by doing this we will force the result
17283 // to be in register, while the X constraint is much more permissive.
17285 // Although we are correct (we are free to emit anything, without
17286 // constraints), we might break use cases that would expect us to be more
17287 // efficient and emit something else.
17288 if (!Subtarget->hasVFP2Base())
17290 if (ConstraintVT.isFloatingPoint())
17292 if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
17293 (ConstraintVT.getSizeInBits() == 64 ||
17294 ConstraintVT.getSizeInBits() == 128))
17300 /// getConstraintType - Given a constraint letter, return the type of
17301 /// constraint it is for this target.
17302 ARMTargetLowering::ConstraintType
17303 ARMTargetLowering::getConstraintType(StringRef Constraint) const {
17304 unsigned S = Constraint.size();
17306 switch (Constraint[0]) {
17308 case 'l': return C_RegisterClass;
17309 case 'w': return C_RegisterClass;
17310 case 'h': return C_RegisterClass;
17311 case 'x': return C_RegisterClass;
17312 case 't': return C_RegisterClass;
17313 case 'j': return C_Immediate; // Constant for movw.
17314 // An address with a single base register. Due to the way we
17315 // currently handle addresses it is the same as an 'r' memory constraint.
17316 case 'Q': return C_Memory;
17318 } else if (S == 2) {
17319 switch (Constraint[0]) {
17321 case 'T': return C_RegisterClass;
17322 // All 'U+' constraints are addresses.
17323 case 'U': return C_Memory;
17326 return TargetLowering::getConstraintType(Constraint);
17329 /// Examine constraint type and operand type and determine a weight value.
17330 /// This object must already have been set up with the operand type
17331 /// and the current alternative constraint selected.
17332 TargetLowering::ConstraintWeight
17333 ARMTargetLowering::getSingleConstraintMatchWeight(
17334 AsmOperandInfo &info, const char *constraint) const {
17335 ConstraintWeight weight = CW_Invalid;
17336 Value *CallOperandVal = info.CallOperandVal;
17337 // If we don't have a value, we can't do a match,
17338 // but allow it at the lowest weight.
17339 if (!CallOperandVal)
17341 Type *type = CallOperandVal->getType();
17342 // Look at the constraint type.
17343 switch (*constraint) {
17345 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
17348 if (type->isIntegerTy()) {
17349 if (Subtarget->isThumb())
17350 weight = CW_SpecificReg;
17352 weight = CW_Register;
17356 if (type->isFloatingPointTy())
17357 weight = CW_Register;
17363 using RCPair = std::pair<unsigned, const TargetRegisterClass *>;
17365 RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
17366 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
17367 switch (Constraint.size()) {
17369 // GCC ARM Constraint Letters
17370 switch (Constraint[0]) {
17371 case 'l': // Low regs or general regs.
17372 if (Subtarget->isThumb())
17373 return RCPair(0U, &ARM::tGPRRegClass);
17374 return RCPair(0U, &ARM::GPRRegClass);
17375 case 'h': // High regs or no regs.
17376 if (Subtarget->isThumb())
17377 return RCPair(0U, &ARM::hGPRRegClass);
17380 if (Subtarget->isThumb1Only())
17381 return RCPair(0U, &ARM::tGPRRegClass);
17382 return RCPair(0U, &ARM::GPRRegClass);
17384 if (VT == MVT::Other)
17386 if (VT == MVT::f32)
17387 return RCPair(0U, &ARM::SPRRegClass);
17388 if (VT.getSizeInBits() == 64)
17389 return RCPair(0U, &ARM::DPRRegClass);
17390 if (VT.getSizeInBits() == 128)
17391 return RCPair(0U, &ARM::QPRRegClass);
17394 if (VT == MVT::Other)
17396 if (VT == MVT::f32)
17397 return RCPair(0U, &ARM::SPR_8RegClass);
17398 if (VT.getSizeInBits() == 64)
17399 return RCPair(0U, &ARM::DPR_8RegClass);
17400 if (VT.getSizeInBits() == 128)
17401 return RCPair(0U, &ARM::QPR_8RegClass);
17404 if (VT == MVT::Other)
17406 if (VT == MVT::f32 || VT == MVT::i32)
17407 return RCPair(0U, &ARM::SPRRegClass);
17408 if (VT.getSizeInBits() == 64)
17409 return RCPair(0U, &ARM::DPR_VFP2RegClass);
17410 if (VT.getSizeInBits() == 128)
17411 return RCPair(0U, &ARM::QPR_VFP2RegClass);
17417 if (Constraint[0] == 'T') {
17418 switch (Constraint[1]) {
17422 return RCPair(0U, &ARM::tGPREvenRegClass);
17424 return RCPair(0U, &ARM::tGPROddRegClass);
17433 if (StringRef("{cc}").equals_lower(Constraint))
17434 return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);
17436 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
17439 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
17440 /// vector. If it is invalid, don't add anything to Ops.
17441 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
17442 std::string &Constraint,
17443 std::vector<SDValue>&Ops,
17444 SelectionDAG &DAG) const {
17447 // Currently only support length 1 constraints.
17448 if (Constraint.length() != 1) return;
17450 char ConstraintLetter = Constraint[0];
17451 switch (ConstraintLetter) {
17454 case 'I': case 'J': case 'K': case 'L':
17455 case 'M': case 'N': case 'O':
17456 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
17460 int64_t CVal64 = C->getSExtValue();
17461 int CVal = (int) CVal64;
17462 // None of these constraints allow values larger than 32 bits. Check
17463 // that the value fits in an int.
17464 if (CVal != CVal64)
17467 switch (ConstraintLetter) {
17469 // Constant suitable for movw, must be between 0 and
17471 if (Subtarget->hasV6T2Ops() || (Subtarget->hasV8MBaselineOps()))
17472 if (CVal >= 0 && CVal <= 65535)
17476 if (Subtarget->isThumb1Only()) {
17477 // This must be a constant between 0 and 255, for ADD
17479 if (CVal >= 0 && CVal <= 255)
17481 } else if (Subtarget->isThumb2()) {
17482 // A constant that can be used as an immediate value in a
17483 // data-processing instruction.
17484 if (ARM_AM::getT2SOImmVal(CVal) != -1)
17487 // A constant that can be used as an immediate value in a
17488 // data-processing instruction.
17489 if (ARM_AM::getSOImmVal(CVal) != -1)
17495 if (Subtarget->isThumb1Only()) {
17496 // This must be a constant between -255 and -1, for negated ADD
17497 // immediates. This can be used in GCC with an "n" modifier that
17498 // prints the negated value, for use with SUB instructions. It is
17499 // not useful otherwise but is implemented for compatibility.
17500 if (CVal >= -255 && CVal <= -1)
17503 // This must be a constant between -4095 and 4095. It is not clear
17504 // what this constraint is intended for. Implemented for
17505 // compatibility with GCC.
17506 if (CVal >= -4095 && CVal <= 4095)
17512 if (Subtarget->isThumb1Only()) {
17513 // A 32-bit value where only one byte has a nonzero value. Exclude
17514 // zero to match GCC. This constraint is used by GCC internally for
17515 // constants that can be loaded with a move/shift combination.
17516 // It is not useful otherwise but is implemented for compatibility.
17517 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
17519 } else if (Subtarget->isThumb2()) {
17520 // A constant whose bitwise inverse can be used as an immediate
17521 // value in a data-processing instruction. This can be used in GCC
17522 // with a "B" modifier that prints the inverted value, for use with
17523 // BIC and MVN instructions. It is not useful otherwise but is
17524 // implemented for compatibility.
17525 if (ARM_AM::getT2SOImmVal(~CVal) != -1)
17528 // A constant whose bitwise inverse can be used as an immediate
17529 // value in a data-processing instruction. This can be used in GCC
17530 // with a "B" modifier that prints the inverted value, for use with
17531 // BIC and MVN instructions. It is not useful otherwise but is
17532 // implemented for compatibility.
17533 if (ARM_AM::getSOImmVal(~CVal) != -1)
17539 if (Subtarget->isThumb1Only()) {
17540 // This must be a constant between -7 and 7,
17541 // for 3-operand ADD/SUB immediate instructions.
17542 if (CVal >= -7 && CVal < 7)
17544 } else if (Subtarget->isThumb2()) {
17545 // A constant whose negation can be used as an immediate value in a
17546 // data-processing instruction. This can be used in GCC with an "n"
17547 // modifier that prints the negated value, for use with SUB
17548 // instructions. It is not useful otherwise but is implemented for
17550 if (ARM_AM::getT2SOImmVal(-CVal) != -1)
17553 // A constant whose negation can be used as an immediate value in a
17554 // data-processing instruction. This can be used in GCC with an "n"
17555 // modifier that prints the negated value, for use with SUB
17556 // instructions. It is not useful otherwise but is implemented for
17558 if (ARM_AM::getSOImmVal(-CVal) != -1)
17564 if (Subtarget->isThumb1Only()) {
17565 // This must be a multiple of 4 between 0 and 1020, for
17566 // ADD sp + immediate.
17567 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
17570 // A power of two or a constant between 0 and 32. This is used in
17571 // GCC for the shift amount on shifted register operands, but it is
17572 // useful in general for any shift amounts.
17573 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
17579 if (Subtarget->isThumb1Only()) {
17580 // This must be a constant between 0 and 31, for shift amounts.
17581 if (CVal >= 0 && CVal <= 31)
17587 if (Subtarget->isThumb1Only()) {
17588 // This must be a multiple of 4 between -508 and 508, for
17589 // ADD/SUB sp = sp + immediate.
17590 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
17595 Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
17599 if (Result.getNode()) {
17600 Ops.push_back(Result);
17603 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
17606 static RTLIB::Libcall getDivRemLibcall(
17607 const SDNode *N, MVT::SimpleValueType SVT) {
17608 assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
17609 N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) &&
17610 "Unhandled Opcode in getDivRemLibcall");
17611 bool isSigned = N->getOpcode() == ISD::SDIVREM ||
17612 N->getOpcode() == ISD::SREM;
17613 RTLIB::Libcall LC;
17614 switch (SVT) {
17615 default: llvm_unreachable("Unexpected request for libcall!");
17616 case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
17617 case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
17618 case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
17619 case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
17620 }
17622 return LC;
17623 }
17624 static TargetLowering::ArgListTy getDivRemArgList(
17625 const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
17626 assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
17627 N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) &&
17628 "Unhandled Opcode in getDivRemArgList");
17629 bool isSigned = N->getOpcode() == ISD::SDIVREM ||
17630 N->getOpcode() == ISD::SREM;
17631 TargetLowering::ArgListTy Args;
17632 TargetLowering::ArgListEntry Entry;
17633 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
17634 EVT ArgVT = N->getOperand(i).getValueType();
17635 Type *ArgTy = ArgVT.getTypeForEVT(*Context);
17636 Entry.Node = N->getOperand(i);
17637 Entry.Ty = ArgTy;
17638 Entry.IsSExt = isSigned;
17639 Entry.IsZExt = !isSigned;
17640 Args.push_back(Entry);
17642 if (Subtarget->isTargetWindows() && Args.size() >= 2)
17643 std::swap(Args[0], Args[1]);
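// Note: the swap above matches the Windows RT divide helpers, which take the
// divisor as their first argument (the reverse of the AEABI helper order).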
17647 SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
17648 assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
17649 Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
17650 Subtarget->isTargetWindows()) &&
17651 "Register-based DivRem lowering only");
17652 unsigned Opcode = Op->getOpcode();
17653 assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
17654 "Invalid opcode for Div/Rem lowering");
17655 bool isSigned = (Opcode == ISD::SDIVREM);
17656 EVT VT = Op->getValueType(0);
17657 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
17660 // If the target has hardware divide, use divide + multiply + subtract:
17661 //   div = a / b
17662 // rem = a - b * div
17663 // return {div, rem}
17664 // This should be lowered into UDIV/SDIV + MLS later on.
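//
// Roughly (an illustrative sketch of the nodes built below), for i32
// divrem(a, b):
//   div = a / b          (SDIV or UDIV)
//   mul = div * b        (MUL)
//   rem = a - mul        (SUB)
//   merge_values(div, rem)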
17665 bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivideInThumbMode()
17666 : Subtarget->hasDivideInARMMode();
17667 if (hasDivide && Op->getValueType(0).isSimple() &&
17668 Op->getSimpleValueType(0) == MVT::i32) {
17669 unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
17670 const SDValue Dividend = Op->getOperand(0);
17671 const SDValue Divisor = Op->getOperand(1);
17672 SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
17673 SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
17674 SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
17676 SDValue Values[2] = {Div, Rem};
17677 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
17680 RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
17681 VT.getSimpleVT().SimpleTy);
17682 SDValue InChain = DAG.getEntryNode();
17684 TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
17688 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
17689 getPointerTy(DAG.getDataLayout()));
17691 Type *RetTy = StructType::get(Ty, Ty);
17693 if (Subtarget->isTargetWindows())
17694 InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);
17696 TargetLowering::CallLoweringInfo CLI(DAG);
17697 CLI.setDebugLoc(dl).setChain(InChain)
17698 .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
17699 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
17701 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
17702 return CallInfo.first;
17705 // Lowers REM using divmod helpers
17706 // see RTABI section 4.2/4.3
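// For example, an i32 srem is lowered to a call to the signed 32-bit divmod
// helper (typically __aeabi_idivmod under the RTABI), which returns
// {quotient, remainder}; only the remainder half of the result is used here.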
17707 SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
17708 // Build return types (div and rem)
17709 std::vector<Type*> RetTyParams;
17710 Type *RetTyElement;
17712 switch (N->getValueType(0).getSimpleVT().SimpleTy) {
17713 default: llvm_unreachable("Unexpected request for libcall!");
17714 case MVT::i8: RetTyElement = Type::getInt8Ty(*DAG.getContext()); break;
17715 case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
17716 case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
17717 case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
17720 RetTyParams.push_back(RetTyElement);
17721 RetTyParams.push_back(RetTyElement);
17722 ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams);
17723 Type *RetTy = StructType::get(*DAG.getContext(), ret);
17725 RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
17727 SDValue InChain = DAG.getEntryNode();
17728 TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
17730 bool isSigned = N->getOpcode() == ISD::SREM;
17731 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
17732 getPointerTy(DAG.getDataLayout()));
17734 if (Subtarget->isTargetWindows())
17735 InChain = WinDBZCheckDenominator(DAG, N, InChain);
17738 CallLoweringInfo CLI(DAG);
17739 CLI.setChain(InChain)
17740 .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
17741 .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
17742 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
17744 // Return second (rem) result operand (first contains div)
17745 SDNode *ResNode = CallResult.first.getNode();
17746 assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
17747 return ResNode->getOperand(1);
17751 ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
17752 assert(Subtarget->isTargetWindows() && "unsupported target platform");
17756 SDValue Chain = Op.getOperand(0);
17757 SDValue Size = Op.getOperand(1);
17759 if (DAG.getMachineFunction().getFunction().hasFnAttribute(
17760 "no-stack-arg-probe")) {
17762 cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
17763 SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
17764 Chain = SP.getValue(1);
17765 SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);
17768 DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
17769 DAG.getConstant(-(uint64_t)Align->value(), DL, MVT::i32));
17770 Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP);
17771 SDValue Ops[2] = { SP, Chain };
17772 return DAG.getMergeValues(Ops, DL);
17775 SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
17776 DAG.getConstant(2, DL, MVT::i32));
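// The Windows stack-probe helper takes the allocation size in 4-byte words in
// r4 (hence the shift-right-by-2 above); the adjusted stack pointer is read
// back from SP after the WIN__CHKSTK node below.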
17779 Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
17780 Flag = Chain.getValue(1);
17782 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17783 Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);
17785 SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
17786 Chain = NewSP.getValue(1);
17788 SDValue Ops[2] = { NewSP, Chain };
17789 return DAG.getMergeValues(Ops, DL);
17792 SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
17793 bool IsStrict = Op->isStrictFPOpcode();
17794 SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
17795 const unsigned DstSz = Op.getValueType().getSizeInBits();
17796 const unsigned SrcSz = SrcVal.getValueType().getSizeInBits();
17797 assert(DstSz > SrcSz && DstSz <= 64 && SrcSz >= 16 &&
17798 "Unexpected type for custom-lowering FP_EXTEND");
17800 assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
17801 "With both FP DP and 16, any FP conversion is legal!");
17803 assert(!(DstSz == 32 && Subtarget->hasFP16()) &&
17804 "With FP16, 16 to 32 conversion is legal!");
17806 // Converting from 32 -> 64 is valid if we have FP64.
17807 if (SrcSz == 32 && DstSz == 64 && Subtarget->hasFP64()) {
17808 // FIXME: Remove this when we have strict fp instruction selection patterns
17811 SDValue Result = DAG.getNode(ISD::FP_EXTEND,
17812 Loc, Op.getValueType(), SrcVal);
17813 return DAG.getMergeValues({Result, Op.getOperand(0)}, Loc);
17818 // Either we are converting from 16 -> 64, without FP16 and/or
17819 // FP.double-precision or without Armv8-fp. So we must do it in two
17820 // steps.
17821 // Or we are converting from 32 -> 64 without fp.double-precision or 16 -> 32
17822 // without FP16. So we must do a function call.
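//
// Sketch of the resulting chain, assuming neither step is natively supported:
//   f16 -> f32 -> f64
// where each unsupported step becomes the RTLIB::getFPEXT(SrcVT, DstVT)
// libcall and a supported step stays a (STRICT_)FP_EXTEND node.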
17825 MakeLibCallOptions CallOptions;
17826 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
17827 for (unsigned Sz = SrcSz; Sz <= 32 && Sz < DstSz; Sz *= 2) {
17828 bool Supported = (Sz == 16 ? Subtarget->hasFP16() : Subtarget->hasFP64());
17829 MVT SrcVT = (Sz == 16 ? MVT::f16 : MVT::f32);
17830 MVT DstVT = (Sz == 16 ? MVT::f32 : MVT::f64);
17833 SrcVal = DAG.getNode(ISD::STRICT_FP_EXTEND, Loc,
17834 {DstVT, MVT::Other}, {Chain, SrcVal});
17835 Chain = SrcVal.getValue(1);
17837 SrcVal = DAG.getNode(ISD::FP_EXTEND, Loc, DstVT, SrcVal);
17840 LC = RTLIB::getFPEXT(SrcVT, DstVT);
17841 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
17842 "Unexpected type for custom-lowering FP_EXTEND");
17843 std::tie(SrcVal, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
17848 return IsStrict ? DAG.getMergeValues({SrcVal, Chain}, Loc) : SrcVal;
17851 SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
17852 bool IsStrict = Op->isStrictFPOpcode();
17854 SDValue SrcVal = Op.getOperand(IsStrict ? 1 : 0);
17855 EVT SrcVT = SrcVal.getValueType();
17856 EVT DstVT = Op.getValueType();
17857 const unsigned DstSz = Op.getValueType().getSizeInBits();
17858 const unsigned SrcSz = SrcVT.getSizeInBits();
17860 assert(DstSz < SrcSz && SrcSz <= 64 && DstSz >= 16 &&
17861 "Unexpected type for custom-lowering FP_ROUND");
17863 assert((!Subtarget->hasFP64() || !Subtarget->hasFPARMv8Base()) &&
17864 "With both FP DP and 16, any FP conversion is legal!");
17868 // A single instruction handles the 32 -> 16 conversion if we have FP16.
17869 if (SrcSz == 32 && Subtarget->hasFP16())
17872 // Lib call from 32 -> 16 / 64 -> [32, 16]
17873 RTLIB::Libcall LC = RTLIB::getFPROUND(SrcVT, DstVT);
17874 assert(LC != RTLIB::UNKNOWN_LIBCALL &&
17875 "Unexpected type for custom-lowering FP_ROUND");
17876 MakeLibCallOptions CallOptions;
17877 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
17879 std::tie(Result, Chain) = makeLibCall(DAG, LC, DstVT, SrcVal, CallOptions,
17881 return IsStrict ? DAG.getMergeValues({Result, Chain}, Loc) : Result;
17884 void ARMTargetLowering::lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
17885 SelectionDAG &DAG) const {
17886 assert(N->getValueType(0) == MVT::i64 && "Unexpected type (!= i64) on ABS.");
17887 MVT HalfT = MVT::i32;
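// The 64-bit abs is expanded on i32 halves using the usual branchless pattern:
//   sign    = hi >> 31                  (all ones when the input is negative)
//   {lo,hi} = {lo,hi} + {sign,sign}     (UADDO / ADDCARRY below)
//   result  = {lo,hi} ^ {sign,sign}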
17889 SDValue Hi, Lo, Tmp;
17891 if (!isOperationLegalOrCustom(ISD::ADDCARRY, HalfT) ||
17892 !isOperationLegalOrCustom(ISD::UADDO, HalfT))
17895 unsigned OpTypeBits = HalfT.getScalarSizeInBits();
17896 SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);
17898 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
17899 DAG.getConstant(0, dl, HalfT));
17900 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
17901 DAG.getConstant(1, dl, HalfT));
17903 Tmp = DAG.getNode(ISD::SRA, dl, HalfT, Hi,
17904 DAG.getConstant(OpTypeBits - 1, dl,
17905 getShiftAmountTy(HalfT, DAG.getDataLayout())));
17906 Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
17907 Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
17908 SDValue(Lo.getNode(), 1));
17909 Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
17910 Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
17912 Results.push_back(Lo);
17913 Results.push_back(Hi);
17917 ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
17918 // The ARM target isn't yet aware of offsets.
17922 bool ARM::isBitFieldInvertedMask(unsigned v) {
17923 if (v == 0xffffffff)
17926 // There can be 1's on either or both "outsides"; all the "inside"
17927 // bits must be 0's.
17928 return isShiftedMask_32(~v);
17931 /// isFPImmLegal - Returns true if the target can instruction select the
17932 /// specified FP immediate natively. If false, the legalizer will
17933 /// materialize the FP immediate as a load from a constant pool.
17934 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
17935 bool ForCodeSize) const {
17936 if (!Subtarget->hasVFP3Base())
17938 if (VT == MVT::f16 && Subtarget->hasFullFP16())
17939 return ARM_AM::getFP16Imm(Imm) != -1;
17940 if (VT == MVT::f32)
17941 return ARM_AM::getFP32Imm(Imm) != -1;
17942 if (VT == MVT::f64 && Subtarget->hasFP64())
17943 return ARM_AM::getFP64Imm(Imm) != -1;
17947 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
17948 /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
17949 /// specified in the intrinsic calls.
17950 bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
17952 MachineFunction &MF,
17953 unsigned Intrinsic) const {
17954 switch (Intrinsic) {
17955 case Intrinsic::arm_neon_vld1:
17956 case Intrinsic::arm_neon_vld2:
17957 case Intrinsic::arm_neon_vld3:
17958 case Intrinsic::arm_neon_vld4:
17959 case Intrinsic::arm_neon_vld2lane:
17960 case Intrinsic::arm_neon_vld3lane:
17961 case Intrinsic::arm_neon_vld4lane:
17962 case Intrinsic::arm_neon_vld2dup:
17963 case Intrinsic::arm_neon_vld3dup:
17964 case Intrinsic::arm_neon_vld4dup: {
17965 Info.opc = ISD::INTRINSIC_W_CHAIN;
17966 // Conservatively set memVT to the entire set of vectors loaded.
17967 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
17968 uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
17969 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
17970 Info.ptrVal = I.getArgOperand(0);
17972 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
17973 Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
17974 // volatile loads with NEON intrinsics not supported
17975 Info.flags = MachineMemOperand::MOLoad;
17978 case Intrinsic::arm_neon_vld1x2:
17979 case Intrinsic::arm_neon_vld1x3:
17980 case Intrinsic::arm_neon_vld1x4: {
17981 Info.opc = ISD::INTRINSIC_W_CHAIN;
17982 // Conservatively set memVT to the entire set of vectors loaded.
17983 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
17984 uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
17985 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
17986 Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
17988 Info.align.reset();
17989 // volatile loads with NEON intrinsics not supported
17990 Info.flags = MachineMemOperand::MOLoad;
17993 case Intrinsic::arm_neon_vst1:
17994 case Intrinsic::arm_neon_vst2:
17995 case Intrinsic::arm_neon_vst3:
17996 case Intrinsic::arm_neon_vst4:
17997 case Intrinsic::arm_neon_vst2lane:
17998 case Intrinsic::arm_neon_vst3lane:
17999 case Intrinsic::arm_neon_vst4lane: {
18000 Info.opc = ISD::INTRINSIC_VOID;
18001 // Conservatively set memVT to the entire set of vectors stored.
18002 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
18003 unsigned NumElts = 0;
18004 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
18005 Type *ArgTy = I.getArgOperand(ArgI)->getType();
18006 if (!ArgTy->isVectorTy())
18008 NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
18010 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
18011 Info.ptrVal = I.getArgOperand(0);
18013 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
18014 Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
18015 // volatile stores with NEON intrinsics not supported
18016 Info.flags = MachineMemOperand::MOStore;
18019 case Intrinsic::arm_neon_vst1x2:
18020 case Intrinsic::arm_neon_vst1x3:
18021 case Intrinsic::arm_neon_vst1x4: {
18022 Info.opc = ISD::INTRINSIC_VOID;
18023 // Conservatively set memVT to the entire set of vectors stored.
18024 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
18025 unsigned NumElts = 0;
18026 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
18027 Type *ArgTy = I.getArgOperand(ArgI)->getType();
18028 if (!ArgTy->isVectorTy())
18030 NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
18032 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
18033 Info.ptrVal = I.getArgOperand(0);
18035 Info.align.reset();
18036 // volatile stores with NEON intrinsics not supported
18037 Info.flags = MachineMemOperand::MOStore;
18040 case Intrinsic::arm_mve_vld2q:
18041 case Intrinsic::arm_mve_vld4q: {
18042 Info.opc = ISD::INTRINSIC_W_CHAIN;
18043 // Conservatively set memVT to the entire set of vectors loaded.
18044 Type *VecTy = cast<StructType>(I.getType())->getElementType(1);
18045 unsigned Factor = Intrinsic == Intrinsic::arm_mve_vld2q ? 2 : 4;
18046 Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2);
18047 Info.ptrVal = I.getArgOperand(0);
18049 Info.align = Align(VecTy->getScalarSizeInBits() / 8);
18050 // volatile loads with MVE intrinsics not supported
18051 Info.flags = MachineMemOperand::MOLoad;
18054 case Intrinsic::arm_mve_vst2q:
18055 case Intrinsic::arm_mve_vst4q: {
18056 Info.opc = ISD::INTRINSIC_VOID;
18057 // Conservatively set memVT to the entire set of vectors stored.
18058 Type *VecTy = I.getArgOperand(1)->getType();
18059 unsigned Factor = Intrinsic == Intrinsic::arm_mve_vst2q ? 2 : 4;
18060 Info.memVT = EVT::getVectorVT(VecTy->getContext(), MVT::i64, Factor * 2);
18061 Info.ptrVal = I.getArgOperand(0);
18063 Info.align = Align(VecTy->getScalarSizeInBits() / 8);
18064 // volatile stores with MVE intrinsics not supported
18065 Info.flags = MachineMemOperand::MOStore;
18068 case Intrinsic::arm_ldaex:
18069 case Intrinsic::arm_ldrex: {
18070 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
18071 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
18072 Info.opc = ISD::INTRINSIC_W_CHAIN;
18073 Info.memVT = MVT::getVT(PtrTy->getElementType());
18074 Info.ptrVal = I.getArgOperand(0);
18076 Info.align = DL.getABITypeAlign(PtrTy->getElementType());
18077 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
18080 case Intrinsic::arm_stlex:
18081 case Intrinsic::arm_strex: {
18082 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
18083 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
18084 Info.opc = ISD::INTRINSIC_W_CHAIN;
18085 Info.memVT = MVT::getVT(PtrTy->getElementType());
18086 Info.ptrVal = I.getArgOperand(1);
18088 Info.align = DL.getABITypeAlign(PtrTy->getElementType());
18089 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
18092 case Intrinsic::arm_stlexd:
18093 case Intrinsic::arm_strexd:
18094 Info.opc = ISD::INTRINSIC_W_CHAIN;
18095 Info.memVT = MVT::i64;
18096 Info.ptrVal = I.getArgOperand(2);
18098 Info.align = Align(8);
18099 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
18102 case Intrinsic::arm_ldaexd:
18103 case Intrinsic::arm_ldrexd:
18104 Info.opc = ISD::INTRINSIC_W_CHAIN;
18105 Info.memVT = MVT::i64;
18106 Info.ptrVal = I.getArgOperand(0);
18108 Info.align = Align(8);
18109 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile;
18119 /// Returns true if it is beneficial to convert a load of a constant
18120 /// to just the constant itself.
18121 bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
18123 assert(Ty->isIntegerTy());
18125 unsigned Bits = Ty->getPrimitiveSizeInBits();
18126 if (Bits == 0 || Bits > 32)
18131 bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
18132 unsigned Index) const {
18133 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
18136 return (Index == 0 || Index == ResVT.getVectorNumElements());
18139 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
18140 ARM_MB::MemBOpt Domain) const {
18141 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
18143 // First, if the target has no DMB, see what fallback we can use.
18144 if (!Subtarget->hasDataBarrier()) {
18145 // Some ARMv6 cpus can support data barriers with an mcr instruction.
18146 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
18147 // here.
18148 if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
18149 Function *MCR = Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
18150 Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
18151 Builder.getInt32(0), Builder.getInt32(7),
18152 Builder.getInt32(10), Builder.getInt32(5)};
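// These operands encode the ARMv6 CP15 barrier operation
//   mcr p15, 0, <Rt>, c7, c10, 5
// i.e. the legacy Data Memory Barrier.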
18153 return Builder.CreateCall(MCR, args);
18155 // Instead of using barriers, atomic accesses on these subtargets use
18156 // libcalls.
18157 llvm_unreachable("makeDMB on a target so old that it has no barriers");
18160 Function *DMB = Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
18161 // Only a full system barrier exists in the M-class architectures.
18162 Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
18163 Constant *CDomain = Builder.getInt32(Domain);
18164 return Builder.CreateCall(DMB, CDomain);
18168 // Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
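// In short: the leading fence emits nothing for monotonic/acquire, and a DMB
// (ISH, or ISHST where preferred) before release/acq_rel operations and before
// seq_cst atomic stores; the trailing fence below emits a DMB ISH after
// acquire, acq_rel and seq_cst operations. On M-class cores makeDMB upgrades
// the domain to a full-system barrier.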
18169 Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
18171 AtomicOrdering Ord) const {
18173 case AtomicOrdering::NotAtomic:
18174 case AtomicOrdering::Unordered:
18175 llvm_unreachable("Invalid fence: unordered/non-atomic");
18176 case AtomicOrdering::Monotonic:
18177 case AtomicOrdering::Acquire:
18178 return nullptr; // Nothing to do
18179 case AtomicOrdering::SequentiallyConsistent:
18180 if (!Inst->hasAtomicStore())
18181 return nullptr; // Nothing to do
18183 case AtomicOrdering::Release:
18184 case AtomicOrdering::AcquireRelease:
18185 if (Subtarget->preferISHSTBarriers())
18186 return makeDMB(Builder, ARM_MB::ISHST);
18187 // FIXME: add a comment with a link to documentation justifying this.
18189 return makeDMB(Builder, ARM_MB::ISH);
18191 llvm_unreachable("Unknown fence ordering in emitLeadingFence");
18194 Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
18196 AtomicOrdering Ord) const {
18198 case AtomicOrdering::NotAtomic:
18199 case AtomicOrdering::Unordered:
18200 llvm_unreachable("Invalid fence: unordered/not-atomic");
18201 case AtomicOrdering::Monotonic:
18202 case AtomicOrdering::Release:
18203 return nullptr; // Nothing to do
18204 case AtomicOrdering::Acquire:
18205 case AtomicOrdering::AcquireRelease:
18206 case AtomicOrdering::SequentiallyConsistent:
18207 return makeDMB(Builder, ARM_MB::ISH);
18209 llvm_unreachable("Unknown fence ordering in emitTrailingFence");
18212 // Loads and stores of less than 64 bits are already atomic; ones above that
18213 // are doomed anyway, so defer to the default libcall and blame the OS when
18214 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
18215 // anything for those.
18216 bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
18217 unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
18218 return (Size == 64) && !Subtarget->isMClass();
18221 // Loads and stores of less than 64 bits are already atomic; ones above that
18222 // are doomed anyway, so defer to the default libcall and blame the OS when
18223 // things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
18224 // anything for those.
18225 // FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
18226 // guarantee, see DDI0406C ARM architecture reference manual,
18227 // sections A8.8.72-74 LDRD)
18228 TargetLowering::AtomicExpansionKind
18229 ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
18230 unsigned Size = LI->getType()->getPrimitiveSizeInBits();
18231 return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
18232 : AtomicExpansionKind::None;
18235 // For the real atomic operations, we have ldrex/strex up to 32 bits,
18236 // and up to 64 bits on the non-M profiles
18237 TargetLowering::AtomicExpansionKind
18238 ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
18239 if (AI->isFloatingPointOperation())
18240 return AtomicExpansionKind::CmpXChg;
18242 unsigned Size = AI->getType()->getPrimitiveSizeInBits();
18243 bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
18244 return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
18245 ? AtomicExpansionKind::LLSC
18246 : AtomicExpansionKind::None;
18249 TargetLowering::AtomicExpansionKind
18250 ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
18251 // At -O0, fast-regalloc cannot cope with the live vregs necessary to
18252 // implement cmpxchg without spilling. If the address being exchanged is also
18253 // on the stack and close enough to the spill slot, this can lead to a
18254 // situation where the monitor always gets cleared and the atomic operation
18255 // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
18256 bool HasAtomicCmpXchg =
18257 !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
18258 if (getTargetMachine().getOptLevel() != 0 && HasAtomicCmpXchg)
18259 return AtomicExpansionKind::LLSC;
18260 return AtomicExpansionKind::None;
18263 bool ARMTargetLowering::shouldInsertFencesForAtomic(
18264 const Instruction *I) const {
18265 return InsertFencesForAtomic;
18268 // This has so far only been implemented for MachO.
18269 bool ARMTargetLowering::useLoadStackGuardNode() const {
18270 return Subtarget->isTargetMachO();
18273 void ARMTargetLowering::insertSSPDeclarations(Module &M) const {
18274 if (!Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
18275 return TargetLowering::insertSSPDeclarations(M);
18277 // The MSVC CRT has a global variable holding the security cookie.
18278 M.getOrInsertGlobal("__security_cookie",
18279 Type::getInt8PtrTy(M.getContext()));
18281 // The MSVC CRT has a function to validate the security cookie.
18282 FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
18283 "__security_check_cookie", Type::getVoidTy(M.getContext()),
18284 Type::getInt8PtrTy(M.getContext()));
18285 if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee()))
18286 F->addAttribute(1, Attribute::AttrKind::InReg);
18289 Value *ARMTargetLowering::getSDagStackGuard(const Module &M) const {
18290 // The MSVC CRT has a global variable holding the security cookie.
18291 if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
18292 return M.getGlobalVariable("__security_cookie");
18293 return TargetLowering::getSDagStackGuard(M);
18296 Function *ARMTargetLowering::getSSPStackGuardCheck(const Module &M) const {
18297 // The MSVC CRT has a function to validate the security cookie.
18298 if (Subtarget->getTargetTriple().isWindowsMSVCEnvironment())
18299 return M.getFunction("__security_check_cookie");
18300 return TargetLowering::getSSPStackGuardCheck(M);
18303 bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
18304 unsigned &Cost) const {
18305 // If we do not have NEON, vector types are not natively supported.
18306 if (!Subtarget->hasNEON())
18309 // Floating point values and vector values map to the same register file.
18310 // Therefore, although we could do a store extract of a vector type, this is
18311 // better to leave at float as we have more freedom in the addressing mode for
18312 // loads.
18313 if (VectorTy->isFPOrFPVectorTy())
18316 // If the index is unknown at compile time, this is very expensive to lower
18317 // and it is not possible to combine the store with the extract.
18318 if (!isa<ConstantInt>(Idx))
18321 assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
18322 unsigned BitWidth = VectorTy->getPrimitiveSizeInBits().getFixedSize();
18323 // We can do a store + vector extract on any vector that fits perfectly in a D
18324 // or Q register.
18325 if (BitWidth == 64 || BitWidth == 128) {
18332 bool ARMTargetLowering::isCheapToSpeculateCttz() const {
18333 return Subtarget->hasV6T2Ops();
18336 bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
18337 return Subtarget->hasV6T2Ops();
18340 bool ARMTargetLowering::shouldExpandShift(SelectionDAG &DAG, SDNode *N) const {
18341 return !Subtarget->hasMinSize() || Subtarget->isTargetWindows();
18344 Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
18345 AtomicOrdering Ord) const {
18346 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
18347 Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
18348 bool IsAcquire = isAcquireOrStronger(Ord);
18350 // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
18351 // intrinsic must return {i32, i32} and we have to recombine them into a
18352 // single i64 here.
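// Roughly, the IR built below looks like (illustrative, little-endian case):
//   %lohi = call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)
//   %lo   = extractvalue { i32, i32 } %lohi, 0
//   %hi   = extractvalue { i32, i32 } %lohi, 1
//   %val  = or i64 (zext %lo), (shl (zext %hi), 32)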
18353 if (ValTy->getPrimitiveSizeInBits() == 64) {
18354 Intrinsic::ID Int =
18355 IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
18356 Function *Ldrex = Intrinsic::getDeclaration(M, Int);
18358 Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
18359 Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");
18361 Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
18362 Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
18363 if (!Subtarget->isLittle())
18364 std::swap (Lo, Hi);
18365 Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
18366 Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
18367 return Builder.CreateOr(
18368 Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
18371 Type *Tys[] = { Addr->getType() };
18372 Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
18373 Function *Ldrex = Intrinsic::getDeclaration(M, Int, Tys);
18375 return Builder.CreateTruncOrBitCast(
18376 Builder.CreateCall(Ldrex, Addr),
18377 cast<PointerType>(Addr->getType())->getElementType());
18380 void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
18381 IRBuilder<> &Builder) const {
18382 if (!Subtarget->hasV7Ops())
18384 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
18385 Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
18388 Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
18390 AtomicOrdering Ord) const {
18391 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
18392 bool IsRelease = isReleaseOrStronger(Ord);
18394 // Since the intrinsics must have legal type, the i64 intrinsics take two
18395 // parameters: "i32, i32". We must marshal Val into the appropriate form
18396 // before the call.
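// Roughly (illustrative), for a 64-bit value this produces:
//   %lo     = trunc i64 %val to i32
//   %hi     = trunc i64 (lshr %val, 32) to i32
//   %status = call i32 @llvm.arm.strexd(i32 %lo, i32 %hi, i8* %addr)
// where a zero status means the store-conditional succeeded.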
18397 if (Val->getType()->getPrimitiveSizeInBits() == 64) {
18398 Intrinsic::ID Int =
18399 IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
18400 Function *Strex = Intrinsic::getDeclaration(M, Int);
18401 Type *Int32Ty = Type::getInt32Ty(M->getContext());
18403 Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
18404 Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
18405 if (!Subtarget->isLittle())
18407 Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
18408 return Builder.CreateCall(Strex, {Lo, Hi, Addr});
18411 Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
18412 Type *Tys[] = { Addr->getType() };
18413 Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);
18415 return Builder.CreateCall(
18416 Strex, {Builder.CreateZExtOrBitCast(
18417 Val, Strex->getFunctionType()->getParamType(0)),
18422 bool ARMTargetLowering::alignLoopsWithOptSize() const {
18423 return Subtarget->isMClass();
18426 /// A helper function for determining the number of interleaved accesses we
18427 /// will generate when lowering accesses of the given type.
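/// For example, with 128-bit NEON/MVE registers a 512-bit vector type such as
/// <16 x i32> needs (512 + 127) / 128 = 4 interleaved accesses.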
18429 ARMTargetLowering::getNumInterleavedAccesses(VectorType *VecTy,
18430 const DataLayout &DL) const {
18431 return (DL.getTypeSizeInBits(VecTy) + 127) / 128;
18434 bool ARMTargetLowering::isLegalInterleavedAccessType(
18435 unsigned Factor, FixedVectorType *VecTy, const DataLayout &DL) const {
18437 unsigned VecSize = DL.getTypeSizeInBits(VecTy);
18438 unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType());
18440 if (!Subtarget->hasNEON() && !Subtarget->hasMVEIntegerOps())
18443 // Ensure the vector doesn't have f16 elements. Even though we could do an
18444 // i16 vldN, we can't hold the f16 vectors and will end up converting via
18445 // f32.
18446 if (Subtarget->hasNEON() && VecTy->getElementType()->isHalfTy())
18448 if (Subtarget->hasMVEIntegerOps() && Factor == 3)
18451 // Ensure the number of vector elements is greater than 1.
18452 if (VecTy->getNumElements() < 2)
18455 // Ensure the element type is legal.
18456 if (ElSize != 8 && ElSize != 16 && ElSize != 32)
18459 // Ensure the total vector size is 64 or a multiple of 128. Types larger than
18460 // 128 will be split into multiple interleaved accesses.
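// For example, on NEON a 64-bit <2 x i32> maps to a single d-register
// vldN/vstN, while a 256-bit <8 x i32> is accepted here and later split into
// two 128-bit accesses.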
18461 if (Subtarget->hasNEON() && VecSize == 64)
18463 return VecSize % 128 == 0;
18466 unsigned ARMTargetLowering::getMaxSupportedInterleaveFactor() const {
18467 if (Subtarget->hasNEON())
18469 if (Subtarget->hasMVEIntegerOps())
18470 return MVEMaxSupportedInterleaveFactor;
18471 return TargetLoweringBase::getMaxSupportedInterleaveFactor();
18474 /// Lower an interleaved load into a vldN intrinsic.
18476 /// E.g. Lower an interleaved load (Factor = 2):
18477 /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
18478 /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements
18479 /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements
18482 /// %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
18483 /// %vec0 = extractvalue { <4 x i32>, <4 x i32> } %vld2, 0
18484 /// %vec1 = extractvalue { <4 x i32>, <4 x i32> } %vld2, 1
18485 bool ARMTargetLowering::lowerInterleavedLoad(
18486 LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
18487 ArrayRef<unsigned> Indices, unsigned Factor) const {
18488 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
18489 "Invalid interleave factor");
18490 assert(!Shuffles.empty() && "Empty shufflevector input");
18491 assert(Shuffles.size() == Indices.size() &&
18492 "Unmatched number of shufflevectors and indices");
18494 auto *VecTy = cast<FixedVectorType>(Shuffles[0]->getType());
18495 Type *EltTy = VecTy->getElementType();
18497 const DataLayout &DL = LI->getModule()->getDataLayout();
18499 // Skip if we do not have NEON and skip illegal vector types. We can
18500 // "legalize" wide vector types into multiple interleaved accesses as long as
18501 // the vector types are divisible by 128.
18502 if (!isLegalInterleavedAccessType(Factor, VecTy, DL))
18505 unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL);
18507 // A pointer vector cannot be the return type of the ldN intrinsics. We need to
18508 // load integer vectors first and then convert to pointer vectors.
18509 if (EltTy->isPointerTy())
18510 VecTy = FixedVectorType::get(DL.getIntPtrType(EltTy), VecTy);
18512 IRBuilder<> Builder(LI);
18514 // The base address of the load.
18515 Value *BaseAddr = LI->getPointerOperand();
18517 if (NumLoads > 1) {
18518 // If we're going to generate more than one load, reset the sub-vector type
18519 // to something legal.
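// For example, a factor-2 load whose sub-vectors are <8 x i32> (NumLoads == 2)
// is reset to <4 x i32> so each vldN generated below is individually legal.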
18520 VecTy = FixedVectorType::get(VecTy->getElementType(),
18521 VecTy->getNumElements() / NumLoads);
18523 // We will compute the pointer operand of each load from the original base
18524 // address using GEPs. Cast the base address to a pointer to the scalar
18525 // element type.
18526 BaseAddr = Builder.CreateBitCast(
18528 VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace()));
18531 assert(isTypeLegal(EVT::getEVT(VecTy)) && "Illegal vldN vector type!");
18533 auto createLoadIntrinsic = [&](Value *BaseAddr) {
18534 if (Subtarget->hasNEON()) {
18535 Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
18536 Type *Tys[] = {VecTy, Int8Ptr};
18537 static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
18538 Intrinsic::arm_neon_vld3,
18539 Intrinsic::arm_neon_vld4};
18540 Function *VldnFunc =
18541 Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);
18543 SmallVector<Value *, 2> Ops;
18544 Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
18545 Ops.push_back(Builder.getInt32(LI->getAlignment()));
18547 return Builder.CreateCall(VldnFunc, Ops, "vldN");
18549 assert((Factor == 2 || Factor == 4) &&
18550 "expected interleave factor of 2 or 4 for MVE");
18551 Intrinsic::ID LoadInts =
18552 Factor == 2 ? Intrinsic::arm_mve_vld2q : Intrinsic::arm_mve_vld4q;
18554 VecTy->getElementType()->getPointerTo(LI->getPointerAddressSpace());
18555 Type *Tys[] = {VecTy, VecEltTy};
18556 Function *VldnFunc =
18557 Intrinsic::getDeclaration(LI->getModule(), LoadInts, Tys);
18559 SmallVector<Value *, 2> Ops;
18560 Ops.push_back(Builder.CreateBitCast(BaseAddr, VecEltTy));
18561 return Builder.CreateCall(VldnFunc, Ops, "vldN");
18565 // Holds sub-vectors extracted from the load intrinsic return values. The
18566 // sub-vectors are associated with the shufflevector instructions they will
18567 // replace.
18568 DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs;
18570 for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) {
18571 // If we're generating more than one load, compute the base address of
18572 // subsequent loads as an offset from the previous.
18574 BaseAddr = Builder.CreateConstGEP1_32(VecTy->getElementType(), BaseAddr,
18575 VecTy->getNumElements() * Factor);
18577 CallInst *VldN = createLoadIntrinsic(BaseAddr);
18579 // Replace uses of each shufflevector with the corresponding vector loaded
18580 // by the vldN intrinsic.
18581 for (unsigned i = 0; i < Shuffles.size(); i++) {
18582 ShuffleVectorInst *SV = Shuffles[i];
18583 unsigned Index = Indices[i];
18585 Value *SubVec = Builder.CreateExtractValue(VldN, Index);
18587 // Convert the integer vector to pointer vector if the element is pointer.
18588 if (EltTy->isPointerTy())
18589 SubVec = Builder.CreateIntToPtr(
18591 FixedVectorType::get(SV->getType()->getElementType(), VecTy));
18593 SubVecs[SV].push_back(SubVec);
18597 // Replace uses of the shufflevector instructions with the sub-vectors
18598 // returned by the load intrinsic. If a shufflevector instruction is
18599 // associated with more than one sub-vector, those sub-vectors will be
18600 // concatenated into a single wide vector.
18601 for (ShuffleVectorInst *SVI : Shuffles) {
18602 auto &SubVec = SubVecs[SVI];
18604 SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0];
18605 SVI->replaceAllUsesWith(WideVec);
18611 /// Lower an interleaved store into a vstN intrinsic.
18613 /// E.g. Lower an interleaved store (Factor = 3):
18614 /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
18615 /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
18616 /// store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
18619 /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
18620 /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
18621 /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
18622 /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
18624 /// Note that the new shufflevectors will be removed and we'll only generate one
18625 /// vst3 instruction in CodeGen.
18627 /// Example for a more general valid mask (Factor 3). Lower:
18628 /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
18629 /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
18630 /// store <12 x i32> %i.vec, <12 x i32>* %ptr
18633 /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
18634 /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
18635 /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
18636 /// call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
18637 bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
18638 ShuffleVectorInst *SVI,
18639 unsigned Factor) const {
18640 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
18641 "Invalid interleave factor");
18643 auto *VecTy = cast<FixedVectorType>(SVI->getType());
18644 assert(VecTy->getNumElements() % Factor == 0 && "Invalid interleaved store");
18646 unsigned LaneLen = VecTy->getNumElements() / Factor;
18647 Type *EltTy = VecTy->getElementType();
18648 auto *SubVecTy = FixedVectorType::get(EltTy, LaneLen);
18650 const DataLayout &DL = SI->getModule()->getDataLayout();
18652 // Skip if we do not have NEON and skip illegal vector types. We can
18653 // "legalize" wide vector types into multiple interleaved accesses as long as
18654 // the vector types are divisible by 128.
18655 if (!isLegalInterleavedAccessType(Factor, SubVecTy, DL))
18658 unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL);
18660 Value *Op0 = SVI->getOperand(0);
18661 Value *Op1 = SVI->getOperand(1);
18662 IRBuilder<> Builder(SI);
18664 // StN intrinsics don't support pointer vectors as arguments. Convert pointer
18665 // vectors to integer vectors.
18666 if (EltTy->isPointerTy()) {
18667 Type *IntTy = DL.getIntPtrType(EltTy);
18669 // Convert to the corresponding integer vector.
18671 FixedVectorType::get(IntTy, cast<FixedVectorType>(Op0->getType()));
18672 Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
18673 Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);
18675 SubVecTy = FixedVectorType::get(IntTy, LaneLen);
18678 // The base address of the store.
18679 Value *BaseAddr = SI->getPointerOperand();
18681 if (NumStores > 1) {
18682 // If we're going to generate more than one store, reset the lane length
18683 // and sub-vector type to something legal.
18684 LaneLen /= NumStores;
18685 SubVecTy = FixedVectorType::get(SubVecTy->getElementType(), LaneLen);
18687 // We will compute the pointer operand of each store from the original base
18688 // address using GEPs. Cast the base address to a pointer to the scalar
18689 // element type.
18690 BaseAddr = Builder.CreateBitCast(
18692 SubVecTy->getElementType()->getPointerTo(SI->getPointerAddressSpace()));
18695 assert(isTypeLegal(EVT::getEVT(SubVecTy)) && "Illegal vstN vector type!");
18697 auto Mask = SVI->getShuffleMask();
18699 auto createStoreIntrinsic = [&](Value *BaseAddr,
18700 SmallVectorImpl<Value *> &Shuffles) {
18701 if (Subtarget->hasNEON()) {
18702 static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
18703 Intrinsic::arm_neon_vst3,
18704 Intrinsic::arm_neon_vst4};
18705 Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
18706 Type *Tys[] = {Int8Ptr, SubVecTy};
18708 Function *VstNFunc = Intrinsic::getDeclaration(
18709 SI->getModule(), StoreInts[Factor - 2], Tys);
18711 SmallVector<Value *, 6> Ops;
18712 Ops.push_back(Builder.CreateBitCast(BaseAddr, Int8Ptr));
18713 for (auto S : Shuffles)
18715 Ops.push_back(Builder.getInt32(SI->getAlignment()));
18716 Builder.CreateCall(VstNFunc, Ops);
18718 assert((Factor == 2 || Factor == 4) &&
18719 "expected interleave factor of 2 or 4 for MVE");
18720 Intrinsic::ID StoreInts =
18721 Factor == 2 ? Intrinsic::arm_mve_vst2q : Intrinsic::arm_mve_vst4q;
18722 Type *EltPtrTy = SubVecTy->getElementType()->getPointerTo(
18723 SI->getPointerAddressSpace());
18724 Type *Tys[] = {EltPtrTy, SubVecTy};
18725 Function *VstNFunc =
18726 Intrinsic::getDeclaration(SI->getModule(), StoreInts, Tys);
18728 SmallVector<Value *, 6> Ops;
18729 Ops.push_back(Builder.CreateBitCast(BaseAddr, EltPtrTy));
18730 for (auto S : Shuffles)
18732 for (unsigned F = 0; F < Factor; F++) {
18733 Ops.push_back(Builder.getInt32(F));
18734 Builder.CreateCall(VstNFunc, Ops);
18740 for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) {
18741 // If we're generating more than one store, we compute the base address of
18742 // subsequent stores as an offset from the previous.
18743 if (StoreCount > 0)
18744 BaseAddr = Builder.CreateConstGEP1_32(SubVecTy->getElementType(),
18745 BaseAddr, LaneLen * Factor);
18747 SmallVector<Value *, 4> Shuffles;
18749 // Split the shufflevector operands into sub vectors for the new vstN call.
18750 for (unsigned i = 0; i < Factor; i++) {
18751 unsigned IdxI = StoreCount * LaneLen * Factor + i;
18752 if (Mask[IdxI] >= 0) {
18753 Shuffles.push_back(Builder.CreateShuffleVector(
18754 Op0, Op1, createSequentialMask(Mask[IdxI], LaneLen, 0)));
18756 unsigned StartMask = 0;
18757 for (unsigned j = 1; j < LaneLen; j++) {
18758 unsigned IdxJ = StoreCount * LaneLen * Factor + j;
18759 if (Mask[IdxJ * Factor + IdxI] >= 0) {
18760 StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
18764 // Note: If all elements in a chunk are undefs, StartMask=0!
18765 // Note: Filling undef gaps with random elements is ok, since
18766 // those elements were being written anyway (with undefs).
18768 // In the case of all undefs, we default to using elements from 0.
18768 // Note: StartMask cannot be negative, it's checked in
18769 // isReInterleaveMask
18770 Shuffles.push_back(Builder.CreateShuffleVector(
18771 Op0, Op1, createSequentialMask(StartMask, LaneLen, 0)));
18775 createStoreIntrinsic(BaseAddr, Shuffles);
18788 static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
18789 uint64_t &Members) {
18790 if (auto *ST = dyn_cast<StructType>(Ty)) {
18791 for (unsigned i = 0; i < ST->getNumElements(); ++i) {
18792 uint64_t SubMembers = 0;
18793 if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
18795 Members += SubMembers;
18797 } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
18798 uint64_t SubMembers = 0;
18799 if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
18801 Members += SubMembers * AT->getNumElements();
18802 } else if (Ty->isFloatTy()) {
18803 if (Base != HA_UNKNOWN && Base != HA_FLOAT)
18807 } else if (Ty->isDoubleTy()) {
18808 if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
18812 } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
18819 return VT->getPrimitiveSizeInBits().getFixedSize() == 64;
18821 return VT->getPrimitiveSizeInBits().getFixedSize() == 128;
18823 switch (VT->getPrimitiveSizeInBits().getFixedSize()) {
18836 return (Members > 0 && Members <= 4);
18839 /// Return the correct alignment for the current calling convention.
18840 Align ARMTargetLowering::getABIAlignmentForCallingConv(Type *ArgTy,
18841 DataLayout DL) const {
18842 const Align ABITypeAlign = DL.getABITypeAlign(ArgTy);
18843 if (!ArgTy->isVectorTy())
18844 return ABITypeAlign;
18846 // Avoid over-aligning vector parameters. It would require realigning the
18847 // stack and waste space for no real benefit.
18848 return std::min(ABITypeAlign, DL.getStackAlignment());
18851 /// Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
18852 /// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
18853 /// passing according to AAPCS rules.
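/// For example, under AAPCS-VFP a struct of four floats is a homogeneous
/// aggregate and, when VFP registers are available, is passed in consecutive
/// s-registers (e.g. s0-s3), so this returns true for it.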
18854 bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
18855 Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
18856 if (getEffectiveCallingConv(CallConv, isVarArg) !=
18857 CallingConv::ARM_AAPCS_VFP)
18860 HABaseType Base = HA_UNKNOWN;
18861 uint64_t Members = 0;
18862 bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
18863 LLVM_DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());
18865 bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
18866 return IsHA || IsIntArray;
18869 Register ARMTargetLowering::getExceptionPointerRegister(
18870 const Constant *PersonalityFn) const {
18871 // Platforms which do not use SjLj EH may return values in these registers
18872 // via the personality function.
18873 return Subtarget->useSjLjEH() ? Register() : ARM::R0;
18876 Register ARMTargetLowering::getExceptionSelectorRegister(
18877 const Constant *PersonalityFn) const {
18878 // Platforms which do not use SjLj EH may return values in these registers
18879 // via the personality function.
18880 return Subtarget->useSjLjEH() ? Register() : ARM::R1;
18883 void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
18884 // Update IsSplitCSR in ARMFunctionInfo.
18885 ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
18886 AFI->setIsSplitCSR(true);
18889 void ARMTargetLowering::insertCopiesSplitCSR(
18890 MachineBasicBlock *Entry,
18891 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
18892 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
18893 const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
18897 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
18898 MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
18899 MachineBasicBlock::iterator MBBI = Entry->begin();
18900 for (const MCPhysReg *I = IStart; *I; ++I) {
18901 const TargetRegisterClass *RC = nullptr;
18902 if (ARM::GPRRegClass.contains(*I))
18903 RC = &ARM::GPRRegClass;
18904 else if (ARM::DPRRegClass.contains(*I))
18905 RC = &ARM::DPRRegClass;
18907 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
18909 Register NewVR = MRI->createVirtualRegister(RC);
18910 // Create copy from CSR to a virtual register.
18911 // FIXME: this currently does not emit CFI pseudo-instructions; it works
18912 // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
18913 // nounwind. If we want to generalize this later, we may need to emit
18914 // CFI pseudo-instructions.
18915 assert(Entry->getParent()->getFunction().hasFnAttribute(
18916 Attribute::NoUnwind) &&
18917 "Function should be nounwind in insertCopiesSplitCSR!");
18918 Entry->addLiveIn(*I);
18919 BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
18922 // Insert the copy-back instructions right before the terminator.
18923 for (auto *Exit : Exits)
18924 BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
18925 TII->get(TargetOpcode::COPY), *I)
18930 void ARMTargetLowering::finalizeLowering(MachineFunction &MF) const {
18931 MF.getFrameInfo().computeMaxCallFrameSize(MF);
18932 TargetLoweringBase::finalizeLowering(MF);