//===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
15 #include "ARMISelLowering.h"
16 #include "ARMCallingConv.h"
17 #include "ARMConstantPoolValue.h"
18 #include "ARMMachineFunctionInfo.h"
19 #include "ARMPerfectShuffle.h"
20 #include "ARMSubtarget.h"
21 #include "ARMTargetMachine.h"
22 #include "ARMTargetObjectFile.h"
23 #include "MCTargetDesc/ARMAddressingModes.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/ADT/StringExtras.h"
26 #include "llvm/ADT/StringSwitch.h"
27 #include "llvm/CodeGen/CallingConvLower.h"
28 #include "llvm/CodeGen/IntrinsicLowering.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineJumpTableInfo.h"
34 #include "llvm/CodeGen/MachineModuleInfo.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/SelectionDAG.h"
37 #include "llvm/IR/CallingConv.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/DebugInfoMetadata.h"
41 #include "llvm/IR/GlobalValue.h"
42 #include "llvm/IR/IRBuilder.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/Type.h"
48 #include "llvm/MC/MCSectionMachO.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Debug.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include "llvm/Target/TargetOptions.h"
58 #define DEBUG_TYPE "arm-isel"
60 STATISTIC(NumTailCalls, "Number of tail calls");
61 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
62 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
63 STATISTIC(NumConstpoolPromoted,
64 "Number of constants with their storage promoted into constant pools");
67 ARMInterworking("arm-interworking", cl::Hidden,
68 cl::desc("Enable / disable ARM interworking (for debugging only)"),
71 static cl::opt<bool> EnableConstpoolPromotion(
72 "arm-promote-constant", cl::Hidden,
73 cl::desc("Enable / disable promotion of unnamed_addr constants into "
76 static cl::opt<unsigned> ConstpoolPromotionMaxSize(
77 "arm-promote-constant-max-size", cl::Hidden,
78 cl::desc("Maximum size of constant to promote into a constant pool"),
80 static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
81 "arm-promote-constant-max-total", cl::Hidden,
82 cl::desc("Maximum size of ALL constants to promote into a constant pool"),
86 class ARMCCState : public CCState {
88 ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
89 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
91 : CCState(CC, isVarArg, MF, locs, C) {
92 assert(((PC == Call) || (PC == Prologue)) &&
93 "ARMCCState users must specify whether their context is call"
94 "or prologue generation.");
100 void ARMTargetLowering::InitLibcallCallingConvs() {
  // The builtins on ARM always use AAPCS, irrespective of whether C is AAPCS
  // or AAPCS_VFP.
103 for (const auto LC : {
176 RTLIB::FPEXT_F64_F128,
177 RTLIB::FPEXT_F32_F128,
178 RTLIB::FPEXT_F32_F64,
179 RTLIB::FPEXT_F16_F32,
180 RTLIB::FPROUND_F32_F16,
181 RTLIB::FPROUND_F64_F16,
182 RTLIB::FPROUND_F80_F16,
183 RTLIB::FPROUND_F128_F16,
184 RTLIB::FPROUND_F64_F32,
185 RTLIB::FPROUND_F80_F32,
186 RTLIB::FPROUND_F128_F32,
187 RTLIB::FPROUND_F80_F64,
188 RTLIB::FPROUND_F128_F64,
189 RTLIB::FPTOSINT_F32_I32,
190 RTLIB::FPTOSINT_F32_I64,
191 RTLIB::FPTOSINT_F32_I128,
192 RTLIB::FPTOSINT_F64_I32,
193 RTLIB::FPTOSINT_F64_I64,
194 RTLIB::FPTOSINT_F64_I128,
195 RTLIB::FPTOSINT_F80_I32,
196 RTLIB::FPTOSINT_F80_I64,
197 RTLIB::FPTOSINT_F80_I128,
198 RTLIB::FPTOSINT_F128_I32,
199 RTLIB::FPTOSINT_F128_I64,
200 RTLIB::FPTOSINT_F128_I128,
201 RTLIB::FPTOUINT_F32_I32,
202 RTLIB::FPTOUINT_F32_I64,
203 RTLIB::FPTOUINT_F32_I128,
204 RTLIB::FPTOUINT_F64_I32,
205 RTLIB::FPTOUINT_F64_I64,
206 RTLIB::FPTOUINT_F64_I128,
207 RTLIB::FPTOUINT_F80_I32,
208 RTLIB::FPTOUINT_F80_I64,
209 RTLIB::FPTOUINT_F80_I128,
210 RTLIB::FPTOUINT_F128_I32,
211 RTLIB::FPTOUINT_F128_I64,
212 RTLIB::FPTOUINT_F128_I128,
213 RTLIB::SINTTOFP_I32_F32,
214 RTLIB::SINTTOFP_I32_F64,
215 RTLIB::SINTTOFP_I32_F80,
216 RTLIB::SINTTOFP_I32_F128,
217 RTLIB::SINTTOFP_I64_F32,
218 RTLIB::SINTTOFP_I64_F64,
219 RTLIB::SINTTOFP_I64_F80,
220 RTLIB::SINTTOFP_I64_F128,
221 RTLIB::SINTTOFP_I128_F32,
222 RTLIB::SINTTOFP_I128_F64,
223 RTLIB::SINTTOFP_I128_F80,
224 RTLIB::SINTTOFP_I128_F128,
225 RTLIB::UINTTOFP_I32_F32,
226 RTLIB::UINTTOFP_I32_F64,
227 RTLIB::UINTTOFP_I32_F80,
228 RTLIB::UINTTOFP_I32_F128,
229 RTLIB::UINTTOFP_I64_F32,
230 RTLIB::UINTTOFP_I64_F64,
231 RTLIB::UINTTOFP_I64_F80,
232 RTLIB::UINTTOFP_I64_F128,
233 RTLIB::UINTTOFP_I128_F32,
234 RTLIB::UINTTOFP_I128_F64,
235 RTLIB::UINTTOFP_I128_F80,
236 RTLIB::UINTTOFP_I128_F128,
262 setLibcallCallingConv(LC, CallingConv::ARM_AAPCS);
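  // Illustrative sketch (not taken from this file): forcing ARM_AAPCS here
  // means these helpers are always called with their arguments in core
  // registers, even when C code itself uses the hard-float AAPCS_VFP
  // convention. For example, an f64 -> i32 conversion that goes through the
  // runtime library,
  //   int f(double d) { return (int)d; }
  // passes d in r0:r1 (not in d0) to the FPTOSINT_F64_I32 helper and receives
  // the result back in r0.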
265 // The APCS parameter registers.
266 static const MCPhysReg GPRArgRegs[] = {
267 ARM::R0, ARM::R1, ARM::R2, ARM::R3
270 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
271 MVT PromotedBitwiseVT) {
272 if (VT != PromotedLdStVT) {
273 setOperationAction(ISD::LOAD, VT, Promote);
274 AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
276 setOperationAction(ISD::STORE, VT, Promote);
277 AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
280 MVT ElemTy = VT.getVectorElementType();
281 if (ElemTy != MVT::f64)
282 setOperationAction(ISD::SETCC, VT, Custom);
283 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
284 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
285 if (ElemTy == MVT::i32) {
286 setOperationAction(ISD::SINT_TO_FP, VT, Custom);
287 setOperationAction(ISD::UINT_TO_FP, VT, Custom);
288 setOperationAction(ISD::FP_TO_SINT, VT, Custom);
289 setOperationAction(ISD::FP_TO_UINT, VT, Custom);
291 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
292 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
293 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
294 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
296 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
297 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
298 setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
299 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
300 setOperationAction(ISD::SELECT, VT, Expand);
301 setOperationAction(ISD::SELECT_CC, VT, Expand);
302 setOperationAction(ISD::VSELECT, VT, Expand);
303 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
304 if (VT.isInteger()) {
305 setOperationAction(ISD::SHL, VT, Custom);
306 setOperationAction(ISD::SRA, VT, Custom);
307 setOperationAction(ISD::SRL, VT, Custom);
310 // Promote all bit-wise operations.
311 if (VT.isInteger() && VT != PromotedBitwiseVT) {
312 setOperationAction(ISD::AND, VT, Promote);
313 AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
314 setOperationAction(ISD::OR, VT, Promote);
315 AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
316 setOperationAction(ISD::XOR, VT, Promote);
317 AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
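    // Minimal sketch of what "Promote" means here (example types assumed): an
    // AND on v8i8 is rewritten by the legalizer in terms of the promoted type,
    //   (v8i8 (and a, b))
    //     -> (bitcast v8i8 (and (bitcast v2i32 a), (bitcast v2i32 b)))
    // so only one bitwise pattern per register width has to be selected.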
320 // Neon does not support vector divide/remainder operations.
321 setOperationAction(ISD::SDIV, VT, Expand);
322 setOperationAction(ISD::UDIV, VT, Expand);
323 setOperationAction(ISD::FDIV, VT, Expand);
324 setOperationAction(ISD::SREM, VT, Expand);
325 setOperationAction(ISD::UREM, VT, Expand);
326 setOperationAction(ISD::FREM, VT, Expand);
328 if (!VT.isFloatingPoint() &&
329 VT != MVT::v2i64 && VT != MVT::v1i64)
330 for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
331 setOperationAction(Opcode, VT, Legal);
334 void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
335 addRegisterClass(VT, &ARM::DPRRegClass);
336 addTypeForNEON(VT, MVT::f64, MVT::v2i32);
339 void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
340 addRegisterClass(VT, &ARM::DPairRegClass);
341 addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
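// Usage sketch (mirroring the constructor below): 64-bit vector types go
// through addDRTypeForNEON and 128-bit ones through addQRTypeForNEON, e.g.
//   addDRTypeForNEON(MVT::v8i8);   // 64-bit vector, lives in a D register
//   addQRTypeForNEON(MVT::v4i32);  // 128-bit vector, lives in a D-register pair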
344 ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
345 const ARMSubtarget &STI)
346 : TargetLowering(TM), Subtarget(&STI) {
347 RegInfo = Subtarget->getRegisterInfo();
348 Itins = Subtarget->getInstrItineraryData();
350 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
352 InitLibcallCallingConvs();
354 if (Subtarget->isTargetMachO()) {
355 // Uses VFP for Thumb libfuncs if available.
356 if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
357 Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
358 static const struct {
359 const RTLIB::Libcall Op;
360 const char * const Name;
361 const ISD::CondCode Cond;
363 // Single-precision floating-point arithmetic.
364 { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
365 { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
366 { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
367 { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },
369 // Double-precision floating-point arithmetic.
370 { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
371 { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
372 { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
373 { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },
375 // Single-precision comparisons.
376 { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE },
377 { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE },
378 { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE },
379 { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE },
380 { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE },
381 { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE },
382 { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },
383 { RTLIB::O_F32, "__unordsf2vfp", ISD::SETEQ },
385 // Double-precision comparisons.
386 { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE },
387 { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE },
388 { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE },
389 { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE },
390 { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE },
391 { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE },
392 { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },
393 { RTLIB::O_F64, "__unorddf2vfp", ISD::SETEQ },
395 // Floating-point to integer conversions.
396 // i64 conversions are done via library routines even when generating VFP
397 // instructions, so use the same ones.
398 { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID },
399 { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
400 { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID },
401 { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },
403 // Conversions between floating types.
404 { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID },
405 { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID },
407 // Integer to floating-point conversions.
408 // i64 conversions are done via library routines even when generating VFP
409 // instructions, so use the same ones.
410 // FIXME: There appears to be some naming inconsistency in ARM libgcc:
411 // e.g., __floatunsidf vs. __floatunssidfvfp.
412 { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID },
413 { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
414 { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID },
415 { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
418 for (const auto &LC : LibraryCalls) {
419 setLibcallName(LC.Op, LC.Name);
420 if (LC.Cond != ISD::SETCC_INVALID)
421 setCmpLibcallCC(LC.Op, LC.Cond);
425 // Set the correct calling convention for ARMv7k WatchOS. It's just
426 // AAPCS_VFP for functions as simple as libcalls.
427 if (Subtarget->isTargetWatchABI()) {
428 for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i)
429 setLibcallCallingConv((RTLIB::Libcall)i, CallingConv::ARM_AAPCS_VFP);
433 // These libcalls are not available in 32-bit.
434 setLibcallName(RTLIB::SHL_I128, nullptr);
435 setLibcallName(RTLIB::SRL_I128, nullptr);
436 setLibcallName(RTLIB::SRA_I128, nullptr);
439 if (Subtarget->isAAPCS_ABI() &&
440 (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
441 Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
442 static const struct {
443 const RTLIB::Libcall Op;
444 const char * const Name;
445 const CallingConv::ID CC;
446 const ISD::CondCode Cond;
448 // Double-precision floating-point arithmetic helper functions
449 // RTABI chapter 4.1.2, Table 2
450 { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
451 { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
452 { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
453 { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
455 // Double-precision floating-point comparison helper functions
456 // RTABI chapter 4.1.2, Table 3
457 { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
458 { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
459 { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
460 { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
461 { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
462 { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
463 { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
464 { RTLIB::O_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
466 // Single-precision floating-point arithmetic helper functions
467 // RTABI chapter 4.1.2, Table 4
468 { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
469 { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
470 { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
471 { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
473 // Single-precision floating-point comparison helper functions
474 // RTABI chapter 4.1.2, Table 5
475 { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
476 { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
477 { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
478 { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
479 { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
480 { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
481 { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
482 { RTLIB::O_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
484 // Floating-point to integer conversions.
485 // RTABI chapter 4.1.2, Table 6
486 { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
487 { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
488 { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
489 { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
490 { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
491 { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
492 { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
493 { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
495 // Conversions between floating types.
496 // RTABI chapter 4.1.2, Table 7
497 { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
498 { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
499 { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
501 // Integer to floating-point conversions.
502 // RTABI chapter 4.1.2, Table 8
503 { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
504 { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
505 { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
506 { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
507 { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
508 { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
509 { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
510 { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
512 // Long long helper functions
513 // RTABI chapter 4.2, Table 9
514 { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
515 { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
516 { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
517 { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
519 // Integer division functions
520 // RTABI chapter 4.3.1
521 { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
522 { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
523 { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
524 { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
525 { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
526 { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
527 { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
528 { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
531 for (const auto &LC : LibraryCalls) {
532 setLibcallName(LC.Op, LC.Name);
533 setLibcallCallingConv(LC.Op, LC.CC);
534 if (LC.Cond != ISD::SETCC_INVALID)
535 setCmpLibcallCC(LC.Op, LC.Cond);
538 // EABI dependent RTLIB
539 if (TM.Options.EABIVersion == EABI::EABI4 ||
540 TM.Options.EABIVersion == EABI::EABI5) {
541 static const struct {
542 const RTLIB::Libcall Op;
543 const char *const Name;
544 const CallingConv::ID CC;
545 const ISD::CondCode Cond;
546 } MemOpsLibraryCalls[] = {
548 // RTABI chapter 4.3.4
549 { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
550 { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
551 { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
554 for (const auto &LC : MemOpsLibraryCalls) {
555 setLibcallName(LC.Op, LC.Name);
556 setLibcallCallingConv(LC.Op, LC.CC);
557 if (LC.Cond != ISD::SETCC_INVALID)
558 setCmpLibcallCC(LC.Op, LC.Cond);
563 if (Subtarget->isTargetWindows()) {
564 static const struct {
565 const RTLIB::Libcall Op;
566 const char * const Name;
567 const CallingConv::ID CC;
569 { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
570 { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
571 { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
572 { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
573 { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
574 { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
575 { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
576 { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
579 for (const auto &LC : LibraryCalls) {
580 setLibcallName(LC.Op, LC.Name);
581 setLibcallCallingConv(LC.Op, LC.CC);
585 // Use divmod compiler-rt calls for iOS 5.0 and later.
586 if (Subtarget->isTargetWatchOS() ||
587 (Subtarget->isTargetIOS() &&
588 !Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
589 setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
590 setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
593 // The half <-> float conversion functions are always soft-float on
594 // non-watchos platforms, but are needed for some targets which use a
595 // hard-float calling convention by default.
596 if (!Subtarget->isTargetWatchABI()) {
597 if (Subtarget->isAAPCS_ABI()) {
598 setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
599 setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
600 setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
602 setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
603 setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
604 setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
608 // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
609 // a __gnu_ prefix (which is the default).
610 if (Subtarget->isTargetAEABI()) {
611 setLibcallName(RTLIB::FPROUND_F32_F16, "__aeabi_f2h");
612 setLibcallName(RTLIB::FPROUND_F64_F16, "__aeabi_d2h");
613 setLibcallName(RTLIB::FPEXT_F16_F32, "__aeabi_h2f");
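  // For reference (a sketch of the default RTLIB names, not set in this file):
  // non-AEABI targets keep the __gnu_ names mentioned above, i.e. roughly
  //   setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
  //   setLibcallName(RTLIB::FPEXT_F16_F32,   "__gnu_h2f_ieee");
  // which is why only the AEABI case needs overriding here.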
616 if (Subtarget->isThumb1Only())
617 addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
619 addRegisterClass(MVT::i32, &ARM::GPRRegClass);
620 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
621 !Subtarget->isThumb1Only()) {
622 addRegisterClass(MVT::f32, &ARM::SPRRegClass);
623 addRegisterClass(MVT::f64, &ARM::DPRRegClass);
626 for (MVT VT : MVT::vector_valuetypes()) {
627 for (MVT InnerVT : MVT::vector_valuetypes()) {
628 setTruncStoreAction(VT, InnerVT, Expand);
629 setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
630 setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
631 setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
634 setOperationAction(ISD::MULHS, VT, Expand);
635 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
636 setOperationAction(ISD::MULHU, VT, Expand);
637 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
639 setOperationAction(ISD::BSWAP, VT, Expand);
642 setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
643 setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
645 setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
646 setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);
648 if (Subtarget->hasNEON()) {
649 addDRTypeForNEON(MVT::v2f32);
650 addDRTypeForNEON(MVT::v8i8);
651 addDRTypeForNEON(MVT::v4i16);
652 addDRTypeForNEON(MVT::v2i32);
653 addDRTypeForNEON(MVT::v1i64);
655 addQRTypeForNEON(MVT::v4f32);
656 addQRTypeForNEON(MVT::v2f64);
657 addQRTypeForNEON(MVT::v16i8);
658 addQRTypeForNEON(MVT::v8i16);
659 addQRTypeForNEON(MVT::v4i32);
660 addQRTypeForNEON(MVT::v2i64);
662 // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
663 // neither Neon nor VFP support any arithmetic operations on it.
    // The same applies to v4f32, but keep in mind that vadd, vsub and vmul are
    // natively supported for v4f32.
666 setOperationAction(ISD::FADD, MVT::v2f64, Expand);
667 setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
668 setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
669 // FIXME: Code duplication: FDIV and FREM are expanded always, see
670 // ARMTargetLowering::addTypeForNEON method for details.
671 setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
672 setOperationAction(ISD::FREM, MVT::v2f64, Expand);
    // FIXME: Create unittest.
    // In other words, find a way to handle "copysign" when it appears in the
    // DAG with vector operands.
676 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
677 // FIXME: Code duplication: SETCC has custom operation action, see
678 // ARMTargetLowering::addTypeForNEON method for details.
679 setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
680 // FIXME: Create unittest for FNEG and for FABS.
681 setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
682 setOperationAction(ISD::FABS, MVT::v2f64, Expand);
683 setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
684 setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
685 setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
686 setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
687 setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
688 setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
689 setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
690 setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
691 setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
692 setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
693 // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
694 setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
695 setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
696 setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
697 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
698 setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
699 setOperationAction(ISD::FMA, MVT::v2f64, Expand);
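    // Effect of the Expands above (illustrative sketch): a v2f64 fadd such as
    //   %r = fadd <2 x double> %a, %b
    // is scalarized by the legalizer into two f64 adds, each selected as
    // vadd.f64 (or lowered to an __aeabi_dadd/__adddf3 libcall when only
    // soft-float is available).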
701 setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
702 setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
703 setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
704 setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
705 setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
706 setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
707 setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
708 setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
709 setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
710 setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
711 setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
712 setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
713 setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
714 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
715 setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
717 // Mark v2f32 intrinsics.
718 setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
719 setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
720 setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
721 setOperationAction(ISD::FPOWI, MVT::v2f32, Expand);
722 setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
723 setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
724 setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
725 setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
726 setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
727 setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
728 setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
729 setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
730 setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
731 setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
732 setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
734 // Neon does not support some operations on v1i64 and v2i64 types.
735 setOperationAction(ISD::MUL, MVT::v1i64, Expand);
736 // Custom handling for some quad-vector types to detect VMULL.
737 setOperationAction(ISD::MUL, MVT::v8i16, Custom);
738 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
739 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
740 // Custom handling for some vector types to avoid expensive expansions
741 setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
742 setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
743 setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
744 setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
    // Neon does not have single-instruction SINT_TO_FP and UINT_TO_FP with
    // a destination type that is wider than the source, nor does it have a
    // FP_TO_[SU]INT instruction with a destination narrower than the source.
749 setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
750 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
751 setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
752 setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
754 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
755 setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);
757 // NEON does not have single instruction CTPOP for vectors with element
758 // types wider than 8-bits. However, custom lowering can leverage the
759 // v8i8/v16i8 vcnt instruction.
760 setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
761 setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
762 setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
763 setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
764 setOperationAction(ISD::CTPOP, MVT::v1i64, Expand);
765 setOperationAction(ISD::CTPOP, MVT::v2i64, Expand);
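    // Sketch of the custom-lowering idea (the exact sequence is an assumption):
    //   ctpop.v4i16 x  ~>  vcnt.8    d0, d0   ; per-byte population count
    //                      vpaddl.u8 d0, d0   ; pairwise widen to per-i16 counts
    // and the i32 variants add one more vpaddl step.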
767 setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
768 setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);
770 // NEON does not have single instruction CTTZ for vectors.
771 setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
772 setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
773 setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
774 setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);
776 setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
777 setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
778 setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
779 setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);
781 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
782 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
783 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
784 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);
786 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
787 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
788 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
789 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
791 // NEON only has FMA instructions as of VFP4.
792 if (!Subtarget->hasVFP4()) {
793 setOperationAction(ISD::FMA, MVT::v2f32, Expand);
794 setOperationAction(ISD::FMA, MVT::v4f32, Expand);
797 setTargetDAGCombine(ISD::INTRINSIC_VOID);
798 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
799 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
800 setTargetDAGCombine(ISD::SHL);
801 setTargetDAGCombine(ISD::SRL);
802 setTargetDAGCombine(ISD::SRA);
803 setTargetDAGCombine(ISD::SIGN_EXTEND);
804 setTargetDAGCombine(ISD::ZERO_EXTEND);
805 setTargetDAGCombine(ISD::ANY_EXTEND);
806 setTargetDAGCombine(ISD::BUILD_VECTOR);
807 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
808 setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
809 setTargetDAGCombine(ISD::STORE);
810 setTargetDAGCombine(ISD::FP_TO_SINT);
811 setTargetDAGCombine(ISD::FP_TO_UINT);
812 setTargetDAGCombine(ISD::FDIV);
813 setTargetDAGCombine(ISD::LOAD);
815 // It is legal to extload from v4i8 to v4i16 or v4i32.
816 for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
818 for (MVT VT : MVT::integer_vector_valuetypes()) {
819 setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
820 setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
821 setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
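    // Example of why this is profitable (a sketch, assumed selection): a
    // zero-extending load such as
    //   %v = load <4 x i8>, <4 x i8>* %p
    //   %w = zext <4 x i8> %v to <4 x i16>
    // can be selected as a single 32-bit vld1 followed by vmovl.u8, rather
    // than four scalar byte loads and vector inserts.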
826 // ARM and Thumb2 support UMLAL/SMLAL.
827 if (!Subtarget->isThumb1Only())
828 setTargetDAGCombine(ISD::ADDC);
830 if (Subtarget->isFPOnlySP()) {
    // When targeting a floating-point unit with only single-precision
    // operations, f64 is legal for the few double-precision instructions which
    // are present. However, no double-precision operations other than moves,
    // loads and stores are provided by the hardware.
835 setOperationAction(ISD::FADD, MVT::f64, Expand);
836 setOperationAction(ISD::FSUB, MVT::f64, Expand);
837 setOperationAction(ISD::FMUL, MVT::f64, Expand);
838 setOperationAction(ISD::FMA, MVT::f64, Expand);
839 setOperationAction(ISD::FDIV, MVT::f64, Expand);
840 setOperationAction(ISD::FREM, MVT::f64, Expand);
841 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
842 setOperationAction(ISD::FGETSIGN, MVT::f64, Expand);
843 setOperationAction(ISD::FNEG, MVT::f64, Expand);
844 setOperationAction(ISD::FABS, MVT::f64, Expand);
845 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
846 setOperationAction(ISD::FSIN, MVT::f64, Expand);
847 setOperationAction(ISD::FCOS, MVT::f64, Expand);
848 setOperationAction(ISD::FPOWI, MVT::f64, Expand);
849 setOperationAction(ISD::FPOW, MVT::f64, Expand);
850 setOperationAction(ISD::FLOG, MVT::f64, Expand);
851 setOperationAction(ISD::FLOG2, MVT::f64, Expand);
852 setOperationAction(ISD::FLOG10, MVT::f64, Expand);
853 setOperationAction(ISD::FEXP, MVT::f64, Expand);
854 setOperationAction(ISD::FEXP2, MVT::f64, Expand);
855 setOperationAction(ISD::FCEIL, MVT::f64, Expand);
856 setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
857 setOperationAction(ISD::FRINT, MVT::f64, Expand);
858 setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
859 setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
860 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
861 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
862 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
863 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
864 setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
865 setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
866 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
867 setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
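    // Net effect (sketch): on a single-precision-only unit such as FPv4-SP,
    // f64 values can still be moved, loaded and stored in D registers, but
    //   double f(double a, double b) { return a * b; }
    // is lowered to a call to the __aeabi_dmul/__muldf3 runtime helper rather
    // than a vmul.f64.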
870 computeRegisterProperties(Subtarget->getRegisterInfo());
872 // ARM does not have floating-point extending loads.
873 for (MVT VT : MVT::fp_valuetypes()) {
874 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
875 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
878 // ... or truncating stores
879 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
880 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
881 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  // ARM does not have an i1 sign-extending load.
884 for (MVT VT : MVT::integer_valuetypes())
885 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
887 // ARM supports all 4 flavors of integer indexed load / store.
888 if (!Subtarget->isThumb1Only()) {
889 for (unsigned im = (unsigned)ISD::PRE_INC;
890 im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
891 setIndexedLoadAction(im, MVT::i1, Legal);
892 setIndexedLoadAction(im, MVT::i8, Legal);
893 setIndexedLoadAction(im, MVT::i16, Legal);
894 setIndexedLoadAction(im, MVT::i32, Legal);
895 setIndexedStoreAction(im, MVT::i1, Legal);
896 setIndexedStoreAction(im, MVT::i8, Legal);
897 setIndexedStoreAction(im, MVT::i16, Legal);
898 setIndexedStoreAction(im, MVT::i32, Legal);
901 // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
902 setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
903 setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
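  // For illustration (standard ARM addressing modes, not specific to this
  // file): pre- and post-indexed forms fold the pointer update into the
  // memory operation, e.g.
  //   ldr r0, [r1, #4]!   ; pre-indexed:  r1 += 4, then load from [r1]
  //   ldr r0, [r1], #4    ; post-indexed: load from [r1], then r1 += 4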
906 setOperationAction(ISD::SADDO, MVT::i32, Custom);
907 setOperationAction(ISD::UADDO, MVT::i32, Custom);
908 setOperationAction(ISD::SSUBO, MVT::i32, Custom);
909 setOperationAction(ISD::USUBO, MVT::i32, Custom);
911 // i64 operation support.
912 setOperationAction(ISD::MUL, MVT::i64, Expand);
913 setOperationAction(ISD::MULHU, MVT::i32, Expand);
914 if (Subtarget->isThumb1Only()) {
915 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
916 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
918 if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
919 || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
920 setOperationAction(ISD::MULHS, MVT::i32, Expand);
922 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
923 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
924 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
925 setOperationAction(ISD::SRL, MVT::i64, Custom);
926 setOperationAction(ISD::SRA, MVT::i64, Custom);
928 if (!Subtarget->isThumb1Only()) {
929 // FIXME: We should do this for Thumb1 as well.
930 setOperationAction(ISD::ADDC, MVT::i32, Custom);
931 setOperationAction(ISD::ADDE, MVT::i32, Custom);
932 setOperationAction(ISD::SUBC, MVT::i32, Custom);
933 setOperationAction(ISD::SUBE, MVT::i32, Custom);
936 if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
937 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
939 // ARM does not have ROTL.
940 setOperationAction(ISD::ROTL, MVT::i32, Expand);
941 for (MVT VT : MVT::vector_valuetypes()) {
942 setOperationAction(ISD::ROTL, VT, Expand);
943 setOperationAction(ISD::ROTR, VT, Expand);
945 setOperationAction(ISD::CTTZ, MVT::i32, Custom);
946 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
947 if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
948 setOperationAction(ISD::CTLZ, MVT::i32, Expand);
950 // @llvm.readcyclecounter requires the Performance Monitors extension.
951 // Default to the 0 expansion on unsupported platforms.
952 // FIXME: Technically there are older ARM CPUs that have
953 // implementation-specific ways of obtaining this information.
954 if (Subtarget->hasPerfMon())
955 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
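  // Sketch (assumption about the eventual selection): with the PMU available,
  // @llvm.readcyclecounter can be served from the cycle counter register, e.g.
  //   mrc p15, 0, r0, c9, c13, 0   ; read PMCCNTR into r0
  // whereas targets without hasPerfMon() keep the default expansion, which
  // returns zero.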
957 // Only ARMv6 has BSWAP.
958 if (!Subtarget->hasV6Ops())
959 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
961 bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivide()
962 : Subtarget->hasDivideInARMMode();
  // These are expanded into libcalls if the CPU doesn't have a hardware
  // divider.
965 setOperationAction(ISD::SDIV, MVT::i32, LibCall);
966 setOperationAction(ISD::UDIV, MVT::i32, LibCall);
969 if (Subtarget->isTargetWindows() && !Subtarget->hasDivide()) {
970 setOperationAction(ISD::SDIV, MVT::i32, Custom);
971 setOperationAction(ISD::UDIV, MVT::i32, Custom);
973 setOperationAction(ISD::SDIV, MVT::i64, Custom);
974 setOperationAction(ISD::UDIV, MVT::i64, Custom);
977 setOperationAction(ISD::SREM, MVT::i32, Expand);
978 setOperationAction(ISD::UREM, MVT::i32, Expand);
979 // Register based DivRem for AEABI (RTABI 4.2)
980 if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
981 Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
982 Subtarget->isTargetWindows()) {
983 setOperationAction(ISD::SREM, MVT::i64, Custom);
984 setOperationAction(ISD::UREM, MVT::i64, Custom);
985 HasStandaloneRem = false;
987 for (const auto &LC :
988 {RTLIB::SDIVREM_I8, RTLIB::SDIVREM_I16, RTLIB::SDIVREM_I32})
989 setLibcallName(LC, Subtarget->isTargetWindows() ? "__rt_sdiv"
990 : "__aeabi_idivmod");
991 setLibcallName(RTLIB::SDIVREM_I64, Subtarget->isTargetWindows()
993 : "__aeabi_ldivmod");
994 for (const auto &LC :
995 {RTLIB::UDIVREM_I8, RTLIB::UDIVREM_I16, RTLIB::UDIVREM_I32})
996 setLibcallName(LC, Subtarget->isTargetWindows() ? "__rt_udiv"
997 : "__aeabi_uidivmod");
998 setLibcallName(RTLIB::UDIVREM_I64, Subtarget->isTargetWindows()
1000 : "__aeabi_uldivmod");
1002 setLibcallCallingConv(RTLIB::SDIVREM_I8, CallingConv::ARM_AAPCS);
1003 setLibcallCallingConv(RTLIB::SDIVREM_I16, CallingConv::ARM_AAPCS);
1004 setLibcallCallingConv(RTLIB::SDIVREM_I32, CallingConv::ARM_AAPCS);
1005 setLibcallCallingConv(RTLIB::SDIVREM_I64, CallingConv::ARM_AAPCS);
1006 setLibcallCallingConv(RTLIB::UDIVREM_I8, CallingConv::ARM_AAPCS);
1007 setLibcallCallingConv(RTLIB::UDIVREM_I16, CallingConv::ARM_AAPCS);
1008 setLibcallCallingConv(RTLIB::UDIVREM_I32, CallingConv::ARM_AAPCS);
1009 setLibcallCallingConv(RTLIB::UDIVREM_I64, CallingConv::ARM_AAPCS);
1011 setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
1012 setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
1013 setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
1014 setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
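    // Why DivRem is Custom here (sketch): the RT ABI divmod helpers return the
    // quotient and remainder together; __aeabi_idivmod, for example, returns
    // the quotient in r0 and the remainder in r1, so
    //   int q = a / b;  int r = a % b;
    // can be lowered to a single helper call instead of separate division and
    // modulo libcalls.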
1016 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1017 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1020 if (Subtarget->isTargetWindows() && Subtarget->getTargetTriple().isOSMSVCRT())
1021 for (auto &VT : {MVT::f32, MVT::f64})
1022 setOperationAction(ISD::FPOWI, VT, Custom);
1024 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
1025 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
1026 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
1027 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1029 setOperationAction(ISD::TRAP, MVT::Other, Legal);
1031 // Use the default implementation.
1032 setOperationAction(ISD::VASTART, MVT::Other, Custom);
1033 setOperationAction(ISD::VAARG, MVT::Other, Expand);
1034 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
1035 setOperationAction(ISD::VAEND, MVT::Other, Expand);
1036 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
1037 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
1039 if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
1040 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
1042 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
1044 // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
1045 // the default expansion.
1046 InsertFencesForAtomic = false;
1047 if (Subtarget->hasAnyDataBarrier() &&
1048 (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
1049 // ATOMIC_FENCE needs custom lowering; the others should have been expanded
1050 // to ldrex/strex loops already.
1051 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
1052 if (!Subtarget->isThumb() || !Subtarget->isMClass())
1053 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
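    // For reference (generic LL/SC shape, not emitted verbatim here): an i32
    // atomicrmw add expands to a ldrex/strex retry loop along the lines of
    //   1: ldrex   r1, [r0]
    //      add     r1, r1, r2
    //      strex   r3, r1, [r0]
    //      cmp     r3, #0
    //      bne     1b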
1055 // On v8, we have particularly efficient implementations of atomic fences
1056 // if they can be combined with nearby atomic loads and stores.
1057 if (!Subtarget->hasV8Ops() || getTargetMachine().getOptLevel() == 0) {
1058 // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
1059 InsertFencesForAtomic = true;
1062 // If there's anything we can use as a barrier, go through custom lowering
1063 // for ATOMIC_FENCE.
    // If the target has DMB in Thumb mode, fences can be inserted.
1065 if (Subtarget->hasDataBarrier())
1066 InsertFencesForAtomic = true;
1068 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other,
1069 Subtarget->hasAnyDataBarrier() ? Custom : Expand);
1071 // Set them all for expansion, which will force libcalls.
1072 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
1073 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
1074 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
1075 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
1076 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
1077 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
1078 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
1079 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
1080 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
1081 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
1082 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
1083 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
1084 // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
1085 // Unordered/Monotonic case.
1086 if (!InsertFencesForAtomic) {
1087 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1088 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1092 setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
1094 // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
1095 if (!Subtarget->hasV6Ops()) {
1096 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1097 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
1099 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1101 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
1102 !Subtarget->isThumb1Only()) {
    // Turn f64 -> i64 into VMOVRRD and i64 -> f64 into VMOVDRR,
    // iff the target supports VFP2.
1105 setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1106 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
1109 // We want to custom lower some of our intrinsics.
1110 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1111 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
1112 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
1113 setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
1114 if (Subtarget->useSjLjEH())
1115 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
1117 setOperationAction(ISD::SETCC, MVT::i32, Expand);
1118 setOperationAction(ISD::SETCC, MVT::f32, Expand);
1119 setOperationAction(ISD::SETCC, MVT::f64, Expand);
1120 setOperationAction(ISD::SELECT, MVT::i32, Custom);
1121 setOperationAction(ISD::SELECT, MVT::f32, Custom);
1122 setOperationAction(ISD::SELECT, MVT::f64, Custom);
1123 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1124 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1125 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1127 // Thumb-1 cannot currently select ARMISD::SUBE.
1128 if (!Subtarget->isThumb1Only())
1129 setOperationAction(ISD::SETCCE, MVT::i32, Custom);
1131 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1132 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1133 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1134 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1135 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
1137 // We don't support sin/cos/fmod/copysign/pow
1138 setOperationAction(ISD::FSIN, MVT::f64, Expand);
1139 setOperationAction(ISD::FSIN, MVT::f32, Expand);
1140 setOperationAction(ISD::FCOS, MVT::f32, Expand);
1141 setOperationAction(ISD::FCOS, MVT::f64, Expand);
1142 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1143 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1144 setOperationAction(ISD::FREM, MVT::f64, Expand);
1145 setOperationAction(ISD::FREM, MVT::f32, Expand);
1146 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
1147 !Subtarget->isThumb1Only()) {
1148 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
1149 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
1151 setOperationAction(ISD::FPOW, MVT::f64, Expand);
1152 setOperationAction(ISD::FPOW, MVT::f32, Expand);
1154 if (!Subtarget->hasVFP4()) {
1155 setOperationAction(ISD::FMA, MVT::f64, Expand);
1156 setOperationAction(ISD::FMA, MVT::f32, Expand);
1159 // Various VFP goodness
1160 if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
1161 // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
1162 if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) {
1163 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1164 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1167 // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
1168 if (!Subtarget->hasFP16()) {
1169 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1170 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1174 // Combine sin / cos into one node or libcall if possible.
1175 if (Subtarget->hasSinCos()) {
1176 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1177 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1178 if (Subtarget->isTargetWatchABI()) {
1179 setLibcallCallingConv(RTLIB::SINCOS_F32, CallingConv::ARM_AAPCS_VFP);
1180 setLibcallCallingConv(RTLIB::SINCOS_F64, CallingConv::ARM_AAPCS_VFP);
1182 if (Subtarget->isTargetIOS() || Subtarget->isTargetWatchOS()) {
      // For iOS, we don't want the normal expansion of a libcall to sincos;
      // we want to issue a libcall to __sincos_stret instead.
1185 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1186 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
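      // Sketch of the benefit (the ABI details are an assumption): unlike
      // sincos(x, &s, &c), __sincos_stret returns both results from a single
      // call (in registers or via one sret temporary, depending on the ABI),
      // so
      //   s = sin(x); c = cos(x);
      // becomes one call instead of two.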
1190 // FP-ARMv8 implements a lot of rounding-like FP operations.
1191 if (Subtarget->hasFPARMv8()) {
1192 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1193 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1194 setOperationAction(ISD::FROUND, MVT::f32, Legal);
1195 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1196 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1197 setOperationAction(ISD::FRINT, MVT::f32, Legal);
1198 setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
1199 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
1200 setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
1201 setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
1202 setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
1203 setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
1205 if (!Subtarget->isFPOnlySP()) {
1206 setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1207 setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1208 setOperationAction(ISD::FROUND, MVT::f64, Legal);
1209 setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1210 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1211 setOperationAction(ISD::FRINT, MVT::f64, Legal);
1212 setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1213 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1217 if (Subtarget->hasNEON()) {
1218 // vmin and vmax aren't available in a scalar form, so we use
1219 // a NEON instruction with an undef lane instead.
1220 setOperationAction(ISD::FMINNAN, MVT::f32, Legal);
1221 setOperationAction(ISD::FMAXNAN, MVT::f32, Legal);
1222 setOperationAction(ISD::FMINNAN, MVT::v2f32, Legal);
1223 setOperationAction(ISD::FMAXNAN, MVT::v2f32, Legal);
1224 setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal);
1225 setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal);
1228 // We have target-specific dag combine patterns for the following nodes:
1229 // ARMISD::VMOVRRD - No need to call setTargetDAGCombine
1230 setTargetDAGCombine(ISD::ADD);
1231 setTargetDAGCombine(ISD::SUB);
1232 setTargetDAGCombine(ISD::MUL);
1233 setTargetDAGCombine(ISD::AND);
1234 setTargetDAGCombine(ISD::OR);
1235 setTargetDAGCombine(ISD::XOR);
1237 if (Subtarget->hasV6Ops())
1238 setTargetDAGCombine(ISD::SRL);
1240 setStackPointerRegisterToSaveRestore(ARM::SP);
1242 if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
1243 !Subtarget->hasVFP2())
1244 setSchedulingPreference(Sched::RegPressure);
1246 setSchedulingPreference(Sched::Hybrid);
1248 //// temporary - rewrite interface to use type
1249 MaxStoresPerMemset = 8;
1250 MaxStoresPerMemsetOptSize = 4;
1251 MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
1252 MaxStoresPerMemcpyOptSize = 2;
1253 MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
1254 MaxStoresPerMemmoveOptSize = 2;
1256 // On ARM arguments smaller than 4 bytes are extended, so all arguments
1257 // are at least 4 bytes aligned.
1258 setMinStackArgumentAlignment(4);
1260 // Prefer likely predicted branches to selects on out-of-order cores.
1261 PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
1263 setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
1266 bool ARMTargetLowering::useSoftFloat() const {
1267 return Subtarget->useSoftFloat();
// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross-class copies and subregister insertions
// and extractions.
1280 std::pair<const TargetRegisterClass *, uint8_t>
1281 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
1283 const TargetRegisterClass *RRC = nullptr;
1285 switch (VT.SimpleTy) {
1287 return TargetLowering::findRepresentativeClass(TRI, VT);
  // Use DPR as the representative register class for all floating-point and
  // vector types. There are 32 SPR registers and 32 DPR registers, so the
  // cost is 1 for both f32 and f64.
1291 case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
1292 case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
1293 RRC = &ARM::DPRRegClass;
1294 // When NEON is used for SP, only half of the register file is available
1295 // because operations that define both SP and DP results will be constrained
1296 // to the VFP2 class (D0-D15). We currently model this constraint prior to
1297 // coalescing by double-counting the SP regs. See the FIXME above.
1298 if (Subtarget->useNEONForSinglePrecisionFP())
1301 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1302 case MVT::v4f32: case MVT::v2f64:
1303 RRC = &ARM::DPRRegClass;
1307 RRC = &ARM::DPRRegClass;
1311 RRC = &ARM::DPRRegClass;
1315 return std::make_pair(RRC, Cost);
1318 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
1319 switch ((ARMISD::NodeType)Opcode) {
1320 case ARMISD::FIRST_NUMBER: break;
1321 case ARMISD::Wrapper: return "ARMISD::Wrapper";
1322 case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
1323 case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
1324 case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
1325 case ARMISD::CALL: return "ARMISD::CALL";
1326 case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
1327 case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
1328 case ARMISD::BRCOND: return "ARMISD::BRCOND";
1329 case ARMISD::BR_JT: return "ARMISD::BR_JT";
1330 case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
1331 case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
1332 case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG";
1333 case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
1334 case ARMISD::CMP: return "ARMISD::CMP";
1335 case ARMISD::CMN: return "ARMISD::CMN";
1336 case ARMISD::CMPZ: return "ARMISD::CMPZ";
1337 case ARMISD::CMPFP: return "ARMISD::CMPFP";
1338 case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
1339 case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
1340 case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
1342 case ARMISD::CMOV: return "ARMISD::CMOV";
1344 case ARMISD::SSAT: return "ARMISD::SSAT";
1346 case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
1347 case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
1348 case ARMISD::RRX: return "ARMISD::RRX";
1350 case ARMISD::ADDC: return "ARMISD::ADDC";
1351 case ARMISD::ADDE: return "ARMISD::ADDE";
1352 case ARMISD::SUBC: return "ARMISD::SUBC";
1353 case ARMISD::SUBE: return "ARMISD::SUBE";
1355 case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
1356 case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";
1358 case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
1359 case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
1360 case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";
1362 case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";
1364 case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
1366 case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";
1368 case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
1370 case ARMISD::PRELOAD: return "ARMISD::PRELOAD";
1372 case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK";
1373 case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK";
1375 case ARMISD::VCEQ: return "ARMISD::VCEQ";
1376 case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
1377 case ARMISD::VCGE: return "ARMISD::VCGE";
1378 case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
1379 case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
1380 case ARMISD::VCGEU: return "ARMISD::VCGEU";
1381 case ARMISD::VCGT: return "ARMISD::VCGT";
1382 case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
1383 case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
1384 case ARMISD::VCGTU: return "ARMISD::VCGTU";
1385 case ARMISD::VTST: return "ARMISD::VTST";
1387 case ARMISD::VSHL: return "ARMISD::VSHL";
1388 case ARMISD::VSHRs: return "ARMISD::VSHRs";
1389 case ARMISD::VSHRu: return "ARMISD::VSHRu";
1390 case ARMISD::VRSHRs: return "ARMISD::VRSHRs";
1391 case ARMISD::VRSHRu: return "ARMISD::VRSHRu";
1392 case ARMISD::VRSHRN: return "ARMISD::VRSHRN";
1393 case ARMISD::VQSHLs: return "ARMISD::VQSHLs";
1394 case ARMISD::VQSHLu: return "ARMISD::VQSHLu";
1395 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu";
1396 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs";
1397 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu";
1398 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu";
1399 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs";
1400 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu";
1401 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu";
1402 case ARMISD::VSLI: return "ARMISD::VSLI";
1403 case ARMISD::VSRI: return "ARMISD::VSRI";
1404 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
1405 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
1406 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
1407 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
1408 case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM";
1409 case ARMISD::VDUP: return "ARMISD::VDUP";
1410 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
1411 case ARMISD::VEXT: return "ARMISD::VEXT";
1412 case ARMISD::VREV64: return "ARMISD::VREV64";
1413 case ARMISD::VREV32: return "ARMISD::VREV32";
1414 case ARMISD::VREV16: return "ARMISD::VREV16";
1415 case ARMISD::VZIP: return "ARMISD::VZIP";
1416 case ARMISD::VUZP: return "ARMISD::VUZP";
1417 case ARMISD::VTRN: return "ARMISD::VTRN";
1418 case ARMISD::VTBL1: return "ARMISD::VTBL1";
1419 case ARMISD::VTBL2: return "ARMISD::VTBL2";
1420 case ARMISD::VMULLs: return "ARMISD::VMULLs";
1421 case ARMISD::VMULLu: return "ARMISD::VMULLu";
1422 case ARMISD::UMAAL: return "ARMISD::UMAAL";
1423 case ARMISD::UMLAL: return "ARMISD::UMLAL";
1424 case ARMISD::SMLAL: return "ARMISD::SMLAL";
1425 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
1426 case ARMISD::BFI: return "ARMISD::BFI";
1427 case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
1428 case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
1429 case ARMISD::VBSL: return "ARMISD::VBSL";
1430 case ARMISD::MEMCPY: return "ARMISD::MEMCPY";
1431 case ARMISD::VLD1DUP: return "ARMISD::VLD1DUP";
1432 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
1433 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
1434 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
1435 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
1436 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
1437 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
1438 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
1439 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
1440 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
1441 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
1442 case ARMISD::VLD1DUP_UPD: return "ARMISD::VLD1DUP_UPD";
1443 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
1444 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
1445 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
1446 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
1447 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
1448 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
1449 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
1450 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
1451 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
1452 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
1457 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1460 return getPointerTy(DL);
1461 return VT.changeVectorElementTypeToInteger();
1464 /// getRegClassFor - Return the register class that should be used for the
1465 /// specified value type.
1466 const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
1467 // Map v4i64 to QQ registers but do not make the type legal. Similarly map
1468 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
1469 // load / store 4 to 8 consecutive D registers.
1470 if (Subtarget->hasNEON()) {
1471 if (VT == MVT::v4i64)
1472 return &ARM::QQPRRegClass;
1473 if (VT == MVT::v8i64)
1474 return &ARM::QQQQPRRegClass;
1476 return TargetLowering::getRegClassFor(VT);
1479 // memcpy and other memory intrinsics typically try to use LDM/STM if the
1480 // source/dest is aligned and the copy size is large enough. We therefore want
1481 // to align such objects passed to memory intrinsics.
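// Sketch of the intended effect: when a sufficiently large object (at least
// MinSize bytes) is passed to llvm.memcpy and friends, the caller of this hook
// may raise that object's alignment to PrefAlign so the expansion can use
// aligned LDM/STM.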
1482 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
1483 unsigned &PrefAlign) const {
1484 if (!isa<MemIntrinsic>(CI))
1487 // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
1488 // cycle faster than 4-byte aligned LDM.
1489 PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
1493 // Create a fast isel object.
1495 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
1496 const TargetLibraryInfo *libInfo) const {
1497 return ARM::createFastISel(funcInfo, libInfo);
1500 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
1501 unsigned NumVals = N->getNumValues();
1503 return Sched::RegPressure;
1505 for (unsigned i = 0; i != NumVals; ++i) {
1506 EVT VT = N->getValueType(i);
1507 if (VT == MVT::Glue || VT == MVT::Other)
1509 if (VT.isFloatingPoint() || VT.isVector())
1513 if (!N->isMachineOpcode())
1514 return Sched::RegPressure;
1516 // Loads are scheduled for latency even if the instruction itinerary
1517 // is not available.
1518 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1519 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1521 if (MCID.getNumDefs() == 0)
1522 return Sched::RegPressure;
1523 if (!Itins->isEmpty() &&
1524 Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
1527 return Sched::RegPressure;
1530 //===----------------------------------------------------------------------===//
1532 //===----------------------------------------------------------------------===//
1534 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
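/// For example, the signed compare SETLT selects LT while the unsigned SETULT
/// selects LO (unsigned lower), matching the cases below.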
1535 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
1537 default: llvm_unreachable("Unknown condition code!");
1538 case ISD::SETNE: return ARMCC::NE;
1539 case ISD::SETEQ: return ARMCC::EQ;
1540 case ISD::SETGT: return ARMCC::GT;
1541 case ISD::SETGE: return ARMCC::GE;
1542 case ISD::SETLT: return ARMCC::LT;
1543 case ISD::SETLE: return ARMCC::LE;
1544 case ISD::SETUGT: return ARMCC::HI;
1545 case ISD::SETUGE: return ARMCC::HS;
1546 case ISD::SETULT: return ARMCC::LO;
1547 case ISD::SETULE: return ARMCC::LS;
1551 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
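/// Some FP conditions need two ARM condition codes; when CondCode2 != AL the
/// caller must test both, e.g. SETONE is realized as "MI or GT" and SETUEQ as
/// "EQ or VS" per the mapping below.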
1552 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
1553 ARMCC::CondCodes &CondCode2) {
1554 CondCode2 = ARMCC::AL;
1556 default: llvm_unreachable("Unknown FP condition!");
1558 case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
1560 case ISD::SETOGT: CondCode = ARMCC::GT; break;
1562 case ISD::SETOGE: CondCode = ARMCC::GE; break;
1563 case ISD::SETOLT: CondCode = ARMCC::MI; break;
1564 case ISD::SETOLE: CondCode = ARMCC::LS; break;
1565 case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
1566 case ISD::SETO: CondCode = ARMCC::VC; break;
1567 case ISD::SETUO: CondCode = ARMCC::VS; break;
1568 case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
1569 case ISD::SETUGT: CondCode = ARMCC::HI; break;
1570 case ISD::SETUGE: CondCode = ARMCC::PL; break;
1572 case ISD::SETULT: CondCode = ARMCC::LT; break;
1574 case ISD::SETULE: CondCode = ARMCC::LE; break;
1576 case ISD::SETUNE: CondCode = ARMCC::NE; break;
1580 //===----------------------------------------------------------------------===//
1581 // Calling Convention Implementation
1582 //===----------------------------------------------------------------------===//
1584 #include "ARMGenCallingConv.inc"
1586 /// getEffectiveCallingConv - Get the effective calling convention, taking into
1587 /// account the presence of floating-point hardware and calling convention
1588 /// limitations, such as support for variadic functions.
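/// For example (per the mapping below), a non-variadic fastcc call on an AAPCS
/// target with VFP2 and not Thumb1-only is treated as ARM_AAPCS_VFP, so FP
/// arguments can be passed in VFP registers (s0-s15 / d0-d7) rather than in
/// core registers.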
1590 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
1591 bool isVarArg) const {
1594 llvm_unreachable("Unsupported calling convention");
1595 case CallingConv::ARM_AAPCS:
1596 case CallingConv::ARM_APCS:
1597 case CallingConv::GHC:
1599 case CallingConv::PreserveMost:
1600 return CallingConv::PreserveMost;
1601 case CallingConv::ARM_AAPCS_VFP:
1602 case CallingConv::Swift:
1603 return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
1604 case CallingConv::C:
1605 if (!Subtarget->isAAPCS_ABI())
1606 return CallingConv::ARM_APCS;
1607 else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() &&
1608 getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
1610 return CallingConv::ARM_AAPCS_VFP;
1612 return CallingConv::ARM_AAPCS;
1613 case CallingConv::Fast:
1614 case CallingConv::CXX_FAST_TLS:
1615 if (!Subtarget->isAAPCS_ABI()) {
1616 if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
1617 return CallingConv::Fast;
1618 return CallingConv::ARM_APCS;
1619 } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
1620 return CallingConv::ARM_AAPCS_VFP;
1622 return CallingConv::ARM_AAPCS;
1626 CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1627 bool isVarArg) const {
1628 return CCAssignFnForNode(CC, false, isVarArg);
1631 CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1632 bool isVarArg) const {
1633 return CCAssignFnForNode(CC, true, isVarArg);
1636 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
1637 /// CallingConvention.
1638 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
1640 bool isVarArg) const {
1641 switch (getEffectiveCallingConv(CC, isVarArg)) {
1643 llvm_unreachable("Unsupported calling convention");
1644 case CallingConv::ARM_APCS:
1645 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1646 case CallingConv::ARM_AAPCS:
1647 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1648 case CallingConv::ARM_AAPCS_VFP:
1649 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1650 case CallingConv::Fast:
1651 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1652 case CallingConv::GHC:
1653 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
1654 case CallingConv::PreserveMost:
1655 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1659 /// LowerCallResult - Lower the result values of a call into the
1660 /// appropriate copies out of appropriate physical registers.
1661 SDValue ARMTargetLowering::LowerCallResult(
1662 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
1663 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1664 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
1665 SDValue ThisVal) const {
1667 // Assign locations to each value returned by this call.
1668 SmallVector<CCValAssign, 16> RVLocs;
1669 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1670 *DAG.getContext(), Call);
1671 CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));
1673 // Copy all of the result registers out of their specified physreg.
1674 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1675 CCValAssign VA = RVLocs[i];
1677 // Pass 'this' value directly from the argument to return value, to avoid
1678 // reg unit interference
1679 if (i == 0 && isThisReturn) {
1680 assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
1681 "unexpected return calling convention register assignment");
1682 InVals.push_back(ThisVal);
1687 if (VA.needsCustom()) {
1688 // Handle f64 or half of a v2f64.
1689 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
1691 Chain = Lo.getValue(1);
1692 InFlag = Lo.getValue(2);
1693 VA = RVLocs[++i]; // skip ahead to next loc
1694 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
1696 Chain = Hi.getValue(1);
1697 InFlag = Hi.getValue(2);
1698 if (!Subtarget->isLittle())
1700 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
1702 if (VA.getLocVT() == MVT::v2f64) {
1703 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
1704 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
1705 DAG.getConstant(0, dl, MVT::i32));
1707 VA = RVLocs[++i]; // skip ahead to next loc
1708 Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
1709 Chain = Lo.getValue(1);
1710 InFlag = Lo.getValue(2);
1711 VA = RVLocs[++i]; // skip ahead to next loc
1712 Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
1713 Chain = Hi.getValue(1);
1714 InFlag = Hi.getValue(2);
1715 if (!Subtarget->isLittle())
1717 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
1718 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
1719 DAG.getConstant(1, dl, MVT::i32));
1722 Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1724 Chain = Val.getValue(1);
1725 InFlag = Val.getValue(2);
1728 switch (VA.getLocInfo()) {
1729 default: llvm_unreachable("Unknown loc info!");
1730 case CCValAssign::Full: break;
1731 case CCValAssign::BCvt:
1732 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
1736 InVals.push_back(Val);
1742 /// LowerMemOpCallTo - Store the argument to the stack.
1743 SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
1744 SDValue Arg, const SDLoc &dl,
1746 const CCValAssign &VA,
1747 ISD::ArgFlagsTy Flags) const {
1748 unsigned LocMemOffset = VA.getLocMemOffset();
1749 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
1750 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
1752 return DAG.getStore(
1753 Chain, dl, Arg, PtrOff,
1754 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
1757 void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
1758 SDValue Chain, SDValue &Arg,
1759 RegsToPassVector &RegsToPass,
1760 CCValAssign &VA, CCValAssign &NextVA,
1762 SmallVectorImpl<SDValue> &MemOpChains,
1763 ISD::ArgFlagsTy Flags) const {
1765 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
1766 DAG.getVTList(MVT::i32, MVT::i32), Arg);
1767 unsigned id = Subtarget->isLittle() ? 0 : 1;
1768 RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
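// The VMOVRRD above yields the low half of the f64 as result 0 and the high
// half as result 1; on little-endian targets the first register receives the
// low half, on big-endian the high half, which is what 'id' selects.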
1770 if (NextVA.isRegLoc())
1771 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
1773 assert(NextVA.isMemLoc());
1774 if (!StackPtr.getNode())
1775 StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
1776 getPointerTy(DAG.getDataLayout()));
1778 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
1784 /// LowerCall - Lower a call into a callseq_start <-
1785 /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
1788 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1789 SmallVectorImpl<SDValue> &InVals) const {
1790 SelectionDAG &DAG = CLI.DAG;
1792 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1793 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1794 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1795 SDValue Chain = CLI.Chain;
1796 SDValue Callee = CLI.Callee;
1797 bool &isTailCall = CLI.IsTailCall;
1798 CallingConv::ID CallConv = CLI.CallConv;
1799 bool doesNotRet = CLI.DoesNotReturn;
1800 bool isVarArg = CLI.IsVarArg;
1802 MachineFunction &MF = DAG.getMachineFunction();
1803 bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
1804 bool isThisReturn = false;
1805 bool isSibCall = false;
1806 auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
1808 // Disable tail calls if they're not supported.
1809 if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
1813 // Check if it's really possible to do a tail call.
1814 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1815 isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
1816 Outs, OutVals, Ins, DAG);
1817 if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
1818 report_fatal_error("failed to perform tail call elimination on a call "
1819 "site marked musttail");
1820 // We don't support GuaranteedTailCallOpt for ARM, only automatically
1821 // detected sibcalls.
1828 // Analyze operands of the call, assigning locations to each operand.
1829 SmallVector<CCValAssign, 16> ArgLocs;
1830 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1831 *DAG.getContext(), Call);
1832 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));
1834 // Get a count of how many bytes are to be pushed on the stack.
1835 unsigned NumBytes = CCInfo.getNextStackOffset();
1837 // For tail calls, memory operands are available in our caller's stack.
1841 // Adjust the stack pointer for the new arguments...
1842 // These operations are automatically eliminated by the prolog/epilog pass
1844 Chain = DAG.getCALLSEQ_START(Chain,
1845 DAG.getIntPtrConstant(NumBytes, dl, true), dl);
1848 DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));
1850 RegsToPassVector RegsToPass;
1851 SmallVector<SDValue, 8> MemOpChains;
1853 // Walk the register/memloc assignments, inserting copies/loads. In the case
1854 // of tail call optimization, arguments are handled later.
1855 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
1857 ++i, ++realArgIdx) {
1858 CCValAssign &VA = ArgLocs[i];
1859 SDValue Arg = OutVals[realArgIdx];
1860 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
1861 bool isByVal = Flags.isByVal();
1863 // Promote the value if needed.
1864 switch (VA.getLocInfo()) {
1865 default: llvm_unreachable("Unknown loc info!");
1866 case CCValAssign::Full: break;
1867 case CCValAssign::SExt:
1868 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1870 case CCValAssign::ZExt:
1871 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1873 case CCValAssign::AExt:
1874 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1876 case CCValAssign::BCvt:
1877 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1881 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
1882 if (VA.needsCustom()) {
1883 if (VA.getLocVT() == MVT::v2f64) {
1884 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1885 DAG.getConstant(0, dl, MVT::i32));
1886 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1887 DAG.getConstant(1, dl, MVT::i32));
1889 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
1890 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1892 VA = ArgLocs[++i]; // skip ahead to next loc
1893 if (VA.isRegLoc()) {
1894 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
1895 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1897 assert(VA.isMemLoc());
1899 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
1900 dl, DAG, VA, Flags));
1903 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
1904 StackPtr, MemOpChains, Flags);
1906 } else if (VA.isRegLoc()) {
1907 if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) {
1908 assert(VA.getLocVT() == MVT::i32 &&
1909 "unexpected calling convention register assignment");
1910 assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
1911 "unexpected use of 'returned'");
1912 isThisReturn = true;
1914 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1915 } else if (isByVal) {
1916 assert(VA.isMemLoc());
1917 unsigned offset = 0;
1919 // True if this byval aggregate will be split between registers
1921 unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
1922 unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
1924 if (CurByValIdx < ByValArgsCount) {
1926 unsigned RegBegin, RegEnd;
1927 CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
1930 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
1932 for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
1933 SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
1934 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
1935 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
1936 MachinePointerInfo(),
1937 DAG.InferPtrAlignment(AddArg));
1938 MemOpChains.push_back(Load.getValue(1));
1939 RegsToPass.push_back(std::make_pair(j, Load));
1942 // If the parameter size exceeds the register area, the "offset" value
1943 // helps us calculate the stack slot for the remaining part properly.
1944 offset = RegEnd - RegBegin;
1946 CCInfo.nextInRegsParam();
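// At this point the register-resident part of the byval argument has been
// loaded word by word into [RegBegin, RegEnd); any tail that did not fit in
// registers is copied to the outgoing stack area below with
// ARMISD::COPY_STRUCT_BYVAL.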
1949 if (Flags.getByValSize() > 4*offset) {
1950 auto PtrVT = getPointerTy(DAG.getDataLayout());
1951 unsigned LocMemOffset = VA.getLocMemOffset();
1952 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
1953 SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
1954 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
1955 SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
1956 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
1958 SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl,
1961 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
1962 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
1963 MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
1966 } else if (!isSibCall) {
1967 assert(VA.isMemLoc());
1969 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
1970 dl, DAG, VA, Flags));
1974 if (!MemOpChains.empty())
1975 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
1977 // Build a sequence of copy-to-reg nodes chained together with token chain
1978 // and flag operands which copy the outgoing args into the appropriate regs.
1980 // Tail call byval lowering might overwrite argument registers so in case of
1981 // tail call optimization the copies to registers are lowered later.
1983 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
1984 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
1985 RegsToPass[i].second, InFlag);
1986 InFlag = Chain.getValue(1);
1989 // For tail calls lower the arguments to the 'real' stack slot.
1991 // Force all the incoming stack arguments to be loaded from the stack
1992 // before any new outgoing arguments are stored to the stack, because the
1993 // outgoing stack slots may alias the incoming argument stack slots, and
1994 // the alias isn't otherwise explicit. This is slightly more conservative
1995 // than necessary, because it means that each store effectively depends
1996 // on every argument instead of just those arguments it would clobber.
1998 // Do not flag preceding copytoreg stuff together with the following stuff.
2000 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2001 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2002 RegsToPass[i].second, InFlag);
2003 InFlag = Chain.getValue(1);
2008 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
2009 // direct call is), turn it into a TargetGlobalAddress/TargetExternalSymbol
2010 // node so that legalize doesn't hack it.
2011 bool isDirect = false;
2013 const TargetMachine &TM = getTargetMachine();
2014 const Module *Mod = MF.getFunction()->getParent();
2015 const GlobalValue *GV = nullptr;
2016 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2017 GV = G->getGlobal();
2019 !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();
2021 bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
2022 bool isLocalARMFunc = false;
2023 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2024 auto PtrVt = getPointerTy(DAG.getDataLayout());
2026 if (Subtarget->genLongCalls()) {
2027 assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
2028 "long-calls codegen is not position independent!");
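// Under long calls the callee address is materialized from the constant pool
// and the call is made indirectly through a register, so the target is not
// limited to the range of a direct BL/BLX branch.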
2029 // Handle a global address or an external symbol. If it's not one of
2030 // those, the target's already in a register, so we don't need to do
2032 if (isa<GlobalAddressSDNode>(Callee)) {
2033 // Create a constant pool entry for the callee address
2034 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2035 ARMConstantPoolValue *CPV =
2036 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
2038 // Get the address of the callee into a register
2039 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2040 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2041 Callee = DAG.getLoad(
2042 PtrVt, dl, DAG.getEntryNode(), CPAddr,
2043 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2044 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
2045 const char *Sym = S->getSymbol();
2047 // Create a constant pool entry for the callee address
2048 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2049 ARMConstantPoolValue *CPV =
2050 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2051 ARMPCLabelIndex, 0);
2052 // Get the address of the callee into a register
2053 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2054 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2055 Callee = DAG.getLoad(
2056 PtrVt, dl, DAG.getEntryNode(), CPAddr,
2057 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2059 } else if (isa<GlobalAddressSDNode>(Callee)) {
2060 // If we're optimizing for minimum size and the function is called three or
2061 // more times in this block, we can improve codesize by calling indirectly
2062 // as BLXr has a 16-bit encoding.
2063 auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
2064 auto *BB = CLI.CS->getParent();
2065 bool PreferIndirect =
2066 Subtarget->isThumb() && MF.getFunction()->optForMinSize() &&
2067 count_if(GV->users(), [&BB](const User *U) {
2068 return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB;
2071 if (!PreferIndirect) {
2073 bool isDef = GV->isStrongDefinitionForLinker();
2075 // ARM call to a local ARM function is predicable.
2076 isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
2077 // tBX takes a register source operand.
2078 if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2079 assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
2080 Callee = DAG.getNode(
2081 ARMISD::WrapperPIC, dl, PtrVt,
2082 DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
2083 Callee = DAG.getLoad(
2084 PtrVt, dl, DAG.getEntryNode(), Callee,
2085 MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2086 /* Alignment = */ 0, MachineMemOperand::MODereferenceable |
2087 MachineMemOperand::MOInvariant);
2088 } else if (Subtarget->isTargetCOFF()) {
2089 assert(Subtarget->isTargetWindows() &&
2090 "Windows is the only supported COFF target");
2091 unsigned TargetFlags = GV->hasDLLImportStorageClass()
2092 ? ARMII::MO_DLLIMPORT
2093 : ARMII::MO_NO_FLAG;
2094 Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0,
2096 if (GV->hasDLLImportStorageClass())
2098 DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
2099 DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
2100 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2102 Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
2105 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2107 // tBX takes a register source operand.
2108 const char *Sym = S->getSymbol();
2109 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2110 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2111 ARMConstantPoolValue *CPV =
2112 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2113 ARMPCLabelIndex, 4);
2114 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2115 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2116 Callee = DAG.getLoad(
2117 PtrVt, dl, DAG.getEntryNode(), CPAddr,
2118 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2119 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2120 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
2122 Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
2126 // FIXME: handle tail calls differently.
2128 if (Subtarget->isThumb()) {
2129 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
2130 CallOpc = ARMISD::CALL_NOLINK;
2132 CallOpc = ARMISD::CALL;
2134 if (!isDirect && !Subtarget->hasV5TOps())
2135 CallOpc = ARMISD::CALL_NOLINK;
2136 else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
2137 // Emit regular call when code size is the priority
2138 !MF.getFunction()->optForMinSize())
2139 // "mov lr, pc; b _foo" to avoid confusing the RSP
2140 CallOpc = ARMISD::CALL_NOLINK;
2142 CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
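// Sketch of the choices above: cores without ARMv5T have no BLX, so indirect
// (and interworking) calls fall back to CALL_NOLINK, with LR set up separately
// (e.g. "mov lr, pc" followed by a branch); calls known to target local ARM
// code can use the predicable CALL_PRED form.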
2145 std::vector<SDValue> Ops;
2146 Ops.push_back(Chain);
2147 Ops.push_back(Callee);
2149 // Add argument registers to the end of the list so that they are known live
2151 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2152 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2153 RegsToPass[i].second.getValueType()));
2155 // Add a register mask operand representing the call-preserved registers.
2157 const uint32_t *Mask;
2158 const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
2160 // For 'this' returns, use the R0-preserving mask if applicable
2161 Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
2163 // Set isThisReturn to false if the calling convention is not one that
2164 // allows 'returned' to be modeled in this way, so LowerCallResult does
2165 // not try to pass 'this' straight through
2166 isThisReturn = false;
2167 Mask = ARI->getCallPreservedMask(MF, CallConv);
2170 Mask = ARI->getCallPreservedMask(MF, CallConv);
2172 assert(Mask && "Missing call preserved mask for calling convention");
2173 Ops.push_back(DAG.getRegisterMask(Mask));
2176 if (InFlag.getNode())
2177 Ops.push_back(InFlag);
2179 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2181 MF.getFrameInfo().setHasTailCall();
2182 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
2185 // Returns a chain and a flag for retval copy to use.
2186 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
2187 InFlag = Chain.getValue(1);
2189 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
2190 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
2192 InFlag = Chain.getValue(1);
2194 // Handle result values, copying them out of physregs into vregs that we
2196 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
2197 InVals, isThisReturn,
2198 isThisReturn ? OutVals[0] : SDValue());
2201 /// HandleByVal - Every parameter *after* a byval parameter is passed
2202 /// on the stack. Remember the next parameter register to allocate,
2203 /// and then confiscate the rest of the parameter registers to ensure
2205 void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
2206 unsigned Align) const {
2207 assert((State->getCallOrPrologue() == Prologue ||
2208 State->getCallOrPrologue() == Call) &&
2209 "unhandled ParmContext");
2211 // Byval (as with any stack) slots are always at least 4 byte aligned.
2212 Align = std::max(Align, 4U);
2214 unsigned Reg = State->AllocateReg(GPRArgRegs);
2218 unsigned AlignInRegs = Align / 4;
2219 unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
2220 for (unsigned i = 0; i < Waste; ++i)
2221 Reg = State->AllocateReg(GPRArgRegs);
2226 unsigned Excess = 4 * (ARM::R4 - Reg);
2228 // Special case when NSAA != SP and the parameter size is greater than the
2229 // size of all remaining GPR regs. In that case we can't split the parameter;
2230 // we must send it to the stack. We must also set NCRN to R4, wasting all
2231 // remaining registers.
2232 const unsigned NSAAOffset = State->getNextStackOffset();
2233 if (NSAAOffset != 0 && Size > Excess) {
2234 while (State->AllocateReg(GPRArgRegs))
2239 // The first register for the byval parameter is the first register that
2240 // wasn't allocated before this method call, so it is "reg".
2241 // If the parameter is small enough to fit in the range [reg, r4), then the
2242 // end (one past the last) register is reg + param-size-in-regs; otherwise
2243 // the parameter is split between registers and the stack, and the end
2244 // register is r4.
2245 unsigned ByValRegBegin = Reg;
2246 unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
2247 State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
2248 // Note that the first register was already allocated at the beginning of the
2249 // function; allocate the remaining registers we need.
2250 for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
2251 State->AllocateReg(GPRArgRegs);
2252 // A byval parameter that is split between registers and memory needs its
2253 // size truncated here.
2254 // In the case where the entire structure fits in registers, we set the
2255 // size in memory to zero.
2256 Size = std::max<int>(Size - Excess, 0);
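// Worked example (sketch, assuming nothing is on the stack yet): Size = 20
// bytes, Align = 4, and r1 is the first free register. Registers r1-r3 take
// the first 12 bytes (Excess = 12), Size is reduced to 8, and those remaining
// 8 bytes are passed on the stack.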
2259 /// MatchingStackOffset - Return true if the given stack call argument is
2260 /// already available in the same (relative) position in the caller's
2261 /// incoming argument stack.
2263 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2264 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
2265 const TargetInstrInfo *TII) {
2266 unsigned Bytes = Arg.getValueSizeInBits() / 8;
2268 if (Arg.getOpcode() == ISD::CopyFromReg) {
2269 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2270 if (!TargetRegisterInfo::isVirtualRegister(VR))
2272 MachineInstr *Def = MRI->getVRegDef(VR);
2275 if (!Flags.isByVal()) {
2276 if (!TII->isLoadFromStackSlot(*Def, FI))
2281 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2282 if (Flags.isByVal())
2283 // ByVal argument is passed in as a pointer but it's now being
2284 // dereferenced. e.g.
2285 // define @foo(%struct.X* %A) {
2286 // tail call @bar(%struct.X* byval %A)
2289 SDValue Ptr = Ld->getBasePtr();
2290 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
2293 FI = FINode->getIndex();
2297 assert(FI != INT_MAX);
2298 if (!MFI.isFixedObjectIndex(FI))
2300 return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
2303 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2304 /// for tail call optimization. Targets which want to do tail call
2305 /// optimization should implement this function.
2307 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
2308 CallingConv::ID CalleeCC,
2310 bool isCalleeStructRet,
2311 bool isCallerStructRet,
2312 const SmallVectorImpl<ISD::OutputArg> &Outs,
2313 const SmallVectorImpl<SDValue> &OutVals,
2314 const SmallVectorImpl<ISD::InputArg> &Ins,
2315 SelectionDAG& DAG) const {
2316 MachineFunction &MF = DAG.getMachineFunction();
2317 const Function *CallerF = MF.getFunction();
2318 CallingConv::ID CallerCC = CallerF->getCallingConv();
2320 assert(Subtarget->supportsTailCall());
2322 // Look for obvious safe cases to perform tail call optimization that do not
2323 // require ABI changes. This is what gcc calls sibcall.
2325 // Exception-handling functions need a special set of instructions to indicate
2326 // a return to the hardware. Tail-calling another function would probably
2328 if (CallerF->hasFnAttribute("interrupt"))
2331 // Also avoid sibcall optimization if either caller or callee uses struct
2332 // return semantics.
2333 if (isCalleeStructRet || isCallerStructRet)
2336 // Externally-defined functions with weak linkage should not be
2337 // tail-called on ARM when the OS does not support dynamic
2338 // pre-emption of symbols, as the AAELF spec requires normal calls
2339 // to undefined weak functions to be replaced with a NOP or jump to the
2340 // next instruction. The behaviour of branch instructions in this
2341 // situation (as used for tail calls) is implementation-defined, so we
2342 // cannot rely on the linker replacing the tail call with a return.
2343 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2344 const GlobalValue *GV = G->getGlobal();
2345 const Triple &TT = getTargetMachine().getTargetTriple();
2346 if (GV->hasExternalWeakLinkage() &&
2347 (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
2351 // Check that the call results are passed in the same way.
2352 LLVMContext &C = *DAG.getContext();
2353 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
2354 CCAssignFnForReturn(CalleeCC, isVarArg),
2355 CCAssignFnForReturn(CallerCC, isVarArg)))
2357 // The callee has to preserve all registers the caller needs to preserve.
2358 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2359 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2360 if (CalleeCC != CallerCC) {
2361 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2362 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2366 // If Caller's vararg or byval argument has been split between registers and
2367 // stack, do not perform tail call, since part of the argument is in caller's
2369 const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
2370 if (AFI_Caller->getArgRegsSaveSize())
2373 // If the callee takes no arguments then go on to check the results of the
2375 if (!Outs.empty()) {
2376 // Check if stack adjustment is needed. For now, do not do this if any
2377 // argument is passed on the stack.
2378 SmallVector<CCValAssign, 16> ArgLocs;
2379 ARMCCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C, Call);
2380 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
2381 if (CCInfo.getNextStackOffset()) {
2382 // Check if the arguments are already laid out in the right way as
2383 // the caller's fixed stack objects.
2384 MachineFrameInfo &MFI = MF.getFrameInfo();
2385 const MachineRegisterInfo *MRI = &MF.getRegInfo();
2386 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2387 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2389 ++i, ++realArgIdx) {
2390 CCValAssign &VA = ArgLocs[i];
2391 EVT RegVT = VA.getLocVT();
2392 SDValue Arg = OutVals[realArgIdx];
2393 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2394 if (VA.getLocInfo() == CCValAssign::Indirect)
2396 if (VA.needsCustom()) {
2397 // f64 and vector types are split into multiple registers or
2398 // register/stack-slot combinations. The types will not match
2399 // the registers; give up on memory f64 refs until we figure
2400 // out what to do about this.
2403 if (!ArgLocs[++i].isRegLoc())
2405 if (RegVT == MVT::v2f64) {
2406 if (!ArgLocs[++i].isRegLoc())
2408 if (!ArgLocs[++i].isRegLoc())
2411 } else if (!VA.isRegLoc()) {
2412 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
2419 const MachineRegisterInfo &MRI = MF.getRegInfo();
2420 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
2428 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2429 MachineFunction &MF, bool isVarArg,
2430 const SmallVectorImpl<ISD::OutputArg> &Outs,
2431 LLVMContext &Context) const {
2432 SmallVector<CCValAssign, 16> RVLocs;
2433 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2434 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2437 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
2438 const SDLoc &DL, SelectionDAG &DAG) {
2439 const MachineFunction &MF = DAG.getMachineFunction();
2440 const Function *F = MF.getFunction();
2442 StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();
2444 // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
2445 // version of the "preferred return address". These offsets affect the return
2446 // instruction if this is a return from PL1 without hypervisor extensions.
2447 // IRQ/FIQ: +4 "subs pc, lr, #4"
2448 // SWI: 0 "subs pc, lr, #0"
2449 // ABORT: +4 "subs pc, lr, #4"
2450 // UNDEF: +4/+2 "subs pc, lr, #0"
2451 // UNDEF varies depending on whether the exception came from ARM or Thumb
2452 // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
2455 if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
2458 else if (IntKind == "SWI" || IntKind == "UNDEF")
2461 report_fatal_error("Unsupported interrupt attribute. If present, value "
2462 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2464 RetOps.insert(RetOps.begin() + 1,
2465 DAG.getConstant(LROffset, DL, MVT::i32, false));
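// For example, a handler carrying the "IRQ" interrupt attribute returns with
// "subs pc, lr, #4", which restores both the PC and the pre-exception
// processor state in one instruction.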
2467 return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
2471 ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2473 const SmallVectorImpl<ISD::OutputArg> &Outs,
2474 const SmallVectorImpl<SDValue> &OutVals,
2475 const SDLoc &dl, SelectionDAG &DAG) const {
2477 // CCValAssign - represent the assignment of the return value to a location.
2478 SmallVector<CCValAssign, 16> RVLocs;
2480 // CCState - Info about the registers and stack slots.
2481 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2482 *DAG.getContext(), Call);
2484 // Analyze outgoing return values.
2485 CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2488 SmallVector<SDValue, 4> RetOps;
2489 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2490 bool isLittleEndian = Subtarget->isLittle();
2492 MachineFunction &MF = DAG.getMachineFunction();
2493 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2494 AFI->setReturnRegsCount(RVLocs.size());
2496 // Copy the result values into the output registers.
2497 for (unsigned i = 0, realRVLocIdx = 0;
2499 ++i, ++realRVLocIdx) {
2500 CCValAssign &VA = RVLocs[i];
2501 assert(VA.isRegLoc() && "Can only return in registers!");
2503 SDValue Arg = OutVals[realRVLocIdx];
2505 switch (VA.getLocInfo()) {
2506 default: llvm_unreachable("Unknown loc info!");
2507 case CCValAssign::Full: break;
2508 case CCValAssign::BCvt:
2509 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2513 if (VA.needsCustom()) {
2514 if (VA.getLocVT() == MVT::v2f64) {
2515 // Extract the first half and return it in two registers.
2516 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2517 DAG.getConstant(0, dl, MVT::i32));
2518 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
2519 DAG.getVTList(MVT::i32, MVT::i32), Half);
2521 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2522 HalfGPRs.getValue(isLittleEndian ? 0 : 1),
2524 Flag = Chain.getValue(1);
2525 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2526 VA = RVLocs[++i]; // skip ahead to next loc
2527 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2528 HalfGPRs.getValue(isLittleEndian ? 1 : 0),
2530 Flag = Chain.getValue(1);
2531 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2532 VA = RVLocs[++i]; // skip ahead to next loc
2534 // Extract the 2nd half and fall through to handle it as an f64 value.
2535 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2536 DAG.getConstant(1, dl, MVT::i32));
2538 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
2540 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
2541 DAG.getVTList(MVT::i32, MVT::i32), Arg);
2542 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2543 fmrrd.getValue(isLittleEndian ? 0 : 1),
2545 Flag = Chain.getValue(1);
2546 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2547 VA = RVLocs[++i]; // skip ahead to next loc
2548 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2549 fmrrd.getValue(isLittleEndian ? 1 : 0),
2552 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
2554 // Guarantee that all emitted copies are
2555 // stuck together, avoiding something bad.
2556 Flag = Chain.getValue(1);
2557 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2559 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2560 const MCPhysReg *I =
2561 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2564 if (ARM::GPRRegClass.contains(*I))
2565 RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2566 else if (ARM::DPRRegClass.contains(*I))
2567 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
2569 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2573 // Update chain and glue.
2576 RetOps.push_back(Flag);
2578 // CPUs which aren't M-class use a special sequence to return from
2579 // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
2580 // though we use "subs pc, lr, #N").
2582 // M-class CPUs actually use a normal return sequence with a special
2583 // (hardware-provided) value in LR, so the normal code path works.
2584 if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
2585 !Subtarget->isMClass()) {
2586 if (Subtarget->isThumb1Only())
2587 report_fatal_error("interrupt attribute is not supported in Thumb1");
2588 return LowerInterruptReturn(RetOps, dl, DAG);
2591 return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
2594 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2595 if (N->getNumValues() != 1)
2597 if (!N->hasNUsesOfValue(1, 0))
2600 SDValue TCChain = Chain;
2601 SDNode *Copy = *N->use_begin();
2602 if (Copy->getOpcode() == ISD::CopyToReg) {
2603 // If the copy has a glue operand, we conservatively assume it isn't safe to
2604 // perform a tail call.
2605 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2607 TCChain = Copy->getOperand(0);
2608 } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
2609 SDNode *VMov = Copy;
2610 // f64 returned in a pair of GPRs.
2611 SmallPtrSet<SDNode*, 2> Copies;
2612 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
2614 if (UI->getOpcode() != ISD::CopyToReg)
2618 if (Copies.size() > 2)
2621 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
2623 SDValue UseChain = UI->getOperand(0);
2624 if (Copies.count(UseChain.getNode()))
2628 // We are at the top of this chain.
2629 // If the copy has a glue operand, we conservatively assume it
2630 // isn't safe to perform a tail call.
2631 if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
2637 } else if (Copy->getOpcode() == ISD::BITCAST) {
2638 // f32 returned in a single GPR.
2639 if (!Copy->hasOneUse())
2641 Copy = *Copy->use_begin();
2642 if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
2644 // If the copy has a glue operand, we conservatively assume it isn't safe to
2645 // perform a tail call.
2646 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2648 TCChain = Copy->getOperand(0);
2653 bool HasRet = false;
2654 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2656 if (UI->getOpcode() != ARMISD::RET_FLAG &&
2657 UI->getOpcode() != ARMISD::INTRET_FLAG)
2669 bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2670 if (!Subtarget->supportsTailCall())
2674 CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
2675 if (!CI->isTailCall() || Attr.getValueAsString() == "true")
2681 // Writing a 64-bit value, so we need to split it into two 32-bit values first
2682 // and pass the low and high parts through.
2683 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
2685 SDValue WriteValue = Op->getOperand(2);
2687 // This function is only supposed to be called for i64 type argument.
2688 assert(WriteValue.getValueType() == MVT::i64
2689 && "LowerWRITE_REGISTER called for non-i64 type argument.");
2691 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
2692 DAG.getConstant(0, DL, MVT::i32));
2693 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
2694 DAG.getConstant(1, DL, MVT::i32));
2695 SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
2696 return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
2699 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
2700 // their target counterparts wrapped in the ARMISD::Wrapper node. Suppose N is
2701 // one of the above-mentioned nodes. It has to be wrapped because otherwise
2702 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
2703 // be used to form an addressing mode. These wrapped nodes will be selected
2705 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
2706 EVT PtrVT = Op.getValueType();
2707 // FIXME there is no actual debug info here
2709 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2711 if (CP->isMachineConstantPoolEntry())
2712 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
2713 CP->getAlignment());
2715 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
2716 CP->getAlignment());
2717 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
2720 unsigned ARMTargetLowering::getJumpTableEncoding() const {
2721 return MachineJumpTableInfo::EK_Inline;
2724 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
2725 SelectionDAG &DAG) const {
2726 MachineFunction &MF = DAG.getMachineFunction();
2727 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2728 unsigned ARMPCLabelIndex = 0;
2730 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2731 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2733 bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI();
2734 if (!IsPositionIndependent) {
2735 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
2737 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
2738 ARMPCLabelIndex = AFI->createPICLabelUId();
2739 ARMConstantPoolValue *CPV =
2740 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
2741 ARMCP::CPBlockAddress, PCAdj);
2742 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2744 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
2745 SDValue Result = DAG.getLoad(
2746 PtrVT, DL, DAG.getEntryNode(), CPAddr,
2747 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2748 if (!IsPositionIndependent)
2750 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
2751 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
2754 /// \brief Convert a TLS address reference into the correct sequence of loads
2755 /// and calls to compute the variable's address for Darwin, and return an
2756 /// SDValue containing the final node.
2758 /// Darwin only has one TLS scheme which must be capable of dealing with the
2759 /// fully general situation, in the worst case. This means:
2760 /// + "extern __thread" declaration.
2761 /// + Defined in a possibly unknown dynamic library.
2763 /// The general system is that each __thread variable has a [3 x i32] descriptor
2764 /// which contains information used by the runtime to calculate the address. The
2765 /// only part of this the compiler needs to know about is the first word, which
2766 /// contains a function pointer that must be called with the address of the
2767 /// entire descriptor in "r0".
2769 /// Since this descriptor may be in a different unit, in general access must
2770 /// proceed along the usual ARM rules. A common sequence to produce is:
2772 /// movw rT1, :lower16:_var$non_lazy_ptr
2773 /// movt rT1, :upper16:_var$non_lazy_ptr
2777 /// [...address now in r0...]
2779 ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
2780 SelectionDAG &DAG) const {
2781 assert(Subtarget->isTargetDarwin() && "TLS only supported on Darwin");
2784 // The first step is to get the address of the actual global symbol. This is
2785 // where the TLS descriptor lives.
2786 SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
2788 // The first entry in the descriptor is a function pointer that we must call
2789 // to obtain the address of the variable.
2790 SDValue Chain = DAG.getEntryNode();
2791 SDValue FuncTLVGet = DAG.getLoad(
2792 MVT::i32, DL, Chain, DescAddr,
2793 MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2794 /* Alignment = */ 4,
2795 MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
2796 MachineMemOperand::MOInvariant);
2797 Chain = FuncTLVGet.getValue(1);
2799 MachineFunction &F = DAG.getMachineFunction();
2800 MachineFrameInfo &MFI = F.getFrameInfo();
2801 MFI.setAdjustsStack(true);
2803 // TLS calls preserve all registers except those that absolutely must be
2804 // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
2807 getTargetMachine().getSubtargetImpl(*F.getFunction())->getRegisterInfo();
2808 auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
2809 const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());
2811 // Finally, we can make the call. This is just a degenerate version of a
2812 // normal ARM call node: r0 takes the address of the descriptor, and the call
2813 // returns the address of the variable in this thread.
2814 Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue());
2816 DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
2817 Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),
2818 DAG.getRegisterMask(Mask), Chain.getValue(1));
2819 return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
2823 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
2824 SelectionDAG &DAG) const {
2825 assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
2827 SDValue Chain = DAG.getEntryNode();
2828 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2831 // Load the current TEB (thread environment block)
2832 SDValue Ops[] = {Chain,
2833 DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
2834 DAG.getConstant(15, DL, MVT::i32),
2835 DAG.getConstant(0, DL, MVT::i32),
2836 DAG.getConstant(13, DL, MVT::i32),
2837 DAG.getConstant(0, DL, MVT::i32),
2838 DAG.getConstant(2, DL, MVT::i32)};
2839 SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
2840 DAG.getVTList(MVT::i32, MVT::Other), Ops);
2842 SDValue TEB = CurrentTEB.getValue(0);
2843 Chain = CurrentTEB.getValue(1);
2845 // Load the ThreadLocalStoragePointer from the TEB
2846 // A pointer to the TLS array is located at offset 0x2c from the TEB.
2848 DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
2849 TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
2851 // The pointer to the thread's TLS data area is at offset (TLS index * 4)
2852 // into the TLSArray.
2854 // Load the TLS index from the C runtime
2856 DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
2857 TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
2858 TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());
2860 SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
2861 DAG.getConstant(2, DL, MVT::i32));
2862 SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
2863 DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
2864 MachinePointerInfo());
2866 // Get the offset of the start of the .tls section (section base)
2867 const auto *GA = cast<GlobalAddressSDNode>(Op);
2868 auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
2869 SDValue Offset = DAG.getLoad(
2870 PtrVT, DL, Chain, DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
2871 DAG.getTargetConstantPool(CPV, PtrVT, 4)),
2872 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
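// Putting it together, the variable's address is computed roughly as
// TEB->ThreadLocalStoragePointer[_tls_index] + (SECREL offset of the variable
// within this module's .tls section).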
2874 return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
2877 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
2879 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
2880 SelectionDAG &DAG) const {
2882 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2883 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
2884 MachineFunction &MF = DAG.getMachineFunction();
2885 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2886 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2887 ARMConstantPoolValue *CPV =
2888 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
2889 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
2890 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2891 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
2892 Argument = DAG.getLoad(
2893 PtrVT, dl, DAG.getEntryNode(), Argument,
2894 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2895 SDValue Chain = Argument.getValue(1);
2897 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2898 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
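// Sketch of the sequence: the TLSGD descriptor address computed above
// (constant-pool load plus PIC_ADD) is passed in r0 to __tls_get_addr below,
// and the call's return value is the variable's address.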
2900 // call __tls_get_addr.
2903 Entry.Node = Argument;
2904 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
2905 Args.push_back(Entry);
2907 // FIXME: is there useful debug info available here?
2908 TargetLowering::CallLoweringInfo CLI(DAG);
2909 CLI.setDebugLoc(dl).setChain(Chain)
2910 .setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
2911 DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));
2913 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2914 return CallResult.first;
2917 // Lower ISD::GlobalTLSAddress using the "initial exec" or
2918 // "local exec" model.
2920 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
2922 TLSModel::Model model) const {
2923 const GlobalValue *GV = GA->getGlobal();
2926 SDValue Chain = DAG.getEntryNode();
2927 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2928 // Get the Thread Pointer
2929 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
2931 if (model == TLSModel::InitialExec) {
2932 MachineFunction &MF = DAG.getMachineFunction();
2933 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2934 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2935 // Initial exec model.
2936 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
2937 ARMConstantPoolValue *CPV =
2938 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
2939 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
2941 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2942 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
2943 Offset = DAG.getLoad(
2944 PtrVT, dl, Chain, Offset,
2945 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2946 Chain = Offset.getValue(1);
2948 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2949 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
2951 Offset = DAG.getLoad(
2952 PtrVT, dl, Chain, Offset,
2953 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2956 assert(model == TLSModel::LocalExec);
2957 ARMConstantPoolValue *CPV =
2958 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
2959 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2960 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
2961 Offset = DAG.getLoad(
2962 PtrVT, dl, Chain, Offset,
2963 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
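// Sketch: initial-exec loads a GOT-relative offset (constant-pool entry,
// PIC_ADD, then a load through the GOT), while local-exec reads the TPOFF
// value straight from the constant pool; either way the offset is added to
// the thread pointer below.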
2966 // The address of the thread-local variable is the sum of the thread
2967 // pointer and the offset of the variable.
2968 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
2972 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
2973 if (Subtarget->isTargetDarwin())
2974 return LowerGlobalTLSAddressDarwin(Op, DAG);
2976 if (Subtarget->isTargetWindows())
2977 return LowerGlobalTLSAddressWindows(Op, DAG);
2979 // TODO: implement the "local dynamic" model
2980 assert(Subtarget->isTargetELF() && "Only ELF implemented here");
2981 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
2982 if (DAG.getTarget().Options.EmulatedTLS)
2983 return LowerToTLSEmulatedModel(GA, DAG);
2985 TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
2987 switch (model) {
2988 case TLSModel::GeneralDynamic:
2989 case TLSModel::LocalDynamic:
2990 return LowerToTLSGeneralDynamicModel(GA, DAG);
2991 case TLSModel::InitialExec:
2992 case TLSModel::LocalExec:
2993 return LowerToTLSExecModels(GA, DAG, model);
2995 llvm_unreachable("bogus TLS model");
2998 /// Return true if all users of V are within function F, looking through
2999 /// ConstantExprs.
3000 static bool allUsersAreInFunction(const Value *V, const Function *F) {
3001 SmallVector<const User*,4> Worklist;
3002 for (auto *U : V->users())
3003 Worklist.push_back(U);
3004 while (!Worklist.empty()) {
3005 auto *U = Worklist.pop_back_val();
3006 if (isa<ConstantExpr>(U)) {
3007 for (auto *UU : U->users())
3008 Worklist.push_back(UU);
3012 auto *I = dyn_cast<Instruction>(U);
3013 if (!I || I->getParent()->getParent() != F)
3019 /// Return true if all users of V are within some (any) function, looking through
3020 /// ConstantExprs. In other words, return false if there are any global constant users.
3021 static bool allUsersAreInFunctions(const Value *V) {
3022 SmallVector<const User*,4> Worklist;
3023 for (auto *U : V->users())
3024 Worklist.push_back(U);
3025 while (!Worklist.empty()) {
3026 auto *U = Worklist.pop_back_val();
3027 if (isa<ConstantExpr>(U)) {
3028 for (auto *UU : U->users())
3029 Worklist.push_back(UU);
3033 if (!isa<Instruction>(U))
3039 // Return true if T is an integer, float or an array/vector of either.
3040 static bool isSimpleType(Type *T) {
3041 if (T->isIntegerTy() || T->isFloatingPointTy())
3043 Type *SubT = nullptr;
3045 SubT = T->getArrayElementType();
3046 else if (T->isVectorTy())
3047 SubT = T->getVectorElementType();
3050 return SubT->isIntegerTy() || SubT->isFloatingPointTy();
3053 static SDValue promoteToConstantPool(const GlobalValue *GV, SelectionDAG &DAG,
3054 EVT PtrVT, SDLoc dl) {
3055 // If we're creating a pool entry for a constant global with unnamed address,
3056 // and the global is small enough, we can emit it inline into the constant pool
3057 // to save ourselves an indirection.
3059 // This is a win if the constant is only used in one function (so it doesn't
3060 // need to be duplicated) or duplicating the constant wouldn't increase code
3061 // size (implying the constant is no larger than 4 bytes).
3062 const Function *F = DAG.getMachineFunction().getFunction();
3064 // We rely on this decision to inline being idempotent and unrelated to the
3065 // use-site. We know that if we inline a variable at one use site, we'll
3066 // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
3067 // doesn't know about this optimization, so bail out if it's enabled, else
3068 // we could decide to inline here (and thus never emit the GV) but require
3069 // the GV from fast-isel generated code.
3070 if (!EnableConstpoolPromotion ||
3071 DAG.getMachineFunction().getTarget().Options.EnableFastISel)
3074 auto *GVar = dyn_cast<GlobalVariable>(GV);
3075 if (!GVar || !GVar->hasInitializer() ||
3076 !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
3077 !GVar->hasLocalLinkage())
3080 // Ensure that we don't try and inline any type that contains pointers. If
3081 // we inline a value that contains relocations, we move the relocations from
3082 // .data to .text which is not ideal.
3083 auto *Init = GVar->getInitializer();
3084 if (!isSimpleType(Init->getType()))
3087 // The constant islands pass can only really deal with alignment requests
3088 // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
3089 // any type wanting greater alignment requirements than 4 bytes. We also
3090 // can only promote constants that are multiples of 4 bytes in size or
3091 // are paddable to a multiple of 4. Currently we only try and pad constants
3092 // that are strings for simplicity.
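// For example (illustrative only): a 6-byte string such as "hello\0" would be
// padded with two trailing zero bytes to 8 bytes so that the constant-island
// entry keeps the required 4-byte granularity.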
3093 auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
3094 unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
3095 unsigned Align = GVar->getAlignment();
3096 unsigned RequiredPadding = 4 - (Size % 4);
3097 bool PaddingPossible =
3098 RequiredPadding == 4 || (CDAInit && CDAInit->isString());
3099 if (!PaddingPossible || Align > 4 || Size > ConstpoolPromotionMaxSize)
3102 unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3103 MachineFunction &MF = DAG.getMachineFunction();
3104 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3106 // We can't bloat the constant pool too much, else the ConstantIslands pass
3107 // may fail to converge. If we haven't promoted this global yet (it may have
3108 // multiple uses), and promoting it would increase the constant pool size (Sz
3109 // > 4), ensure we have space to do so up to MaxTotal.
3110 if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
3111 if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
3112 ConstpoolPromotionMaxTotal)
3115 // This is only valid if all users are in a single function OR it has users
3116 // in multiple functions but is no larger than a pointer. We also check if
3117 // GVar has constant (non-ConstantExpr) users. If so, it essentially has its
3118 // address taken.
3119 if (!allUsersAreInFunction(GVar, F) &&
3120 !(Size <= 4 && allUsersAreInFunctions(GVar)))
3123 // We're going to inline this global. Pad it out if needed.
3124 if (RequiredPadding != 4) {
3125 StringRef S = CDAInit->getAsString();
3127 SmallVector<uint8_t,16> V(S.size());
3128 std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
3129 while (RequiredPadding--)
3130 V.push_back(0);
3131 Init = ConstantDataArray::get(*DAG.getContext(), V);
3134 auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
3136 DAG.getTargetConstantPool(CPVal, PtrVT, /*Align=*/4);
3137 if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
3138 AFI->markGlobalAsPromotedToConstantPool(GVar);
3139 AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
3142 ++NumConstpoolPromoted;
3143 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3146 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
3147 SelectionDAG &DAG) const {
3148 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3150 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3151 const TargetMachine &TM = getTargetMachine();
3152 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3153 GV = GA->getBaseObject();
3155 (isa<GlobalVariable>(GV) && cast<GlobalVariable>(GV)->isConstant()) ||
3158 // promoteToConstantPool only if not generating XO text section
3159 if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
3160 if (SDValue V = promoteToConstantPool(GV, DAG, PtrVT, dl))
3163 if (isPositionIndependent()) {
3164 bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
3166 MachineFunction &MF = DAG.getMachineFunction();
3167 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3168 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3169 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3171 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
3172 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
3173 GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
3174 UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
3175 /*AddCurrentAddress=*/UseGOT_PREL);
3176 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3177 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3178 SDValue Result = DAG.getLoad(
3179 PtrVT, dl, DAG.getEntryNode(), CPAddr,
3180 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3181 SDValue Chain = Result.getValue(1);
3182 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3183 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
3186 DAG.getLoad(PtrVT, dl, Chain, Result,
3187 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3189 } else if (Subtarget->isROPI() && IsRO) {
3191 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
3192 SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
3194 } else if (Subtarget->isRWPI() && !IsRO) {
3196 ARMConstantPoolValue *CPV =
3197 ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
3198 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3199 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3200 SDValue G = DAG.getLoad(
3201 PtrVT, dl, DAG.getEntryNode(), CPAddr,
3202 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3203 SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
3204 SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, G);
3208 // If we have T2 ops, we can materialize the address directly via a movw/movt
3209 // pair. This is always cheaper.
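// Roughly, the materialization below becomes something like:
//   movw r0, :lower16:sym
//   movt r0, :upper16:sym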
3210 if (Subtarget->useMovt(DAG.getMachineFunction())) {
3212 // FIXME: Once remat is capable of dealing with instructions with register
3213 // operands, expand this into two nodes.
3214 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
3215 DAG.getTargetGlobalAddress(GV, dl, PtrVT));
3217 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
3218 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3220 PtrVT, dl, DAG.getEntryNode(), CPAddr,
3221 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3225 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
3226 SelectionDAG &DAG) const {
3227 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3228 "ROPI/RWPI not currently supported for Darwin");
3229 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3231 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3233 if (Subtarget->useMovt(DAG.getMachineFunction()))
3236 // FIXME: Once remat is capable of dealing with instructions with register
3237 // operands, expand this into multiple nodes
3239 isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;
3241 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
3242 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
3244 if (Subtarget->isGVIndirectSymbol(GV))
3245 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
3246 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3250 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
3251 SelectionDAG &DAG) const {
3252 assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
3253 assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
3254 "Windows on ARM expects to use movw/movt");
3255 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3256 "ROPI/RWPI not currently supported for Windows");
3258 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3259 const ARMII::TOF TargetFlags =
3260 (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG);
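// For a dllimport'ed global, the movw/movt below materializes the address of
// the import pointer (conventionally the __imp_<name> symbol on COFF), and the
// extra load at the end fetches the real address through it.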
3261 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3267 // FIXME: Once remat is capable of dealing with instructions with register
3268 // operands, expand this into two nodes.
3269 Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
3270 DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
3272 if (GV->hasDLLImportStorageClass())
3273 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
3274 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3279 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
3281 SDValue Val = DAG.getConstant(0, dl, MVT::i32);
3282 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
3283 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
3284 Op.getOperand(1), Val);
3288 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
3290 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
3291 Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
3294 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
3295 SelectionDAG &DAG) const {
3297 return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
3302 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
3303 const ARMSubtarget *Subtarget) const {
3304 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3307 default: return SDValue(); // Don't custom lower most intrinsics.
3308 case Intrinsic::arm_rbit: {
3309 assert(Op.getOperand(1).getValueType() == MVT::i32 &&
3310 "RBIT intrinsic must have i32 type!");
3311 return DAG.getNode(ISD::BITREVERSE, dl, MVT::i32, Op.getOperand(1));
3313 case Intrinsic::thread_pointer: {
3314 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3315 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
3317 case Intrinsic::eh_sjlj_lsda: {
3318 MachineFunction &MF = DAG.getMachineFunction();
3319 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3320 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3321 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3323 bool IsPositionIndependent = isPositionIndependent();
3324 unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
3325 ARMConstantPoolValue *CPV =
3326 ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
3327 ARMCP::CPLSDA, PCAdj);
3328 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3329 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3330 SDValue Result = DAG.getLoad(
3331 PtrVT, dl, DAG.getEntryNode(), CPAddr,
3332 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3334 if (IsPositionIndependent) {
3335 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3336 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
3340 case Intrinsic::arm_neon_vmulls:
3341 case Intrinsic::arm_neon_vmullu: {
3342 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
3343 ? ARMISD::VMULLs : ARMISD::VMULLu;
3344 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3345 Op.getOperand(1), Op.getOperand(2));
3347 case Intrinsic::arm_neon_vminnm:
3348 case Intrinsic::arm_neon_vmaxnm: {
3349 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
3350 ? ISD::FMINNUM : ISD::FMAXNUM;
3351 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3352 Op.getOperand(1), Op.getOperand(2));
3354 case Intrinsic::arm_neon_vminu:
3355 case Intrinsic::arm_neon_vmaxu: {
3356 if (Op.getValueType().isFloatingPoint())
3358 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
3359 ? ISD::UMIN : ISD::UMAX;
3360 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3361 Op.getOperand(1), Op.getOperand(2));
3363 case Intrinsic::arm_neon_vmins:
3364 case Intrinsic::arm_neon_vmaxs: {
3365 // v{min,max}s is overloaded between signed integers and floats.
3366 if (!Op.getValueType().isFloatingPoint()) {
3367 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3368 ? ISD::SMIN : ISD::SMAX;
3369 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3370 Op.getOperand(1), Op.getOperand(2));
3372 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3373 ? ISD::FMINNAN : ISD::FMAXNAN;
3374 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3375 Op.getOperand(1), Op.getOperand(2));
3380 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
3381 const ARMSubtarget *Subtarget) {
3382 // FIXME: handle "fence singlethread" more efficiently.
3384 if (!Subtarget->hasDataBarrier()) {
3385 // Some ARMv6 CPUs can support data barriers with an mcr instruction.
3386 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
3387 // here.
3388 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
3389 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
3390 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
3391 DAG.getConstant(0, dl, MVT::i32));
3394 ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
3395 AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
3396 ARM_MB::MemBOpt Domain = ARM_MB::ISH;
3397 if (Subtarget->isMClass()) {
3398 // Only a full system barrier exists in the M-class architectures.
3399 Domain = ARM_MB::SY;
3400 } else if (Subtarget->preferISHSTBarriers() &&
3401 Ord == AtomicOrdering::Release) {
3402 // Swift happens to implement ISHST barriers in a way that's compatible with
3403 // Release semantics but weaker than ISH so we'd be fools not to use
3404 // it. Beware: other processors probably don't!
3405 Domain = ARM_MB::ISHST;
3408 return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
3409 DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
3410 DAG.getConstant(Domain, dl, MVT::i32));
3413 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
3414 const ARMSubtarget *Subtarget) {
3415 // ARM pre-v5TE and Thumb1 do not have preload instructions.
3416 if (!(Subtarget->isThumb2() ||
3417 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
3418 // Just preserve the chain.
3419 return Op.getOperand(0);
3422 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
3423 if (!isRead &&
3424 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
3425 // ARMv7 with MP extension has PLDW.
3426 return Op.getOperand(0);
3428 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3429 if (Subtarget->isThumb()) {
3431 isRead = ~isRead & 1;
3432 isData = ~isData & 1;
3435 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
3436 Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
3437 DAG.getConstant(isData, dl, MVT::i32));
3440 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
3441 MachineFunction &MF = DAG.getMachineFunction();
3442 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
3444 // vastart just stores the address of the VarArgsFrameIndex slot into the
3445 // memory location argument.
3447 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
3448 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3449 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3450 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3451 MachinePointerInfo(SV));
3454 SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
3455 CCValAssign &NextVA,
3458 const SDLoc &dl) const {
3459 MachineFunction &MF = DAG.getMachineFunction();
3460 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3462 const TargetRegisterClass *RC;
3463 if (AFI->isThumb1OnlyFunction())
3464 RC = &ARM::tGPRRegClass;
3466 RC = &ARM::GPRRegClass;
3468 // Transform the arguments stored in physical registers into virtual ones.
3469 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3470 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
3473 if (NextVA.isMemLoc()) {
3474 MachineFrameInfo &MFI = MF.getFrameInfo();
3475 int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);
3477 // Create load node to retrieve arguments from the stack.
3478 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3479 ArgValue2 = DAG.getLoad(
3480 MVT::i32, dl, Root, FIN,
3481 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3483 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
3484 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
3486 if (!Subtarget->isLittle())
3487 std::swap (ArgValue, ArgValue2);
3488 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
3491 // The remaining GPRs hold either the beginning of variable-argument
3492 // data, or the beginning of an aggregate passed by value (usually
3493 // byval). Either way, we allocate stack slots adjacent to the data
3494 // provided by our caller, and store the unallocated registers there.
3495 // If this is a variadic function, the va_list pointer will begin with
3496 // these values; otherwise, this reassembles a (byval) structure that
3497 // was split between registers and memory.
3498 // Return: The frame index the registers were stored into.
3499 int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
3500 const SDLoc &dl, SDValue &Chain,
3501 const Value *OrigArg,
3502 unsigned InRegsParamRecordIdx,
3503 int ArgOffset, unsigned ArgSize) const {
3504 // Currently, two use-cases are possible:
3505 // Case #1. Non-var-args function, and we meet the first byval parameter.
3506 // Set up the first unallocated register as the first byval register;
3507 // eat all remaining registers
3508 // (these two actions are performed by the HandleByVal method).
3509 // Then, here, we initialize the stack frame with
3510 // "store-reg" instructions.
3511 // Case #2. Var-args function that doesn't contain byval parameters.
3512 // The same: eat all remaining unallocated registers and
3513 // initialize the stack frame.
3515 MachineFunction &MF = DAG.getMachineFunction();
3516 MachineFrameInfo &MFI = MF.getFrameInfo();
3517 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3518 unsigned RBegin, REnd;
3519 if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
3520 CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
3522 unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
3523 RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
3528 ArgOffset = -4 * (ARM::R4 - RBegin);
3530 auto PtrVT = getPointerTy(DAG.getDataLayout());
3531 int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
3532 SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);
3534 SmallVector<SDValue, 4> MemOps;
3535 const TargetRegisterClass *RC =
3536 AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
3538 for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
3539 unsigned VReg = MF.addLiveIn(Reg, RC);
3540 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
3541 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3542 MachinePointerInfo(OrigArg, 4 * i));
3543 MemOps.push_back(Store);
3544 FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
3547 if (!MemOps.empty())
3548 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3552 // Set up the stack frame that the va_list pointer will start from.
3553 void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
3554 const SDLoc &dl, SDValue &Chain,
3556 unsigned TotalArgRegsSaveSize,
3557 bool ForceMutable) const {
3558 MachineFunction &MF = DAG.getMachineFunction();
3559 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3561 // Try to store any remaining integer argument regs
3562 // to their spots on the stack so that they may be loaded by dereferencing
3563 // the result of va_next.
3564 // If there are no regs to be stored, just point the address after the last
3565 // argument passed via the stack.
3566 int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
3567 CCInfo.getInRegsParamsCount(),
3568 CCInfo.getNextStackOffset(), 4);
3569 AFI->setVarArgsFrameIndex(FrameIndex);
3572 SDValue ARMTargetLowering::LowerFormalArguments(
3573 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3574 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3575 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3576 MachineFunction &MF = DAG.getMachineFunction();
3577 MachineFrameInfo &MFI = MF.getFrameInfo();
3579 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3581 // Assign locations to all of the incoming arguments.
3582 SmallVector<CCValAssign, 16> ArgLocs;
3583 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3584 *DAG.getContext(), Prologue);
3585 CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
3587 SmallVector<SDValue, 16> ArgValues;
3589 Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
3590 unsigned CurArgIdx = 0;
3592 // Initially ArgRegsSaveSize is zero.
3593 // Then we increase this value each time we meet byval parameter.
3594 // We also increase this value in case of varargs function.
3595 AFI->setArgRegsSaveSize(0);
3597 // Calculate the amount of stack space that we need to allocate to store
3598 // byval and variadic arguments that are passed in registers.
3599 // We need to know this before we allocate the first byval or variadic
3600 // argument, as they will be allocated a stack slot below the CFA (Canonical
3601 // Frame Address, the stack pointer at entry to the function).
3602 unsigned ArgRegBegin = ARM::R4;
3603 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3604 if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
3607 CCValAssign &VA = ArgLocs[i];
3608 unsigned Index = VA.getValNo();
3609 ISD::ArgFlagsTy Flags = Ins[Index].Flags;
3610 if (!Flags.isByVal())
3613 assert(VA.isMemLoc() && "unexpected byval pointer in reg");
3614 unsigned RBegin, REnd;
3615 CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
3616 ArgRegBegin = std::min(ArgRegBegin, RBegin);
3618 CCInfo.nextInRegsParam();
3620 CCInfo.rewindByValRegsInfo();
3622 int lastInsIndex = -1;
3623 if (isVarArg && MFI.hasVAStart()) {
3624 unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
3625 if (RegIdx != array_lengthof(GPRArgRegs))
3626 ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
3629 unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
3630 AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
3631 auto PtrVT = getPointerTy(DAG.getDataLayout());
3633 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3634 CCValAssign &VA = ArgLocs[i];
3635 if (Ins[VA.getValNo()].isOrigArg()) {
3636 std::advance(CurOrigArg,
3637 Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
3638 CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
3640 // Arguments stored in registers.
3641 if (VA.isRegLoc()) {
3642 EVT RegVT = VA.getLocVT();
3644 if (VA.needsCustom()) {
3645 // f64 and vector types are split up into multiple registers or
3646 // combinations of registers and stack slots.
3647 if (VA.getLocVT() == MVT::v2f64) {
3648 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
3650 VA = ArgLocs[++i]; // skip ahead to next loc
3652 if (VA.isMemLoc()) {
3653 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
3654 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3655 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
3656 MachinePointerInfo::getFixedStack(
3657 DAG.getMachineFunction(), FI));
3659 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
3662 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
3663 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
3664 ArgValue, ArgValue1,
3665 DAG.getIntPtrConstant(0, dl));
3666 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
3667 ArgValue, ArgValue2,
3668 DAG.getIntPtrConstant(1, dl));
3670 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
3673 const TargetRegisterClass *RC;
3675 if (RegVT == MVT::f32)
3676 RC = &ARM::SPRRegClass;
3677 else if (RegVT == MVT::f64)
3678 RC = &ARM::DPRRegClass;
3679 else if (RegVT == MVT::v2f64)
3680 RC = &ARM::QPRRegClass;
3681 else if (RegVT == MVT::i32)
3682 RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
3683 : &ARM::GPRRegClass;
3685 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
3687 // Transform the arguments in physical registers into virtual ones.
3688 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3689 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3692 // If this is an 8 or 16-bit value, it is really passed promoted
3693 // to 32 bits. Insert an assert[sz]ext to capture this, then
3694 // truncate to the right size.
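// For example, a sign-extended i8 argument arrives in a full i32 register;
// the SExt case below wraps it as (truncate i8 (AssertSext i32 <copy>, i8))
// so later passes know the high bits are already correct.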
3695 switch (VA.getLocInfo()) {
3696 default: llvm_unreachable("Unknown loc info!");
3697 case CCValAssign::Full: break;
3698 case CCValAssign::BCvt:
3699 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
3701 case CCValAssign::SExt:
3702 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3703 DAG.getValueType(VA.getValVT()));
3704 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3706 case CCValAssign::ZExt:
3707 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3708 DAG.getValueType(VA.getValVT()));
3709 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3713 InVals.push_back(ArgValue);
3715 } else { // VA.isRegLoc()
3718 assert(VA.isMemLoc());
3719 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
3721 int index = VA.getValNo();
3723 // Some Ins[] entries become multiple ArgLoc[] entries.
3724 // Process them only once.
3725 if (index != lastInsIndex)
3727 ISD::ArgFlagsTy Flags = Ins[index].Flags;
3728 // FIXME: For now, all byval parameter objects are marked mutable.
3729 // This can be changed with more analysis.
3730 // In case of tail call optimization mark all arguments mutable.
3731 // Since they could be overwritten by lowering of arguments in case of
3732 // a tail call.
3733 if (Flags.isByVal()) {
3734 assert(Ins[index].isOrigArg() &&
3735 "Byval arguments cannot be implicit");
3736 unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
3738 int FrameIndex = StoreByValRegs(
3739 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
3740 VA.getLocMemOffset(), Flags.getByValSize());
3741 InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
3742 CCInfo.nextInRegsParam();
3744 unsigned FIOffset = VA.getLocMemOffset();
3745 int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
3748 // Create load nodes to retrieve arguments from the stack.
3749 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3750 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
3751 MachinePointerInfo::getFixedStack(
3752 DAG.getMachineFunction(), FI)));
3754 lastInsIndex = index;
3760 if (isVarArg && MFI.hasVAStart())
3761 VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
3762 CCInfo.getNextStackOffset(),
3763 TotalArgRegsSaveSize);
3765 AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
3770 /// isFloatingPointZero - Return true if this is +0.0.
3771 static bool isFloatingPointZero(SDValue Op) {
3772 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
3773 return CFP->getValueAPF().isPosZero();
3774 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
3775 // Maybe this has already been legalized into the constant pool?
3776 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
3777 SDValue WrapperOp = Op.getOperand(1).getOperand(0);
3778 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
3779 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
3780 return CFP->getValueAPF().isPosZero();
3782 } else if (Op->getOpcode() == ISD::BITCAST &&
3783 Op->getValueType(0) == MVT::f64) {
3784 // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
3785 // created by LowerConstantFP().
3786 SDValue BitcastOp = Op->getOperand(0);
3787 if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
3788 isNullConstant(BitcastOp->getOperand(0)))
3794 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for
3795 /// the given operands.
3796 SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
3797 SDValue &ARMcc, SelectionDAG &DAG,
3798 const SDLoc &dl) const {
3799 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
3800 unsigned C = RHSC->getZExtValue();
3801 if (!isLegalICmpImmediate(C)) {
3802 // Constant does not fit, try adjusting it by one?
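// For example (unsigned case), "x u< 0x101" can be rewritten as "x u<= 0x100":
// 0x101 is not an ARM modified immediate, but 0x100 is, so the adjusted
// compare can be encoded directly.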
3807 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
3808 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
3809 RHS = DAG.getConstant(C - 1, dl, MVT::i32);
3814 if (C != 0 && isLegalICmpImmediate(C-1)) {
3815 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
3816 RHS = DAG.getConstant(C - 1, dl, MVT::i32);
3821 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
3822 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
3823 RHS = DAG.getConstant(C + 1, dl, MVT::i32);
3828 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
3829 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
3830 RHS = DAG.getConstant(C + 1, dl, MVT::i32);
3837 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
3838 ARMISD::NodeType CompareType;
3841 CompareType = ARMISD::CMP;
3846 CompareType = ARMISD::CMPZ;
3849 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
3850 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
3853 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
3854 SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
3855 SelectionDAG &DAG, const SDLoc &dl) const {
3856 assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
3858 if (!isFloatingPointZero(RHS))
3859 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
3861 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
3862 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
3865 /// duplicateCmp - Glue values can have only one use, so this function
3866 /// duplicates a comparison node.
3868 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
3869 unsigned Opc = Cmp.getOpcode();
3871 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
3872 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
3874 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
3875 Cmp = Cmp.getOperand(0);
3876 Opc = Cmp.getOpcode();
3877 if (Opc == ARMISD::CMPFP)
3878 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
3880 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
3881 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
3883 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
3886 std::pair<SDValue, SDValue>
3887 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
3888 SDValue &ARMcc) const {
3889 assert(Op.getValueType() == MVT::i32 && "Unsupported value type");
3891 SDValue Value, OverflowCmp;
3892 SDValue LHS = Op.getOperand(0);
3893 SDValue RHS = Op.getOperand(1);
3896 // FIXME: We are currently always generating CMPs because we don't support
3897 // generating CMN through the backend. This is not as good as the natural
3898 // CMP case because it causes a register dependency and cannot be folded
3901 switch (Op.getOpcode()) {
3903 llvm_unreachable("Unknown overflow instruction!");
3905 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
3906 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
3907 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
3910 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
3911 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
3912 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
3915 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
3916 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
3917 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
3920 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
3921 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
3922 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
3926 return std::make_pair(Value, OverflowCmp);
3931 ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
3932 // Let legalize expand this if it isn't a legal type yet.
3933 if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
3936 SDValue Value, OverflowCmp;
3938 std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
3939 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3941 // We use 0 and 1 as false and true values.
3942 SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
3943 SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
3944 EVT VT = Op.getValueType();
3946 SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
3947 ARMcc, CCR, OverflowCmp);
3949 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
3950 return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
3954 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3955 SDValue Cond = Op.getOperand(0);
3956 SDValue SelectTrue = Op.getOperand(1);
3957 SDValue SelectFalse = Op.getOperand(2);
3959 unsigned Opc = Cond.getOpcode();
3961 if (Cond.getResNo() == 1 &&
3962 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
3963 Opc == ISD::USUBO)) {
3964 if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
3967 SDValue Value, OverflowCmp;
3969 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
3970 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3971 EVT VT = Op.getValueType();
3973 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
3979 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
3980 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
3982 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
3983 const ConstantSDNode *CMOVTrue =
3984 dyn_cast<ConstantSDNode>(Cond.getOperand(0));
3985 const ConstantSDNode *CMOVFalse =
3986 dyn_cast<ConstantSDNode>(Cond.getOperand(1));
3988 if (CMOVTrue && CMOVFalse) {
3989 unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
3990 unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
3994 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
3996 False = SelectFalse;
3997 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
4002 if (True.getNode() && False.getNode()) {
4003 EVT VT = Op.getValueType();
4004 SDValue ARMcc = Cond.getOperand(2);
4005 SDValue CCR = Cond.getOperand(3);
4006 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
4007 assert(True.getValueType() == VT);
4008 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
4013 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
4014 // undefined bits before doing a full-word comparison with zero.
4015 Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
4016 DAG.getConstant(1, dl, Cond.getValueType()));
4018 return DAG.getSelectCC(dl, Cond,
4019 DAG.getConstant(0, dl, Cond.getValueType()),
4020 SelectTrue, SelectFalse, ISD::SETNE);
4023 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
4024 bool &swpCmpOps, bool &swpVselOps) {
4025 // Start by selecting the GE condition code for opcodes that return true for
4026 // 'equality'.
4027 if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
4028 CC == ISD::SETULE)
4029 CondCode = ARMCC::GE;
4031 // and GT for opcodes that return false for 'equality'.
4032 else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
4033 CC == ISD::SETULT)
4034 CondCode = ARMCC::GT;
4036 // Since we are constrained to GE/GT, if the opcode contains 'less', we need
4037 // to swap the compare operands.
4038 if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
4042 // Both GT and GE are ordered comparisons, and return false for 'unordered'.
4043 // If we have an unordered opcode, we need to swap the operands to the VSEL
4044 // instruction (effectively negating the condition).
4046 // This also has the effect of swapping which one of 'less' or 'greater'
4047 // returns true, so we also swap the compare operands. It also switches
4048 // whether we return true for 'equality', so we compensate by picking the
4049 // opposite condition code to our original choice.
4050 if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
4051 CC == ISD::SETUGT) {
4052 swpCmpOps = !swpCmpOps;
4053 swpVselOps = !swpVselOps;
4054 CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
4057 // 'ordered' is 'anything but unordered', so use the VS condition code and
4058 // swap the VSEL operands.
4059 if (CC == ISD::SETO) {
4060 CondCode = ARMCC::VS;
4064 // 'unordered or not equal' is 'anything but equal', so use the EQ condition
4065 // code and swap the VSEL operands.
4066 if (CC == ISD::SETUNE) {
4067 CondCode = ARMCC::EQ;
4072 SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
4073 SDValue TrueVal, SDValue ARMcc, SDValue CCR,
4074 SDValue Cmp, SelectionDAG &DAG) const {
4075 if (Subtarget->isFPOnlySP() && VT == MVT::f64) {
4076 FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4077 DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
4078 TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4079 DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
4081 SDValue TrueLow = TrueVal.getValue(0);
4082 SDValue TrueHigh = TrueVal.getValue(1);
4083 SDValue FalseLow = FalseVal.getValue(0);
4084 SDValue FalseHigh = FalseVal.getValue(1);
4086 SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
4088 SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
4089 ARMcc, CCR, duplicateCmp(Cmp, DAG));
4091 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
4093 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
4098 static bool isGTorGE(ISD::CondCode CC) {
4099 return CC == ISD::SETGT || CC == ISD::SETGE;
4102 static bool isLTorLE(ISD::CondCode CC) {
4103 return CC == ISD::SETLT || CC == ISD::SETLE;
4106 // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
4107 // All of these conditions (and their <= and >= counterparts) will do:
4112 static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
4113 const SDValue TrueVal, const SDValue FalseVal,
4114 const ISD::CondCode CC, const SDValue K) {
4115 return (isGTorGE(CC) &&
4116 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
4118 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
4121 // Similar to isLowerSaturate(), but checks for upper-saturating conditions.
4122 static bool isUpperSaturate(const SDValue LHS, const SDValue RHS,
4123 const SDValue TrueVal, const SDValue FalseVal,
4124 const ISD::CondCode CC, const SDValue K) {
4125 return (isGTorGE(CC) &&
4126 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
4128 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
4131 // Check if two chained conditionals could be converted into SSAT.
4133 // SSAT can replace a set of two conditional selectors that bound a number to an
4134 // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples:
4136 // x < -k ? -k : (x > k ? k : x)
4137 // x < -k ? -k : (x < k ? x : k)
4138 // x > -k ? (x > k ? k : x) : -k
4139 // x < k ? (x < -k ? -k : x) : k
4142 // It returns true if the conversion can be done, false otherwise.
4143 // Additionally, the variable is returned in parameter V and the constant in K.
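// For example, with k == 127 the two selects clamp x to [-128, 127], which a
// single 8-bit signed saturate (ssat) can perform on its own.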
4144 static bool isSaturatingConditional(const SDValue &Op, SDValue &V,
4147 SDValue LHS1 = Op.getOperand(0);
4148 SDValue RHS1 = Op.getOperand(1);
4149 SDValue TrueVal1 = Op.getOperand(2);
4150 SDValue FalseVal1 = Op.getOperand(3);
4151 ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4153 const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
4154 if (Op2.getOpcode() != ISD::SELECT_CC)
4157 SDValue LHS2 = Op2.getOperand(0);
4158 SDValue RHS2 = Op2.getOperand(1);
4159 SDValue TrueVal2 = Op2.getOperand(2);
4160 SDValue FalseVal2 = Op2.getOperand(3);
4161 ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();
4163 // Find out which are the constants and which are the variables
4164 // in each conditional
4165 SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
4168 SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
4171 SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
4172 SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
4173 SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
4174 SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;
4176 // We must detect cases where the original operations worked with 16- or
4177 // 8-bit values. In such a case, V2Tmp != V2 because the comparison operations
4178 // must work with sign-extended values but the select operations return
4179 // the original non-extended value.
4180 SDValue V2TmpReg = V2Tmp;
4181 if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG)
4182 V2TmpReg = V2Tmp->getOperand(0);
4184 // Check that the registers and the constants have the correct values
4185 // in both conditionals
4186 if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
4190 // Figure out which conditional is saturating the lower/upper bound.
4191 const SDValue *LowerCheckOp =
4192 isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
4194 : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) ? &Op2
4196 const SDValue *UpperCheckOp =
4197 isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
4199 : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) ? &Op2
4202 if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
4205 // Check that the constant in the lower-bound check is
4206 // the opposite of the constant in the upper-bound check
4207 // in 1's complement.
4208 int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
4209 int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
4210 int64_t PosVal = std::max(Val1, Val2);
4212 if (((Val1 > Val2 && UpperCheckOp == &Op) ||
4213 (Val1 < Val2 && UpperCheckOp == &Op2)) &&
4214 Val1 == ~Val2 && isPowerOf2_64(PosVal + 1)) {
4217 K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive
4224 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
4226 EVT VT = Op.getValueType();
4229 // Try to convert two saturating conditional selects into a single SSAT
4230 SDValue SatValue;
4231 uint64_t SatConstant;
4232 if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) &&
4233 isSaturatingConditional(Op, SatValue, SatConstant))
4234 return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue,
4235 DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
4237 SDValue LHS = Op.getOperand(0);
4238 SDValue RHS = Op.getOperand(1);
4239 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4240 SDValue TrueVal = Op.getOperand(2);
4241 SDValue FalseVal = Op.getOperand(3);
4243 if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
4244 DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
4247 // If softenSetCCOperands only returned one value, we should compare it to
4248 // zero.
4249 if (!RHS.getNode()) {
4250 RHS = DAG.getConstant(0, dl, LHS.getValueType());
4255 if (LHS.getValueType() == MVT::i32) {
4256 // Try to generate VSEL on ARMv8.
4257 // The VSEL instruction can't use all the usual ARM condition
4258 // codes: it only has two bits to select the condition code, so it's
4259 // constrained to use only GE, GT, VS and EQ.
4261 // To implement all the various ISD::SETXXX opcodes, we sometimes need to
4262 // swap the operands of the previous compare instruction (effectively
4263 // inverting the compare condition, swapping 'less' and 'greater') and
4264 // sometimes need to swap the operands to the VSEL (which inverts the
4265 // condition in the sense of firing whenever the previous condition didn't)
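// For example, "a < b ? x : y" (an LT condition) can be re-expressed as
// "a >= b ? y : x", so the GE encoding does the job after the operand swap
// performed below.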
4266 if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
4267 TrueVal.getValueType() == MVT::f64)) {
4268 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4269 if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
4270 CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
4271 CC = ISD::getSetCCInverse(CC, true);
4272 std::swap(TrueVal, FalseVal);
4277 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4278 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4279 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4282 ARMCC::CondCodes CondCode, CondCode2;
4283 FPCCToARMCC(CC, CondCode, CondCode2);
4285 // Try to generate VMAXNM/VMINNM on ARMv8.
4286 if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
4287 TrueVal.getValueType() == MVT::f64)) {
4288 bool swpCmpOps = false;
4289 bool swpVselOps = false;
4290 checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
4292 if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
4293 CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
4295 std::swap(LHS, RHS);
4297 std::swap(TrueVal, FalseVal);
4301 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4302 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
4303 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4304 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4305 if (CondCode2 != ARMCC::AL) {
4306 SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
4307 // FIXME: Needs another CMP because flag can have but one use.
4308 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
4309 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
4314 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
4315 /// to morph to an integer compare sequence.
4316 static bool canChangeToInt(SDValue Op, bool &SeenZero,
4317 const ARMSubtarget *Subtarget) {
4318 SDNode *N = Op.getNode();
4319 if (!N->hasOneUse())
4320 // Otherwise it requires moving the value from fp to integer registers.
4322 if (!N->getNumValues())
4324 EVT VT = Op.getValueType();
4325 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
4326 // f32 case is generally profitable. f64 case only makes sense when vcmpe +
4327 // vmrs are very slow, e.g. cortex-a8.
4330 if (isFloatingPointZero(Op)) {
4334 return ISD::isNormalLoad(N);
4337 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
4338 if (isFloatingPointZero(Op))
4339 return DAG.getConstant(0, SDLoc(Op), MVT::i32);
4341 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
4342 return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
4343 Ld->getPointerInfo(), Ld->getAlignment(),
4344 Ld->getMemOperand()->getFlags());
4346 llvm_unreachable("Unknown VFP cmp argument!");
4349 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
4350 SDValue &RetVal1, SDValue &RetVal2) {
4353 if (isFloatingPointZero(Op)) {
4354 RetVal1 = DAG.getConstant(0, dl, MVT::i32);
4355 RetVal2 = DAG.getConstant(0, dl, MVT::i32);
4359 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
4360 SDValue Ptr = Ld->getBasePtr();
4362 DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
4363 Ld->getAlignment(), Ld->getMemOperand()->getFlags());
4365 EVT PtrType = Ptr.getValueType();
4366 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
4367 SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
4368 PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
4369 RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
4370 Ld->getPointerInfo().getWithOffset(4), NewAlign,
4371 Ld->getMemOperand()->getFlags());
4375 llvm_unreachable("Unknown VFP cmp argument!");
4378 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
4379 /// f32 and even f64 comparisons to integer ones.
4381 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
4382 SDValue Chain = Op.getOperand(0);
4383 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
4384 SDValue LHS = Op.getOperand(2);
4385 SDValue RHS = Op.getOperand(3);
4386 SDValue Dest = Op.getOperand(4);
4389 bool LHSSeenZero = false;
4390 bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
4391 bool RHSSeenZero = false;
4392 bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
4393 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
4394 // If unsafe fp math optimization is enabled and there are no other uses of
4395 // the CMP operands, and the condition code is EQ or NE, we can optimize it
4396 // to an integer comparison.
4397 if (CC == ISD::SETOEQ)
4399 else if (CC == ISD::SETUNE)
4402 SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
4404 if (LHS.getValueType() == MVT::f32) {
4405 LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
4406 bitcastf32Toi32(LHS, DAG), Mask);
4407 RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
4408 bitcastf32Toi32(RHS, DAG), Mask);
4409 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4410 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4411 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
4412 Chain, Dest, ARMcc, CCR, Cmp);
4417 expandf64Toi32(LHS, DAG, LHS1, LHS2);
4418 expandf64Toi32(RHS, DAG, RHS1, RHS2);
4419 LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
4420 RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
4421 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4422 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4423 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
4424 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
4425 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
4431 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
4432 SDValue Chain = Op.getOperand(0);
4433 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
4434 SDValue LHS = Op.getOperand(2);
4435 SDValue RHS = Op.getOperand(3);
4436 SDValue Dest = Op.getOperand(4);
4439 if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
4440 DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
4443 // If softenSetCCOperands only returned one value, we should compare it to
4444 // zero.
4445 if (!RHS.getNode()) {
4446 RHS = DAG.getConstant(0, dl, LHS.getValueType());
4451 if (LHS.getValueType() == MVT::i32) {
4453 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4454 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4455 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
4456 Chain, Dest, ARMcc, CCR, Cmp);
4459 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
4461 if (getTargetMachine().Options.UnsafeFPMath &&
4462 (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
4463 CC == ISD::SETNE || CC == ISD::SETUNE)) {
4464 if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
4468 ARMCC::CondCodes CondCode, CondCode2;
4469 FPCCToARMCC(CC, CondCode, CondCode2);
4471 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4472 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
4473 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4474 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
4475 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
4476 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
4477 if (CondCode2 != ARMCC::AL) {
4478 ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
4479 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
4480 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
4485 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
4486 SDValue Chain = Op.getOperand(0);
4487 SDValue Table = Op.getOperand(1);
4488 SDValue Index = Op.getOperand(2);
4491 EVT PTy = getPointerTy(DAG.getDataLayout());
4492 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
4493 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
4494 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
4495 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
4496 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
4497 if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
4498 // Thumb2 and ARMv8-M use a two-level jump. That is, it jumps into the jump table
4499 // which does another jump to the destination. This also makes it easier
4500 // to translate it to TBB / TBH later (Thumb2 only).
4501 // FIXME: This might not work if the function is extremely large.
4502 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
4503 Addr, Op.getOperand(2), JTI);
4505 if (isPositionIndependent() || Subtarget->isROPI()) {
4507 DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
4508 MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
4509 Chain = Addr.getValue(1);
4510 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
4511 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
4514 DAG.getLoad(PTy, dl, Chain, Addr,
4515 MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
4516 Chain = Addr.getValue(1);
4517 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
4521 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
4522 EVT VT = Op.getValueType();
4525 if (Op.getValueType().getVectorElementType() == MVT::i32) {
4526 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
4528 return DAG.UnrollVectorOp(Op.getNode());
4531 assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
4532 "Invalid type for custom lowering!");
4533 if (VT != MVT::v4i16)
4534 return DAG.UnrollVectorOp(Op.getNode());
4536 Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0));
4537 return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
4540 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
4541 EVT VT = Op.getValueType();
4543 return LowerVectorFP_TO_INT(Op, DAG);
4544 if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) {
4546 if (Op.getOpcode() == ISD::FP_TO_SINT)
4547 LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(),
4550 LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(),
4552 return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
4553 /*isSigned*/ false, SDLoc(Op)).first;
4559 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
4560 EVT VT = Op.getValueType();
4563 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
4564 if (VT.getVectorElementType() == MVT::f32)
4566 return DAG.UnrollVectorOp(Op.getNode());
4569 assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
4570 "Invalid type for custom lowering!");
4571 if (VT != MVT::v4f32)
4572 return DAG.UnrollVectorOp(Op.getNode());
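// For a v4i16 source, widen to v4i32 first (sign- or zero-extending according
// to the opcode selected below) and then use the legal v4i32 -> v4f32 convert.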
4576 switch (Op.getOpcode()) {
4577 default: llvm_unreachable("Invalid opcode!");
4578 case ISD::SINT_TO_FP:
4579 CastOpc = ISD::SIGN_EXTEND;
4580 Opc = ISD::SINT_TO_FP;
4582 case ISD::UINT_TO_FP:
4583 CastOpc = ISD::ZERO_EXTEND;
4584 Opc = ISD::UINT_TO_FP;
4588 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
4589 return DAG.getNode(Opc, dl, VT, Op);
4592 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
4593 EVT VT = Op.getValueType();
4595 return LowerVectorINT_TO_FP(Op, DAG);
4596 if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) {
4598 if (Op.getOpcode() == ISD::SINT_TO_FP)
4599 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
4602 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
4604 return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
4605 /*isSigned*/ false, SDLoc(Op)).first;
4611 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
4612   // Implement fcopysign by transferring the sign bit of operand 1 into operand 0 with bit operations: a NEON bit-select when profitable, integer masking otherwise.
4613 SDValue Tmp0 = Op.getOperand(0);
4614 SDValue Tmp1 = Op.getOperand(1);
4616 EVT VT = Op.getValueType();
4617 EVT SrcVT = Tmp1.getValueType();
4618 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
4619 Tmp0.getOpcode() == ARMISD::VMOVDRR;
4620 bool UseNEON = !InGPR && Subtarget->hasNEON();
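// NEON path: materialize a per-lane mask with only the sign bit set (0x80000000,
// shifted up to bit 63 for f64), then compute (Tmp1 & Mask) | (Tmp0 & ~Mask) so
// the sign comes from Tmp1 and the magnitude from Tmp0; this is exactly the
// dataflow a VBSL performs.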
4623 // Use VBSL to copy the sign bit.
4624 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
4625 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
4626 DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
4627 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
4629 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
4630 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
4631 DAG.getConstant(32, dl, MVT::i32));
4632 else /*if (VT == MVT::f32)*/
4633 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
4634 if (SrcVT == MVT::f32) {
4635 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
4637 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
4638 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
4639 DAG.getConstant(32, dl, MVT::i32));
4640 } else if (VT == MVT::f32)
4641 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
4642 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
4643 DAG.getConstant(32, dl, MVT::i32));
4644 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
4645 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
4647 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
4649 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
4650 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
4651 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
4653 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
4654 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
4655 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
4656 if (VT == MVT::f32) {
4657 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
4658 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
4659 DAG.getConstant(0, dl, MVT::i32));
4661 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
4667 // Bitcast operand 1 to i32.
4668 if (SrcVT == MVT::f64)
4669 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
4671 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
4673 // Or in the signbit with integer operations.
4674 SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
4675 SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
4676 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
4677 if (VT == MVT::f32) {
4678 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
4679 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
4680 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4681 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
4684   // f64: OR the high word with the sign bit, then recombine the two halves.
4685 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
4687 SDValue Lo = Tmp0.getValue(0);
4688 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
4689 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
4690 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
4693 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
4694 MachineFunction &MF = DAG.getMachineFunction();
4695 MachineFrameInfo &MFI = MF.getFrameInfo();
4696 MFI.setReturnAddressIsTaken(true);
4698 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
4701 EVT VT = Op.getValueType();
4703 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
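// For a non-zero depth, LowerFRAMEADDR below walks up the chain of saved frame
// pointers; the return address of that frame is then loaded from the word at
// FrameAddr + 4, where this lowering expects it to have been spilled.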
4705 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
4706 SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
4707 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
4708 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
4709 MachinePointerInfo());
4712 // Return LR, which contains the return address. Mark it an implicit live-in.
4713 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
4714 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
4717 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
4718 const ARMBaseRegisterInfo &ARI =
4719 *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
4720 MachineFunction &MF = DAG.getMachineFunction();
4721 MachineFrameInfo &MFI = MF.getFrameInfo();
4722 MFI.setFrameAddressIsTaken(true);
4724 EVT VT = Op.getValueType();
4725 SDLoc dl(Op); // FIXME probably not meaningful
4726 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4727 unsigned FrameReg = ARI.getFrameRegister(MF);
4728 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
4730 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
4731 MachinePointerInfo());
4735 // FIXME? Maybe this could be a TableGen attribute on some registers and
4736 // this table could be generated automatically from RegInfo.
4737 unsigned ARMTargetLowering::getRegisterByName(const char* RegName, EVT VT,
4738 SelectionDAG &DAG) const {
4739 unsigned Reg = StringSwitch<unsigned>(RegName)
4740 .Case("sp", ARM::SP)
4744 report_fatal_error(Twine("Invalid register name \""
4745 + StringRef(RegName) + "\"."));
4748 // The result is a 64-bit value, so split it into two 32-bit values and return them as a pair.
4750 static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
4751 SelectionDAG &DAG) {
4754   // This function is only supposed to be called for an i64 destination type.
4755 assert(N->getValueType(0) == MVT::i64
4756 && "ExpandREAD_REGISTER called for non-i64 type result.");
4758 SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
4759 DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
4763 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
4765 Results.push_back(Read.getOperand(0));
4768 /// \p BC is a bitcast that is about to be turned into a VMOVDRR.
4769 /// When \p DstVT, the destination type of \p BC, is on the vector
4770 /// register bank and the source of bitcast, \p Op, operates on the same bank,
4771 /// it might be possible to combine them, such that everything stays on the
4772 /// vector register bank.
4773 /// \return The node that would replace \p BC if the combine succeeds, or SDValue() otherwise.
4775 static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
4776 SelectionDAG &DAG) {
4777 SDValue Op = BC->getOperand(0);
4778 EVT DstVT = BC->getValueType(0);
4780 // The only vector instruction that can produce a scalar (remember,
4781 // since the bitcast was about to be turned into VMOVDRR, the source
4782 // type is i64) from a vector is EXTRACT_VECTOR_ELT.
4783 // Moreover, we can do this combine only if there is one use.
4784 // Finally, if the destination type is not a vector, there is not
4785   // much point in forcing everything onto the vector bank.
4786 if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4790 // If the index is not constant, we will introduce an additional
4791 // multiply that will stick.
4792 // Give up in that case.
4793 ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
4796 unsigned DstNumElt = DstVT.getVectorNumElements();
4798 // Compute the new index.
4799 const APInt &APIntIndex = Index->getAPIntValue();
4800 APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
4801 NewIndex *= APIntIndex;
4802 // Check if the new constant index fits into i32.
4803 if (NewIndex.getBitWidth() > 32)
4806 // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
4807   // vMTy extractsubvector (vNxMTy bitcast vNi64 src), i32 index*M
4809 SDValue ExtractSrc = Op.getOperand(0);
4810 EVT VecVT = EVT::getVectorVT(
4811 *DAG.getContext(), DstVT.getScalarType(),
4812 ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
4813 SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
4814 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
4815 DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
4818 /// ExpandBITCAST - If the target supports VFP, this function is called to
4819 /// expand a bit convert where either the source or destination type is i64 to
4820 /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
4821 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
4822 /// vectors), since the legalizer won't know what to do with that.
4823 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
4824 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4826 SDValue Op = N->getOperand(0);
4828 // This function is only supposed to be called for i64 types, either as the
4829 // source or destination of the bit convert.
4830 EVT SrcVT = Op.getValueType();
4831 EVT DstVT = N->getValueType(0);
4832 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
4833 "ExpandBITCAST called for non-i64 type");
4835 // Turn i64->f64 into VMOVDRR.
4836 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
4837 // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
4838 // if we can combine the bitcast with its source.
4839 if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
4842 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
4843 DAG.getConstant(0, dl, MVT::i32));
4844 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
4845 DAG.getConstant(1, dl, MVT::i32));
4846 return DAG.getNode(ISD::BITCAST, dl, DstVT,
4847 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
4850 // Turn f64->i64 into VMOVRRD.
4851 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
4853 if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
4854 SrcVT.getVectorNumElements() > 1)
4855 Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
4856 DAG.getVTList(MVT::i32, MVT::i32),
4857 DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
4859 Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
4860 DAG.getVTList(MVT::i32, MVT::i32), Op);
4861 // Merge the pieces into a single i64 value.
4862 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
4868 /// getZeroVector - Returns a vector of specified type with all zero elements.
4869 /// Zero vectors are used to represent vector negation and in those cases
4870 /// will be implemented with the NEON VNEG instruction. However, VNEG does
4871 /// not support i64 elements, so sometimes the zero vectors will need to be
4872 /// explicitly constructed. Regardless, use a canonical VMOV to create the zero vector.
4874 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
4875 assert(VT.isVector() && "Expected a vector type");
4876 // The canonical modified immediate encoding of a zero vector is....0!
4877 SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
4878 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
4879 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
4880 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
4883 /// LowerShiftRightParts - Lower SRA_PARTS, which returns two
4884 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
4885 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
4886 SelectionDAG &DAG) const {
4887 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
4888 EVT VT = Op.getValueType();
4889 unsigned VTBits = VT.getSizeInBits();
4891 SDValue ShOpLo = Op.getOperand(0);
4892 SDValue ShOpHi = Op.getOperand(1);
4893 SDValue ShAmt = Op.getOperand(2);
4895 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4896 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
4898 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
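// Compute both halves twice, once assuming the shift amount is below VTBits
// ("small") and once assuming it is VTBits or more ("big"), then select the
// right version with a CMOV keyed on ExtraShAmt = ShAmt - VTBits being
// non-negative.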
4900 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
4901 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
4902 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
4903 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
4904 DAG.getConstant(VTBits, dl, MVT::i32));
4905 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
4906 SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
4907 SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
4908 SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4909 ISD::SETGE, ARMcc, DAG, dl);
4910 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
4914 SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
4915 SDValue HiBigShift = Opc == ISD::SRA
4916 ? DAG.getNode(Opc, dl, VT, ShOpHi,
4917 DAG.getConstant(VTBits - 1, dl, VT))
4918 : DAG.getConstant(0, dl, VT);
4919 SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4920 ISD::SETGE, ARMcc, DAG, dl);
4921 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
4924 SDValue Ops[2] = { Lo, Hi };
4925 return DAG.getMergeValues(Ops, dl);
4928 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
4929 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
4930 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
4931 SelectionDAG &DAG) const {
4932 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
4933 EVT VT = Op.getValueType();
4934 unsigned VTBits = VT.getSizeInBits();
4936 SDValue ShOpLo = Op.getOperand(0);
4937 SDValue ShOpHi = Op.getOperand(1);
4938 SDValue ShAmt = Op.getOperand(2);
4940 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4942 assert(Op.getOpcode() == ISD::SHL_PARTS);
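// Same idea as LowerShiftRightParts: compute results for shift amounts below
// VTBits and for VTBits or more, then pick between them with CMOVs on
// ShAmt - VTBits >= 0. For the big-shift case the low word is simply zero.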
4943 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
4944 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
4945 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
4946 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
4947 SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
4949 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
4950 DAG.getConstant(VTBits, dl, MVT::i32));
4951 SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
4952 SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4953 ISD::SETGE, ARMcc, DAG, dl);
4954 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
4957 SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4958 ISD::SETGE, ARMcc, DAG, dl);
4959 SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
4960 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
4961 DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);
4963 SDValue Ops[2] = { Lo, Hi };
4964 return DAG.getMergeValues(Ops, dl);
4967 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
4968 SelectionDAG &DAG) const {
4969 // The rounding mode is in bits 23:22 of the FPSCR.
4970 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
4971 // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
4972 // so that the shift and the AND get folded into a single bitfield extract.
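// For example, an FPSCR rounding-mode field of 3 (round toward zero) yields
// ((3 + 1) & 3) = 0, which is the FLT_ROUNDS encoding of round-toward-zero.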
4974 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
4975 DAG.getConstant(Intrinsic::arm_get_fpscr, dl,
4977 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
4978 DAG.getConstant(1U << 22, dl, MVT::i32));
4979 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
4980 DAG.getConstant(22, dl, MVT::i32));
4981 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
4982 DAG.getConstant(3, dl, MVT::i32));
4985 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
4986 const ARMSubtarget *ST) {
4988 EVT VT = N->getValueType(0);
4989 if (VT.isVector()) {
4990 assert(ST->hasNEON());
4992 // Compute the least significant set bit: LSB = X & -X
4993 SDValue X = N->getOperand(0);
4994 SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
4995 SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);
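// For example, x = 0b01101000 gives LSB = 0b00001000; LSB - 1 = 0b00000111 has
// three set bits, and cttz(x) is indeed 3.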
4997 EVT ElemTy = VT.getVectorElementType();
4999 if (ElemTy == MVT::i8) {
5000 // Compute with: cttz(x) = ctpop(lsb - 1)
5001 SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5002 DAG.getTargetConstant(1, dl, ElemTy));
5003 SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
5004 return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
5007 if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
5008 (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
5009 // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
5010 unsigned NumBits = ElemTy.getSizeInBits();
5011 SDValue WidthMinus1 =
5012 DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5013 DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
5014 SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
5015 return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
5018 // Compute with: cttz(x) = ctpop(lsb - 1)
5020 // Since we can only compute the number of bits in a byte with vcnt.8, we
5021 // have to gather the result with pairwise addition (vpaddl) for i16, i32, and i64.
5026 if (ElemTy == MVT::i64) {
5027 // Load constant 0xffff'ffff'ffff'ffff to register.
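// (0x1eff is the encoded form Op=1, Cmode=1110, Imm=0xff, i.e. every byte of
// the 64-bit element set to 0xff; see the 64-bit case of isNEONModifiedImm
// further down in this file.)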
5028 SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5029 DAG.getTargetConstant(0x1eff, dl, MVT::i32));
5030 Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
5032 SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5033 DAG.getTargetConstant(1, dl, ElemTy));
5034 Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
5037 // Count #bits with vcnt.8.
5038 EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
5039 SDValue BitsVT8 = DAG.getNode(ISD::BITCAST, dl, VT8Bit, Bits);
5040 SDValue Cnt8 = DAG.getNode(ISD::CTPOP, dl, VT8Bit, BitsVT8);
5042 // Gather the #bits with vpaddl (pairwise add.)
5043 EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
5044 SDValue Cnt16 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT16Bit,
5045 DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
5047 if (ElemTy == MVT::i16)
5050 EVT VT32Bit = VT.is64BitVector() ? MVT::v2i32 : MVT::v4i32;
5051 SDValue Cnt32 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT32Bit,
5052 DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
5054 if (ElemTy == MVT::i32)
5057 assert(ElemTy == MVT::i64);
5058 SDValue Cnt64 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5059 DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
5064 if (!ST->hasV6T2Ops())
5067 SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
5068 return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
5071 /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count
5072 /// for each 16-bit element of the operand, repeated. The basic idea is to
5073 /// leverage vcnt to get the 8-bit counts, gather and add the results.
5075 /// Trace for v4i16:
5076 /// input = [v0 v1 v2 v3 ] (vi 16-bit element)
5077 /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element)
5078 /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi)
5079 /// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6]
5080 /// [b0 b1 b2 b3 b4 b5 b6 b7]
5081 /// +[b1 b0 b3 b2 b5 b4 b7 b6]
5082 /// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0,
5083 /// vuzp: = [k0 k1 k2 k3 k0 k1 k2 k3] each ki is 8-bits)
5084 static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
5085 EVT VT = N->getValueType(0);
5088 EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
5089 SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0));
5090 SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0);
5091 SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1);
5092 SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2);
5093 return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3);
5096 /// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the
5097 /// bit-count for each 16-bit element from the operand. We need slightly
5098 /// different sequencing for v4i16 and v8i16 to stay within NEON's available
5099 /// 64/128-bit registers.
5101 /// Trace for v4i16:
5102 /// input = [v0 v1 v2 v3 ] (vi 16-bit element)
5103 /// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi)
5104 /// v8i16:Extended = [k0 k1 k2 k3 k0 k1 k2 k3 ]
5105 /// v4i16:Extracted = [k0 k1 k2 k3 ]
5106 static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
5107 EVT VT = N->getValueType(0);
5110 SDValue BitCounts = getCTPOP16BitCounts(N, DAG);
5111 if (VT.is64BitVector()) {
5112 SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts);
5113 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended,
5114 DAG.getIntPtrConstant(0, DL));
5116 SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8,
5117 BitCounts, DAG.getIntPtrConstant(0, DL));
5118 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted);
5122 /// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the
5123 /// bit-count for each 32-bit element from the operand. The idea here is
5124 /// to split the vector into 16-bit elements, leverage the 16-bit count
5125 /// routine, and then combine the results.
5127 /// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged):
5128 /// input = [v0 v1 ] (vi: 32-bit elements)
5129 /// Bitcast = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1])
5130 /// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi)
5131 /// vrev: N0 = [k1 k0 k3 k2 ]
5133 /// N1 =+[k1 k0 k3 k2 ]
5135 /// N2 =+[k1 k3 k0 k2 ]
5137 /// Extended =+[k1 k3 k0 k2 ]
5139 /// Extracted=+[k1 k3 ]
5141 static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) {
5142 EVT VT = N->getValueType(0);
5145 EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
5147 SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0));
5148 SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG);
5149 SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16);
5150 SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0);
5151 SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1);
5153 if (VT.is64BitVector()) {
5154 SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2);
5155 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended,
5156 DAG.getIntPtrConstant(0, DL));
5158 SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2,
5159 DAG.getIntPtrConstant(0, DL));
5160 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted);
5164 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
5165 const ARMSubtarget *ST) {
5166 EVT VT = N->getValueType(0);
5168 assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
5169 assert((VT == MVT::v2i32 || VT == MVT::v4i32 ||
5170 VT == MVT::v4i16 || VT == MVT::v8i16) &&
5171 "Unexpected type for custom ctpop lowering");
5173 if (VT.getVectorElementType() == MVT::i32)
5174 return lowerCTPOP32BitElements(N, DAG);
5176 return lowerCTPOP16BitElements(N, DAG);
5179 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
5180 const ARMSubtarget *ST) {
5181 EVT VT = N->getValueType(0);
5187 // Lower vector shifts on NEON to use VSHL.
5188 assert(ST->hasNEON() && "unexpected vector shift");
5190 // Left shifts translate directly to the vshiftu intrinsic.
5191 if (N->getOpcode() == ISD::SHL)
5192 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5193 DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl,
5195 N->getOperand(0), N->getOperand(1));
5197 assert((N->getOpcode() == ISD::SRA ||
5198 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
5200 // NEON uses the same intrinsics for both left and right shifts. For
5201 // right shifts, the shift amounts are negative, so negate the vector of shift amounts.
5203 EVT ShiftVT = N->getOperand(1).getValueType();
5204 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
5205 getZeroVector(ShiftVT, DAG, dl),
5207 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
5208 Intrinsic::arm_neon_vshifts :
5209 Intrinsic::arm_neon_vshiftu);
5210 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5211 DAG.getConstant(vshiftInt, dl, MVT::i32),
5212 N->getOperand(0), NegatedCount);
5215 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
5216 const ARMSubtarget *ST) {
5217 EVT VT = N->getValueType(0);
5220 // We can get here for a node like i32 = ISD::SHL i32, i64
5224 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
5225 "Unknown shift to lower!");
5227 // We only lower SRA, SRL of 1 here, all others use generic lowering.
5228 if (!isOneConstant(N->getOperand(1)))
5231 // If we are in thumb mode, we don't have RRX.
5232 if (ST->isThumb1Only()) return SDValue();
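// In ARM assembly the result is roughly (register assignment illustrative only):
//   lsrs/asrs rHi, rHi, #1   @ shift the high word; shifted-out bit -> carry
//   rrx       rLo, rLo       @ rotate the low word right through the carry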
5234 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
5235 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
5236 DAG.getConstant(0, dl, MVT::i32));
5237 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
5238 DAG.getConstant(1, dl, MVT::i32));
5240 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
5241 // captures the shifted-out bit in the carry flag.
5242 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
5243 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);
5245 // The low part is an ARMISD::RRX operand, which shifts the carry in.
5246 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
5248 // Merge the pieces into a single i64 value.
5249 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5252 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
5253 SDValue TmpOp0, TmpOp1;
5254 bool Invert = false;
5258 SDValue Op0 = Op.getOperand(0);
5259 SDValue Op1 = Op.getOperand(1);
5260 SDValue CC = Op.getOperand(2);
5261 EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
5262 EVT VT = Op.getValueType();
5263 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
5266 if (Op0.getValueType().getVectorElementType() == MVT::i64 &&
5267 (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) {
5268 // Special-case integer 64-bit equality comparisons. They aren't legal,
5269 // but they can be lowered with a few vector instructions.
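// The trick: compare the 32-bit halves for equality, AND the result with a
// VREV64-swapped copy of itself so each 64-bit lane is all-ones only when both
// of its halves compared equal, and invert the whole thing for SETNE.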
5270 unsigned CmpElements = CmpVT.getVectorNumElements() * 2;
5271 EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements);
5272 SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0);
5273 SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1);
5274 SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1,
5275 DAG.getCondCode(ISD::SETEQ));
5276 SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp);
5277 SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed);
5278 Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged);
5279 if (SetCCOpcode == ISD::SETNE)
5280 Merged = DAG.getNOT(dl, Merged, CmpVT);
5281 Merged = DAG.getSExtOrTrunc(Merged, dl, VT);
5285 if (CmpVT.getVectorElementType() == MVT::i64)
5286 // 64-bit comparisons are not legal in general.
5289 if (Op1.getValueType().isFloatingPoint()) {
5290 switch (SetCCOpcode) {
5291 default: llvm_unreachable("Illegal FP comparison");
5293 case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH;
5295 case ISD::SETEQ: Opc = ARMISD::VCEQ; break;
5297 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
5299 case ISD::SETGT: Opc = ARMISD::VCGT; break;
5301 case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH;
5303 case ISD::SETGE: Opc = ARMISD::VCGE; break;
5304 case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
5305 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
5306 case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
5307 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
5308 case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
5310 // Expand this to (OLT | OGT).
5314 Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
5315 Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
5321 // Expand this to (OLT | OGE).
5325 Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
5326 Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1);
5330 // Integer comparisons.
5331 switch (SetCCOpcode) {
5332 default: llvm_unreachable("Illegal integer comparison");
5333 case ISD::SETNE: Invert = true;
5334 case ISD::SETEQ: Opc = ARMISD::VCEQ; break;
5335 case ISD::SETLT: Swap = true;
5336 case ISD::SETGT: Opc = ARMISD::VCGT; break;
5337 case ISD::SETLE: Swap = true;
5338 case ISD::SETGE: Opc = ARMISD::VCGE; break;
5339 case ISD::SETULT: Swap = true;
5340 case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
5341 case ISD::SETULE: Swap = true;
5342 case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
5345 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
5346 if (Opc == ARMISD::VCEQ) {
5349 if (ISD::isBuildVectorAllZeros(Op1.getNode()))
5351 else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
5354 // Ignore bitconvert.
5355 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
5356 AndOp = AndOp.getOperand(0);
5358 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
5360 Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
5361 Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
5368 std::swap(Op0, Op1);
5370 // If one of the operands is a constant vector zero, attempt to fold the
5371 // comparison to a specialized compare-against-zero form.
5373 if (ISD::isBuildVectorAllZeros(Op1.getNode()))
5375 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
5376 if (Opc == ARMISD::VCGE)
5377 Opc = ARMISD::VCLEZ;
5378 else if (Opc == ARMISD::VCGT)
5379 Opc = ARMISD::VCLTZ;
5384 if (SingleOp.getNode()) {
5387 Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break;
5389 Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break;
5391 Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break;
5393 Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break;
5395 Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break;
5397 Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
5400 Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
5403 Result = DAG.getSExtOrTrunc(Result, dl, VT);
5406 Result = DAG.getNOT(dl, Result, VT);
5411 static SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) {
5412 SDValue LHS = Op.getOperand(0);
5413 SDValue RHS = Op.getOperand(1);
5414 SDValue Carry = Op.getOperand(2);
5415 SDValue Cond = Op.getOperand(3);
5418 assert(LHS.getSimpleValueType().isInteger() && "SETCCE is integer only.");
5420 assert(Carry.getOpcode() != ISD::CARRY_FALSE);
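// Emit a subtract that consumes the incoming carry (ARMISD::SUBE) purely for
// its flags, copy those flags into CPSR, and materialize the boolean result
// with a CMOV that picks 1 or 0 according to the requested condition.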
5421 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
5422 SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);
5424 SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
5425 SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
5426 SDValue ARMcc = DAG.getConstant(
5427 IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
5428 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5429 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
5430 Cmp.getValue(1), SDValue());
5431 return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
5432 CCR, Chain.getValue(1));
5435 /// isNEONModifiedImm - Check if the specified splat value corresponds to a
5436 /// valid vector constant for a NEON instruction with a "modified immediate"
5437 /// operand (e.g., VMOV). If so, return the encoded value.
5438 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
5439 unsigned SplatBitSize, SelectionDAG &DAG,
5440 const SDLoc &dl, EVT &VT, bool is128Bits,
5441 NEONModImmType type) {
5442 unsigned OpCmode, Imm;
5444 // SplatBitSize is set to the smallest size that splats the vector, so a
5445 // zero vector will always have SplatBitSize == 8. However, NEON modified
5446 // immediate instructions other than VMOV do not support the 8-bit encoding
5447 // of a zero vector, and the default encoding of zero is supposed to be the 32-bit version.
5452 switch (SplatBitSize) {
5454 if (type != VMOVModImm)
5456 // Any 1-byte value is OK. Op=0, Cmode=1110.
5457 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
5460 VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
5464 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
5465 VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
5466 if ((SplatBits & ~0xff) == 0) {
5467 // Value = 0x00nn: Op=x, Cmode=100x.
5472 if ((SplatBits & ~0xff00) == 0) {
5473 // Value = 0xnn00: Op=x, Cmode=101x.
5475 Imm = SplatBits >> 8;
5481 // NEON's 32-bit VMOV supports splat values where:
5482 // * only one byte is nonzero, or
5483 // * the least significant byte is 0xff and the second byte is nonzero, or
5484 // * the least significant 2 bytes are 0xff and the third is nonzero.
5485 VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
5486 if ((SplatBits & ~0xff) == 0) {
5487 // Value = 0x000000nn: Op=x, Cmode=000x.
5492 if ((SplatBits & ~0xff00) == 0) {
5493 // Value = 0x0000nn00: Op=x, Cmode=001x.
5495 Imm = SplatBits >> 8;
5498 if ((SplatBits & ~0xff0000) == 0) {
5499 // Value = 0x00nn0000: Op=x, Cmode=010x.
5501 Imm = SplatBits >> 16;
5504 if ((SplatBits & ~0xff000000) == 0) {
5505 // Value = 0xnn000000: Op=x, Cmode=011x.
5507 Imm = SplatBits >> 24;
5511 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
5512 if (type == OtherModImm) return SDValue();
5514 if ((SplatBits & ~0xffff) == 0 &&
5515 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
5516 // Value = 0x0000nnff: Op=x, Cmode=1100.
5518 Imm = SplatBits >> 8;
5522 if ((SplatBits & ~0xffffff) == 0 &&
5523 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
5524 // Value = 0x00nnffff: Op=x, Cmode=1101.
5526 Imm = SplatBits >> 16;
5530 // Note: there are a few 32-bit splat values (specifically: 00ffff00,
5531 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
5532 // VMOV.I32. A (very) minor optimization would be to replicate the value
5533 // and fall through here to test for a valid 64-bit splat. But, then the
5534 // caller would also need to check and handle the change in size.
5538 if (type != VMOVModImm)
5540 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
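// For example, the splat value 0x0000ffffffff0000 has bytes 2-5 set, so the
// loop below produces Imm = 0b00111100 (0x3c).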
5541 uint64_t BitMask = 0xff;
5543 unsigned ImmMask = 1;
5545 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
5546 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
5549 } else if ((SplatBits & BitMask) != 0) {
5556 if (DAG.getDataLayout().isBigEndian())
5557 // swap higher and lower 32 bit word
5558 Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
5560 // Op=1, Cmode=1110.
5562 VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
5567 llvm_unreachable("unexpected size for isNEONModifiedImm");
5570 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
5571 return DAG.getTargetConstant(EncodedVal, dl, MVT::i32);
5574 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
5575 const ARMSubtarget *ST) const {
5576 bool IsDouble = Op.getValueType() == MVT::f64;
5577 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
5578 const APFloat &FPVal = CFP->getValueAPF();
5580 // Prevent floating-point constants from using literal loads
5581 // when execute-only is enabled.
5582 if (ST->genExecuteOnly()) {
5583 APInt INTVal = FPVal.bitcastToAPInt();
5586 SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32);
5587 SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32);
5588 if (!ST->isLittle())
5590 return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi);
5592 return DAG.getConstant(INTVal, DL, MVT::i32);
5599 // Use the default (constant pool) lowering for double constants when we have an FPU that only supports single precision.
5601 if (IsDouble && Subtarget->isFPOnlySP())
5604 // Try splatting with a VMOV.f32...
5605 int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
5608 if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
5609 // We have code in place to select a valid ConstantFP already, no need to do any more work here.
5614 // It's a float and we are trying to use NEON operations where
5615 // possible. Lower it to a splat followed by an extract.
5617 SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32);
5618 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
5620 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
5621 DAG.getConstant(0, DL, MVT::i32));
5624 // The rest of our options are NEON only, make sure that's allowed before proceeding.
5626 if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
5630 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
5632 // It wouldn't really be worth bothering for doubles except for one very
5633 // important value, which does happen to match: 0.0. So bail out for any double whose two 32-bit halves differ.
5635 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
5638 // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
5639 SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op),
5640 VMovVT, false, VMOVModImm);
5641 if (NewVal != SDValue()) {
5643 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
5646 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
5648 // It's a float: cast and extract a vector element.
5649 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
5651 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
5652 DAG.getConstant(0, DL, MVT::i32));
5655 // Finally, try a VMVN.i32
5656 NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT,
5658 if (NewVal != SDValue()) {
5660 SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
5663 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
5665 // It's a float: cast and extract a vector element.
5666 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
5668 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
5669 DAG.getConstant(0, DL, MVT::i32));
5675 // Check whether a VEXT instruction can handle the shuffle mask when the
5676 // vector sources of the shuffle are the same.
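// For example, for v8i8 the mask <5, 6, 7, 0, 1, 2, 3, 4> is matched with
// Imm = 5: a VEXT of the vector with itself.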
5677 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
5678 unsigned NumElts = VT.getVectorNumElements();
5680 // Assume that the first shuffle index is not UNDEF. Fail if it is.
5686 // If this is a VEXT shuffle, the immediate value is the index of the first
5687 // element. The other shuffle indices must be the successive elements after the first one.
5689 unsigned ExpectedElt = Imm;
5690 for (unsigned i = 1; i < NumElts; ++i) {
5691 // Increment the expected index. If it wraps around, just follow it
5692 // back to index zero and keep going.
5694 if (ExpectedElt == NumElts)
5697 if (M[i] < 0) continue; // ignore UNDEF indices
5698 if (ExpectedElt != static_cast<unsigned>(M[i]))
5706 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
5707 bool &ReverseVEXT, unsigned &Imm) {
5708 unsigned NumElts = VT.getVectorNumElements();
5709 ReverseVEXT = false;
5711 // Assume that the first shuffle index is not UNDEF. Fail if it is.
5717 // If this is a VEXT shuffle, the immediate value is the index of the first
5718 // element. The other shuffle indices must be the successive elements after the first one.
5720 unsigned ExpectedElt = Imm;
5721 for (unsigned i = 1; i < NumElts; ++i) {
5722 // Increment the expected index. If it wraps around, it may still be
5723 // a VEXT but the source vectors must be swapped.
5725 if (ExpectedElt == NumElts * 2) {
5730 if (M[i] < 0) continue; // ignore UNDEF indices
5731 if (ExpectedElt != static_cast<unsigned>(M[i]))
5735 // Adjust the index value if the source operands will be swapped.
5742 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
5743 /// instruction with the specified blocksize. (The order of the elements
5744 /// within each block of the vector is reversed.)
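/// For example, for v8i8 with BlockSize == 32 the mask <3, 2, 1, 0, 7, 6, 5, 4>
/// reverses the bytes within each 32-bit block and corresponds to VREV32.8.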
5745 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
5746 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
5747 "Only possible block sizes for VREV are: 16, 32, 64");
5749 unsigned EltSz = VT.getScalarSizeInBits();
5753 unsigned NumElts = VT.getVectorNumElements();
5754 unsigned BlockElts = M[0] + 1;
5755 // If the first shuffle index is UNDEF, be optimistic.
5757 BlockElts = BlockSize / EltSz;
5759 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
5762 for (unsigned i = 0; i < NumElts; ++i) {
5763 if (M[i] < 0) continue; // ignore UNDEF indices
5764 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
5771 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
5772 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
5773 // range, then 0 is placed into the resulting vector. So pretty much any mask
5774 // of 8 elements can work here.
5775 return VT == MVT::v8i8 && M.size() == 8;
5778 // Checks whether the shuffle mask represents a vector transpose (VTRN) by
5779 // checking that pairs of elements in the shuffle mask represent the same index
5780 // in each vector, incrementing the expected index by 2 at each step.
5781 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
5782 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
5784 // WhichResult gives the offset for each element in the mask based on which
5785 // of the two results it belongs to.
5787 // The transpose can be represented either as:
5788 // result1 = shufflevector v1, v2, result1_shuffle_mask
5789 // result2 = shufflevector v1, v2, result2_shuffle_mask
5790 // where v1/v2 and the shuffle masks have the same number of elements
5791 // (here WhichResult (see below) indicates which result is being checked)
5794 // results = shufflevector v1, v2, shuffle_mask
5795 // where both results are returned in one vector and the shuffle mask has twice
5796 // as many elements as v1/v2 (here WhichResult will always be 0 if true); here we
5797 // want to check the low half and the high half of the shuffle mask as if each were a separate mask of the first form.
5799 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
5800 unsigned EltSz = VT.getScalarSizeInBits();
5804 unsigned NumElts = VT.getVectorNumElements();
5805 if (M.size() != NumElts && M.size() != NumElts*2)
5808 // If the mask is twice as long as the input vector then we need to check the
5809 // upper and lower parts of the mask with a matching value for WhichResult
5810 // FIXME: A mask with only even values will be rejected in case the first
5811 // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only
5812 // M[0] is used to determine WhichResult
5813 for (unsigned i = 0; i < M.size(); i += NumElts) {
5814 if (M.size() == NumElts * 2)
5815 WhichResult = i / NumElts;
5817 WhichResult = M[i] == 0 ? 0 : 1;
5818 for (unsigned j = 0; j < NumElts; j += 2) {
5819 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
5820 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult))
5825 if (M.size() == NumElts*2)
5831 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
5832 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
5833 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
5834 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
5835 unsigned EltSz = VT.getScalarSizeInBits();
5839 unsigned NumElts = VT.getVectorNumElements();
5840 if (M.size() != NumElts && M.size() != NumElts*2)
5843 for (unsigned i = 0; i < M.size(); i += NumElts) {
5844 if (M.size() == NumElts * 2)
5845 WhichResult = i / NumElts;
5847 WhichResult = M[i] == 0 ? 0 : 1;
5848 for (unsigned j = 0; j < NumElts; j += 2) {
5849 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
5850 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult))
5855 if (M.size() == NumElts*2)
5861 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
5862 // that the mask elements are either all even and in steps of size 2 or all odd
5863 // and in steps of size 2.
5864 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
5865 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
5867 // Requires similar checks to that of isVTRNMask with
5868 // respect to how the results are returned.
5869 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
5870 unsigned EltSz = VT.getScalarSizeInBits();
5874 unsigned NumElts = VT.getVectorNumElements();
5875 if (M.size() != NumElts && M.size() != NumElts*2)
5878 for (unsigned i = 0; i < M.size(); i += NumElts) {
5879 WhichResult = M[i] == 0 ? 0 : 1;
5880 for (unsigned j = 0; j < NumElts; ++j) {
5881 if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult)
5886 if (M.size() == NumElts*2)
5889 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5890 if (VT.is64BitVector() && EltSz == 32)
5896 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
5897 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
5898 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>,
5899 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
5900 unsigned EltSz = VT.getScalarSizeInBits();
5904 unsigned NumElts = VT.getVectorNumElements();
5905 if (M.size() != NumElts && M.size() != NumElts*2)
5908 unsigned Half = NumElts / 2;
5909 for (unsigned i = 0; i < M.size(); i += NumElts) {
5910 WhichResult = M[i] == 0 ? 0 : 1;
5911 for (unsigned j = 0; j < NumElts; j += Half) {
5912 unsigned Idx = WhichResult;
5913 for (unsigned k = 0; k < Half; ++k) {
5914 int MIdx = M[i + j + k];
5915 if (MIdx >= 0 && (unsigned) MIdx != Idx)
5922 if (M.size() == NumElts*2)
5925 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5926 if (VT.is64BitVector() && EltSz == 32)
5932 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking
5933 // that pairs of elements of the shufflemask represent the same index in each
5934 // vector incrementing sequentially through the vectors.
5935 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
5936 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
5938 // Requires similar checks to that of isVTRNMask with respect to how the results are returned.
5940 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
5941 unsigned EltSz = VT.getScalarSizeInBits();
5945 unsigned NumElts = VT.getVectorNumElements();
5946 if (M.size() != NumElts && M.size() != NumElts*2)
5949 for (unsigned i = 0; i < M.size(); i += NumElts) {
5950 WhichResult = M[i] == 0 ? 0 : 1;
5951 unsigned Idx = WhichResult * NumElts / 2;
5952 for (unsigned j = 0; j < NumElts; j += 2) {
5953 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
5954 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts))
5960 if (M.size() == NumElts*2)
5963 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5964 if (VT.is64BitVector() && EltSz == 32)
5970 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
5971 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
5972 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
5973 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
5974 unsigned EltSz = VT.getScalarSizeInBits();
5978 unsigned NumElts = VT.getVectorNumElements();
5979 if (M.size() != NumElts && M.size() != NumElts*2)
5982 for (unsigned i = 0; i < M.size(); i += NumElts) {
5983 WhichResult = M[i] == 0 ? 0 : 1;
5984 unsigned Idx = WhichResult * NumElts / 2;
5985 for (unsigned j = 0; j < NumElts; j += 2) {
5986 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
5987 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx))
5993 if (M.size() == NumElts*2)
5996 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5997 if (VT.is64BitVector() && EltSz == 32)
6003 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
6004 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
6005 static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
6006 unsigned &WhichResult,
6009 if (isVTRNMask(ShuffleMask, VT, WhichResult))
6010 return ARMISD::VTRN;
6011 if (isVUZPMask(ShuffleMask, VT, WhichResult))
6012 return ARMISD::VUZP;
6013 if (isVZIPMask(ShuffleMask, VT, WhichResult))
6014 return ARMISD::VZIP;
6017 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
6018 return ARMISD::VTRN;
6019 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
6020 return ARMISD::VUZP;
6021 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
6022 return ARMISD::VZIP;
6027 /// \return true if this is a reverse operation on a vector.
6028 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
6029 unsigned NumElts = VT.getVectorNumElements();
6030 // Make sure the mask has the right size.
6031 if (NumElts != M.size())
6034 // Look for <15, ..., 3, -1, 1, 0>.
6035 for (unsigned i = 0; i != NumElts; ++i)
6036 if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
6042 // If N is an integer constant that can be moved into a register in one
6043 // instruction, return an SDValue of such a constant (will become a MOV
6044 // instruction). Otherwise return null.
6045 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
6046 const ARMSubtarget *ST, const SDLoc &dl) {
6048 if (!isa<ConstantSDNode>(N))
6050 Val = cast<ConstantSDNode>(N)->getZExtValue();
6052 if (ST->isThumb1Only()) {
6053 if (Val <= 255 || ~Val <= 255)
6054 return DAG.getConstant(Val, dl, MVT::i32);
6056 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
6057 return DAG.getConstant(Val, dl, MVT::i32);
6062 // If this is a case we can't handle, return null and let the default
6063 // expansion code take care of it.
6064 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
6065 const ARMSubtarget *ST) const {
6066 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
6068 EVT VT = Op.getValueType();
6070 APInt SplatBits, SplatUndef;
6071 unsigned SplatBitSize;
6073 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
6074 if (SplatUndef.isAllOnesValue())
6075 return DAG.getUNDEF(VT);
6077 if (SplatBitSize <= 64) {
6078 // Check if an immediate VMOV works.
6080 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
6081 SplatUndef.getZExtValue(), SplatBitSize,
6082 DAG, dl, VmovVT, VT.is128BitVector(),
6084 if (Val.getNode()) {
6085 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
6086 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
6089 // Try an immediate VMVN.
6090 uint64_t NegatedImm = (~SplatBits).getZExtValue();
6091 Val = isNEONModifiedImm(NegatedImm,
6092 SplatUndef.getZExtValue(), SplatBitSize,
6093 DAG, dl, VmovVT, VT.is128BitVector(),
6095 if (Val.getNode()) {
6096 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
6097 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
6100 // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
6101 if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
6102 int ImmVal = ARM_AM::getFP32Imm(SplatBits);
6104 SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
6105 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
6111 // Scan through the operands to see if only one value is used.
6113 // As an optimisation, even if more than one value is used it may be more
6114 // profitable to splat with one value and then change some lanes.
6116 // Heuristically we decide to do this if the vector has a "dominant" value,
6117 // defined as splatted to more than half of the lanes.
6118 unsigned NumElts = VT.getVectorNumElements();
6119 bool isOnlyLowElement = true;
6120 bool usesOnlyOneValue = true;
6121 bool hasDominantValue = false;
6122 bool isConstant = true;
6124 // Map of the number of times a particular SDValue appears in the value list.
6126 DenseMap<SDValue, unsigned> ValueCounts;
6128 for (unsigned i = 0; i < NumElts; ++i) {
6129 SDValue V = Op.getOperand(i);
6133 isOnlyLowElement = false;
6134 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
6137 ValueCounts.insert(std::make_pair(V, 0));
6138 unsigned &Count = ValueCounts[V];
6140 // Is this value dominant? (takes up more than half of the lanes)
6141 if (++Count > (NumElts / 2)) {
6142 hasDominantValue = true;
6146 if (ValueCounts.size() != 1)
6147 usesOnlyOneValue = false;
6148 if (!Value.getNode() && ValueCounts.size() > 0)
6149 Value = ValueCounts.begin()->first;
6151 if (ValueCounts.size() == 0)
6152 return DAG.getUNDEF(VT);
6154 // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
6155 // Keep going if we are hitting this case.
6156 if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
6157 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
6159 unsigned EltSize = VT.getScalarSizeInBits();
6161 // Use VDUP for non-constant splats. For f32 constant splats, reduce to
6162 // i32 and try again.
6163 if (hasDominantValue && EltSize <= 32) {
6167 // If we are VDUPing a value that comes directly from a vector, that will
6168 // cause an unnecessary move to and from a GPR, where instead we could
6169 // just use VDUPLANE. We can only do this if the lane being extracted
6170 // is at a constant index, as the VDUP from lane instructions only have
6171 // constant-index forms.
6172 ConstantSDNode *constIndex;
6173 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6174 (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) {
6175 // We need to create a new undef vector to use for the VDUPLANE if the
6176 // size of the vector from which we get the value is different than the
6177 // size of the vector that we need to create. We will insert the element
6178 // such that the register coalescer will remove unnecessary copies.
6179 if (VT != Value->getOperand(0).getValueType()) {
6180 unsigned index = constIndex->getAPIntValue().getLimitedValue() %
6181 VT.getVectorNumElements();
6182 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
6183 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
6184 Value, DAG.getConstant(index, dl, MVT::i32)),
6185 DAG.getConstant(index, dl, MVT::i32));
6187 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
6188 Value->getOperand(0), Value->getOperand(1));
6190 N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
6192 if (!usesOnlyOneValue) {
6193 // The dominant value was splatted as 'N', but we now have to insert
6194 // all differing elements.
6195 for (unsigned I = 0; I < NumElts; ++I) {
6196 if (Op.getOperand(I) == Value)
6198 SmallVector<SDValue, 3> Ops;
6200 Ops.push_back(Op.getOperand(I));
6201 Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
6202 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
6207 if (VT.getVectorElementType().isFloatingPoint()) {
6208 SmallVector<SDValue, 8> Ops;
6209 for (unsigned i = 0; i < NumElts; ++i)
6210 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
6212 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
6213 SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
6214 Val = LowerBUILD_VECTOR(Val, DAG, ST);
6216 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
6218 if (usesOnlyOneValue) {
6219 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
6220 if (isConstant && Val.getNode())
6221 return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
6225 // If all elements are constants and the case above didn't get hit, fall back
6226 // to the default expansion, which will generate a load from the constant pool.
6231 // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
6233 SDValue shuffle = ReconstructShuffle(Op, DAG);
6234 if (shuffle != SDValue())
6238 if (VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) {
6239 // If we haven't found an efficient lowering, try splitting a 128-bit vector
6240 // into two 64-bit vectors; we might discover a better way to lower it.
6241 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts);
6242 EVT ExtVT = VT.getVectorElementType();
6243 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2);
6245 DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2));
6246 if (Lower.getOpcode() == ISD::BUILD_VECTOR)
6247 Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
6248 SDValue Upper = DAG.getBuildVector(
6249 HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
6250 if (Upper.getOpcode() == ISD::BUILD_VECTOR)
6251 Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
6253 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper);
6256 // Vectors with 32- or 64-bit elements can be built by directly assigning
6257 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands
6258 // will be legalized.
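// For instance (illustration only), a v2i64 BUILD_VECTOR is rewritten below as
// an ARMISD::BUILD_VECTOR of two f64 operands; instruction selection can then
// place each f64 straight into a D subregister of the destination Q register.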
6259 if (EltSize >= 32) {
6260 // Do the expansion with floating-point types, since that is what the VFP
6261 // registers are defined to use, and since i64 is not legal.
6262 EVT EltVT = EVT::getFloatingPointVT(EltSize);
6263 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
6264 SmallVector<SDValue, 8> Ops;
6265 for (unsigned i = 0; i < NumElts; ++i)
6266 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
6267 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
6268 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
6271 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
6272 // know the default expansion would otherwise fall back on something even
6273 // worse. For a vector with one or two non-undef values, that's
6274 // scalar_to_vector for the elements followed by a shuffle (provided the
6275 // shuffle is valid for the target) and materialization element by element
6276 // on the stack followed by a load for everything else.
6277 if (!isConstant && !usesOnlyOneValue) {
6278 SDValue Vec = DAG.getUNDEF(VT);
6279 for (unsigned i = 0 ; i < NumElts; ++i) {
6280 SDValue V = Op.getOperand(i);
6283 SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
6284 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
6292 // Gather data to see if the operation can be modelled as a
6293 // shuffle in combination with VEXTs.
6294 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
6295 SelectionDAG &DAG) const {
6296 assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
6298 EVT VT = Op.getValueType();
6299 unsigned NumElts = VT.getVectorNumElements();
6301 struct ShuffleSourceInfo {
6306 // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
6307 // be compatible with the shuffle we intend to construct. As a result
6308 // ShuffleVec will be some sliding window into the original Vec.
6311 // Code should guarantee that element i in Vec starts at element
6312 // "WindowBase + i * WindowScale" in ShuffleVec.
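// As a concrete illustration (hypothetical values, not an additional
// invariant): if Vec is a v4i16 and ShuffleVec is the same data bitcast to
// v8i8, then WindowScale is 2, and with WindowBase == 0 element i of Vec
// begins at element 2 * i of ShuffleVec.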
6316 bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
6317 ShuffleSourceInfo(SDValue Vec)
6318 : Vec(Vec), MinElt(UINT_MAX), MaxElt(0), ShuffleVec(Vec), WindowBase(0),
6322 // First gather all vectors used as an immediate source for this BUILD_VECTOR node.
6324 SmallVector<ShuffleSourceInfo, 2> Sources;
6325 for (unsigned i = 0; i < NumElts; ++i) {
6326 SDValue V = Op.getOperand(i);
6329 else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
6330 // A shuffle can only come from building a vector from various
6331 // elements of other vectors.
6333 } else if (!isa<ConstantSDNode>(V.getOperand(1))) {
6334 // Furthermore, shuffles require a constant mask, whereas extractelts
6335 // accept variable indices.
6339 // Add this element source to the list if it's not already there.
6340 SDValue SourceVec = V.getOperand(0);
6341 auto Source = find(Sources, SourceVec);
6342 if (Source == Sources.end())
6343 Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
6345 // Update the minimum and maximum lane number seen.
6346 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
6347 Source->MinElt = std::min(Source->MinElt, EltNo);
6348 Source->MaxElt = std::max(Source->MaxElt, EltNo);
6351 // Currently only do something sane when at most two source vectors are involved.
6353 if (Sources.size() > 2)
6356 // Find out the smallest element size among result and two sources, and use
6357 // it as element size to build the shuffle_vector.
6358 EVT SmallestEltTy = VT.getVectorElementType();
6359 for (auto &Source : Sources) {
6360 EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
6361 if (SrcEltTy.bitsLT(SmallestEltTy))
6362 SmallestEltTy = SrcEltTy;
6364 unsigned ResMultiplier =
6365 VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
6366 NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
6367 EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
6369 // If the source vector is too wide or too narrow, we may nevertheless be able
6370 // to construct a compatible shuffle either by concatenating it with UNDEF or
6371 // extracting a suitable range of elements.
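// For example (illustrative sizes only): a v2i32 source feeding a v4i32
// BUILD_VECTOR is widened by concatenating it with a v2i32 UNDEF, while a
// v8i32 source is narrowed with an EXTRACT_SUBVECTOR, or with a VEXT below if
// the lanes actually used straddle both halves.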
6372 for (auto &Src : Sources) {
6373 EVT SrcVT = Src.ShuffleVec.getValueType();
6375 if (SrcVT.getSizeInBits() == VT.getSizeInBits())
6378 // This stage of the search produces a source with the same element type as
6379 // the original, but with a total width matching the BUILD_VECTOR output.
6380 EVT EltVT = SrcVT.getVectorElementType();
6381 unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
6382 EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
6384 if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
6385 if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits())
6387 // We can pad out the smaller vector for free, so if it's part of a shuffle we simply widen it with UNDEF.
6390 DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
6391 DAG.getUNDEF(Src.ShuffleVec.getValueType()));
6395 if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits())
6398 if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
6399 // Span too large for a VEXT to cope
6403 if (Src.MinElt >= NumSrcElts) {
6404 // The extraction can just take the second half
6406 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6407 DAG.getConstant(NumSrcElts, dl, MVT::i32));
6408 Src.WindowBase = -NumSrcElts;
6409 } else if (Src.MaxElt < NumSrcElts) {
6410 // The extraction can just take the first half
6412 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6413 DAG.getConstant(0, dl, MVT::i32));
6415 // An actual VEXT is needed
6417 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6418 DAG.getConstant(0, dl, MVT::i32));
6420 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6421 DAG.getConstant(NumSrcElts, dl, MVT::i32));
6423 Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1,
6425 DAG.getConstant(Src.MinElt, dl, MVT::i32));
6426 Src.WindowBase = -Src.MinElt;
6430 // Another possible incompatibility occurs from the vector element types. We
6431 // can fix this by bitcasting the source vectors to the same type we intend to use for the shuffle.
6433 for (auto &Src : Sources) {
6434 EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
6435 if (SrcEltTy == SmallestEltTy)
6437 assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
6438 Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
6439 Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
6440 Src.WindowBase *= Src.WindowScale;
6443 // Final sanity check before we try to actually produce a shuffle.
6445 for (auto Src : Sources)
6446 assert(Src.ShuffleVec.getValueType() == ShuffleVT);
6449 // The stars all align; our next step is to produce the mask for the shuffle.
6450 SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
6451 int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
6452 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
6453 SDValue Entry = Op.getOperand(i);
6454 if (Entry.isUndef())
6457 auto Src = find(Sources, Entry.getOperand(0));
6458 int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
6460 // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
6461 // trunc. So only std::min(SrcBits, DestBits) bits actually get defined in this process.
6463 EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
6464 int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
6465 VT.getScalarSizeInBits());
6466 int LanesDefined = BitsDefined / BitsPerShuffleLane;
6468 // This source is expected to fill ResMultiplier lanes of the final shuffle,
6469 // starting at the appropriate offset.
6470 int *LaneMask = &Mask[i * ResMultiplier];
6472 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
6473 ExtractBase += NumElts * (Src - Sources.begin());
6474 for (int j = 0; j < LanesDefined; ++j)
6475 LaneMask[j] = ExtractBase + j;
6478 // Final check before we try to produce nonsense...
6479 if (!isShuffleMaskLegal(Mask, ShuffleVT))
6482 // We can't handle more than two sources. This should have already
6483 // been checked before this point.
6484 assert(Sources.size() <= 2 && "Too many sources!");
6486 SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
6487 for (unsigned i = 0; i < Sources.size(); ++i)
6488 ShuffleOps[i] = Sources[i].ShuffleVec;
6490 SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
6491 ShuffleOps[1], Mask);
6492 return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
6495 /// isShuffleMaskLegal - Targets can use this to indicate that they only
6496 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
6497 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
6498 /// are assumed to be legal.
6500 ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
6502 if (VT.getVectorNumElements() == 4 &&
6503 (VT.is128BitVector() || VT.is64BitVector())) {
6504 unsigned PFIndexes[4];
6505 for (unsigned i = 0; i != 4; ++i) {
6509 PFIndexes[i] = M[i];
6512 // Compute the index in the perfect shuffle table.
6513 unsigned PFTableIndex =
6514 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
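// Worked example (for illustration): the mask <0,0,1,1> maps to index
// 0*729 + 0*81 + 1*9 + 1 = 10, while a mask with all four lanes undef
// (each encoded as 8) maps to 8*729 + 8*81 + 8*9 + 8 = 6560.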
6515 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
6516 unsigned Cost = (PFEntry >> 30);
6522 bool ReverseVEXT, isV_UNDEF;
6523 unsigned Imm, WhichResult;
6525 unsigned EltSize = VT.getScalarSizeInBits();
6526 return (EltSize >= 32 ||
6527 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
6528 isVREVMask(M, VT, 64) ||
6529 isVREVMask(M, VT, 32) ||
6530 isVREVMask(M, VT, 16) ||
6531 isVEXTMask(M, VT, ReverseVEXT, Imm) ||
6532 isVTBLMask(M, VT) ||
6533 isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF) ||
6534 ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT)));
6537 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
6538 /// the specified operations to build the shuffle.
6539 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
6540 SDValue RHS, SelectionDAG &DAG,
6542 unsigned OpNum = (PFEntry >> 26) & 0x0F;
6543 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
6544 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
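// The decoding above follows the PFEntry layout: bits 31-30 hold the cost,
// bits 29-26 the opcode, bits 25-13 the LHS table index, and bits 12-0 the
// RHS table index. (Summary of the shifts and masks above, for reference.)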
6547 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
6556 OP_VUZPL, // VUZP, left result
6557 OP_VUZPR, // VUZP, right result
6558 OP_VZIPL, // VZIP, left result
6559 OP_VZIPR, // VZIP, right result
6560 OP_VTRNL, // VTRN, left result
6561 OP_VTRNR // VTRN, right result
6564 if (OpNum == OP_COPY) {
6565 if (LHSID == (1*9+2)*9+3) return LHS;
6566 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
6570 SDValue OpLHS, OpRHS;
6571 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
6572 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
6573 EVT VT = OpLHS.getValueType();
6576 default: llvm_unreachable("Unknown shuffle opcode!");
6578 // VREV divides the vector in half and swaps within the half.
6579 if (VT.getVectorElementType() == MVT::i32 ||
6580 VT.getVectorElementType() == MVT::f32)
6581 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
6582 // vrev <4 x i16> -> VREV32
6583 if (VT.getVectorElementType() == MVT::i16)
6584 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
6585 // vrev <4 x i8> -> VREV16
6586 assert(VT.getVectorElementType() == MVT::i8);
6587 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
6592 return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
6593 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
6597 return DAG.getNode(ARMISD::VEXT, dl, VT,
6599 DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
6602 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
6603 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
6606 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
6607 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
6610 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
6611 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
6615 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
6616 ArrayRef<int> ShuffleMask,
6617 SelectionDAG &DAG) {
6618 // Check to see if we can use the VTBL instruction.
6619 SDValue V1 = Op.getOperand(0);
6620 SDValue V2 = Op.getOperand(1);
6623 SmallVector<SDValue, 8> VTBLMask;
6624 for (ArrayRef<int>::iterator
6625 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
6626 VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));
6628 if (V2.getNode()->isUndef())
6629 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
6630 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
6632 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
6633 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
6636 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
6637 SelectionDAG &DAG) {
6639 SDValue OpLHS = Op.getOperand(0);
6640 EVT VT = OpLHS.getValueType();
6642 assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
6643 "Expect an v8i16/v16i8 type");
6644 OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
6645 // For a v16i8 type: after the VREV64, each doubleword is byte-reversed, so we
6646 // have <7, ..., 0, 15, ..., 8>. The VEXT below then swaps the two doublewords,
6647 // yielding the fully reversed vector <15, ..., 0>. The v8i16 case is similar.
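// For the v8i16 case (illustration): <0,1,2,3,4,5,6,7> becomes
// <3,2,1,0,7,6,5,4> after the VREV64, and the VEXT by 4 halfwords then
// produces the fully reversed <7,6,5,4,3,2,1,0>.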
6648 unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
6649 return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
6650 DAG.getConstant(ExtractNum, DL, MVT::i32));
6653 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
6654 SDValue V1 = Op.getOperand(0);
6655 SDValue V2 = Op.getOperand(1);
6657 EVT VT = Op.getValueType();
6658 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
6660 // Convert shuffles that are directly supported on NEON to target-specific
6661 // DAG nodes, instead of keeping them as shuffles and matching them again
6662 // during code selection. This is more efficient and avoids the possibility
6663 // of inconsistencies between legalization and selection.
6664 // FIXME: floating-point vectors should be canonicalized to integer vectors
6665 // of the same type so that they get CSEd properly.
6666 ArrayRef<int> ShuffleMask = SVN->getMask();
6668 unsigned EltSize = VT.getScalarSizeInBits();
6669 if (EltSize <= 32) {
6670 if (SVN->isSplat()) {
6671 int Lane = SVN->getSplatIndex();
6672 // If this is an undef splat, generate it via "just" vdup, if possible.
6673 if (Lane == -1) Lane = 0;
6675 // Test if V1 is a SCALAR_TO_VECTOR.
6676 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
6677 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
6679 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
6680 // (and probably will turn into a SCALAR_TO_VECTOR once legalization reaches it).
6682 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
6683 !isa<ConstantSDNode>(V1.getOperand(0))) {
6684 bool IsScalarToVector = true;
6685 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
6686 if (!V1.getOperand(i).isUndef()) {
6687 IsScalarToVector = false;
6690 if (IsScalarToVector)
6691 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
6693 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
6694 DAG.getConstant(Lane, dl, MVT::i32));
6699 if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
6702 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
6703 DAG.getConstant(Imm, dl, MVT::i32));
6706 if (isVREVMask(ShuffleMask, VT, 64))
6707 return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
6708 if (isVREVMask(ShuffleMask, VT, 32))
6709 return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
6710 if (isVREVMask(ShuffleMask, VT, 16))
6711 return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
6713 if (V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
6714 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
6715 DAG.getConstant(Imm, dl, MVT::i32));
6718 // Check for Neon shuffles that modify both input vectors in place.
6719 // If both results are used, i.e., if there are two shuffles with the same
6720 // source operands and with masks corresponding to both results of one of
6721 // these operations, DAG memoization will ensure that a single node is
6722 // used for both shuffles.
6723 unsigned WhichResult;
6725 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
6726 ShuffleMask, VT, WhichResult, isV_UNDEF)) {
6729 return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
6730 .getValue(WhichResult);
6733 // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
6734 // shuffles that produce a result larger than their operands with:
6735 // shuffle(concat(v1, undef), concat(v2, undef))
6737 // -> shuffle(concat(v1, v2), undef)
6738 // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
6740 // This is useful in the general case, but there are special cases where
6741 // native shuffles produce larger results: the two-result ops.
6743 // Look through the concat when lowering them:
6744 // shuffle(concat(v1, v2), undef)
6746 // -> concat(VZIP(v1, v2):0, :1)
6748 if (V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) {
6749 SDValue SubV1 = V1->getOperand(0);
6750 SDValue SubV2 = V1->getOperand(1);
6751 EVT SubVT = SubV1.getValueType();
6753 // We expect these to have been canonicalized to -1.
6754 assert(all_of(ShuffleMask, [&](int i) {
6755 return i < (int)VT.getVectorNumElements();
6756 }) && "Unexpected shuffle index into UNDEF operand!");
6758 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
6759 ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
6762 assert((WhichResult == 0) &&
6763 "In-place shuffle of concat can only have one result!");
6764 SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
6766 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
6772 // If the shuffle is not directly supported and it has 4 elements, use
6773 // the PerfectShuffle-generated table to synthesize it from other shuffles.
6774 unsigned NumElts = VT.getVectorNumElements();
6776 unsigned PFIndexes[4];
6777 for (unsigned i = 0; i != 4; ++i) {
6778 if (ShuffleMask[i] < 0)
6781 PFIndexes[i] = ShuffleMask[i];
6784 // Compute the index in the perfect shuffle table.
6785 unsigned PFTableIndex =
6786 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
6787 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
6788 unsigned Cost = (PFEntry >> 30);
6791 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
6794 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
6795 if (EltSize >= 32) {
6796 // Do the expansion with floating-point types, since that is what the VFP
6797 // registers are defined to use, and since i64 is not legal.
6798 EVT EltVT = EVT::getFloatingPointVT(EltSize);
6799 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
6800 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
6801 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
6802 SmallVector<SDValue, 8> Ops;
6803 for (unsigned i = 0; i < NumElts; ++i) {
6804 if (ShuffleMask[i] < 0)
6805 Ops.push_back(DAG.getUNDEF(EltVT));
6807 Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
6808 ShuffleMask[i] < (int)NumElts ? V1 : V2,
6809 DAG.getConstant(ShuffleMask[i] & (NumElts-1),
6812 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
6813 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
6816 if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT))
6817 return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);
6819 if (VT == MVT::v8i8)
6820 if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
6826 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
6827 // INSERT_VECTOR_ELT is legal only for immediate indexes.
6828 SDValue Lane = Op.getOperand(2);
6829 if (!isa<ConstantSDNode>(Lane))
6835 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
6836 // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
6837 SDValue Lane = Op.getOperand(1);
6838 if (!isa<ConstantSDNode>(Lane))
6841 SDValue Vec = Op.getOperand(0);
6842 if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) {
6844 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
6850 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
6851 // The only time a CONCAT_VECTORS operation can have legal types is when
6852 // two 64-bit vectors are concatenated to a 128-bit vector.
6853 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
6854 "unexpected CONCAT_VECTORS");
6856 SDValue Val = DAG.getUNDEF(MVT::v2f64);
6857 SDValue Op0 = Op.getOperand(0);
6858 SDValue Op1 = Op.getOperand(1);
6860 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
6861 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
6862 DAG.getIntPtrConstant(0, dl));
6864 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
6865 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
6866 DAG.getIntPtrConstant(1, dl));
6867 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
6870 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
6871 /// element has been zero/sign-extended, depending on the isSigned parameter,
6872 /// from an integer type half its size.
6873 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
6875 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
6876 EVT VT = N->getValueType(0);
6877 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
6878 SDNode *BVN = N->getOperand(0).getNode();
6879 if (BVN->getValueType(0) != MVT::v4i32 ||
6880 BVN->getOpcode() != ISD::BUILD_VECTOR)
6882 unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
6883 unsigned HiElt = 1 - LoElt;
6884 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
6885 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
6886 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
6887 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
6888 if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
6891 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
6892 Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
6895 if (Hi0->isNullValue() && Hi1->isNullValue())
6901 if (N->getOpcode() != ISD::BUILD_VECTOR)
6904 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
6905 SDNode *Elt = N->getOperand(i).getNode();
6906 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
6907 unsigned EltSize = VT.getScalarSizeInBits();
6908 unsigned HalfSize = EltSize / 2;
6910 if (!isIntN(HalfSize, C->getSExtValue()))
6913 if (!isUIntN(HalfSize, C->getZExtValue()))
6924 /// isSignExtended - Check if a node is a vector value that is sign-extended
6925 /// or a constant BUILD_VECTOR with sign-extended elements.
6926 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
6927 if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
6929 if (isExtendedBUILD_VECTOR(N, DAG, true))
6934 /// isZeroExtended - Check if a node is a vector value that is zero-extended
6935 /// or a constant BUILD_VECTOR with zero-extended elements.
6936 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
6937 if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
6939 if (isExtendedBUILD_VECTOR(N, DAG, false))
6944 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
6945 if (OrigVT.getSizeInBits() >= 64)
6948 assert(OrigVT.isSimple() && "Expecting a simple value type");
6950 MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
6951 switch (OrigSimpleTy) {
6952 default: llvm_unreachable("Unexpected Vector Type");
6961 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
6962 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
6963 /// We insert the required extension here to get the vector to fill a D register.
6964 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
6967 unsigned ExtOpcode) {
6968 // The vector originally had a size of OrigTy. It was then extended to ExtTy.
6969 // We expect the ExtTy to be 128-bits total. If the OrigTy is less than
6970 // 64-bits we need to insert a new extension so that it will be 64-bits.
6971 assert(ExtTy.is128BitVector() && "Unexpected extension size");
6972 if (OrigTy.getSizeInBits() >= 64)
6975 // Must extend size to at least 64 bits to be used as an operand for VMULL.
6976 EVT NewVT = getExtensionTo64Bits(OrigTy);
6978 return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
6981 /// SkipLoadExtensionForVMULL - return a load of the original vector size that
6982 /// does not do any sign/zero extension. If the original vector is less
6983 /// than 64 bits, an appropriate extension will be added after the load to
6984 /// reach a total size of 64 bits. We have to add the extension separately
6985 /// because ARM does not have a sign/zero extending load for vectors.
6986 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
6987 EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());
6989 // The load already has the right type.
6990 if (ExtendedTy == LD->getMemoryVT())
6991 return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
6992 LD->getBasePtr(), LD->getPointerInfo(),
6993 LD->getAlignment(), LD->getMemOperand()->getFlags());
6995 // We need to create a zextload/sextload. We cannot just create a load
6996 // followed by a zext/zext node because LowerMUL is also run during normal
6997 // operation legalization where we can't create illegal types.
6998 return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
6999 LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
7000 LD->getMemoryVT(), LD->getAlignment(),
7001 LD->getMemOperand()->getFlags());
7004 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
7005 /// extending load, or BUILD_VECTOR with extended elements, return the
7006 /// unextended value. The unextended vector should be 64 bits so that it can
7007 /// be used as an operand to a VMULL instruction. If the original vector size
7008 /// before extension is less than 64 bits, we add an extension to resize
7009 /// the vector to 64 bits.
7010 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
7011 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
7012 return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
7013 N->getOperand(0)->getValueType(0),
7017 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
7018 return SkipLoadExtensionForVMULL(LD, DAG);
7020 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will
7021 // have been legalized as a BITCAST from v4i32.
7022 if (N->getOpcode() == ISD::BITCAST) {
7023 SDNode *BVN = N->getOperand(0).getNode();
7024 assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
7025 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
7026 unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
7027 return DAG.getBuildVector(
7028 MVT::v2i32, SDLoc(N),
7029 {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
7031 // Construct a new BUILD_VECTOR with elements truncated to half the size.
7032 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
7033 EVT VT = N->getValueType(0);
7034 unsigned EltSize = VT.getScalarSizeInBits() / 2;
7035 unsigned NumElts = VT.getVectorNumElements();
7036 MVT TruncVT = MVT::getIntegerVT(EltSize);
7037 SmallVector<SDValue, 8> Ops;
7039 for (unsigned i = 0; i != NumElts; ++i) {
7040 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
7041 const APInt &CInt = C->getAPIntValue();
7042 // Element types smaller than 32 bits are not legal, so use i32 elements.
7043 // The values are implicitly truncated so sext vs. zext doesn't matter.
7044 Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
7046 return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
7049 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
7050 unsigned Opcode = N->getOpcode();
7051 if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
7052 SDNode *N0 = N->getOperand(0).getNode();
7053 SDNode *N1 = N->getOperand(1).getNode();
7054 return N0->hasOneUse() && N1->hasOneUse() &&
7055 isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
7060 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
7061 unsigned Opcode = N->getOpcode();
7062 if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
7063 SDNode *N0 = N->getOperand(0).getNode();
7064 SDNode *N1 = N->getOperand(1).getNode();
7065 return N0->hasOneUse() && N1->hasOneUse() &&
7066 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
7071 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
7072 // Multiplications are only custom-lowered for 128-bit vectors so that
7073 // VMULL can be detected. Otherwise v2i64 multiplications are not legal.
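// For instance (illustrative), a v8i16 multiply whose operands are both
// sign-extended from v8i8 is selected as a single VMULLs (vmull.s8) rather
// than two lengthening extends followed by a plain vmul.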
7074 EVT VT = Op.getValueType();
7075 assert(VT.is128BitVector() && VT.isInteger() &&
7076 "unexpected type for custom-lowering ISD::MUL");
7077 SDNode *N0 = Op.getOperand(0).getNode();
7078 SDNode *N1 = Op.getOperand(1).getNode();
7079 unsigned NewOpc = 0;
7081 bool isN0SExt = isSignExtended(N0, DAG);
7082 bool isN1SExt = isSignExtended(N1, DAG);
7083 if (isN0SExt && isN1SExt)
7084 NewOpc = ARMISD::VMULLs;
7086 bool isN0ZExt = isZeroExtended(N0, DAG);
7087 bool isN1ZExt = isZeroExtended(N1, DAG);
7088 if (isN0ZExt && isN1ZExt)
7089 NewOpc = ARMISD::VMULLu;
7090 else if (isN1SExt || isN1ZExt) {
7091 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
7092 // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
7093 if (isN1SExt && isAddSubSExt(N0, DAG)) {
7094 NewOpc = ARMISD::VMULLs;
7096 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
7097 NewOpc = ARMISD::VMULLu;
7099 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
7101 NewOpc = ARMISD::VMULLu;
7107 if (VT == MVT::v2i64)
7108 // Fall through to expand this. It is not legal.
7111 // Other vector multiplications are legal.
7116 // Legalize to a VMULL instruction.
7119 SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
7121 Op0 = SkipExtensionForVMULL(N0, DAG);
7122 assert(Op0.getValueType().is64BitVector() &&
7123 Op1.getValueType().is64BitVector() &&
7124 "unexpected types for extended operands to VMULL");
7125 return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
7128 // Optimize (zext A + zext B) * C to (VMULL A, C) + (VMULL B, C) during
7129 // isel lowering to take advantage of no-stall back-to-back vmul + vmla.
7136 SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
7137 SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
7138 EVT Op1VT = Op1.getValueType();
7139 return DAG.getNode(N0->getOpcode(), DL, VT,
7140 DAG.getNode(NewOpc, DL, VT,
7141 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
7142 DAG.getNode(NewOpc, DL, VT,
7143 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
7146 static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
7147 SelectionDAG &DAG) {
7148 // TODO: Should this propagate fast-math-flags?
7151 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
7152 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
7153 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
7154 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
7155 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
7156 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
7157 // Get reciprocal estimate.
7158 // float4 recip = vrecpeq_f32(yf);
7159 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7160 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7162 // Because char has a smaller range than uchar, we can actually get away
7163 // without any newton steps. This requires that we use a weird bias
7164 // of 0xb000, however (again, this has been exhaustively tested).
7165 // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
7166 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
7167 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
7168 Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
7169 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
7170 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
7171 // Convert back to short.
7172 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
7173 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
7177 static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
7178 SelectionDAG &DAG) {
7179 // TODO: Should this propagate fast-math-flags?
7182 // Convert to float.
7183 // float4 yf = vcvt_f32_s32(vmovl_s16(y));
7184 // float4 xf = vcvt_f32_s32(vmovl_s16(x));
7185 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
7186 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
7187 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
7188 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
7190 // Use reciprocal estimate and one refinement step.
7191 // float4 recip = vrecpeq_f32(yf);
7192 // recip *= vrecpsq_f32(yf, recip);
7193 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7194 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7196 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7197 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7199 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7200 // Because short has a smaller range than ushort, we can actually get away
7201 // with only a single newton step. This requires that we use a weird bias
7202 // of 89, however (again, this has been exhaustively tested).
7203 // float4 result = as_float4(as_int4(xf*recip) + 0x89);
7204 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
7205 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
7206 N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
7207 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
7208 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
7209 // Convert back to integer and return.
7210 // return vmovn_s32(vcvt_s32_f32(result));
7211 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
7212 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
7216 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
7217 EVT VT = Op.getValueType();
7218 assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
7219 "unexpected type for custom-lowering ISD::SDIV");
7222 SDValue N0 = Op.getOperand(0);
7223 SDValue N1 = Op.getOperand(1);
7226 if (VT == MVT::v8i8) {
7227 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
7228 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
7230 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7231 DAG.getIntPtrConstant(4, dl));
7232 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7233 DAG.getIntPtrConstant(4, dl));
7234 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7235 DAG.getIntPtrConstant(0, dl));
7236 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7237 DAG.getIntPtrConstant(0, dl));
7239 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
7240 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
7242 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
7243 N0 = LowerCONCAT_VECTORS(N0, DAG);
7245 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
7248 return LowerSDIV_v4i16(N0, N1, dl, DAG);
7251 static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
7252 // TODO: Should this propagate fast-math-flags?
7253 EVT VT = Op.getValueType();
7254 assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
7255 "unexpected type for custom-lowering ISD::UDIV");
7258 SDValue N0 = Op.getOperand(0);
7259 SDValue N1 = Op.getOperand(1);
7262 if (VT == MVT::v8i8) {
7263 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
7264 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
7266 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7267 DAG.getIntPtrConstant(4, dl));
7268 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7269 DAG.getIntPtrConstant(4, dl));
7270 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7271 DAG.getIntPtrConstant(0, dl));
7272 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7273 DAG.getIntPtrConstant(0, dl));
7275 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
7276 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16
7278 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
7279 N0 = LowerCONCAT_VECTORS(N0, DAG);
7281 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
7282 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
7288 // v4i16 udiv ... Convert to float.
7289 // float4 yf = vcvt_f32_s32(vmovl_u16(y));
7290 // float4 xf = vcvt_f32_s32(vmovl_u16(x));
7291 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
7292 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
7293 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
7294 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
7296 // Use reciprocal estimate and two refinement steps.
7297 // float4 recip = vrecpeq_f32(yf);
7298 // recip *= vrecpsq_f32(yf, recip);
7299 // recip *= vrecpsq_f32(yf, recip);
7300 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7301 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7303 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7304 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7306 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7307 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7308 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7310 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7311 // Simply multiplying by the reciprocal estimate can leave us a few ulps
7312 // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
7313 // and that it will never cause us to return an answer too large).
7314 // float4 result = as_float4(as_int4(xf*recip) + 2);
7315 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
7316 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
7317 N1 = DAG.getConstant(2, dl, MVT::v4i32);
7318 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
7319 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
7320 // Convert back to integer and return.
7321 // return vmovn_u32(vcvt_s32_f32(result));
7322 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
7323 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
7327 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
7328 EVT VT = Op.getNode()->getValueType(0);
7329 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
7332 bool ExtraOp = false;
7333 switch (Op.getOpcode()) {
7334 default: llvm_unreachable("Invalid code");
7335 case ISD::ADDC: Opc = ARMISD::ADDC; break;
7336 case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
7337 case ISD::SUBC: Opc = ARMISD::SUBC; break;
7338 case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break;
7342 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
7344 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
7345 Op.getOperand(1), Op.getOperand(2));
7348 SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
7349 assert(Subtarget->isTargetDarwin());
7351 // For iOS, we want to call an alternative entry point: __sincos_stret;
7352 // return values are passed via sret.
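// Conceptually (an illustrative sketch of the two ABI flavours handled below;
// SinCosRet is a placeholder name, not a real type):
//   AAPCS: {float, float} __sincosf_stret(float x);        // returned in regs
//   APCS:  void __sincosf_stret(SinCosRet *sret, float x); // returned via sret
// with __sincos_stret being the analogous double-precision entry point.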
7354 SDValue Arg = Op.getOperand(0);
7355 EVT ArgVT = Arg.getValueType();
7356 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
7357 auto PtrVT = getPointerTy(DAG.getDataLayout());
7359 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7360 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7362 // Pair of floats / doubles used to pass the result.
7363 Type *RetTy = StructType::get(ArgTy, ArgTy, nullptr);
7364 auto &DL = DAG.getDataLayout();
7367 bool ShouldUseSRet = Subtarget->isAPCS_ABI();
7369 if (ShouldUseSRet) {
7370 // Create stack object for sret.
7371 const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
7372 const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy);
7373 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
7374 SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL));
7378 Entry.Ty = RetTy->getPointerTo();
7379 Entry.isSExt = false;
7380 Entry.isZExt = false;
7381 Entry.isSRet = true;
7382 Args.push_back(Entry);
7383 RetTy = Type::getVoidTy(*DAG.getContext());
7389 Entry.isSExt = false;
7390 Entry.isZExt = false;
7391 Args.push_back(Entry);
7393 const char *LibcallName =
7394 (ArgVT == MVT::f64) ? "__sincos_stret" : "__sincosf_stret";
7396 (ArgVT == MVT::f64) ? RTLIB::SINCOS_F64 : RTLIB::SINCOS_F32;
7397 CallingConv::ID CC = getLibcallCallingConv(LC);
7398 SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL));
7400 TargetLowering::CallLoweringInfo CLI(DAG);
7402 .setChain(DAG.getEntryNode())
7403 .setCallee(CC, RetTy, Callee, std::move(Args))
7404 .setDiscardResult(ShouldUseSRet);
7405 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
7408 return CallResult.first;
7411 DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo());
7413 // Address of cos field.
7414 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
7415 DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
7417 DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo());
7419 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
7420 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
7421 LoadSin.getValue(0), LoadCos.getValue(0));
7424 SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
7426 SDValue &Chain) const {
7427 EVT VT = Op.getValueType();
7428 assert((VT == MVT::i32 || VT == MVT::i64) &&
7429 "unexpected type for custom lowering DIV");
7432 const auto &DL = DAG.getDataLayout();
7433 const auto &TLI = DAG.getTargetLoweringInfo();
7435 const char *Name = nullptr;
7437 Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64";
7439 Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64";
7441 SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL));
7443 ARMTargetLowering::ArgListTy Args;
7445 for (auto AI : {1, 0}) {
7447 Arg.Node = Op.getOperand(AI);
7448 Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext());
7449 Args.push_back(Arg);
7452 CallLoweringInfo CLI(DAG);
7455 .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
7456 ES, std::move(Args));
7458 return LowerCallTo(CLI).first;
7461 SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
7462 bool Signed) const {
7463 assert(Op.getValueType() == MVT::i32 &&
7464 "unexpected type for custom lowering DIV");
7467 SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
7468 DAG.getEntryNode(), Op.getOperand(1));
7470 return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
7473 static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) {
7475 SDValue Op = N->getOperand(1);
7476 if (N->getValueType(0) == MVT::i32)
7477 return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op);
7478 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
7479 DAG.getConstant(0, DL, MVT::i32));
7480 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
7481 DAG.getConstant(1, DL, MVT::i32));
7482 return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain,
7483 DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi));
7486 void ARMTargetLowering::ExpandDIV_Windows(
7487 SDValue Op, SelectionDAG &DAG, bool Signed,
7488 SmallVectorImpl<SDValue> &Results) const {
7489 const auto &DL = DAG.getDataLayout();
7490 const auto &TLI = DAG.getTargetLoweringInfo();
7492 assert(Op.getValueType() == MVT::i64 &&
7493 "unexpected type for custom lowering DIV");
7496 SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());
7498 SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
7500 SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
7501 SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
7502 DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
7503 Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);
7505 Results.push_back(Lower);
7506 Results.push_back(Upper);
7509 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
7510 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
7511 // Acquire/Release load/store is not legal for targets without a dmb or
7512 // equivalent available.
7515 // Monotonic load/store is legal for all targets.
7519 static void ReplaceREADCYCLECOUNTER(SDNode *N,
7520 SmallVectorImpl<SDValue> &Results,
7522 const ARMSubtarget *Subtarget) {
7524 // Under Power Management extensions, the cycle-count is:
7525 // mrc p15, #0, <Rt>, c9, c13, #0
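// (The MRC above reads the 32-bit PMCCNTR cycle counter; it is widened to the
//  i64 READCYCLECOUNTER result further down by pairing it with a zero high
//  word.)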
7526 SDValue Ops[] = { N->getOperand(0), // Chain
7527 DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
7528 DAG.getConstant(15, DL, MVT::i32),
7529 DAG.getConstant(0, DL, MVT::i32),
7530 DAG.getConstant(9, DL, MVT::i32),
7531 DAG.getConstant(13, DL, MVT::i32),
7532 DAG.getConstant(0, DL, MVT::i32)
7535 SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
7536 DAG.getVTList(MVT::i32, MVT::Other), Ops);
7537 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
7538 DAG.getConstant(0, DL, MVT::i32)));
7539 Results.push_back(Cycles32.getValue(1));
7542 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
7543 SDLoc dl(V.getNode());
7544 SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
7545 SDValue VHi = DAG.getAnyExtOrTrunc(
7546 DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
7549 DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
7550 SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
7551 SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
7552 const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
7554 DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
7557 static void ReplaceCMP_SWAP_64Results(SDNode *N,
7558 SmallVectorImpl<SDValue> & Results,
7559 SelectionDAG &DAG) {
7560 assert(N->getValueType(0) == MVT::i64 &&
7561 "AtomicCmpSwap on types less than 64 should be legal");
7562 SDValue Ops[] = {N->getOperand(1),
7563 createGPRPairNode(DAG, N->getOperand(2)),
7564 createGPRPairNode(DAG, N->getOperand(3)),
7566 SDNode *CmpSwap = DAG.getMachineNode(
7567 ARM::CMP_SWAP_64, SDLoc(N),
7568 DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);
7570 MachineFunction &MF = DAG.getMachineFunction();
7571 MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1);
7572 MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
7573 cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);
7575 Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_0, SDLoc(N), MVT::i32,
7576 SDValue(CmpSwap, 0)));
7577 Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_1, SDLoc(N), MVT::i32,
7578 SDValue(CmpSwap, 0)));
7579 Results.push_back(SDValue(CmpSwap, 2));
7582 static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget,
7583 SelectionDAG &DAG) {
7584 const auto &TLI = DAG.getTargetLoweringInfo();
7586 assert(Subtarget.getTargetTriple().isOSMSVCRT() &&
7587 "Custom lowering is MSVCRT specific!");
7590 SDValue Val = Op.getOperand(0);
7591 MVT Ty = Val->getSimpleValueType(0);
7592 SDValue Exponent = DAG.getNode(ISD::SINT_TO_FP, dl, Ty, Op.getOperand(1));
7593 SDValue Callee = DAG.getExternalSymbol(Ty == MVT::f32 ? "powf" : "pow",
7594 TLI.getPointerTy(DAG.getDataLayout()));
7596 TargetLowering::ArgListTy Args;
7597 TargetLowering::ArgListEntry Entry;
7600 Entry.Ty = Val.getValueType().getTypeForEVT(*DAG.getContext());
7601 Entry.isZExt = true;
7602 Args.push_back(Entry);
7604 Entry.Node = Exponent;
7605 Entry.Ty = Exponent.getValueType().getTypeForEVT(*DAG.getContext());
7606 Entry.isZExt = true;
7607 Args.push_back(Entry);
7609 Type *LCRTy = Val.getValueType().getTypeForEVT(*DAG.getContext());
7611 // The in-chain to the call is the entry node. If we are emitting a
7612 // tail call, the chain will be mutated if the node has a non-entry input chain.
7614 SDValue InChain = DAG.getEntryNode();
7615 SDValue TCChain = InChain;
7617 const auto *F = DAG.getMachineFunction().getFunction();
7618 bool IsTC = TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) &&
7619 F->getReturnType() == LCRTy;
7623 TargetLowering::CallLoweringInfo CLI(DAG);
7626 .setCallee(CallingConv::ARM_AAPCS_VFP, LCRTy, Callee, std::move(Args))
7628 std::pair<SDValue, SDValue> CI = TLI.LowerCallTo(CLI);
7630 // Return the chain (the DAG root) if it is a tail call
7631 return !CI.second.getNode() ? DAG.getRoot() : CI.first;
7634 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
7635 switch (Op.getOpcode()) {
7636 default: llvm_unreachable("Don't know how to custom lower this!");
7637 case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
7638 case ISD::ConstantPool:
7639 if (Subtarget->genExecuteOnly())
7640 llvm_unreachable("execute-only should not generate constant pools");
7641 return LowerConstantPool(Op, DAG);
7642 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
7643 case ISD::GlobalAddress:
7644 switch (Subtarget->getTargetTriple().getObjectFormat()) {
7645 default: llvm_unreachable("unknown object format");
7647 return LowerGlobalAddressWindows(Op, DAG);
7649 return LowerGlobalAddressELF(Op, DAG);
7651 return LowerGlobalAddressDarwin(Op, DAG);
7653 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
7654 case ISD::SELECT: return LowerSELECT(Op, DAG);
7655 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
7656 case ISD::BR_CC: return LowerBR_CC(Op, DAG);
7657 case ISD::BR_JT: return LowerBR_JT(Op, DAG);
7658 case ISD::VASTART: return LowerVASTART(Op, DAG);
7659 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget);
7660 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget);
7661 case ISD::SINT_TO_FP:
7662 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
7663 case ISD::FP_TO_SINT:
7664 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
7665 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
7666 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
7667 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
7668 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
7669 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
7670 case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
7671 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
7673 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG);
7676 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget);
7677 case ISD::SREM: return LowerREM(Op.getNode(), DAG);
7678 case ISD::UREM: return LowerREM(Op.getNode(), DAG);
7679 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG);
7680 case ISD::SRL_PARTS:
7681 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG);
7683 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
7684 case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget);
7685 case ISD::SETCC: return LowerVSETCC(Op, DAG);
7686 case ISD::SETCCE: return LowerSETCCE(Op, DAG);
7687 case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget);
7688 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget);
7689 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
7690 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
7691 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
7692 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
7693 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
7694 case ISD::MUL: return LowerMUL(Op, DAG);
7696 if (Subtarget->isTargetWindows())
7697 return LowerDIV_Windows(Op, DAG, /* Signed */ true);
7698 return LowerSDIV(Op, DAG);
7700 if (Subtarget->isTargetWindows())
7701 return LowerDIV_Windows(Op, DAG, /* Signed */ false);
7702 return LowerUDIV(Op, DAG);
7706 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
7711 return LowerXALUO(Op, DAG);
7712 case ISD::ATOMIC_LOAD:
7713 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG);
7714 case ISD::FSINCOS: return LowerFSINCOS(Op, DAG);
7716 case ISD::UDIVREM: return LowerDivRem(Op, DAG);
7717 case ISD::DYNAMIC_STACKALLOC:
7718 if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
7719 return LowerDYNAMIC_STACKALLOC(Op, DAG);
7720 llvm_unreachable("Don't know how to custom lower this!");
7721 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
7722 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
7723 case ISD::FPOWI: return LowerFPOWI(Op, *Subtarget, DAG);
7724 case ARMISD::WIN__DBZCHK: return SDValue();
7728 /// ReplaceNodeResults - Replace the results of node with an illegal result
7729 /// type with new values built out of custom code.
7730 void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
7731 SmallVectorImpl<SDValue> &Results,
7732 SelectionDAG &DAG) const {
7734 switch (N->getOpcode()) {
7736 llvm_unreachable("Don't know how to custom expand this!");
7737 case ISD::READ_REGISTER:
7738 ExpandREAD_REGISTER(N, Results, DAG);
7741 Res = ExpandBITCAST(N, DAG);
7745 Res = Expand64BitShift(N, DAG, Subtarget);
7749 Res = LowerREM(N, DAG);
7753 Res = LowerDivRem(SDValue(N, 0), DAG);
7754 assert(Res.getNumOperands() == 2 && "DivRem needs two values");
7755 Results.push_back(Res.getValue(0));
7756 Results.push_back(Res.getValue(1));
7758 case ISD::READCYCLECOUNTER:
7759 ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
7763 assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
7764 return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
7766 case ISD::ATOMIC_CMP_SWAP:
7767 ReplaceCMP_SWAP_64Results(N, Results, DAG);
7771 Results.push_back(Res);
7774 //===----------------------------------------------------------------------===//
7775 // ARM Scheduler Hooks
7776 //===----------------------------------------------------------------------===//
7778 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
7779 /// registers the function context.
7780 void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
7781 MachineBasicBlock *MBB,
7782 MachineBasicBlock *DispatchBB,
7784 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
7785 "ROPI/RWPI not currently supported with SjLj");
7786 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
7787 DebugLoc dl = MI.getDebugLoc();
7788 MachineFunction *MF = MBB->getParent();
7789 MachineRegisterInfo *MRI = &MF->getRegInfo();
7790 MachineConstantPool *MCP = MF->getConstantPool();
7791 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
7792 const Function *F = MF->getFunction();
7794 bool isThumb = Subtarget->isThumb();
7795 bool isThumb2 = Subtarget->isThumb2();
7797 unsigned PCLabelId = AFI->createPICLabelUId();
7798 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
7799 ARMConstantPoolValue *CPV =
7800 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj);
7801 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);
7803 const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
7804 : &ARM::GPRRegClass;
7806 // Grab constant pool and fixed stack memory operands.
7807 MachineMemOperand *CPMMO =
7808 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
7809 MachineMemOperand::MOLoad, 4, 4);
7811 MachineMemOperand *FIMMOSt =
7812 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
7813 MachineMemOperand::MOStore, 4, 4);
7815 // Load the address of the dispatch MBB into the jump buffer.
7817 // Incoming value: jbuf
7818 // ldr.n r5, LCPI1_1
7821 // str r5, [$jbuf, #+4] ; &jbuf[1]
7822 unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
7823 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
7824 .addConstantPoolIndex(CPI)
7825 .addMemOperand(CPMMO));
7826 // Set the low bit because of thumb mode.
7827 unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
7829 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
7830 .addReg(NewVReg1, RegState::Kill)
7832 unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
7833 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
7834 .addReg(NewVReg2, RegState::Kill)
7836 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
7837 .addReg(NewVReg3, RegState::Kill)
7839 .addImm(36) // &jbuf[1] :: pc
7840 .addMemOperand(FIMMOSt));
7841 } else if (isThumb) {
7842 // Incoming value: jbuf
7843 // ldr.n r1, LCPI1_4
7847 // add r2, $jbuf, #+4 ; &jbuf[1]
7849 unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
7850 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
7851 .addConstantPoolIndex(CPI)
7852 .addMemOperand(CPMMO));
7853 unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
7854 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
7855 .addReg(NewVReg1, RegState::Kill)
7857 // Set the low bit because of thumb mode.
7858 unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
7859 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
7860 .addReg(ARM::CPSR, RegState::Define)
7862 unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
7863 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
7864 .addReg(ARM::CPSR, RegState::Define)
7865 .addReg(NewVReg2, RegState::Kill)
7866 .addReg(NewVReg3, RegState::Kill));
7867 unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
7868 BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
7870 .addImm(36); // &jbuf[1] :: pc
7871 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
7872 .addReg(NewVReg4, RegState::Kill)
7873 .addReg(NewVReg5, RegState::Kill)
7875 .addMemOperand(FIMMOSt));
7877 // Incoming value: jbuf
7880 // str r1, [$jbuf, #+4] ; &jbuf[1]
7881 unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
7882 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
7883 .addConstantPoolIndex(CPI)
7885 .addMemOperand(CPMMO));
7886 unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
7887 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
7888 .addReg(NewVReg1, RegState::Kill)
7889 .addImm(PCLabelId));
7890 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
7891 .addReg(NewVReg2, RegState::Kill)
7893 .addImm(36) // &jbuf[1] :: pc
7894 .addMemOperand(FIMMOSt));
7898 void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
7899 MachineBasicBlock *MBB) const {
7900 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
7901 DebugLoc dl = MI.getDebugLoc();
7902 MachineFunction *MF = MBB->getParent();
7903 MachineRegisterInfo *MRI = &MF->getRegInfo();
7904 MachineFrameInfo &MFI = MF->getFrameInfo();
7905 int FI = MFI.getFunctionContextIndex();
7907 const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
7908 : &ARM::GPRnopcRegClass;
7910 // Get a mapping of the call site numbers to all of the landing pads they're associated with.
7912 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad;
7913 unsigned MaxCSNum = 0;
7914 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
7916 if (!BB->isEHPad()) continue;
7918 // FIXME: We should assert that the EH_LABEL is the first MI in the landing pad.
7920 for (MachineBasicBlock::iterator
7921 II = BB->begin(), IE = BB->end(); II != IE; ++II) {
7922 if (!II->isEHLabel()) continue;
7924 MCSymbol *Sym = II->getOperand(0).getMCSymbol();
7925 if (!MF->hasCallSiteLandingPad(Sym)) continue;
7927 SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym);
7928 for (SmallVectorImpl<unsigned>::iterator
7929 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
7930 CSI != CSE; ++CSI) {
7931 CallSiteNumToLPad[*CSI].push_back(&*BB);
7932 MaxCSNum = std::max(MaxCSNum, *CSI);
7938 // Get an ordered list of the machine basic blocks for the jump table.
7939 std::vector<MachineBasicBlock*> LPadList;
7940 SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
7941 LPadList.reserve(CallSiteNumToLPad.size());
7942 for (unsigned I = 1; I <= MaxCSNum; ++I) {
7943 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
7944 for (SmallVectorImpl<MachineBasicBlock*>::iterator
7945 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
7946 LPadList.push_back(*II);
7947 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
7951 assert(!LPadList.empty() &&
7952 "No landing pad destinations for the dispatch jump table!");
7954 // Create the jump table and associated information.
7955 MachineJumpTableInfo *JTI =
7956 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
7957 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
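  // The jump table maps call-site numbers to their landing-pad blocks. The
  // dispatch code built below reloads the call-site index from the function
  // context, bounds-checks it against the number of landing pads, and uses it
  // to index this table.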
7959 // Create the MBBs for the dispatch code.
7961 // Shove the dispatch's address into the return slot in the function context.
7962 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
7963 DispatchBB->setIsEHPad();
7965 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
7966 unsigned trap_opcode;
7967 if (Subtarget->isThumb())
7968 trap_opcode = ARM::tTRAP;
7970 trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
7972 BuildMI(TrapBB, dl, TII->get(trap_opcode));
7973 DispatchBB->addSuccessor(TrapBB);
7975 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
7976 DispatchBB->addSuccessor(DispContBB);
7979 MF->insert(MF->end(), DispatchBB);
7980 MF->insert(MF->end(), DispContBB);
7981 MF->insert(MF->end(), TrapBB);
7983 // Insert code into the entry block that creates and registers the function context.
7985 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
7987 MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
7988 MachinePointerInfo::getFixedStack(*MF, FI),
7989 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4);
7991 MachineInstrBuilder MIB;
7992 MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));
7994 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
7995 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
7997 // Add a register mask with no preserved registers. This results in all
7998 // registers being marked as clobbered. This can't work if the dispatch block
7999 // is in a Thumb1 function and is linked with ARM code which uses the FP
8000 // registers, as there is no way to preserve the FP registers in Thumb1 mode.
8001 MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF));
8003 bool IsPositionIndependent = isPositionIndependent();
8004 unsigned NumLPads = LPadList.size();
8005 if (Subtarget->isThumb2()) {
8006 unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
8007 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
8010 .addMemOperand(FIMMOLd));
8012 if (NumLPads < 256) {
8013 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
8015 .addImm(LPadList.size()));
8017 unsigned VReg1 = MRI->createVirtualRegister(TRC);
8018 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
8019 .addImm(NumLPads & 0xFFFF));
8021 unsigned VReg2 = VReg1;
8022 if ((NumLPads & 0xFFFF0000) != 0) {
8023 VReg2 = MRI->createVirtualRegister(TRC);
8024 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
8026 .addImm(NumLPads >> 16));
8029 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
8034 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
8039 unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
8040 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3)
8041 .addJumpTableIndex(MJTI));
8043 unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
8046 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
8047 .addReg(NewVReg3, RegState::Kill)
8049 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))));
8051 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
8052 .addReg(NewVReg4, RegState::Kill)
8054 .addJumpTableIndex(MJTI);
8055 } else if (Subtarget->isThumb()) {
8056 unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
8057 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
8060 .addMemOperand(FIMMOLd));
8062 if (NumLPads < 256) {
8063 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
8067 MachineConstantPool *ConstantPool = MF->getConstantPool();
8068 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
8069 const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
8071 // MachineConstantPool wants an explicit alignment.
8072 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8074 Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8075 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8077 unsigned VReg1 = MRI->createVirtualRegister(TRC);
8078 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
8079 .addReg(VReg1, RegState::Define)
8080 .addConstantPoolIndex(Idx));
8081 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
8086 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
8091 unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
8092 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
8093 .addReg(ARM::CPSR, RegState::Define)
8097 unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
8098 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
8099 .addJumpTableIndex(MJTI));
8101 unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
8102 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
8103 .addReg(ARM::CPSR, RegState::Define)
8104 .addReg(NewVReg2, RegState::Kill)
8107 MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
8108 MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);
8110 unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
8111 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
8112 .addReg(NewVReg4, RegState::Kill)
8114 .addMemOperand(JTMMOLd));
8116 unsigned NewVReg6 = NewVReg5;
8117 if (IsPositionIndependent) {
8118 NewVReg6 = MRI->createVirtualRegister(TRC);
8119 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
8120 .addReg(ARM::CPSR, RegState::Define)
8121 .addReg(NewVReg5, RegState::Kill)
8125 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
8126 .addReg(NewVReg6, RegState::Kill)
8127 .addJumpTableIndex(MJTI);
8129 unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
8130 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
8133 .addMemOperand(FIMMOLd));
8135 if (NumLPads < 256) {
8136 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
8139 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
8140 unsigned VReg1 = MRI->createVirtualRegister(TRC);
8141 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
8142 .addImm(NumLPads & 0xFFFF));
8144 unsigned VReg2 = VReg1;
8145 if ((NumLPads & 0xFFFF0000) != 0) {
8146 VReg2 = MRI->createVirtualRegister(TRC);
8147 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
8149 .addImm(NumLPads >> 16));
8152 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
8156 MachineConstantPool *ConstantPool = MF->getConstantPool();
8157 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
8158 const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
8160 // MachineConstantPool wants an explicit alignment.
8161 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8163 Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8164 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8166 unsigned VReg1 = MRI->createVirtualRegister(TRC);
8167 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
8168 .addReg(VReg1, RegState::Define)
8169 .addConstantPoolIndex(Idx)
8171 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
8173 .addReg(VReg1, RegState::Kill));
8176 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
8181 unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
8183 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
8185 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))));
8186 unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
8187 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
8188 .addJumpTableIndex(MJTI));
8190 MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
8191 MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);
8192 unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
8194 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
8195 .addReg(NewVReg3, RegState::Kill)
8198 .addMemOperand(JTMMOLd));
8200 if (IsPositionIndependent) {
8201 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
8202 .addReg(NewVReg5, RegState::Kill)
8204 .addJumpTableIndex(MJTI);
8206 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
8207 .addReg(NewVReg5, RegState::Kill)
8208 .addJumpTableIndex(MJTI);
8212 // Add the jump table entries as successors to the MBB.
8213 SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
8214 for (std::vector<MachineBasicBlock*>::iterator
8215 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
8216 MachineBasicBlock *CurMBB = *I;
8217 if (SeenMBBs.insert(CurMBB).second)
8218 DispContBB->addSuccessor(CurMBB);
8221 // N.B. the order the invoke BBs are processed in doesn't matter here.
8222 const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
8223 SmallVector<MachineBasicBlock*, 64> MBBLPads;
8224 for (MachineBasicBlock *BB : InvokeBBs) {
8226 // Remove the landing pad successor from the invoke block and replace it
8227 // with the new dispatch block.
8228 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
8230 while (!Successors.empty()) {
8231 MachineBasicBlock *SMBB = Successors.pop_back_val();
8232 if (SMBB->isEHPad()) {
8233 BB->removeSuccessor(SMBB);
8234 MBBLPads.push_back(SMBB);
8238 BB->addSuccessor(DispatchBB, BranchProbability::getZero());
8239 BB->normalizeSuccProbs();
8241 // Find the invoke call and mark all of the callee-saved registers as
8242 // 'implicit defined' so that they're spilled. This prevents code from
8243 // moving instructions to before the EH block, where they will never be executed.
8245 for (MachineBasicBlock::reverse_iterator
8246 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
8247 if (!II->isCall()) continue;
8249 DenseMap<unsigned, bool> DefRegs;
8250 for (MachineInstr::mop_iterator
8251 OI = II->operands_begin(), OE = II->operands_end();
8253 if (!OI->isReg()) continue;
8254 DefRegs[OI->getReg()] = true;
8257 MachineInstrBuilder MIB(*MF, &*II);
8259 for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
8260 unsigned Reg = SavedRegs[i];
8261 if (Subtarget->isThumb2() &&
8262 !ARM::tGPRRegClass.contains(Reg) &&
8263 !ARM::hGPRRegClass.contains(Reg))
8265 if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
8267 if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
8270 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
8277 // Mark all former landing pads as non-landing pads. The dispatch is the only landing pad now.
8279 for (SmallVectorImpl<MachineBasicBlock*>::iterator
8280 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
8281 (*I)->setIsEHPad(false);
8283 // The instruction is gone now.
8284 MI.eraseFromParent();
8288 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
8289 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
8290                                         E = MBB->succ_end(); I != E; ++I)
    if (*I != Succ)
      return *I;
8293   llvm_unreachable("Expecting a BB with two successors!");
8296 /// Return the load opcode for a given load size. If load size >= 8,
8297 /// a NEON opcode will be returned.
8298 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
8300 return LdSize == 16 ? ARM::VLD1q32wb_fixed
8301 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
8303 return LdSize == 4 ? ARM::tLDRi
8304 : LdSize == 2 ? ARM::tLDRHi
8305 : LdSize == 1 ? ARM::tLDRBi : 0;
8307 return LdSize == 4 ? ARM::t2LDR_POST
8308 : LdSize == 2 ? ARM::t2LDRH_POST
8309 : LdSize == 1 ? ARM::t2LDRB_POST : 0;
8310 return LdSize == 4 ? ARM::LDR_POST_IMM
8311 : LdSize == 2 ? ARM::LDRH_POST
8312 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
8315 /// Return the store opcode for a given store size. If store size >= 8,
8316 /// a NEON opcode will be returned.
8317 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
8319 return StSize == 16 ? ARM::VST1q32wb_fixed
8320 : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
8322 return StSize == 4 ? ARM::tSTRi
8323 : StSize == 2 ? ARM::tSTRHi
8324 : StSize == 1 ? ARM::tSTRBi : 0;
8326 return StSize == 4 ? ARM::t2STR_POST
8327 : StSize == 2 ? ARM::t2STRH_POST
8328 : StSize == 1 ? ARM::t2STRB_POST : 0;
8329 return StSize == 4 ? ARM::STR_POST_IMM
8330 : StSize == 2 ? ARM::STRH_POST
8331 : StSize == 1 ? ARM::STRB_POST_IMM : 0;
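// For illustration: getLdOpcode(4, /*IsThumb1=*/true, /*IsThumb2=*/false)
// yields ARM::tLDRi, getLdOpcode(16, false, false) yields the NEON
// ARM::VLD1q32wb_fixed, and unsupported sizes yield 0, which the callers
// reject with an assert.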
8334 /// Emit a post-increment load operation with given size. The instructions
8335 /// will be added to BB at Pos.
8336 static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
8337 const TargetInstrInfo *TII, const DebugLoc &dl,
8338 unsigned LdSize, unsigned Data, unsigned AddrIn,
8339 unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
8340 unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
8341 assert(LdOpc != 0 && "Should have a load opcode");
8343 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8344 .addReg(AddrOut, RegState::Define).addReg(AddrIn)
8346 } else if (IsThumb1) {
8347 // load + update AddrIn
8348 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8349 .addReg(AddrIn).addImm(0));
8350 MachineInstrBuilder MIB =
8351 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
8352 MIB = AddDefaultT1CC(MIB);
8353 MIB.addReg(AddrIn).addImm(LdSize);
8354 AddDefaultPred(MIB);
8355 } else if (IsThumb2) {
8356 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8357 .addReg(AddrOut, RegState::Define).addReg(AddrIn)
8360 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8361 .addReg(AddrOut, RegState::Define).addReg(AddrIn)
8362 .addReg(0).addImm(LdSize));
8366 /// Emit a post-increment store operation with given size. The instructions
8367 /// will be added to BB at Pos.
8368 static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
8369 const TargetInstrInfo *TII, const DebugLoc &dl,
8370 unsigned StSize, unsigned Data, unsigned AddrIn,
8371 unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
8372 unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
8373 assert(StOpc != 0 && "Should have a store opcode");
8375 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
8376 .addReg(AddrIn).addImm(0).addReg(Data));
8377 } else if (IsThumb1) {
8378 // store + update AddrIn
8379 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc)).addReg(Data)
8380 .addReg(AddrIn).addImm(0));
8381 MachineInstrBuilder MIB =
8382 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
8383 MIB = AddDefaultT1CC(MIB);
8384 MIB.addReg(AddrIn).addImm(StSize);
8385 AddDefaultPred(MIB);
8386 } else if (IsThumb2) {
8387 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
8388 .addReg(Data).addReg(AddrIn).addImm(StSize));
8390 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
8391 .addReg(Data).addReg(AddrIn).addReg(0)
8397 ARMTargetLowering::EmitStructByval(MachineInstr &MI,
8398 MachineBasicBlock *BB) const {
8399 // This pseudo instruction has 4 operands: dst, src, size, alignment.
8400 // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
8401 // Otherwise, we will generate unrolled scalar copies.
8402 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
8403 const BasicBlock *LLVM_BB = BB->getBasicBlock();
8404 MachineFunction::iterator It = ++BB->getIterator();
8406 unsigned dest = MI.getOperand(0).getReg();
8407 unsigned src = MI.getOperand(1).getReg();
8408 unsigned SizeVal = MI.getOperand(2).getImm();
8409 unsigned Align = MI.getOperand(3).getImm();
8410 DebugLoc dl = MI.getDebugLoc();
8412 MachineFunction *MF = BB->getParent();
8413 MachineRegisterInfo &MRI = MF->getRegInfo();
8414 unsigned UnitSize = 0;
8415 const TargetRegisterClass *TRC = nullptr;
8416 const TargetRegisterClass *VecTRC = nullptr;
8418 bool IsThumb1 = Subtarget->isThumb1Only();
8419 bool IsThumb2 = Subtarget->isThumb2();
8420 bool IsThumb = Subtarget->isThumb();
8424 } else if (Align & 2) {
8427 // Check whether we can use NEON instructions.
8428 if (!MF->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) &&
8429 Subtarget->hasNEON()) {
8430 if ((Align % 16 == 0) && SizeVal >= 16)
8432 else if ((Align % 8 == 0) && SizeVal >= 8)
8435 // Can't use NEON instructions.
8440 // Select the correct opcode and register class for unit size load/store
8441 bool IsNeon = UnitSize >= 8;
8442 TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
8444 VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
8445 : UnitSize == 8 ? &ARM::DPRRegClass
8448 unsigned BytesLeft = SizeVal % UnitSize;
8449 unsigned LoopSize = SizeVal - BytesLeft;
8451 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
8452 // Use LDR and STR to copy.
8453 // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
8454 // [destOut] = STR_POST(scratch, destIn, UnitSize)
8455 unsigned srcIn = src;
8456 unsigned destIn = dest;
8457 for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
8458 unsigned srcOut = MRI.createVirtualRegister(TRC);
8459 unsigned destOut = MRI.createVirtualRegister(TRC);
8460 unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
8461 emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
8462 IsThumb1, IsThumb2);
8463 emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
8464 IsThumb1, IsThumb2);
8469 // Handle the leftover bytes with LDRB and STRB.
8470 // [scratch, srcOut] = LDRB_POST(srcIn, 1)
8471 // [destOut] = STRB_POST(scratch, destIn, 1)
8472 for (unsigned i = 0; i < BytesLeft; i++) {
8473 unsigned srcOut = MRI.createVirtualRegister(TRC);
8474 unsigned destOut = MRI.createVirtualRegister(TRC);
8475 unsigned scratch = MRI.createVirtualRegister(TRC);
8476 emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
8477 IsThumb1, IsThumb2);
8478 emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
8479 IsThumb1, IsThumb2);
8483 MI.eraseFromParent(); // The instruction is gone now.
8487 // Expand the pseudo op to a loop.
8490 // movw varEnd, # --> with thumb2
8492 // ldrcp varEnd, idx --> without thumb2
8493 // fallthrough --> loopMBB
8495 // PHI varPhi, varEnd, varLoop
8496 // PHI srcPhi, src, srcLoop
8497 // PHI destPhi, dst, destLoop
8498 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
8499 // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
8500 // subs varLoop, varPhi, #UnitSize
8502 // fallthrough --> exitMBB
8504 // epilogue to handle left-over bytes
8505 // [scratch, srcOut] = LDRB_POST(srcLoop, 1)
8506 // [destOut] = STRB_POST(scratch, destLoop, 1)
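  // As a worked example: a 70-byte copy with UnitSize == 16 gives
  // LoopSize == 64 and BytesLeft == 6, i.e. four NEON copies in loopMBB
  // followed by six single-byte copies in the epilogue.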
8507 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
8508 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
8509 MF->insert(It, loopMBB);
8510 MF->insert(It, exitMBB);
8512 // Transfer the remainder of BB and its successor edges to exitMBB.
8513 exitMBB->splice(exitMBB->begin(), BB,
8514 std::next(MachineBasicBlock::iterator(MI)), BB->end());
8515 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
8517 // Load an immediate to varEnd.
8518 unsigned varEnd = MRI.createVirtualRegister(TRC);
8519 if (Subtarget->useMovt(*MF)) {
8520 unsigned Vtmp = varEnd;
8521 if ((LoopSize & 0xFFFF0000) != 0)
8522 Vtmp = MRI.createVirtualRegister(TRC);
8523 AddDefaultPred(BuildMI(BB, dl,
8524 TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16),
8525 Vtmp).addImm(LoopSize & 0xFFFF));
8527 if ((LoopSize & 0xFFFF0000) != 0)
8528 AddDefaultPred(BuildMI(BB, dl,
8529 TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16),
8532 .addImm(LoopSize >> 16));
8534 MachineConstantPool *ConstantPool = MF->getConstantPool();
8535 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
8536 const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
8538 // MachineConstantPool wants an explicit alignment.
8539 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8541 Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8542 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8545 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)).addReg(
8546 varEnd, RegState::Define).addConstantPoolIndex(Idx));
8548 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp)).addReg(
8549 varEnd, RegState::Define).addConstantPoolIndex(Idx).addImm(0));
8551 BB->addSuccessor(loopMBB);
8553 // Generate the loop body:
8554 // varPhi = PHI(varLoop, varEnd)
8555 // srcPhi = PHI(srcLoop, src)
8556 // destPhi = PHI(destLoop, dst)
8557 MachineBasicBlock *entryBB = BB;
8559 unsigned varLoop = MRI.createVirtualRegister(TRC);
8560 unsigned varPhi = MRI.createVirtualRegister(TRC);
8561 unsigned srcLoop = MRI.createVirtualRegister(TRC);
8562 unsigned srcPhi = MRI.createVirtualRegister(TRC);
8563 unsigned destLoop = MRI.createVirtualRegister(TRC);
8564 unsigned destPhi = MRI.createVirtualRegister(TRC);
8566 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
8567 .addReg(varLoop).addMBB(loopMBB)
8568 .addReg(varEnd).addMBB(entryBB);
8569 BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
8570 .addReg(srcLoop).addMBB(loopMBB)
8571 .addReg(src).addMBB(entryBB);
8572 BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
8573 .addReg(destLoop).addMBB(loopMBB)
8574 .addReg(dest).addMBB(entryBB);
8576 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
8577 // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
8578 unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
8579 emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
8580 IsThumb1, IsThumb2);
8581 emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
8582 IsThumb1, IsThumb2);
8584 // Decrement loop variable by UnitSize.
8586 MachineInstrBuilder MIB =
8587 BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop);
8588 MIB = AddDefaultT1CC(MIB);
8589 MIB.addReg(varPhi).addImm(UnitSize);
8590 AddDefaultPred(MIB);
8592 MachineInstrBuilder MIB =
8593 BuildMI(*BB, BB->end(), dl,
8594 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
8595 AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize)));
8596 MIB->getOperand(5).setReg(ARM::CPSR);
8597 MIB->getOperand(5).setIsDef(true);
8599 BuildMI(*BB, BB->end(), dl,
8600 TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
8601 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
8603 // loopMBB can loop back to loopMBB or fall through to exitMBB.
8604 BB->addSuccessor(loopMBB);
8605 BB->addSuccessor(exitMBB);
8607 // Add epilogue to handle BytesLeft.
8609 auto StartOfExit = exitMBB->begin();
8611 // [scratch, srcOut] = LDRB_POST(srcLoop, 1)
8612 // [destOut] = STRB_POST(scratch, destLoop, 1)
8613 unsigned srcIn = srcLoop;
8614 unsigned destIn = destLoop;
8615 for (unsigned i = 0; i < BytesLeft; i++) {
8616 unsigned srcOut = MRI.createVirtualRegister(TRC);
8617 unsigned destOut = MRI.createVirtualRegister(TRC);
8618 unsigned scratch = MRI.createVirtualRegister(TRC);
8619 emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
8620 IsThumb1, IsThumb2);
8621 emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
8622 IsThumb1, IsThumb2);
8627 MI.eraseFromParent(); // The instruction is gone now.
8632 ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
8633 MachineBasicBlock *MBB) const {
8634 const TargetMachine &TM = getTargetMachine();
8635 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
8636 DebugLoc DL = MI.getDebugLoc();
8638 assert(Subtarget->isTargetWindows() &&
8639 "__chkstk is only supported on Windows");
8640 assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
8642 // __chkstk takes the number of words to allocate on the stack in R4, and
8643 // returns the stack adjustment in number of bytes in R4. This will not
8644 // clobber any other registers (other than the obvious lr).
8646 // Although, technically, IP should be considered a register which may be
8647 // clobbered, the call itself will not touch it. Windows on ARM is a pure
8648 // thumb-2 environment, so there is no interworking required. As a result, we
8649 // do not expect a veneer to be emitted by the linker, clobbering IP.
8651 // Each module receives its own copy of __chkstk, so no import thunk is
8652 // required, again, ensuring that IP is not clobbered.
8654 // Finally, although some linkers may theoretically provide a trampoline for
8655 // out of range calls (which is quite common due to a 32M range limitation of
8656 // branches for Thumb), we can generate the long-call version via
8657 // -mcmodel=large, alleviating the need for the trampoline which may clobber IP.
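  //
  // Roughly, the sequence emitted below is either
  //   bl __chkstk                                   (small/medium code models)
  // or
  //   movw/movt rN, #:lower16:/:upper16: __chkstk
  //   blx rN                                        (large code model)
  // followed in both cases by "sub.w sp, sp, r4".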
8660 switch (TM.getCodeModel()) {
8661 case CodeModel::Small:
8662 case CodeModel::Medium:
8663 case CodeModel::Default:
8664 case CodeModel::Kernel:
8665 BuildMI(*MBB, MI, DL, TII.get(ARM::tBL))
8666 .addImm((unsigned)ARMCC::AL).addReg(0)
8667 .addExternalSymbol("__chkstk")
8668 .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
8669 .addReg(ARM::R4, RegState::Implicit | RegState::Define)
8670 .addReg(ARM::R12, RegState::Implicit | RegState::Define | RegState::Dead);
8672 case CodeModel::Large:
8673 case CodeModel::JITDefault: {
8674 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
8675 unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
8677 BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg)
8678 .addExternalSymbol("__chkstk");
8679 BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr))
8680 .addImm((unsigned)ARMCC::AL).addReg(0)
8681 .addReg(Reg, RegState::Kill)
8682 .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
8683 .addReg(ARM::R4, RegState::Implicit | RegState::Define)
8684 .addReg(ARM::R12, RegState::Implicit | RegState::Define | RegState::Dead);
8689 AddDefaultCC(AddDefaultPred(BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr),
8691 .addReg(ARM::SP, RegState::Kill)
8692 .addReg(ARM::R4, RegState::Kill)
8693 .setMIFlags(MachineInstr::FrameSetup)));
8695 MI.eraseFromParent();
8700 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
8701 MachineBasicBlock *MBB) const {
8702 DebugLoc DL = MI.getDebugLoc();
8703 MachineFunction *MF = MBB->getParent();
8704 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
8706 MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock();
8707 MF->insert(++MBB->getIterator(), ContBB);
8708 ContBB->splice(ContBB->begin(), MBB,
8709 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
8710 ContBB->transferSuccessorsAndUpdatePHIs(MBB);
8711 MBB->addSuccessor(ContBB);
8713 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
8714 BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0));
8715 MF->push_back(TrapBB);
8716 MBB->addSuccessor(TrapBB);
8718 AddDefaultPred(BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8))
8719 .addReg(MI.getOperand(0).getReg())
8721 BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc))
8726 MI.eraseFromParent();
8731 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
8732 MachineBasicBlock *BB) const {
8733 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
8734 DebugLoc dl = MI.getDebugLoc();
8735 bool isThumb2 = Subtarget->isThumb2();
8736 switch (MI.getOpcode()) {
8739 llvm_unreachable("Unexpected instr type to insert");
8742 // Thumb1 post-indexed loads are really just single-register LDMs.
8743 case ARM::tLDR_postidx: {
8744 BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD))
8745 .addOperand(MI.getOperand(1)) // Rn_wb
8746 .addOperand(MI.getOperand(2)) // Rn
8747 .addOperand(MI.getOperand(3)) // PredImm
8748 .addOperand(MI.getOperand(4)) // PredReg
8749 .addOperand(MI.getOperand(0)); // Rt
8750 MI.eraseFromParent();
8754 // The Thumb2 pre-indexed stores have the same MI operands; they are just
8755 // defined differently in the .td files than in the isel patterns, so
8756 // they need pseudos.
8757 case ARM::t2STR_preidx:
8758 MI.setDesc(TII->get(ARM::t2STR_PRE));
8760 case ARM::t2STRB_preidx:
8761 MI.setDesc(TII->get(ARM::t2STRB_PRE));
8763 case ARM::t2STRH_preidx:
8764 MI.setDesc(TII->get(ARM::t2STRH_PRE));
8767 case ARM::STRi_preidx:
8768 case ARM::STRBi_preidx: {
8769 unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
8770 : ARM::STRB_PRE_IMM;
8771 // Decode the offset.
8772 unsigned Offset = MI.getOperand(4).getImm();
8773 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
8774 Offset = ARM_AM::getAM2Offset(Offset);
8778 MachineMemOperand *MMO = *MI.memoperands_begin();
8779 BuildMI(*BB, MI, dl, TII->get(NewOpc))
8780 .addOperand(MI.getOperand(0)) // Rn_wb
8781 .addOperand(MI.getOperand(1)) // Rt
8782 .addOperand(MI.getOperand(2)) // Rn
8783 .addImm(Offset) // offset (skip GPR==zero_reg)
8784 .addOperand(MI.getOperand(5)) // pred
8785 .addOperand(MI.getOperand(6))
8786 .addMemOperand(MMO);
8787 MI.eraseFromParent();
8790 case ARM::STRr_preidx:
8791 case ARM::STRBr_preidx:
8792 case ARM::STRH_preidx: {
8794 switch (MI.getOpcode()) {
8795 default: llvm_unreachable("unexpected opcode!");
8796 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
8797 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
8798 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
8800 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
8801 for (unsigned i = 0; i < MI.getNumOperands(); ++i)
8802 MIB.addOperand(MI.getOperand(i));
8803 MI.eraseFromParent();
8807 case ARM::tMOVCCr_pseudo: {
8808 // To "insert" a SELECT_CC instruction, we actually have to insert the
8809 // diamond control-flow pattern. The incoming instruction knows the
8810 // destination vreg to set, the condition code register to branch on, the
8811 // true/false values to select between, and a branch opcode to use.
8812 const BasicBlock *LLVM_BB = BB->getBasicBlock();
8813 MachineFunction::iterator It = ++BB->getIterator();
8818 // cmpTY ccX, r1, r2
8820 // fallthrough --> copy0MBB
8821 MachineBasicBlock *thisMBB = BB;
8822 MachineFunction *F = BB->getParent();
8823 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
8824 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
8825 F->insert(It, copy0MBB);
8826 F->insert(It, sinkMBB);
8828 // Transfer the remainder of BB and its successor edges to sinkMBB.
8829 sinkMBB->splice(sinkMBB->begin(), BB,
8830 std::next(MachineBasicBlock::iterator(MI)), BB->end());
8831 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
8833 BB->addSuccessor(copy0MBB);
8834 BB->addSuccessor(sinkMBB);
8836 BuildMI(BB, dl, TII->get(ARM::tBcc))
8838 .addImm(MI.getOperand(3).getImm())
8839 .addReg(MI.getOperand(4).getReg());
8842 // %FalseValue = ...
8843 // # fallthrough to sinkMBB
8846 // Update machine-CFG edges
8847 BB->addSuccessor(sinkMBB);
8850 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
8853 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg())
8854 .addReg(MI.getOperand(1).getReg())
8856 .addReg(MI.getOperand(2).getReg())
8859 MI.eraseFromParent(); // The pseudo instruction is gone now.
8864 case ARM::BCCZi64: {
8865 // If there is an unconditional branch to the other successor, remove it.
8866 BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());
8868 // Compare both parts that make up the double comparison separately for equality.
8870 bool RHSisZero = MI.getOpcode() == ARM::BCCZi64;
8872 unsigned LHS1 = MI.getOperand(1).getReg();
8873 unsigned LHS2 = MI.getOperand(2).getReg();
8875 AddDefaultPred(BuildMI(BB, dl,
8876 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
8877 .addReg(LHS1).addImm(0));
8878 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
8879 .addReg(LHS2).addImm(0)
8880 .addImm(ARMCC::EQ).addReg(ARM::CPSR);
8882 unsigned RHS1 = MI.getOperand(3).getReg();
8883 unsigned RHS2 = MI.getOperand(4).getReg();
8884 AddDefaultPred(BuildMI(BB, dl,
8885 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
8886 .addReg(LHS1).addReg(RHS1));
8887 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
8888 .addReg(LHS2).addReg(RHS2)
8889 .addImm(ARMCC::EQ).addReg(ARM::CPSR);
8892 MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB();
8893 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
8894 if (MI.getOperand(0).getImm() == ARMCC::NE)
8895 std::swap(destMBB, exitMBB);
8897 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
8898 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
8900 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB));
8902 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB);
8904 MI.eraseFromParent(); // The pseudo instruction is gone now.
8908 case ARM::Int_eh_sjlj_setjmp:
8909 case ARM::Int_eh_sjlj_setjmp_nofp:
8910 case ARM::tInt_eh_sjlj_setjmp:
8911 case ARM::t2Int_eh_sjlj_setjmp:
8912 case ARM::t2Int_eh_sjlj_setjmp_nofp:
8915 case ARM::Int_eh_sjlj_setup_dispatch:
8916 EmitSjLjDispatchBlock(MI, BB);
8921 // To insert an ABS instruction, we have to insert the
8922 // diamond control-flow pattern. The incoming instruction knows the
8923 // source vreg to test against 0, the destination vreg to set,
8924 // the condition code register to branch on, the
8925 // true/false values to select between, and a branch opcode to use.
8930 // BCC (branch to SinkBB if V0 >= 0)
8931 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0)
8932 // SinkBB: V1 = PHI(V2, V3)
8933 const BasicBlock *LLVM_BB = BB->getBasicBlock();
8934 MachineFunction::iterator BBI = ++BB->getIterator();
8935 MachineFunction *Fn = BB->getParent();
8936 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
8937 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB);
8938 Fn->insert(BBI, RSBBB);
8939 Fn->insert(BBI, SinkBB);
8941 unsigned int ABSSrcReg = MI.getOperand(1).getReg();
8942 unsigned int ABSDstReg = MI.getOperand(0).getReg();
8943 bool ABSSrcKIll = MI.getOperand(1).isKill();
8944 bool isThumb2 = Subtarget->isThumb2();
8945 MachineRegisterInfo &MRI = Fn->getRegInfo();
8946 // In Thumb mode, S must not be specified if the source register is the SP or
8947 // PC, or if the destination register is the SP, so restrict the register class.
8948 unsigned NewRsbDstReg =
8949 MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);
8951 // Transfer the remainder of BB and its successor edges to sinkMBB.
8952 SinkBB->splice(SinkBB->begin(), BB,
8953 std::next(MachineBasicBlock::iterator(MI)), BB->end());
8954 SinkBB->transferSuccessorsAndUpdatePHIs(BB);
8956 BB->addSuccessor(RSBBB);
8957 BB->addSuccessor(SinkBB);
8959 // fall through to SinkMBB
8960 RSBBB->addSuccessor(SinkBB);
8962 // insert a cmp at the end of BB
8963 AddDefaultPred(BuildMI(BB, dl,
8964 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
8965 .addReg(ABSSrcReg).addImm(0));
8967 // insert a bcc with opposite CC to ARMCC::MI at the end of BB
8969 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
8970 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);
8972 // insert rsbri in RSBBB
8973 // Note: BCC and rsbri will be converted into predicated rsbmi
8974 // by if-conversion pass
8975 BuildMI(*RSBBB, RSBBB->begin(), dl,
8976 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
8977 .addReg(ABSSrcReg, ABSSrcKIll ? RegState::Kill : 0)
8978 .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
8980 // insert PHI in SinkBB,
8981 // reuse ABSDstReg to not change uses of ABS instruction
8982 BuildMI(*SinkBB, SinkBB->begin(), dl,
8983 TII->get(ARM::PHI), ABSDstReg)
8984 .addReg(NewRsbDstReg).addMBB(RSBBB)
8985 .addReg(ABSSrcReg).addMBB(BB);
8987 // remove ABS instruction
8988 MI.eraseFromParent();
8990 // return last added BB
8993 case ARM::COPY_STRUCT_BYVAL_I32:
8995 return EmitStructByval(MI, BB);
8996 case ARM::WIN__CHKSTK:
8997 return EmitLowered__chkstk(MI, BB);
8998 case ARM::WIN__DBZCHK:
8999 return EmitLowered__dbzchk(MI, BB);
9003 /// \brief Attaches vregs to MEMCPY that it will use as scratch registers
9004 /// when it is expanded into LDM/STM. This is done as a post-isel lowering
9005 /// instead of as a custom inserter because we need the use list from the SDNode.
9006 static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
9007 MachineInstr &MI, const SDNode *Node) {
9008 bool isThumb1 = Subtarget->isThumb1Only();
9010 DebugLoc DL = MI.getDebugLoc();
9011 MachineFunction *MF = MI.getParent()->getParent();
9012 MachineRegisterInfo &MRI = MF->getRegInfo();
9013 MachineInstrBuilder MIB(*MF, MI);
9015 // If the new dst/src is unused mark it as dead.
9016 if (!Node->hasAnyUseOfValue(0)) {
9017 MI.getOperand(0).setIsDead(true);
9019 if (!Node->hasAnyUseOfValue(1)) {
9020 MI.getOperand(1).setIsDead(true);
9023 // The MEMCPY both defines and kills the scratch registers.
9024 for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
9025 unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
9026 : &ARM::GPRRegClass);
9027 MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
9031 void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9032 SDNode *Node) const {
9033 if (MI.getOpcode() == ARM::MEMCPY) {
9034 attachMEMCPYScratchRegs(Subtarget, MI, Node);
9038 const MCInstrDesc *MCID = &MI.getDesc();
9039 // Adjust instructions that potentially set the 's' bit after isel, i.e. ADC, SBC, RSB,
9040 // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
9041 // operand is still set to noreg. If needed, set the optional operand's
9042 // register to CPSR, and remove the redundant implicit def.
9044 // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>).
9046 // Rename pseudo opcodes.
9047 unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
9049 const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
9050 MCID = &TII->get(NewOpc);
9052 assert(MCID->getNumOperands() == MI.getDesc().getNumOperands() + 1 &&
9053 "converted opcode should be the same except for cc_out");
9057 // Add the optional cc_out operand
9058 MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
9060 unsigned ccOutIdx = MCID->getNumOperands() - 1;
9062 // Any ARM instruction that sets the 's' bit should specify an optional
9063 // "cc_out" operand in the last operand position.
9064 if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
9065 assert(!NewOpc && "Optional cc_out operand required");
9068 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
9069 // since we already have an optional CPSR def.
9070 bool definesCPSR = false;
9071 bool deadCPSR = false;
9072 for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
9074 const MachineOperand &MO = MI.getOperand(i);
9075 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
9079 MI.RemoveOperand(i);
9084 assert(!NewOpc && "Optional cc_out operand required");
9087 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
9089 assert(!MI.getOperand(ccOutIdx).getReg() &&
9090 "expect uninitialized optional cc_out operand");
9094 // If this instruction was defined with an optional CPSR def and its dag node
9095 // had a live implicit CPSR def, then activate the optional CPSR def.
9096 MachineOperand &MO = MI.getOperand(ccOutIdx);
9097 MO.setReg(ARM::CPSR);
9101 //===----------------------------------------------------------------------===//
9102 // ARM Optimization Hooks
9103 //===----------------------------------------------------------------------===//
9105 // Helper function that checks if N is a null or all ones constant.
9106 static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
9107 return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
9110 // Return true if N is conditionally 0 or all ones.
9111 // Detects these expressions where cc is an i1 value:
9113 // (select cc 0, y) [AllOnes=0]
9114 // (select cc y, 0) [AllOnes=0]
9115 // (zext cc) [AllOnes=0]
9116 // (sext cc) [AllOnes=0/1]
9117 // (select cc -1, y) [AllOnes=1]
9118 // (select cc y, -1) [AllOnes=1]
9120 // Invert is set when N is the null/all ones constant when CC is false.
9121 // OtherOp is set to the alternative value of N.
9122 static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
9123 SDValue &CC, bool &Invert,
9125 SelectionDAG &DAG) {
9126 switch (N->getOpcode()) {
9127 default: return false;
9129 CC = N->getOperand(0);
9130 SDValue N1 = N->getOperand(1);
9131 SDValue N2 = N->getOperand(2);
9132 if (isZeroOrAllOnes(N1, AllOnes)) {
9137 if (isZeroOrAllOnes(N2, AllOnes)) {
9144 case ISD::ZERO_EXTEND:
9145 // (zext cc) can never be the all ones value.
9149 case ISD::SIGN_EXTEND: {
9151 EVT VT = N->getValueType(0);
9152 CC = N->getOperand(0);
9153 if (CC.getValueType() != MVT::i1)
9157 // When looking for an AllOnes constant, N is an sext, and the 'other' value is 0.
9159 OtherOp = DAG.getConstant(0, dl, VT);
9160 else if (N->getOpcode() == ISD::ZERO_EXTEND)
9161 // When looking for a 0 constant, N can be zext or sext.
9162 OtherOp = DAG.getConstant(1, dl, VT);
9164 OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
9171 // Combine a constant select operand into its use:
9173 // (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
9174 // (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
9175 // (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1]
9176 // (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
9177 // (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
9179 // The transform is rejected if the select doesn't have a constant operand that
9180 // is null, or all ones when AllOnes is set.
9182 // Also recognize sext/zext from i1:
9184 // (add (zext cc), x) -> (select cc (add x, 1), x)
9185 // (add (sext cc), x) -> (select cc (add x, -1), x)
9187 // These transformations eventually create predicated instructions.
9189 // @param N The node to transform.
9190 // @param Slct The N operand that is a select.
9191 // @param OtherOp The other N operand (x above).
9192 // @param DCI Context.
9193 // @param AllOnes Require the select constant to be all ones instead of null.
9194 // @returns The new node, or SDValue() on failure.
9196 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
9197 TargetLowering::DAGCombinerInfo &DCI,
9198 bool AllOnes = false) {
9199 SelectionDAG &DAG = DCI.DAG;
9200 EVT VT = N->getValueType(0);
9201 SDValue NonConstantVal;
9204 if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
9205 NonConstantVal, DAG))
9208 // Slct is now known to be the desired identity constant when CC is true.
9209 SDValue TrueVal = OtherOp;
9210 SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
9211 OtherOp, NonConstantVal);
9212 // Unless SwapSelectOps says CC should be false.
9214 std::swap(TrueVal, FalseVal);
9216 return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
9217 CCOp, TrueVal, FalseVal);
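// As a concrete example, (add x, (zext i1 cc)) becomes
// (select cc, (add x, 1), x); the add can then be emitted as a predicated
// (conditionally executed) instruction rather than a separate select.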
9220 // Attempt combineSelectAndUse on each operand of a commutative operator N.
9222 SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
9223 TargetLowering::DAGCombinerInfo &DCI) {
9224 SDValue N0 = N->getOperand(0);
9225 SDValue N1 = N->getOperand(1);
9226 if (N0.getNode()->hasOneUse())
9227 if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
9229 if (N1.getNode()->hasOneUse())
9230 if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
9235 // AddCombineToVPADDL - For a pair-wise add on NEON, use the vpaddl instruction
9236 // (only after legalization).
9237 static SDValue AddCombineToVPADDL(SDNode *N, SDValue N0, SDValue N1,
9238 TargetLowering::DAGCombinerInfo &DCI,
9239 const ARMSubtarget *Subtarget) {
9241 // Only perform this optimization after legalization and if NEON is available. We
9242 // also expect both operands to be BUILD_VECTORs.
9243 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
9244 || N0.getOpcode() != ISD::BUILD_VECTOR
9245 || N1.getOpcode() != ISD::BUILD_VECTOR)
9248 // Check output type since VPADDL operand elements can only be 8, 16, or 32.
9249 EVT VT = N->getValueType(0);
9250 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
9253 // Check that the vector operands are of the right form.
9254 // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
9255 // operands, where N is the size of the formed vector.
9256 // Each EXTRACT_VECTOR should have the same input vector and odd or even
9257 // index such that we have a pair-wise add pattern.
9259 // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
9260 if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9262 SDValue Vec = N0->getOperand(0)->getOperand(0);
9263 SDNode *V = Vec.getNode();
9264 unsigned nextIndex = 0;
9266 // For each operands to the ADD which are BUILD_VECTORs,
9267 // check to see if each of their operands are an EXTRACT_VECTOR with
9268 // the same vector and appropriate index.
9269 for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
9270 if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
9271 && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
9273 SDValue ExtVec0 = N0->getOperand(i);
9274 SDValue ExtVec1 = N1->getOperand(i);
9276 // First operand is the vector; verify it's the same.
9277 if (V != ExtVec0->getOperand(0).getNode() ||
9278 V != ExtVec1->getOperand(0).getNode())
9281 // Second is the constant; verify it's correct.
9282 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
9283 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
9285 // For the constant, we want to see all the even or all the odd.
9286 if (!C0 || !C1 || C0->getZExtValue() != nextIndex
9287 || C1->getZExtValue() != nextIndex+1)
9296 // Create VPADDL node.
9297 SelectionDAG &DAG = DCI.DAG;
9298 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9302 // Build operand list.
9303 SmallVector<SDValue, 8> Ops;
9304 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
9305 TLI.getPointerTy(DAG.getDataLayout())));
9307 // Input is the vector.
9310 // Get widened type and narrowed type.
9312 unsigned numElem = VT.getVectorNumElements();
9314 EVT inputLaneType = Vec.getValueType().getVectorElementType();
9315 switch (inputLaneType.getSimpleVT().SimpleTy) {
9316 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
9317 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
9318 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
9320 llvm_unreachable("Invalid vector element type for padd optimization.");
9323 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
9324 unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
9325 return DAG.getNode(ExtOp, dl, VT, tmp);
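// Illustration: an add of two BUILD_VECTORs that extract the even lanes and
// the odd lanes of the same input vector is a pair-wise add, and is rewritten
// here into the arm_neon_vpaddls intrinsic, which selects to a VPADDL
// instruction.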
9328 static SDValue findMUL_LOHI(SDValue V) {
9329 if (V->getOpcode() == ISD::UMUL_LOHI ||
9330 V->getOpcode() == ISD::SMUL_LOHI)
9335 static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
9336 TargetLowering::DAGCombinerInfo &DCI,
9337 const ARMSubtarget *Subtarget) {
9339 // Look for multiply add opportunities.
9340 // The pattern is an ISD::UMUL_LOHI followed by two add nodes, where
9341 // each add node consumes a value from ISD::UMUL_LOHI and there is
9342 // a glue link from the first add to the second add.
9343 // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
9344 // a S/UMLAL instruction.
9347 //   (diagram: the UMUL_LOHI's :lo result feeds the ADDC together with loAdd; its :hi result feeds the glued ADDE together with hiAdd)
9353 assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC");
9354 SDValue AddcOp0 = AddcNode->getOperand(0);
9355 SDValue AddcOp1 = AddcNode->getOperand(1);
9357 // Check if the two operands are from the same mul_lohi node.
9358 if (AddcOp0.getNode() == AddcOp1.getNode())
9361 assert(AddcNode->getNumValues() == 2 &&
9362 AddcNode->getValueType(0) == MVT::i32 &&
9363 "Expect ADDC with two result values. First: i32");
9365 // Check that we have a glued ADDC node.
9366 if (AddcNode->getValueType(1) != MVT::Glue)
9369 // Check that the ADDC adds the low result of the S/UMUL_LOHI.
9370 if (AddcOp0->getOpcode() != ISD::UMUL_LOHI &&
9371 AddcOp0->getOpcode() != ISD::SMUL_LOHI &&
9372 AddcOp1->getOpcode() != ISD::UMUL_LOHI &&
9373 AddcOp1->getOpcode() != ISD::SMUL_LOHI)
9376 // Look for the glued ADDE.
9377 SDNode* AddeNode = AddcNode->getGluedUser();
9381 // Make sure it is really an ADDE.
9382 if (AddeNode->getOpcode() != ISD::ADDE)
9385 assert(AddeNode->getNumOperands() == 3 &&
9386 AddeNode->getOperand(2).getValueType() == MVT::Glue &&
9387 "ADDE node has the wrong inputs");
9389 // Check for the triangle shape.
9390 SDValue AddeOp0 = AddeNode->getOperand(0);
9391 SDValue AddeOp1 = AddeNode->getOperand(1);
9393 // Make sure that the ADDE operands are not coming from the same node.
9394 if (AddeOp0.getNode() == AddeOp1.getNode())
9397 // Find the MUL_LOHI node walking up ADDE's operands.
9398 bool IsLeftOperandMUL = false;
9399 SDValue MULOp = findMUL_LOHI(AddeOp0);
9400 if (MULOp == SDValue())
9401 MULOp = findMUL_LOHI(AddeOp1);
9403 IsLeftOperandMUL = true;
9404 if (MULOp == SDValue())
9407 // Figure out the right opcode.
9408 unsigned Opc = MULOp->getOpcode();
9409 unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;
9411 // Figure out the high and low input values to the MLAL node.
9412 SDValue* HiAdd = nullptr;
9413 SDValue* LoMul = nullptr;
9414 SDValue* LowAdd = nullptr;
9416 // Ensure that the ADDE is fed from the high result of ISD::SMUL_LOHI.
9417 if ((AddeOp0 != MULOp.getValue(1)) && (AddeOp1 != MULOp.getValue(1)))
9420 if (IsLeftOperandMUL)
9426 // Ensure that LoMul and LowAdd are taken from the correct ISD::SMUL_LOHI node
9427 // whose low result is fed to the ADDC we are checking.
9429 if (AddcOp0 == MULOp.getValue(0)) {
9433 if (AddcOp1 == MULOp.getValue(0)) {
9441 // Create the merged node.
9442 SelectionDAG &DAG = DCI.DAG;
9444 // Build operand list.
9445 SmallVector<SDValue, 8> Ops;
9446 Ops.push_back(LoMul->getOperand(0));
9447 Ops.push_back(LoMul->getOperand(1));
9448 Ops.push_back(*LowAdd);
9449 Ops.push_back(*HiAdd);
9451 SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcNode),
9452 DAG.getVTList(MVT::i32, MVT::i32), Ops);
9454 // Replace the ADD nodes' uses with the MLAL node's values.
9455 SDValue HiMLALResult(MLALNode.getNode(), 1);
9456 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);
9458 SDValue LoMLALResult(MLALNode.getNode(), 0);
9459 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
9461 // Return original node to notify the driver to stop replacing.
9462 SDValue resNode(AddcNode, 0);
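// Put differently: (ADDC lo(UMUL_LOHI a, b), loAdd) together with the glued
// (ADDE hi(UMUL_LOHI a, b), hiAdd) computes a * b + (hiAdd:loAdd), which is
// exactly what a single SMLAL/UMLAL produces.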
9466 static SDValue AddCombineTo64bitUMAAL(SDNode *AddcNode,
9467 TargetLowering::DAGCombinerInfo &DCI,
9468 const ARMSubtarget *Subtarget) {
9469 // UMAAL is similar to UMLAL except that it adds two unsigned values.
9470 // While trying to combine for the other MLAL nodes, first search for the
9471 // chance to use UMAAL. Check if Addc uses another addc node which can first
9472 // be combined into a UMLAL. The other pattern, where AddcNode is combined
9473 // into a UMLAL first and another addc is then applied, is handled in ISelDAGToDAG.
9475 if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP() ||
9476 (Subtarget->isThumb() && !Subtarget->hasThumb2()))
9477 return AddCombineTo64bitMLAL(AddcNode, DCI, Subtarget);
9479 SDNode *PrevAddc = nullptr;
9480 if (AddcNode->getOperand(0).getOpcode() == ISD::ADDC)
9481 PrevAddc = AddcNode->getOperand(0).getNode();
9482 else if (AddcNode->getOperand(1).getOpcode() == ISD::ADDC)
9483 PrevAddc = AddcNode->getOperand(1).getNode();
9485 // If there is no addc chain, just fall back to searching for any MLAL.
9486 if (PrevAddc == nullptr)
9487 return AddCombineTo64bitMLAL(AddcNode, DCI, Subtarget);
9489 // Try to convert the addc operand to an MLAL and if that fails try to
9490 // combine AddcNode.
9491 SDValue MLAL = AddCombineTo64bitMLAL(PrevAddc, DCI, Subtarget);
9492 if (MLAL != SDValue(PrevAddc, 0))
9493 return AddCombineTo64bitMLAL(AddcNode, DCI, Subtarget);
9495 // Find the converted UMAAL or quit if it doesn't exist.
9496 SDNode *UmlalNode = nullptr;
9498 if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
9499 UmlalNode = AddcNode->getOperand(0).getNode();
9500 AddHi = AddcNode->getOperand(1);
9501 } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
9502 UmlalNode = AddcNode->getOperand(1).getNode();
9503 AddHi = AddcNode->getOperand(0);
9508 // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
9509 // the ADDC as well as Zero.
9510 auto *Zero = dyn_cast<ConstantSDNode>(UmlalNode->getOperand(3));
9512 if (!Zero || Zero->getZExtValue() != 0)
9515 // Check that we have a glued ADDC node.
9516 if (AddcNode->getValueType(1) != MVT::Glue)
9519 // Look for the glued ADDE.
9520 SDNode* AddeNode = AddcNode->getGluedUser();
9524 if ((AddeNode->getOperand(0).getNode() == Zero &&
9525 AddeNode->getOperand(1).getNode() == UmlalNode) ||
9526 (AddeNode->getOperand(0).getNode() == UmlalNode &&
9527 AddeNode->getOperand(1).getNode() == Zero)) {
9529 SelectionDAG &DAG = DCI.DAG;
9530 SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
9531 UmlalNode->getOperand(2), AddHi };
9532 SDValue UMAAL = DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
9533 DAG.getVTList(MVT::i32, MVT::i32), Ops);
9535 // Replace the ADD nodes' uses with the UMAAL node's values.
9536 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
9537 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));
9539 // Return original node to notify the driver to stop replacing.
9540 return SDValue(AddcNode, 0);
9545 /// PerformADDCCombine - Target-specific dag combine transform from
9546 /// ISD::ADDC, ISD::ADDE, and ISD::MUL_LOHI to MLAL or
9547 /// ISD::ADDC, ISD::ADDE and ARMISD::UMLAL to ARMISD::UMAAL
9548 static SDValue PerformADDCCombine(SDNode *N,
9549 TargetLowering::DAGCombinerInfo &DCI,
9550 const ARMSubtarget *Subtarget) {
9552 if (Subtarget->isThumb1Only()) return SDValue();
9554 // Only perform the checks after legalize when the pattern is available.
9555 if (DCI.isBeforeLegalize()) return SDValue();
9557 return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
9560 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
9561 /// operands N0 and N1. This is a helper for PerformADDCombine that is
9562 /// called with the default operands, and if that fails, with commuted operands.
9564 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
9565 TargetLowering::DAGCombinerInfo &DCI,
9566 const ARMSubtarget *Subtarget){
9568 // Attempt to create vpaddl for this add.
9569 if (SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget))
9572 // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
9573 if (N0.getNode()->hasOneUse())
9574 if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
9579 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
9581 static SDValue PerformADDCombine(SDNode *N,
9582 TargetLowering::DAGCombinerInfo &DCI,
9583 const ARMSubtarget *Subtarget) {
9584 SDValue N0 = N->getOperand(0);
9585 SDValue N1 = N->getOperand(1);
9587 // First try with the default operand order.
9588 if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
9591 // If that didn't work, try again with the operands commuted.
9592 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
9595 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
9597 static SDValue PerformSUBCombine(SDNode *N,
9598 TargetLowering::DAGCombinerInfo &DCI) {
9599 SDValue N0 = N->getOperand(0);
9600 SDValue N1 = N->getOperand(1);
9602 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
9603 if (N1.getNode()->hasOneUse())
9604 if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
9610 /// PerformVMULCombine
9611 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
9612 /// special multiplier accumulator forwarding.
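///   For example, (A + B) * C can be emitted as
///     vmul d3, d0, d2
///     vmla d3, d1, d2
///   which benefits from the multiplier-accumulator forwarding path, unlike the
///   naive vadd followed by vmul.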
9618 //   However, for (A + B) * (A + B), distributing is slower: it still needs the vadd, plus a vmul and a vmla, whereas the undistributed form needs only the vadd and a single vmul of the sum with itself.
9625 static SDValue PerformVMULCombine(SDNode *N,
9626 TargetLowering::DAGCombinerInfo &DCI,
9627 const ARMSubtarget *Subtarget) {
9628 if (!Subtarget->hasVMLxForwarding())
9631 SelectionDAG &DAG = DCI.DAG;
9632 SDValue N0 = N->getOperand(0);
9633 SDValue N1 = N->getOperand(1);
9634 unsigned Opcode = N0.getOpcode();
9635 if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
9636 Opcode != ISD::FADD && Opcode != ISD::FSUB) {
9637 Opcode = N1.getOpcode();
9638 if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
9639 Opcode != ISD::FADD && Opcode != ISD::FSUB)
9647 EVT VT = N->getValueType(0);
9649 SDValue N00 = N0->getOperand(0);
9650 SDValue N01 = N0->getOperand(1);
9651 return DAG.getNode(Opcode, DL, VT,
9652 DAG.getNode(ISD::MUL, DL, VT, N00, N1),
9653 DAG.getNode(ISD::MUL, DL, VT, N01, N1));
9656 static SDValue PerformMULCombine(SDNode *N,
9657 TargetLowering::DAGCombinerInfo &DCI,
9658 const ARMSubtarget *Subtarget) {
9659 SelectionDAG &DAG = DCI.DAG;
9661 if (Subtarget->isThumb1Only())
9664 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
9667 EVT VT = N->getValueType(0);
9668 if (VT.is64BitVector() || VT.is128BitVector())
9669 return PerformVMULCombine(N, DCI, Subtarget);
9673 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
9677 int64_t MulAmt = C->getSExtValue();
9678 unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
9680 ShiftAmt = ShiftAmt & (32 - 1);
9681 SDValue V = N->getOperand(0);
9685 MulAmt >>= ShiftAmt;
9688 if (isPowerOf2_32(MulAmt - 1)) {
9689 // (mul x, 2^N + 1) => (add (shl x, N), x)
9690 Res = DAG.getNode(ISD::ADD, DL, VT,
9692 DAG.getNode(ISD::SHL, DL, VT,
9694 DAG.getConstant(Log2_32(MulAmt - 1), DL,
9696 } else if (isPowerOf2_32(MulAmt + 1)) {
9697 // (mul x, 2^N - 1) => (sub (shl x, N), x)
9698 Res = DAG.getNode(ISD::SUB, DL, VT,
9699 DAG.getNode(ISD::SHL, DL, VT,
9701 DAG.getConstant(Log2_32(MulAmt + 1), DL,
9707 uint64_t MulAmtAbs = -MulAmt;
9708 if (isPowerOf2_32(MulAmtAbs + 1)) {
9709 // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
9710 Res = DAG.getNode(ISD::SUB, DL, VT,
9712 DAG.getNode(ISD::SHL, DL, VT,
9714 DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
9716 } else if (isPowerOf2_32(MulAmtAbs - 1)) {
9717 // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
9718 Res = DAG.getNode(ISD::ADD, DL, VT,
9720 DAG.getNode(ISD::SHL, DL, VT,
9722 DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
9724 Res = DAG.getNode(ISD::SUB, DL, VT,
9725 DAG.getConstant(0, DL, MVT::i32), Res);
9732 Res = DAG.getNode(ISD::SHL, DL, VT,
9733 Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));
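// For example, a multiply by 10 is decomposed with ShiftAmt = 1 and MulAmt = 5,
// giving (shl (add (shl x, 2), x), 1).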
9735 // Do not add new nodes to DAG combiner worklist.
9736 DCI.CombineTo(N, Res, false);
9740 static SDValue PerformANDCombine(SDNode *N,
9741 TargetLowering::DAGCombinerInfo &DCI,
9742 const ARMSubtarget *Subtarget) {
9744 // Attempt to use immediate-form VBIC
9745 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
9747 EVT VT = N->getValueType(0);
9748 SelectionDAG &DAG = DCI.DAG;
9750 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
9753 APInt SplatBits, SplatUndef;
9754 unsigned SplatBitSize;
9757 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
9758 if (SplatBitSize <= 64) {
9760 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
9761 SplatUndef.getZExtValue(), SplatBitSize,
9762 DAG, dl, VbicVT, VT.is128BitVector(),
9764 if (Val.getNode()) {
9766 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
9767 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
9768 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
9773 if (!Subtarget->isThumb1Only()) {
9774 // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
9775 if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
9782 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR
9783 static SDValue PerformORCombine(SDNode *N,
9784 TargetLowering::DAGCombinerInfo &DCI,
9785 const ARMSubtarget *Subtarget) {
9786 // Attempt to use immediate-form VORR
9787 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
9789 EVT VT = N->getValueType(0);
9790 SelectionDAG &DAG = DCI.DAG;
9792 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
9795 APInt SplatBits, SplatUndef;
9796 unsigned SplatBitSize;
9798 if (BVN && Subtarget->hasNEON() &&
9799 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
9800 if (SplatBitSize <= 64) {
9802 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
9803 SplatUndef.getZExtValue(), SplatBitSize,
9804 DAG, dl, VorrVT, VT.is128BitVector(),
9806 if (Val.getNode()) {
9808 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
9809 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
9810 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
9815 if (!Subtarget->isThumb1Only()) {
9816 // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
9817 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
9821 // The code below optimizes (or (and X, Y), Z).
9822 // The AND operand needs to have a single user to make these optimizations profitable.
9824 SDValue N0 = N->getOperand(0);
9825 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
9827 SDValue N1 = N->getOperand(1);
9829 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
9830 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
9831 DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
9833 unsigned SplatBitSize;
9836 APInt SplatBits0, SplatBits1;
9837 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
9838 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
9839 // Ensure that the second operands of both ands are constants
9840 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
9841 HasAnyUndefs) && !HasAnyUndefs) {
9842 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
9843 HasAnyUndefs) && !HasAnyUndefs) {
9844 // Ensure that the bit widths of the constants are the same and that
9845 // the splat arguments are logical inverses as per the pattern we
9846 // are trying to simplify.
9847 if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
9848 SplatBits0 == ~SplatBits1) {
9849 // Canonicalize the vector type to make instruction selection simpler.
9851 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
9852 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
9856 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
9862 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when reasonable.
9865 // BFI is only available on V6T2+
9866 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
9870 // 1) or (and A, mask), val => ARMbfi A, val, mask
9871 // iff (val & mask) == val
9873 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
9874 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
9875 // && mask == ~mask2
9876 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
9877 // && ~mask == mask2
9878 // (i.e., copy a bitfield value into another bitfield of the same width)
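// For example (case 2a), or (and A, 0xffff00ff), (and B, 0x0000ff00) becomes
// ARMbfi A, (lsr B, 8), 0xffff00ff, i.e. bits 8..15 of B are copied into
// bits 8..15 of A.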
9883 SDValue N00 = N0.getOperand(0);
9885 // The value and the mask need to be constants so we can verify this is
9886 // actually a bitfield set. If the mask is 0xffff, we can do better
9887 // via a movt instruction, so don't use BFI in that case.
9888 SDValue MaskOp = N0.getOperand(1);
9889 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
9892 unsigned Mask = MaskC->getZExtValue();
9896 // Case (1): or (and A, mask), val => ARMbfi A, val, mask
9897 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
9899 unsigned Val = N1C->getZExtValue();
9900 if ((Val & ~Mask) != Val)
9903 if (ARM::isBitFieldInvertedMask(Mask)) {
9904 Val >>= countTrailingZeros(~Mask);
9906 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
9907 DAG.getConstant(Val, DL, MVT::i32),
9908 DAG.getConstant(Mask, DL, MVT::i32));
9910 // Do not add new nodes to DAG combiner worklist.
9911 DCI.CombineTo(N, Res, false);
9914 } else if (N1.getOpcode() == ISD::AND) {
9915 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
9916 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
9919 unsigned Mask2 = N11C->getZExtValue();
9921 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern
9923 if (ARM::isBitFieldInvertedMask(Mask) &&
9925 // The pack halfword instruction works better for masks that fit it,
9926 // so use that when it's available.
9927 if (Subtarget->hasT2ExtractPack() &&
9928 (Mask == 0xffff || Mask == 0xffff0000))
9931 unsigned amt = countTrailingZeros(Mask2);
9932 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
9933 DAG.getConstant(amt, DL, MVT::i32));
9934 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
9935 DAG.getConstant(Mask, DL, MVT::i32));
9936 // Do not add new nodes to DAG combiner worklist.
9937 DCI.CombineTo(N, Res, false);
9939 } else if (ARM::isBitFieldInvertedMask(~Mask) &&
9941 // The pack halfword instruction works better for masks that fit it,
9942 // so use that when it's available.
9943 if (Subtarget->hasT2ExtractPack() &&
9944 (Mask2 == 0xffff || Mask2 == 0xffff0000))
9947 unsigned lsb = countTrailingZeros(Mask);
9948 Res = DAG.getNode(ISD::SRL, DL, VT, N00,
9949 DAG.getConstant(lsb, DL, MVT::i32));
9950 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
9951 DAG.getConstant(Mask2, DL, MVT::i32));
9952 // Do not add new nodes to DAG combiner worklist.
9953 DCI.CombineTo(N, Res, false);
9958 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
9959 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
9960 ARM::isBitFieldInvertedMask(~Mask)) {
9961 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
9962 // where lsb(mask) == #shamt and masked bits of B are known zero.
9963 SDValue ShAmt = N00.getOperand(1);
9964 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
9965 unsigned LSB = countTrailingZeros(Mask);
9969 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
9970 DAG.getConstant(~Mask, DL, MVT::i32));
9972 // Do not add new nodes to DAG combiner worklist.
9973 DCI.CombineTo(N, Res, false);
9979 static SDValue PerformXORCombine(SDNode *N,
9980 TargetLowering::DAGCombinerInfo &DCI,
9981 const ARMSubtarget *Subtarget) {
9982 EVT VT = N->getValueType(0);
9983 SelectionDAG &DAG = DCI.DAG;
9985 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
9988 if (!Subtarget->isThumb1Only()) {
9989 // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
9990 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
9997 // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it,
9998 // and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and
9999 // their position in "to" (Rd).
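// For example, for a BFI whose mask operand is 0xffff00ff (so the inserted field
// occupies bits 8..15), ToMask becomes 0x0000ff00 and FromMask becomes 0x000000ff,
// before any SRL adjustment below.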
10000 static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
10001 assert(N->getOpcode() == ARMISD::BFI);
10003 SDValue From = N->getOperand(1);
10004 ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
10005 FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());
10007 // If the Base came from a SHR #C, we can deduce that it is really testing bit
10008 // #C in the base of the SHR.
10009 if (From->getOpcode() == ISD::SRL &&
10010 isa<ConstantSDNode>(From->getOperand(1))) {
10011 APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
10012 assert(Shift.getLimitedValue() < 32 && "Shift too large!");
10013 FromMask <<= Shift.getLimitedValue(31);
10014 From = From->getOperand(0);
10020 // If A and B contain one contiguous set of bits, does A | B == A + B?
10022 // Neither A nor B may be zero.
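//
// For example, A = 0b1100 and B = 0b0011 concatenate properly (A | B == A + B),
// while A = 0b1100 and B = 0b0001 do not.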
10023 static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
10024 unsigned LastActiveBitInA = A.countTrailingZeros();
10025 unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
10026 return LastActiveBitInA - 1 == FirstActiveBitInB;
10029 static SDValue FindBFIToCombineWith(SDNode *N) {
10030 // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can combine with, if one exists.
10032 APInt ToMask, FromMask;
10033 SDValue From = ParseBFI(N, ToMask, FromMask);
10034 SDValue To = N->getOperand(0);
10036 // Now check for a compatible BFI to merge with. We can pass through BFIs that
10037 // aren't compatible, but not if they set the same bit in their destination as
10038 // we do (or that of any BFI we're going to combine with).
10040 APInt CombinedToMask = ToMask;
10041 while (V.getOpcode() == ARMISD::BFI) {
10042 APInt NewToMask, NewFromMask;
10043 SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
10044 if (NewFrom != From) {
10045 // This BFI has a different base. Keep going.
10046 CombinedToMask |= NewToMask;
10047 V = V.getOperand(0);
10051 // Do the written bits conflict with any we've seen so far?
10052 if ((NewToMask & CombinedToMask).getBoolValue())
10053 // Conflicting bits - bail out because going further is unsafe.
10056 // Are the new bits contiguous when combined with the old bits?
10057 if (BitsProperlyConcatenate(ToMask, NewToMask) &&
10058 BitsProperlyConcatenate(FromMask, NewFromMask))
10060 if (BitsProperlyConcatenate(NewToMask, ToMask) &&
10061 BitsProperlyConcatenate(NewFromMask, FromMask))
10064 // We've seen a write to some bits, so track it.
10065 CombinedToMask |= NewToMask;
10067 V = V.getOperand(0);
10073 static SDValue PerformBFICombine(SDNode *N,
10074 TargetLowering::DAGCombinerInfo &DCI) {
10075 SDValue N1 = N->getOperand(1);
10076 if (N1.getOpcode() == ISD::AND) {
10077 // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
10078 // the bits being cleared by the AND are not demanded by the BFI.
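// For example, (bfi A, (and B, 0x000000ff), 0xffff00ff) only inserts the low
// 8 bits of its second operand, so the AND with 0xff is redundant and can be
// dropped.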
10079 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
10082 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
10083 unsigned LSB = countTrailingZeros(~InvMask);
10084 unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
10086 assert(Width < static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
10087 "undefined behavior");
10088 unsigned Mask = (1u << Width) - 1;
10089 unsigned Mask2 = N11C->getZExtValue();
10090 if ((Mask & (~Mask2)) == 0)
10091 return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
10092 N->getOperand(0), N1.getOperand(0),
10094 } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
10095 // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
10096 // Keep track of any consecutive bits set that all come from the same base
10097 // value. We can combine these together into a single BFI.
10098 SDValue CombineBFI = FindBFIToCombineWith(N);
10099 if (CombineBFI == SDValue())
10102 // We've found a BFI.
10103 APInt ToMask1, FromMask1;
10104 SDValue From1 = ParseBFI(N, ToMask1, FromMask1);
10106 APInt ToMask2, FromMask2;
10107 SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
10108 assert(From1 == From2);
10111 // First, unlink CombineBFI.
10112 DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
10113 // Then create a new BFI, combining the two together.
10114 APInt NewFromMask = FromMask1 | FromMask2;
10115 APInt NewToMask = ToMask1 | ToMask2;
10117 EVT VT = N->getValueType(0);
10120 if (NewFromMask[0] == 0)
10121 From1 = DCI.DAG.getNode(
10122 ISD::SRL, dl, VT, From1,
10123 DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
10124 return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
10125 DCI.DAG.getConstant(~NewToMask, dl, VT));
10130 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for
10131 /// ARMISD::VMOVRRD.
10132 static SDValue PerformVMOVRRDCombine(SDNode *N,
10133 TargetLowering::DAGCombinerInfo &DCI,
10134 const ARMSubtarget *Subtarget) {
10135 // vmovrrd(vmovdrr x, y) -> x,y
10136 SDValue InDouble = N->getOperand(0);
10137 if (InDouble.getOpcode() == ARMISD::VMOVDRR && !Subtarget->isFPOnlySP())
10138 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
10140 // vmovrrd(load f64) -> (load i32), (load i32)
10141 SDNode *InNode = InDouble.getNode();
10142 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
10143 InNode->getValueType(0) == MVT::f64 &&
10144 InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
10145 !cast<LoadSDNode>(InNode)->isVolatile()) {
10146 // TODO: Should this be done for non-FrameIndex operands?
10147 LoadSDNode *LD = cast<LoadSDNode>(InNode);
10149 SelectionDAG &DAG = DCI.DAG;
10151 SDValue BasePtr = LD->getBasePtr();
10153 DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
10154 LD->getAlignment(), LD->getMemOperand()->getFlags());
10156 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
10157 DAG.getConstant(4, DL, MVT::i32));
10158 SDValue NewLD2 = DAG.getLoad(
10159 MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, LD->getPointerInfo(),
10160 std::min(4U, LD->getAlignment() / 2), LD->getMemOperand()->getFlags());
10162 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
10163 if (DCI.DAG.getDataLayout().isBigEndian())
10164 std::swap (NewLD1, NewLD2);
10165 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
10172 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for
10173 /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands.
10174 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
10175 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
10176 SDValue Op0 = N->getOperand(0);
10177 SDValue Op1 = N->getOperand(1);
10178 if (Op0.getOpcode() == ISD::BITCAST)
10179 Op0 = Op0.getOperand(0);
10180 if (Op1.getOpcode() == ISD::BITCAST)
10181 Op1 = Op1.getOperand(0);
10182 if (Op0.getOpcode() == ARMISD::VMOVRRD &&
10183 Op0.getNode() == Op1.getNode() &&
10184 Op0.getResNo() == 0 && Op1.getResNo() == 1)
10185 return DAG.getNode(ISD::BITCAST, SDLoc(N),
10186 N->getValueType(0), Op0.getOperand(0));
10190 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
10191 /// are normal, non-volatile loads. If so, it is profitable to bitcast an
10192 /// i64 vector to have f64 elements, since the value can then be loaded
10193 /// directly into a VFP register.
10194 static bool hasNormalLoadOperand(SDNode *N) {
10195 unsigned NumElts = N->getValueType(0).getVectorNumElements();
10196 for (unsigned i = 0; i < NumElts; ++i) {
10197 SDNode *Elt = N->getOperand(i).getNode();
10198 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
10204 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
10205 /// ISD::BUILD_VECTOR.
10206 static SDValue PerformBUILD_VECTORCombine(SDNode *N,
10207 TargetLowering::DAGCombinerInfo &DCI,
10208 const ARMSubtarget *Subtarget) {
10209 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
10210 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value
10211 // into a pair of GPRs, which is fine when the value is used as a scalar,
10212 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
10213 SelectionDAG &DAG = DCI.DAG;
10214 if (N->getNumOperands() == 2)
10215 if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
10218 // Load i64 elements as f64 values so that type legalization does not split
10219 // them up into i32 values.
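// For example, (build_vector (load i64 %p), (load i64 %q)) : v2i64 is rebuilt
// as a bitcast of a v2f64 build_vector of f64 loads, so each element can go
// straight into a VFP/NEON register.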
10220 EVT VT = N->getValueType(0);
10221 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
10224 SmallVector<SDValue, 8> Ops;
10225 unsigned NumElts = VT.getVectorNumElements();
10226 for (unsigned i = 0; i < NumElts; ++i) {
10227 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
10229 // Make the DAGCombiner fold the bitcast.
10230 DCI.AddToWorklist(V.getNode());
10232 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
10233 SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
10234 return DAG.getNode(ISD::BITCAST, dl, VT, BV);
10237 /// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
10239 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
10240 // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
10241 // At that time, we may have inserted bitcasts from integer to float.
10242 // If these bitcasts have survived DAGCombine, change the lowering of this
10243 // BUILD_VECTOR in something more vector friendly, i.e., that does not
10244 // force to use floating point types.
10246 // Make sure we can change the type of the vector.
10247 // This is possible iff:
10248 // 1. The vector is only used in a bitcast to an integer type. I.e.,
10249 // 1.1. Vector is used only once.
10250 // 1.2. Use is a bit convert to an integer type.
10251 // 2. The size of its operands is 32 bits (64 bits are not legal).
10252 EVT VT = N->getValueType(0);
10253 EVT EltVT = VT.getVectorElementType();
10255 // Check 1.1. and 2.
10256 if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
10259 // By construction, the input type must be float.
10260 assert(EltVT == MVT::f32 && "Unexpected type!");
10263 SDNode *Use = *N->use_begin();
10264 if (Use->getOpcode() != ISD::BITCAST ||
10265 Use->getValueType(0).isFloatingPoint())
10268 // Check profitability.
10269 // Model is, if more than half of the relevant operands are bitcast from
10270 // i32, turn the build_vector into a sequence of insert_vector_elt.
10271 // Relevant operands are everything that is not statically
10272 // (i.e., at compile time) bitcasted.
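// For example, in a v4f32 ARMISD::BUILD_VECTOR where three operands are
// (bitcast i32 ...) and the fourth is a constant, there are 3 relevant
// operands and 3 bitcast operands, so the rewrite below is considered
// profitable.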
10273 unsigned NumOfBitCastedElts = 0;
10274 unsigned NumElts = VT.getVectorNumElements();
10275 unsigned NumOfRelevantElts = NumElts;
10276 for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
10277 SDValue Elt = N->getOperand(Idx);
10278 if (Elt->getOpcode() == ISD::BITCAST) {
10279 // Assume only bit cast to i32 will go away.
10280 if (Elt->getOperand(0).getValueType() == MVT::i32)
10281 ++NumOfBitCastedElts;
10282 } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
10283 // Constants are statically casted, thus do not count them as
10284 // relevant operands.
10285 --NumOfRelevantElts;
10288 // Check if more than half of the elements require a non-free bitcast.
10289 if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
10292 SelectionDAG &DAG = DCI.DAG;
10293 // Create the new vector type.
10294 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
10295 // Check if the type is legal.
10296 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10297 if (!TLI.isTypeLegal(VecVT))
10301 // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
10302 // => BITCAST INSERT_VECTOR_ELT
10303 // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
10304 // (BITCAST EN), N.
10305 SDValue Vec = DAG.getUNDEF(VecVT);
10307 for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
10308 SDValue V = N->getOperand(Idx);
10311 if (V.getOpcode() == ISD::BITCAST &&
10312 V->getOperand(0).getValueType() == MVT::i32)
10313 // Fold obvious case.
10314 V = V.getOperand(0);
10316 V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
10317 // Make the DAGCombiner fold the bitcasts.
10318 DCI.AddToWorklist(V.getNode());
10320 SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
10321 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
10323 Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
10324 // Make the DAGCombiner fold the bitcasts.
10325 DCI.AddToWorklist(Vec.getNode());
10329 /// PerformInsertEltCombine - Target-specific dag combine xforms for
10330 /// ISD::INSERT_VECTOR_ELT.
10331 static SDValue PerformInsertEltCombine(SDNode *N,
10332 TargetLowering::DAGCombinerInfo &DCI) {
10333 // Bitcast an i64 load inserted into a vector to f64.
10334 // Otherwise, the i64 value will be legalized to a pair of i32 values.
10335 EVT VT = N->getValueType(0);
10336 SDNode *Elt = N->getOperand(1).getNode();
10337 if (VT.getVectorElementType() != MVT::i64 ||
10338 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
10341 SelectionDAG &DAG = DCI.DAG;
10343 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
10344 VT.getVectorNumElements());
10345 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
10346 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
10347 // Make the DAGCombiner fold the bitcasts.
10348 DCI.AddToWorklist(Vec.getNode());
10349 DCI.AddToWorklist(V.getNode());
10350 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
10351 Vec, V, N->getOperand(2));
10352 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
10355 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
10356 /// ISD::VECTOR_SHUFFLE.
10357 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
10358 // The LLVM shufflevector instruction does not require the shuffle mask
10359 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
10360 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the
10361 // operands do not match the mask length, they are extended by concatenating
10362 // them with undef vectors. That is probably the right thing for other
10363 // targets, but for NEON it is better to concatenate two double-register
10364 // size vector operands into a single quad-register size vector. Do that
10365 // transformation here:
10366 // shuffle(concat(v1, undef), concat(v2, undef)) ->
10367 // shuffle(concat(v1, v2), undef)
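// For example, a v4i16 shuffle of two concat(v2i16, undef) operands with mask
// <0, 1, 4, 5> becomes a shuffle of concat(v1, v2) and undef with mask <0, 1, 2, 3>.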
10368 SDValue Op0 = N->getOperand(0);
10369 SDValue Op1 = N->getOperand(1);
10370 if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
10371 Op1.getOpcode() != ISD::CONCAT_VECTORS ||
10372 Op0.getNumOperands() != 2 ||
10373 Op1.getNumOperands() != 2)
10375 SDValue Concat0Op1 = Op0.getOperand(1);
10376 SDValue Concat1Op1 = Op1.getOperand(1);
10377 if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
10379 // Skip the transformation if any of the types are illegal.
10380 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10381 EVT VT = N->getValueType(0);
10382 if (!TLI.isTypeLegal(VT) ||
10383 !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
10384 !TLI.isTypeLegal(Concat1Op1.getValueType()))
10387 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
10388 Op0.getOperand(0), Op1.getOperand(0));
10389 // Translate the shuffle mask.
10390 SmallVector<int, 16> NewMask;
10391 unsigned NumElts = VT.getVectorNumElements();
10392 unsigned HalfElts = NumElts/2;
10393 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
10394 for (unsigned n = 0; n < NumElts; ++n) {
10395 int MaskElt = SVN->getMaskElt(n);
10397 if (MaskElt < (int)HalfElts)
10399 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
10400 NewElt = HalfElts + MaskElt - NumElts;
10401 NewMask.push_back(NewElt);
10403 return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
10404 DAG.getUNDEF(VT), NewMask);
10407 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
10408 /// NEON load/store intrinsics, and generic vector load/stores, to merge
10409 /// base address updates.
10410 /// For generic load/stores, the memory type is assumed to be a vector.
10411 /// The caller is assumed to have checked legality.
10412 static SDValue CombineBaseUpdate(SDNode *N,
10413 TargetLowering::DAGCombinerInfo &DCI) {
10414 SelectionDAG &DAG = DCI.DAG;
10415 const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
10416 N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
10417 const bool isStore = N->getOpcode() == ISD::STORE;
10418 const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
10419 SDValue Addr = N->getOperand(AddrOpIdx);
10420 MemSDNode *MemN = cast<MemSDNode>(N);
10423 // Search for a use of the address operand that is an increment.
10424 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
10425 UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
10426 SDNode *User = *UI;
10427 if (User->getOpcode() != ISD::ADD ||
10428 UI.getUse().getResNo() != Addr.getResNo())
10431 // Check that the add is independent of the load/store. Otherwise, folding
10432 // it would create a cycle.
10433 if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
10436 // Find the new opcode for the updating load/store.
10437 bool isLoadOp = true;
10438 bool isLaneOp = false;
10439 unsigned NewOpc = 0;
10440 unsigned NumVecs = 0;
10442 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
10444 default: llvm_unreachable("unexpected intrinsic for Neon base update");
10445 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD;
10446 NumVecs = 1; break;
10447 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD;
10448 NumVecs = 2; break;
10449 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD;
10450 NumVecs = 3; break;
10451 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD;
10452 NumVecs = 4; break;
10453 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
10454 NumVecs = 2; isLaneOp = true; break;
10455 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
10456 NumVecs = 3; isLaneOp = true; break;
10457 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
10458 NumVecs = 4; isLaneOp = true; break;
10459 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD;
10460 NumVecs = 1; isLoadOp = false; break;
10461 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD;
10462 NumVecs = 2; isLoadOp = false; break;
10463 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD;
10464 NumVecs = 3; isLoadOp = false; break;
10465 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD;
10466 NumVecs = 4; isLoadOp = false; break;
10467 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
10468 NumVecs = 2; isLoadOp = false; isLaneOp = true; break;
10469 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
10470 NumVecs = 3; isLoadOp = false; isLaneOp = true; break;
10471 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
10472 NumVecs = 4; isLoadOp = false; isLaneOp = true; break;
10476 switch (N->getOpcode()) {
10477 default: llvm_unreachable("unexpected opcode for Neon base update");
10478 case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break;
10479 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
10480 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
10481 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
10482 case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD;
10483 NumVecs = 1; isLaneOp = false; break;
10484 case ISD::STORE: NewOpc = ARMISD::VST1_UPD;
10485 NumVecs = 1; isLaneOp = false; isLoadOp = false; break;
10489 // Find the size of memory referenced by the load/store.
10492 VecTy = N->getValueType(0);
10493 } else if (isIntrinsic) {
10494 VecTy = N->getOperand(AddrOpIdx+1).getValueType();
10496 assert(isStore && "Node has to be a load, a store, or an intrinsic!");
10497 VecTy = N->getOperand(1).getValueType();
10500 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
10502 NumBytes /= VecTy.getVectorNumElements();
10504 // If the increment is a constant, it must match the memory ref size.
10505 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
10506 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
10507 uint64_t IncVal = CInc->getZExtValue();
10508 if (IncVal != NumBytes)
10510 } else if (NumBytes >= 3 * 16) {
10511 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
10512 // separate instructions that make it harder to use a non-constant update.
10516 // OK, we found an ADD we can fold into the base update.
10517 // Now, create a _UPD node, taking care of not breaking alignment.
10519 EVT AlignedVecTy = VecTy;
10520 unsigned Alignment = MemN->getAlignment();
10522 // If this is a less-than-standard-aligned load/store, change the type to
10523 // match the standard alignment.
10524 // The alignment is overlooked when selecting _UPD variants; and it's
10525 // easier to introduce bitcasts here than fix that.
10526 // There are 3 ways to get to this base-update combine:
10527 // - intrinsics: they are assumed to be properly aligned (to the standard
10528 // alignment of the memory type), so we don't need to do anything.
10529 // - ARMISD::VLDx nodes: they are only generated from the aforementioned
10530 // intrinsics, so, likewise, there's nothing to do.
10531 // - generic load/store instructions: the alignment is specified as an
10532 // explicit operand, rather than implicitly as the standard alignment
10533 // of the memory type (like the intrinsics). We need to change the
10534 // memory type to match the explicit alignment. That way, we don't
10535 // generate non-standard-aligned ARMISD::VLDx nodes.
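// For example, a 1-byte-aligned v4i16 load handled here is rewritten as a
// v8i8 VLD1_UPD and the loaded value is bitcast back to v4i16 further down.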
10536 if (isa<LSBaseSDNode>(N)) {
10537 if (Alignment == 0)
10539 if (Alignment < VecTy.getScalarSizeInBits() / 8) {
10540 MVT EltTy = MVT::getIntegerVT(Alignment * 8);
10541 assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
10542 assert(!isLaneOp && "Unexpected generic load/store lane.");
10543 unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
10544 AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
10546 // Don't set an explicit alignment on regular load/stores that we want
10547 // to transform to VLD/VST 1_UPD nodes.
10548 // This matches the behavior of regular load/stores, which only get an
10549 // explicit alignment if the MMO alignment is larger than the standard
10550 // alignment of the memory type.
10551 // Intrinsics, however, always get an explicit alignment, set to the
10552 // alignment of the MMO.
10556 // Create the new updating load/store node.
10557 // First, create an SDVTList for the new updating node's results.
10559 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
10561 for (n = 0; n < NumResultVecs; ++n)
10562 Tys[n] = AlignedVecTy;
10563 Tys[n++] = MVT::i32;
10564 Tys[n] = MVT::Other;
10565 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));
10567 // Then, gather the new node's operands.
10568 SmallVector<SDValue, 8> Ops;
10569 Ops.push_back(N->getOperand(0)); // incoming chain
10570 Ops.push_back(N->getOperand(AddrOpIdx));
10571 Ops.push_back(Inc);
10573 if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
10574 // Try to match the intrinsic's signature
10575 Ops.push_back(StN->getValue());
10577 // Loads (and of course intrinsics) match the intrinsics' signature,
10578 // so just add all but the alignment operand.
10579 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i)
10580 Ops.push_back(N->getOperand(i));
10583 // For all node types, the alignment operand is always the last one.
10584 Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32));
10586 // If this is a non-standard-aligned STORE, the penultimate operand is the
10587 // stored value. Bitcast it to the aligned type.
10588 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
10589 SDValue &StVal = Ops[Ops.size()-2];
10590 StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal);
10593 EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy;
10594 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT,
10595 MemN->getMemOperand());
10597 // Update the uses.
10598 SmallVector<SDValue, 5> NewResults;
10599 for (unsigned i = 0; i < NumResultVecs; ++i)
10600 NewResults.push_back(SDValue(UpdN.getNode(), i));
10602 // If this is a non-standard-aligned LOAD, the first result is the loaded
10603 // value. Bitcast it to the expected result type.
10604 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
10605 SDValue &LdVal = NewResults[0];
10606 LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal);
10609 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
10610 DCI.CombineTo(N, NewResults);
10611 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
10618 static SDValue PerformVLDCombine(SDNode *N,
10619 TargetLowering::DAGCombinerInfo &DCI) {
10620 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
10623 return CombineBaseUpdate(N, DCI);
10626 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
10627 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
10628 /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and return true.
10630 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
10631 SelectionDAG &DAG = DCI.DAG;
10632 EVT VT = N->getValueType(0);
10633 // vldN-dup instructions only support 64-bit vectors for N > 1.
10634 if (!VT.is64BitVector())
10637 // Check if the VDUPLANE operand is a vldN-dup intrinsic.
10638 SDNode *VLD = N->getOperand(0).getNode();
10639 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
10641 unsigned NumVecs = 0;
10642 unsigned NewOpc = 0;
10643 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
10644 if (IntNo == Intrinsic::arm_neon_vld2lane) {
10646 NewOpc = ARMISD::VLD2DUP;
10647 } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
10649 NewOpc = ARMISD::VLD3DUP;
10650 } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
10652 NewOpc = ARMISD::VLD4DUP;
10657 // First check that all the vldN-lane uses are VDUPLANEs and that the lane
10658 // numbers match the load.
10659 unsigned VLDLaneNo =
10660 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
10661 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
10663 // Ignore uses of the chain result.
10664 if (UI.getUse().getResNo() == NumVecs)
10666 SDNode *User = *UI;
10667 if (User->getOpcode() != ARMISD::VDUPLANE ||
10668 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
10672 // Create the vldN-dup node.
10675 for (n = 0; n < NumVecs; ++n)
Tys[n] = VT;
10677 Tys[n] = MVT::Other;
10678 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1));
10679 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
10680 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
10681 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
10682 Ops, VLDMemInt->getMemoryVT(),
10683 VLDMemInt->getMemOperand());
10685 // Update the uses.
10686 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
10688 unsigned ResNo = UI.getUse().getResNo();
10689 // Ignore uses of the chain result.
10690 if (ResNo == NumVecs)
10692 SDNode *User = *UI;
10693 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
10696 // Now the vldN-lane intrinsic is dead except for its chain result.
10697 // Update uses of the chain.
10698 std::vector<SDValue> VLDDupResults;
10699 for (unsigned n = 0; n < NumVecs; ++n)
10700 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
10701 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
10702 DCI.CombineTo(VLD, VLDDupResults);
10707 /// PerformVDUPLANECombine - Target-specific dag combine xforms for
10708 /// ARMISD::VDUPLANE.
10709 static SDValue PerformVDUPLANECombine(SDNode *N,
10710 TargetLowering::DAGCombinerInfo &DCI) {
10711 SDValue Op = N->getOperand(0);
10713 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
10714 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
10715 if (CombineVLDDUP(N, DCI))
10716 return SDValue(N, 0);
10718 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
10719 // redundant. Ignore bit_converts for now; element sizes are checked below.
10720 while (Op.getOpcode() == ISD::BITCAST)
10721 Op = Op.getOperand(0);
10722 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
10725 // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
10726 unsigned EltSize = Op.getScalarValueSizeInBits();
10727 // The canonical VMOV for a zero vector uses a 32-bit element size.
10728 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10730 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
10732 EVT VT = N->getValueType(0);
10733 if (EltSize > VT.getScalarSizeInBits())
10736 return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
10739 /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
10740 static SDValue PerformVDUPCombine(SDNode *N,
10741 TargetLowering::DAGCombinerInfo &DCI) {
10742 SelectionDAG &DAG = DCI.DAG;
10743 SDValue Op = N->getOperand(0);
10745 // Match VDUP(LOAD) -> VLD1DUP.
10746 // We match this pattern here rather than waiting for isel because the
10747 // transform is only legal for unindexed loads.
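// For example, (ARMISD::VDUP (load i16, i16* %p)) : v4i16 becomes a single
// VLD1DUP memory node that loads the halfword once and splats it to all lanes.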
10748 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
10749 if (LD && Op.hasOneUse() && LD->isUnindexed() &&
10750 LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
10751 SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1),
10752 DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) };
10753 SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
10754 SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys,
10755 Ops, LD->getMemoryVT(),
10756 LD->getMemOperand());
10757 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1));
10764 static SDValue PerformLOADCombine(SDNode *N,
10765 TargetLowering::DAGCombinerInfo &DCI) {
10766 EVT VT = N->getValueType(0);
10768 // If this is a legal vector load, try to combine it into a VLD1_UPD.
10769 if (ISD::isNormalLoad(N) && VT.isVector() &&
10770 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
10771 return CombineBaseUpdate(N, DCI);
10776 /// PerformSTORECombine - Target-specific dag combine xforms for
10778 static SDValue PerformSTORECombine(SDNode *N,
10779 TargetLowering::DAGCombinerInfo &DCI) {
10780 StoreSDNode *St = cast<StoreSDNode>(N);
10781 if (St->isVolatile())
10784 // Optimize trunc store (of multiple scalars) to shuffle and store. First,
10785 // pack all of the elements in one place. Next, store to memory in fewer chunks.
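// For example, a truncating store of a v4i32 value as v4i8 shuffles the four
// bytes to the bottom of a v16i8 register and then emits a single i32 store.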
10787 SDValue StVal = St->getValue();
10788 EVT VT = StVal.getValueType();
10789 if (St->isTruncatingStore() && VT.isVector()) {
10790 SelectionDAG &DAG = DCI.DAG;
10791 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10792 EVT StVT = St->getMemoryVT();
10793 unsigned NumElems = VT.getVectorNumElements();
10794 assert(StVT != VT && "Cannot truncate to the same type");
10795 unsigned FromEltSz = VT.getScalarSizeInBits();
10796 unsigned ToEltSz = StVT.getScalarSizeInBits();
10798 // The From and To element sizes and the element count must be powers of two
10799 if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();
10801 // We are going to use the original vector elt for storing.
10802 // Accumulated smaller vector elements must be a multiple of the store size.
10803 if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue();
10805 unsigned SizeRatio = FromEltSz / ToEltSz;
10806 assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
10808 // Create a type on which we perform the shuffle.
10809 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
10810 NumElems*SizeRatio);
10811 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
10814 SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
10815 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
10816 for (unsigned i = 0; i < NumElems; ++i)
10817 ShuffleVec[i] = DAG.getDataLayout().isBigEndian()
10818 ? (i + 1) * SizeRatio - 1 : i * SizeRatio;
10821 // Can't shuffle using an illegal type.
10822 if (!TLI.isTypeLegal(WideVecVT)) return SDValue();
10824 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec,
10825 DAG.getUNDEF(WideVec.getValueType()),
10827 // At this point all of the data is stored at the bottom of the
10828 // register. We now need to save it to mem.
10830 // Find the largest store unit
10831 MVT StoreType = MVT::i8;
10832 for (MVT Tp : MVT::integer_valuetypes()) {
10833 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
10836 // Didn't find a legal store type.
10837 if (!TLI.isTypeLegal(StoreType))
10840 // Bitcast the original vector into a vector of store-size units
10841 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
10842 StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
10843 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
10844 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
10845 SmallVector<SDValue, 8> Chains;
10846 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
10847 TLI.getPointerTy(DAG.getDataLayout()));
10848 SDValue BasePtr = St->getBasePtr();
10850 // Perform one or more big stores into memory.
10851 unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits();
10852 for (unsigned I = 0; I < E; I++) {
10853 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
10854 StoreType, ShuffWide,
10855 DAG.getIntPtrConstant(I, DL));
10856 SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr,
10857 St->getPointerInfo(), St->getAlignment(),
10858 St->getMemOperand()->getFlags());
10859 BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
10861 Chains.push_back(Ch);
10863 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
10866 if (!ISD::isNormalStore(St))
10869 // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
10870 // ARM stores of arguments in the same cache line.
10871 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
10872 StVal.getNode()->hasOneUse()) {
10873 SelectionDAG &DAG = DCI.DAG;
10874 bool isBigEndian = DAG.getDataLayout().isBigEndian();
10876 SDValue BasePtr = St->getBasePtr();
10877 SDValue NewST1 = DAG.getStore(
10878 St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
10879 BasePtr, St->getPointerInfo(), St->getAlignment(),
10880 St->getMemOperand()->getFlags());
10882 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
10883 DAG.getConstant(4, DL, MVT::i32));
10884 return DAG.getStore(NewST1.getValue(0), DL,
10885 StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
10886 OffsetPtr, St->getPointerInfo(),
10887 std::min(4U, St->getAlignment() / 2),
10888 St->getMemOperand()->getFlags());
10891 if (StVal.getValueType() == MVT::i64 &&
10892 StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
10894 // Bitcast an i64 store extracted from a vector to f64.
10895 // Otherwise, the i64 value will be legalized to a pair of i32 values.
10896 SelectionDAG &DAG = DCI.DAG;
10898 SDValue IntVec = StVal.getOperand(0);
10899 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
10900 IntVec.getValueType().getVectorNumElements());
10901 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
10902 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
10903 Vec, StVal.getOperand(1));
10905 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
10906 // Make the DAGCombiner fold the bitcasts.
10907 DCI.AddToWorklist(Vec.getNode());
10908 DCI.AddToWorklist(ExtElt.getNode());
10909 DCI.AddToWorklist(V.getNode());
10910 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
10911 St->getPointerInfo(), St->getAlignment(),
10912 St->getMemOperand()->getFlags(), St->getAAInfo());
10915 // If this is a legal vector store, try to combine it into a VST1_UPD.
10916 if (ISD::isNormalStore(N) && VT.isVector() &&
10917 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
10918 return CombineBaseUpdate(N, DCI);
10923 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
10924 /// can replace combinations of VMUL and VCVT (floating-point to integer)
10925 /// when the VMUL has a constant operand that is a power of 2.
10927 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
10928 /// vmul.f32 d16, d17, d16
10929 /// vcvt.s32.f32 d16, d16
10931 /// vcvt.s32.f32 d16, d16, #3
10932 static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
10933 const ARMSubtarget *Subtarget) {
10934 if (!Subtarget->hasNEON())
10937 SDValue Op = N->getOperand(0);
10938 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
10939 Op.getOpcode() != ISD::FMUL)
10942 SDValue ConstVec = Op->getOperand(1);
10943 if (!isa<BuildVectorSDNode>(ConstVec))
10946 MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
10947 uint32_t FloatBits = FloatTy.getSizeInBits();
10948 MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
10949 uint32_t IntBits = IntTy.getSizeInBits();
10950 unsigned NumLanes = Op.getValueType().getVectorNumElements();
10951 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
10952 // These instructions only exist converting from f32 to i32. We can handle
10953 // smaller integers by generating an extra truncate, but larger ones would
10954 // be lossy. We also can't handle more than 4 lanes, since these instructions
10955 // only support v2i32/v4i32 types.
10959 BitVector UndefElements;
10960 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
10961 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
10962 if (C == -1 || C == 0 || C > 32)
10966 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
10967 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
10968 Intrinsic::arm_neon_vcvtfp2fxu;
10969 SDValue FixConv = DAG.getNode(
10970 ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
10971 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
10972 DAG.getConstant(C, dl, MVT::i32));
10974 if (IntBits < FloatBits)
10975 FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);
10980 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
10981 /// can replace combinations of VCVT (integer to floating-point) and VDIV
10982 /// when the VDIV has a constant operand that is a power of 2.
10984 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
10985 /// vcvt.f32.s32 d16, d16
10986 /// vdiv.f32 d16, d17, d16
10988 /// vcvt.f32.s32 d16, d16, #3
10989 static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
10990 const ARMSubtarget *Subtarget) {
10991 if (!Subtarget->hasNEON())
10994 SDValue Op = N->getOperand(0);
10995 unsigned OpOpcode = Op.getNode()->getOpcode();
10996 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
10997 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
11000 SDValue ConstVec = N->getOperand(1);
11001 if (!isa<BuildVectorSDNode>(ConstVec))
11004 MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
11005 uint32_t FloatBits = FloatTy.getSizeInBits();
11006 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
11007 uint32_t IntBits = IntTy.getSizeInBits();
11008 unsigned NumLanes = Op.getValueType().getVectorNumElements();
11009 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
11010 // These instructions only exist converting from i32 to f32. We can handle
11011 // smaller integers by generating an extra extend, but larger ones would
11012 // be lossy. We also can't handle more than 4 lanes, since these instructions
11013 // only support v2i32/v4i32 types.
11017 BitVector UndefElements;
11018 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
11019 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
11020 if (C == -1 || C == 0 || C > 32)
11024 bool isSigned = OpOpcode == ISD::SINT_TO_FP;
11025 SDValue ConvInput = Op.getOperand(0);
11026 if (IntBits < FloatBits)
11027 ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
11028 dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
11031 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
11032 Intrinsic::arm_neon_vcvtfxu2fp;
11033 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
11035 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
11036 ConvInput, DAG.getConstant(C, dl, MVT::i32));
11039 /// getVShiftImm - Check if this is a valid build_vector for the immediate
11040 /// operand of a vector shift operation, where all the elements of the
11041 /// build_vector must have the same constant integer value.
11042 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
11043 // Ignore bit_converts.
11044 while (Op.getOpcode() == ISD::BITCAST)
11045 Op = Op.getOperand(0);
11046 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
11047 APInt SplatBits, SplatUndef;
11048 unsigned SplatBitSize;
11050 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
11051 HasAnyUndefs, ElementBits) ||
11052 SplatBitSize > ElementBits)
11054 Cnt = SplatBits.getSExtValue();
11058 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
11059 /// operand of a vector shift left operation. That value must be in the range:
11060 /// 0 <= Value < ElementBits for a left shift; or
11061 /// 0 <= Value <= ElementBits for a long left shift.
11062 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
11063 assert(VT.isVector() && "vector shift count is not a vector type");
11064 int64_t ElementBits = VT.getScalarSizeInBits();
11065 if (! getVShiftImm(Op, ElementBits, Cnt))
11067 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
11070 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
11071 /// operand of a vector shift right operation. For a shift opcode, the value
11072 /// is positive, but for an intrinsic the value must be negative. The
11073 /// absolute value must be in the range:
11074 /// 1 <= |Value| <= ElementBits for a right shift; or
11075 /// 1 <= |Value| <= ElementBits/2 for a narrow right shift.
11076 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
11078 assert(VT.isVector() && "vector shift count is not a vector type");
11079 int64_t ElementBits = VT.getScalarSizeInBits();
11080 if (! getVShiftImm(Op, ElementBits, Cnt))
11083 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
11084 if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) {
11091 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
11092 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
11093 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
11096 // Don't do anything for most intrinsics.
11099 // Vector shifts: check for immediate versions and lower them.
11100 // Note: This is done during DAG combining instead of DAG legalizing because
11101 // the build_vectors for 64-bit vector element shift counts are generally
11102 // not legal, and it is hard to see their values after they get legalized to
11103 // loads from a constant pool.
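// For example, int_arm_neon_vshifts on a v4i16 with a constant splat shift of
// 3 becomes (ARMISD::VSHL x, 3), while a splat of -3 becomes (ARMISD::VSHRs x, 3).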
11104 case Intrinsic::arm_neon_vshifts:
11105 case Intrinsic::arm_neon_vshiftu:
11106 case Intrinsic::arm_neon_vrshifts:
11107 case Intrinsic::arm_neon_vrshiftu:
11108 case Intrinsic::arm_neon_vrshiftn:
11109 case Intrinsic::arm_neon_vqshifts:
11110 case Intrinsic::arm_neon_vqshiftu:
11111 case Intrinsic::arm_neon_vqshiftsu:
11112 case Intrinsic::arm_neon_vqshiftns:
11113 case Intrinsic::arm_neon_vqshiftnu:
11114 case Intrinsic::arm_neon_vqshiftnsu:
11115 case Intrinsic::arm_neon_vqrshiftns:
11116 case Intrinsic::arm_neon_vqrshiftnu:
11117 case Intrinsic::arm_neon_vqrshiftnsu: {
11118 EVT VT = N->getOperand(1).getValueType();
11120 unsigned VShiftOpc = 0;
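// First pass: check that the shift amount operand is a legal immediate for
// this particular intrinsic (left vs. right, narrowing or not); the second
// switch further down then selects the matching ARMISD shift opcode.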
11123 case Intrinsic::arm_neon_vshifts:
11124 case Intrinsic::arm_neon_vshiftu:
11125 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
11126 VShiftOpc = ARMISD::VSHL;
11129 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
11130 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
11131 ARMISD::VSHRs : ARMISD::VSHRu);
11136 case Intrinsic::arm_neon_vrshifts:
11137 case Intrinsic::arm_neon_vrshiftu:
11138 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
11142 case Intrinsic::arm_neon_vqshifts:
11143 case Intrinsic::arm_neon_vqshiftu:
11144 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
11148 case Intrinsic::arm_neon_vqshiftsu:
11149 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
11151 llvm_unreachable("invalid shift count for vqshlu intrinsic");
11153 case Intrinsic::arm_neon_vrshiftn:
11154 case Intrinsic::arm_neon_vqshiftns:
11155 case Intrinsic::arm_neon_vqshiftnu:
11156 case Intrinsic::arm_neon_vqshiftnsu:
11157 case Intrinsic::arm_neon_vqrshiftns:
11158 case Intrinsic::arm_neon_vqrshiftnu:
11159 case Intrinsic::arm_neon_vqrshiftnsu:
11160 // Narrowing shifts require an immediate right shift.
11161 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
11163 llvm_unreachable("invalid shift count for narrowing vector shift "
11167 llvm_unreachable("unhandled vector shift");
11171 case Intrinsic::arm_neon_vshifts:
11172 case Intrinsic::arm_neon_vshiftu:
11173 // Opcode already set above.
11175 case Intrinsic::arm_neon_vrshifts:
11176 VShiftOpc = ARMISD::VRSHRs; break;
11177 case Intrinsic::arm_neon_vrshiftu:
11178 VShiftOpc = ARMISD::VRSHRu; break;
11179 case Intrinsic::arm_neon_vrshiftn:
11180 VShiftOpc = ARMISD::VRSHRN; break;
11181 case Intrinsic::arm_neon_vqshifts:
11182 VShiftOpc = ARMISD::VQSHLs; break;
11183 case Intrinsic::arm_neon_vqshiftu:
11184 VShiftOpc = ARMISD::VQSHLu; break;
11185 case Intrinsic::arm_neon_vqshiftsu:
11186 VShiftOpc = ARMISD::VQSHLsu; break;
11187 case Intrinsic::arm_neon_vqshiftns:
11188 VShiftOpc = ARMISD::VQSHRNs; break;
11189 case Intrinsic::arm_neon_vqshiftnu:
11190 VShiftOpc = ARMISD::VQSHRNu; break;
11191 case Intrinsic::arm_neon_vqshiftnsu:
11192 VShiftOpc = ARMISD::VQSHRNsu; break;
11193 case Intrinsic::arm_neon_vqrshiftns:
11194 VShiftOpc = ARMISD::VQRSHRNs; break;
11195 case Intrinsic::arm_neon_vqrshiftnu:
11196 VShiftOpc = ARMISD::VQRSHRNu; break;
11197 case Intrinsic::arm_neon_vqrshiftnsu:
11198 VShiftOpc = ARMISD::VQRSHRNsu; break;
11202 return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
11203 N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
11206 case Intrinsic::arm_neon_vshiftins: {
11207 EVT VT = N->getOperand(1).getValueType();
11209 unsigned VShiftOpc = 0;
11211 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
11212 VShiftOpc = ARMISD::VSLI;
11213 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
11214 VShiftOpc = ARMISD::VSRI;
11216 llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
11220 return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
11221 N->getOperand(1), N->getOperand(2),
11222 DAG.getConstant(Cnt, dl, MVT::i32));
11225 case Intrinsic::arm_neon_vqrshifts:
11226 case Intrinsic::arm_neon_vqrshiftu:
11227 // No immediate versions of these to check for.
11234 /// PerformShiftCombine - Checks for immediate versions of vector shifts and
11235 /// lowers them. As with the vector shift intrinsics, this is done during DAG
11236 /// combining instead of DAG legalizing because the build_vectors for 64-bit
11237 /// vector element shift counts are generally not legal, and it is hard to see
11238 /// their values after they get legalized to loads from a constant pool.
11239 static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
11240 const ARMSubtarget *ST) {
11241 EVT VT = N->getValueType(0);
11242 if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
11243 // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
11244 // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
11245 SDValue N1 = N->getOperand(1);
11246 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
11247 SDValue N0 = N->getOperand(0);
11248 if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
11249 DAG.MaskedValueIsZero(N0.getOperand(0),
11250 APInt::getHighBitsSet(32, 16)))
11251 return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
11255 // Nothing to be done for scalar shifts.
11256 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11257 if (!VT.isVector() || !TLI.isTypeLegal(VT))
11260 assert(ST->hasNEON() && "unexpected vector shift");
11263 switch (N->getOpcode()) {
11264 default: llvm_unreachable("unexpected shift opcode");
11267 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
11269 return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0),
11270 DAG.getConstant(Cnt, dl, MVT::i32));
11276 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
11277 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
11278 ARMISD::VSHRs : ARMISD::VSHRu);
11280 return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
11281 DAG.getConstant(Cnt, dl, MVT::i32));
11287 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
11288 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
11289 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
11290 const ARMSubtarget *ST) {
11291 SDValue N0 = N->getOperand(0);
11293 // Check for sign- and zero-extensions of vector extract operations of 8-
11294 // and 16-bit vector elements. NEON supports these directly. They are
11295 // handled during DAG combining because type legalization will promote them
11296 // to 32-bit types and it is messy to recognize the operations after that.
11297 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
11298 SDValue Vec = N0.getOperand(0);
11299 SDValue Lane = N0.getOperand(1);
11300 EVT VT = N->getValueType(0);
11301 EVT EltVT = N0.getValueType();
11302 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11304 if (VT == MVT::i32 &&
11305 (EltVT == MVT::i8 || EltVT == MVT::i16) &&
11306 TLI.isTypeLegal(Vec.getValueType()) &&
11307 isa<ConstantSDNode>(Lane)) {
11310 switch (N->getOpcode()) {
11311 default: llvm_unreachable("unexpected opcode");
11312 case ISD::SIGN_EXTEND:
11313 Opc = ARMISD::VGETLANEs;
11315 case ISD::ZERO_EXTEND:
11316 case ISD::ANY_EXTEND:
11317 Opc = ARMISD::VGETLANEu;
11320 return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
11327 static void computeKnownBits(SelectionDAG &DAG, SDValue Op, APInt &KnownZero,
11329 if (Op.getOpcode() == ARMISD::BFI) {
11330 // Conservatively, we can recurse down the first operand
11331 // and just mask out all affected bits.
11332 computeKnownBits(DAG, Op.getOperand(0), KnownZero, KnownOne);
11334 // The operand to BFI is already a mask suitable for removing the bits it
11335 // changes.
11336 ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
11337 const APInt &Mask = CI->getAPIntValue();
11342 if (Op.getOpcode() == ARMISD::CMOV) {
11343 APInt KZ2(KnownZero.getBitWidth(), 0);
11344 APInt KO2(KnownOne.getBitWidth(), 0);
11345 computeKnownBits(DAG, Op.getOperand(1), KnownZero, KnownOne);
11346 computeKnownBits(DAG, Op.getOperand(2), KZ2, KO2);
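// Only bits that are known in both arms of the CMOV can be known in the
// result, so only the intersection of the two sets survives.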
11352 return DAG.computeKnownBits(Op, KnownZero, KnownOne);
11355 SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const {
11356 // If we have a CMOV, OR and AND combination such as:
11357 //   if (x & CN)
11358 //     y |= CM;
11359 //
11360 // And:
11361 //   * CN is a single bit;
11362 //   * All bits covered by CM are known zero in y
11364 // Then we can convert this into a sequence of BFI instructions. This will
11365 // always be a win if CM is a single bit, will always be no worse than the
11366 // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
11367 // three bits (due to the extra IT instruction).
11369 SDValue Op0 = CMOV->getOperand(0);
11370 SDValue Op1 = CMOV->getOperand(1);
11371 auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
11372 auto CC = CCNode->getAPIntValue().getLimitedValue();
11373 SDValue CmpZ = CMOV->getOperand(4);
11375 // The compare must be against zero.
11376 if (!isNullConstant(CmpZ->getOperand(1)))
11379 assert(CmpZ->getOpcode() == ARMISD::CMPZ);
11380 SDValue And = CmpZ->getOperand(0);
11381 if (And->getOpcode() != ISD::AND)
11383 ConstantSDNode *AndC = dyn_cast<ConstantSDNode>(And->getOperand(1));
11384 if (!AndC || !AndC->getAPIntValue().isPowerOf2())
11386 SDValue X = And->getOperand(0);
11388 if (CC == ARMCC::EQ) {
11389 // We're performing an "equal to zero" compare. Swap the operands so we
11390 // canonicalize on a "not equal to zero" compare.
11391 std::swap(Op0, Op1);
11393 assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
11396 if (Op1->getOpcode() != ISD::OR)
11399 ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
11402 SDValue Y = Op1->getOperand(0);
11407 // Now, is it profitable to continue?
11408 APInt OrCI = OrC->getAPIntValue();
11409 unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
11410 if (OrCI.countPopulation() > Heuristic)
11413 // Lastly, can we determine that the bits defined by OrCI
11414 // are known to be zero in Y?
11415 APInt KnownZero, KnownOne;
11416 computeKnownBits(DAG, Y, KnownZero, KnownOne);
11417 if ((OrCI & KnownZero) != OrCI)
11420 // OK, we can do the combine.
11423 EVT VT = X.getValueType();
11424 unsigned BitInX = AndC->getAPIntValue().logBase2();
11427 // We must shift X first.
11428 X = DAG.getNode(ISD::SRL, dl, VT, X,
11429 DAG.getConstant(BitInX, dl, VT));
11432 for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
11433 BitInY < NumActiveBits; ++BitInY) {
11434 if (OrCI[BitInY] == 0)
11436 APInt Mask(VT.getSizeInBits(), 0);
11437 Mask.setBit(BitInY);
11438 V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
11439 // Confusingly, the operand is an *inverted* mask.
11440 DAG.getConstant(~Mask, dl, VT));
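// One BFI is emitted per set bit of OrCI; each one copies bit 0 of the
// (already shifted) X into position BitInY of the accumulated value V.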
11446 /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
11448 ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
11449 SDValue Cmp = N->getOperand(4);
11450 if (Cmp.getOpcode() != ARMISD::CMPZ)
11451 // Only looking at NE cases.
11454 EVT VT = N->getValueType(0);
11456 SDValue LHS = Cmp.getOperand(0);
11457 SDValue RHS = Cmp.getOperand(1);
11458 SDValue Chain = N->getOperand(0);
11459 SDValue BB = N->getOperand(1);
11460 SDValue ARMcc = N->getOperand(2);
11461 ARMCC::CondCodes CC =
11462 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
11464 // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
11465 // -> (brcond Chain BB CC CPSR Cmp)
11466 if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
11467 LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
11468 LHS->getOperand(0)->hasOneUse()) {
11469 auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
11470 auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
11471 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
11472 auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
11473 if ((LHS00C && LHS00C->getZExtValue() == 0) &&
11474 (LHS01C && LHS01C->getZExtValue() == 1) &&
11475 (LHS1C && LHS1C->getZExtValue() == 1) &&
11476 (RHSC && RHSC->getZExtValue() == 0)) {
11477 return DAG.getNode(
11478 ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
11479 LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
11486 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
11488 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
11489 SDValue Cmp = N->getOperand(4);
11490 if (Cmp.getOpcode() != ARMISD::CMPZ)
11491 // Only looking at EQ and NE cases.
11494 EVT VT = N->getValueType(0);
11496 SDValue LHS = Cmp.getOperand(0);
11497 SDValue RHS = Cmp.getOperand(1);
11498 SDValue FalseVal = N->getOperand(0);
11499 SDValue TrueVal = N->getOperand(1);
11500 SDValue ARMcc = N->getOperand(2);
11501 ARMCC::CondCodes CC =
11502 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
11504 // BFI is only available on V6T2+.
11505 if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
11506 SDValue R = PerformCMOVToBFICombine(N, DAG);
11527 /// FIXME: Turn this into a target neutral optimization?
11529 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
11530 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
11531 N->getOperand(3), Cmp);
11532 } else if (CC == ARMCC::EQ && TrueVal == RHS) {
11534 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
11535 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
11536 N->getOperand(3), NewCmp);
11539 // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
11540 // -> (cmov F T CC CPSR Cmp)
11541 if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
11542 auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
11543 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
11544 auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
11545 if ((LHS0C && LHS0C->getZExtValue() == 0) &&
11546 (LHS1C && LHS1C->getZExtValue() == 1) &&
11547 (RHSC && RHSC->getZExtValue() == 0)) {
11548 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
11549 LHS->getOperand(2), LHS->getOperand(3),
11550 LHS->getOperand(4));
11554 if (Res.getNode()) {
11555 APInt KnownZero, KnownOne;
11556 DAG.computeKnownBits(SDValue(N,0), KnownZero, KnownOne);
11557 // Capture demanded bits information that would be otherwise lost.
11558 if (KnownZero == 0xfffffffe)
11559 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
11560 DAG.getValueType(MVT::i1));
11561 else if (KnownZero == 0xffffff00)
11562 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
11563 DAG.getValueType(MVT::i8));
11564 else if (KnownZero == 0xffff0000)
11565 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
11566 DAG.getValueType(MVT::i16));
11572 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
11573 DAGCombinerInfo &DCI) const {
11574 switch (N->getOpcode()) {
11576 case ISD::ADDC: return PerformADDCCombine(N, DCI, Subtarget);
11577 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget);
11578 case ISD::SUB: return PerformSUBCombine(N, DCI);
11579 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget);
11580 case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
11581 case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget);
11582 case ISD::AND: return PerformANDCombine(N, DCI, Subtarget);
11583 case ARMISD::BFI: return PerformBFICombine(N, DCI);
11584 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
11585 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
11586 case ISD::STORE: return PerformSTORECombine(N, DCI);
11587 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
11588 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
11589 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
11590 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
11591 case ARMISD::VDUP: return PerformVDUPCombine(N, DCI);
11592 case ISD::FP_TO_SINT:
11593 case ISD::FP_TO_UINT:
11594 return PerformVCVTCombine(N, DCI.DAG, Subtarget);
11596 return PerformVDIVCombine(N, DCI.DAG, Subtarget);
11597 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
11600 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget);
11601 case ISD::SIGN_EXTEND:
11602 case ISD::ZERO_EXTEND:
11603 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
11604 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
11605 case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
11606 case ISD::LOAD: return PerformLOADCombine(N, DCI);
11607 case ARMISD::VLD1DUP:
11608 case ARMISD::VLD2DUP:
11609 case ARMISD::VLD3DUP:
11610 case ARMISD::VLD4DUP:
11611 return PerformVLDCombine(N, DCI);
11612 case ARMISD::BUILD_VECTOR:
11613 return PerformARMBUILD_VECTORCombine(N, DCI);
11614 case ISD::INTRINSIC_VOID:
11615 case ISD::INTRINSIC_W_CHAIN:
11616 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
11617 case Intrinsic::arm_neon_vld1:
11618 case Intrinsic::arm_neon_vld2:
11619 case Intrinsic::arm_neon_vld3:
11620 case Intrinsic::arm_neon_vld4:
11621 case Intrinsic::arm_neon_vld2lane:
11622 case Intrinsic::arm_neon_vld3lane:
11623 case Intrinsic::arm_neon_vld4lane:
11624 case Intrinsic::arm_neon_vst1:
11625 case Intrinsic::arm_neon_vst2:
11626 case Intrinsic::arm_neon_vst3:
11627 case Intrinsic::arm_neon_vst4:
11628 case Intrinsic::arm_neon_vst2lane:
11629 case Intrinsic::arm_neon_vst3lane:
11630 case Intrinsic::arm_neon_vst4lane:
11631 return PerformVLDCombine(N, DCI);
11639 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
11641 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
11644 bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
11647 bool *Fast) const {
11648 // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
11649 bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
11651 switch (VT.getSimpleVT().SimpleTy) {
11657 // Unaligned accesses can use (for example) LDRB, LDRH, LDR
11658 if (AllowsUnaligned) {
11660 *Fast = Subtarget->hasV7Ops();
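// Unaligned word accesses are only reported as fast from ARMv7 onwards;
// v6 cores accept them when SCTLR.A is clear but may take extra cycles.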
11667 // For any little-endian targets with NEON, we can support unaligned ld/st
11668 // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
11669 // A big-endian target may also explicitly support unaligned accesses
11670 if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
11680 static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
11681 unsigned AlignCheck) {
11682 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
11683 (DstAlign == 0 || DstAlign % AlignCheck == 0));
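/// getOptimalMemOpType - Pick the widest profitable type for an inline
/// memcpy / memset expansion: prefer 128-bit NEON vectors, then 64-bit,
/// and fall back to i32/i16 when NEON cannot be used or the copy is small.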
11686 EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
11687 unsigned DstAlign, unsigned SrcAlign,
11688 bool IsMemset, bool ZeroMemset,
11690 MachineFunction &MF) const {
11691 const Function *F = MF.getFunction();
11693 // See if we can use NEON instructions for this...
11694 if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
11695 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
11698 (memOpAlign(SrcAlign, DstAlign, 16) ||
11699 (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, &Fast) && Fast))) {
11701 } else if (Size >= 8 &&
11702 (memOpAlign(SrcAlign, DstAlign, 8) ||
11703 (allowsMisalignedMemoryAccesses(MVT::f64, 0, 1, &Fast) &&
11709 // Lowering to i32/i16 if the size permits.
11712 else if (Size >= 2)
11715 // Let the target-independent logic figure it out.
11719 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
11720 if (Val.getOpcode() != ISD::LOAD)
11723 EVT VT1 = Val.getValueType();
11724 if (!VT1.isSimple() || !VT1.isInteger() ||
11725 !VT2.isSimple() || !VT2.isInteger())
11728 switch (VT1.getSimpleVT().SimpleTy) {
11733 // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
11740 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
11741 EVT VT = ExtVal.getValueType();
11743 if (!isTypeLegal(VT))
11746 // Don't create a loadext if we can fold the extension into a wide/long
11747 // instruction.
11748 // If there's more than one user instruction, the loadext is desirable no
11749 // matter what. There can be two uses by the same instruction.
11750 if (ExtVal->use_empty() ||
11751 !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
11754 SDNode *U = *ExtVal->use_begin();
11755 if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
11756 U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))
11762 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
11763 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
11766 if (!isTypeLegal(EVT::getEVT(Ty1)))
11769 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
11771 // Assuming the caller doesn't have a zeroext or signext return parameter,
11772 // truncation all the way down to i1 is valid.
11776 int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL,
11777 const AddrMode &AM, Type *Ty,
11778 unsigned AS) const {
11779 if (isLegalAddressingMode(DL, AM, Ty, AS)) {
11780 if (Subtarget->hasFPAO())
11781 return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
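// Thumb1 load/store offsets are unsigned 5-bit immediates scaled by the
// access size (1, 2 or 4 bytes), which is what the checks below verify.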
11788 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
11792 unsigned Scale = 1;
11793 switch (VT.getSimpleVT().SimpleTy) {
11794 default: return false;
11809 if ((V & (Scale - 1)) != 0)
11812 return V == (V & ((1LL << 5) - 1));
11815 static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
11816 const ARMSubtarget *Subtarget) {
11817 bool isNeg = false;
11823 switch (VT.getSimpleVT().SimpleTy) {
11824 default: return false;
11829 // + imm12 or - imm8
11831 return V == (V & ((1LL << 8) - 1));
11832 return V == (V & ((1LL << 12) - 1));
11835 // Same as ARM mode. FIXME: NEON?
11836 if (!Subtarget->hasVFP2())
11841 return V == (V & ((1LL << 8) - 1));
11845 /// isLegalAddressImmediate - Return true if the integer value can be used
11846 /// as the offset of the target addressing mode for load / store of the
11847 /// specified type.
11848 static bool isLegalAddressImmediate(int64_t V, EVT VT,
11849 const ARMSubtarget *Subtarget) {
11853 if (!VT.isSimple())
11856 if (Subtarget->isThumb1Only())
11857 return isLegalT1AddressImmediate(V, VT);
11858 else if (Subtarget->isThumb2())
11859 return isLegalT2AddressImmediate(V, VT, Subtarget);
11864 switch (VT.getSimpleVT().SimpleTy) {
11865 default: return false;
11870 return V == (V & ((1LL << 12) - 1));
11873 return V == (V & ((1LL << 8) - 1));
11876 if (!Subtarget->hasVFP2()) // FIXME: NEON?
11881 return V == (V & ((1LL << 8) - 1));
11885 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
11887 int Scale = AM.Scale;
11891 switch (VT.getSimpleVT().SimpleTy) {
11892 default: return false;
11900 Scale = Scale & ~1;
11901 return Scale == 2 || Scale == 4 || Scale == 8;
11904 if (((unsigned)AM.HasBaseReg + Scale) <= 2)
11908 // Note, we allow "void" uses (basically, uses that aren't loads or
11909 // stores), because ARM allows folding a scale into many arithmetic
11910 // operations. This should be made more precise and revisited later.
11912 // Allow r << imm, but the imm has to be a multiple of two.
11913 if (Scale & 1) return false;
11914 return isPowerOf2_32(Scale);
11918 /// isLegalAddressingMode - Return true if the addressing mode represented
11919 /// by AM is legal for this target, for a load/store of the specified type.
11920 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
11921 const AddrMode &AM, Type *Ty,
11922 unsigned AS) const {
11923 EVT VT = getValueType(DL, Ty, true);
11924 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
11927 // Can never fold addr of global into load/store.
11931 switch (AM.Scale) {
11932 case 0: // no scale reg, must be "r+i" or "r", or "i".
11935 if (Subtarget->isThumb1Only())
11939 // ARM doesn't support any R+R*scale+imm addr modes.
11943 if (!VT.isSimple())
11946 if (Subtarget->isThumb2())
11947 return isLegalT2ScaledAddressingMode(AM, VT);
11949 int Scale = AM.Scale;
11950 switch (VT.getSimpleVT().SimpleTy) {
11951 default: return false;
11955 if (Scale < 0) Scale = -Scale;
11959 return isPowerOf2_32(Scale & ~1);
11963 if (((unsigned)AM.HasBaseReg + Scale) <= 2)
11968 // Note, we allow "void" uses (basically, uses that aren't loads or
11969 // stores), because ARM allows folding a scale into many arithmetic
11970 // operations. This should be made more precise and revisited later.
11972 // Allow r << imm, but the imm has to be a multiple of two.
11973 if (Scale & 1) return false;
11974 return isPowerOf2_32(Scale);
11980 /// isLegalICmpImmediate - Return true if the specified immediate is legal
11981 /// icmp immediate, that is the target has icmp instructions which can compare
11982 /// a register against the immediate without having to materialize the
11983 /// immediate into a register.
11984 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
11985 // Thumb2 and ARM modes can use cmn for negative immediates.
11986 if (!Subtarget->isThumb())
11987 return ARM_AM::getSOImmVal(std::abs(Imm)) != -1;
11988 if (Subtarget->isThumb2())
11989 return ARM_AM::getT2SOImmVal(std::abs(Imm)) != -1;
11990 // Thumb1 doesn't have cmn, and only supports 8-bit unsigned immediates.
11991 return Imm >= 0 && Imm <= 255;
11994 /// isLegalAddImmediate - Return true if the specified immediate is a legal add
11995 /// *or sub* immediate, that is the target has add or sub instructions which can
11996 /// add a register with the immediate without having to materialize the
11997 /// immediate into a register.
11998 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
11999 // Same encoding for add/sub, just flip the sign.
12000 int64_t AbsImm = std::abs(Imm);
12001 if (!Subtarget->isThumb())
12002 return ARM_AM::getSOImmVal(AbsImm) != -1;
12003 if (Subtarget->isThumb2())
12004 return ARM_AM::getT2SOImmVal(AbsImm) != -1;
12005 // Thumb1 only has 8-bit unsigned immediate.
12006 return AbsImm >= 0 && AbsImm <= 255;
12009 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
12010 bool isSEXTLoad, SDValue &Base,
12011 SDValue &Offset, bool &isInc,
12012 SelectionDAG &DAG) {
12013 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
12016 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
12017 // AddressingMode 3
12018 Base = Ptr->getOperand(0);
12019 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
12020 int RHSC = (int)RHS->getZExtValue();
12021 if (RHSC < 0 && RHSC > -256) {
12022 assert(Ptr->getOpcode() == ISD::ADD);
12024 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
12028 isInc = (Ptr->getOpcode() == ISD::ADD);
12029 Offset = Ptr->getOperand(1);
12031 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
12032 // AddressingMode 2
12033 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
12034 int RHSC = (int)RHS->getZExtValue();
12035 if (RHSC < 0 && RHSC > -0x1000) {
12036 assert(Ptr->getOpcode() == ISD::ADD);
12038 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
12039 Base = Ptr->getOperand(0);
12044 if (Ptr->getOpcode() == ISD::ADD) {
12046 ARM_AM::ShiftOpc ShOpcVal =
12047 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
12048 if (ShOpcVal != ARM_AM::no_shift) {
12049 Base = Ptr->getOperand(1);
12050 Offset = Ptr->getOperand(0);
12052 Base = Ptr->getOperand(0);
12053 Offset = Ptr->getOperand(1);
12058 isInc = (Ptr->getOpcode() == ISD::ADD);
12059 Base = Ptr->getOperand(0);
12060 Offset = Ptr->getOperand(1);
12064 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
12068 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
12069 bool isSEXTLoad, SDValue &Base,
12070 SDValue &Offset, bool &isInc,
12071 SelectionDAG &DAG) {
12072 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
12075 Base = Ptr->getOperand(0);
12076 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
12077 int RHSC = (int)RHS->getZExtValue();
12078 if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
12079 assert(Ptr->getOpcode() == ISD::ADD);
12081 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
12083 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
12084 isInc = Ptr->getOpcode() == ISD::ADD;
12085 Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
12093 /// getPreIndexedAddressParts - returns true by value, base pointer and
12094 /// offset pointer and addressing mode by reference if the node's address
12095 /// can be legally represented as pre-indexed load / store address.
12097 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
12099 ISD::MemIndexedMode &AM,
12100 SelectionDAG &DAG) const {
12101 if (Subtarget->isThumb1Only())
12106 bool isSEXTLoad = false;
12107 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
12108 Ptr = LD->getBasePtr();
12109 VT = LD->getMemoryVT();
12110 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
12111 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
12112 Ptr = ST->getBasePtr();
12113 VT = ST->getMemoryVT();
12118 bool isLegal = false;
12119 if (Subtarget->isThumb2())
12120 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
12121 Offset, isInc, DAG);
12123 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
12124 Offset, isInc, DAG);
12128 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
12132 /// getPostIndexedAddressParts - returns true by value, base pointer and
12133 /// offset pointer and addressing mode by reference if this node can be
12134 /// combined with a load / store to form a post-indexed load / store.
12135 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
12138 ISD::MemIndexedMode &AM,
12139 SelectionDAG &DAG) const {
12142 bool isSEXTLoad = false, isNonExt;
12143 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
12144 VT = LD->getMemoryVT();
12145 Ptr = LD->getBasePtr();
12146 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
12147 isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
12148 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
12149 VT = ST->getMemoryVT();
12150 Ptr = ST->getBasePtr();
12151 isNonExt = !ST->isTruncatingStore();
12155 if (Subtarget->isThumb1Only()) {
12156 // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
12157 // must be non-extending/truncating, i32, with an offset of 4.
12158 assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
12159 if (Op->getOpcode() != ISD::ADD || !isNonExt)
12161 auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
12162 if (!RHS || RHS->getZExtValue() != 4)
12165 Offset = Op->getOperand(1);
12166 Base = Op->getOperand(0);
12167 AM = ISD::POST_INC;
12172 bool isLegal = false;
12173 if (Subtarget->isThumb2())
12174 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
12177 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
12183 // Swap base ptr and offset to catch more post-index load / store when
12184 // it's legal. In Thumb2 mode, offset must be an immediate.
12185 if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
12186 !Subtarget->isThumb2())
12187 std::swap(Base, Offset);
12189 // Post-indexed load / store update the base pointer.
12194 AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
12198 void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
12201 const SelectionDAG &DAG,
12202 unsigned Depth) const {
12203 unsigned BitWidth = KnownOne.getBitWidth();
12204 KnownZero = KnownOne = APInt(BitWidth, 0);
12205 switch (Op.getOpcode()) {
12211 // These nodes' second result is a boolean
12212 if (Op.getResNo() == 0)
12214 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
12216 case ARMISD::CMOV: {
12217 // Bits are known zero/one if known on the LHS and RHS.
12218 DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
12219 if (KnownZero == 0 && KnownOne == 0) return;
12221 APInt KnownZeroRHS, KnownOneRHS;
12222 DAG.computeKnownBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1);
12223 KnownZero &= KnownZeroRHS;
12224 KnownOne &= KnownOneRHS;
12227 case ISD::INTRINSIC_W_CHAIN: {
12228 ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
12229 Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
12232 case Intrinsic::arm_ldaex:
12233 case Intrinsic::arm_ldrex: {
12234 EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
12235 unsigned MemBits = VT.getScalarSizeInBits();
12236 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
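// LDREXB / LDREXH zero-extend the loaded value to 32 bits, so every bit
// above the accessed width is known to be zero.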
12244 //===----------------------------------------------------------------------===//
12245 // ARM Inline Assembly Support
12246 //===----------------------------------------------------------------------===//
12248 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
12249 // Looking for "rev" which is V6+.
12250 if (!Subtarget->hasV6Ops())
12253 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
12254 std::string AsmStr = IA->getAsmString();
12255 SmallVector<StringRef, 4> AsmPieces;
12256 SplitString(AsmStr, AsmPieces, ";\n");
12258 switch (AsmPieces.size()) {
12259 default: return false;
12261 AsmStr = AsmPieces[0];
12263 SplitString(AsmStr, AsmPieces, " \t,");
12266 if (AsmPieces.size() == 3 &&
12267 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
12268 IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
12269 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
12270 if (Ty && Ty->getBitWidth() == 32)
12271 return IntrinsicLowering::LowerToByteSwap(CI);
12279 const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
12280 // At this point, we have to lower this constraint to something else, so we
12281 // lower it to an "r" or "w". However, by doing this we will force the result
12282 // to be in register, while the X constraint is much more permissive.
12284 // Although we are correct (we are free to emit anything, without
12285 // constraints), we might break use cases that would expect us to be more
12286 // efficient and emit something else.
12287 if (!Subtarget->hasVFP2())
12289 if (ConstraintVT.isFloatingPoint())
12291 if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
12292 (ConstraintVT.getSizeInBits() == 64 ||
12293 ConstraintVT.getSizeInBits() == 128))
12299 /// getConstraintType - Given a constraint letter, return the type of
12300 /// constraint it is for this target.
12301 ARMTargetLowering::ConstraintType
12302 ARMTargetLowering::getConstraintType(StringRef Constraint) const {
12303 if (Constraint.size() == 1) {
12304 switch (Constraint[0]) {
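// 'l' and 'h' select the low / high core registers; 'w', 'x' and 't'
// select VFP / NEON register classes (see getRegForInlineAsmConstraint).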
12306 case 'l': return C_RegisterClass;
12307 case 'w': return C_RegisterClass;
12308 case 'h': return C_RegisterClass;
12309 case 'x': return C_RegisterClass;
12310 case 't': return C_RegisterClass;
12311 case 'j': return C_Other; // Constant for movw.
12312 // An address with a single base register. Due to the way we
12313 // currently handle addresses it is the same as an 'r' memory constraint.
12314 case 'Q': return C_Memory;
12316 } else if (Constraint.size() == 2) {
12317 switch (Constraint[0]) {
12319 // All 'U+' constraints are addresses.
12320 case 'U': return C_Memory;
12323 return TargetLowering::getConstraintType(Constraint);
12326 /// Examine constraint type and operand type and determine a weight value.
12327 /// This object must already have been set up with the operand type
12328 /// and the current alternative constraint selected.
12329 TargetLowering::ConstraintWeight
12330 ARMTargetLowering::getSingleConstraintMatchWeight(
12331 AsmOperandInfo &info, const char *constraint) const {
12332 ConstraintWeight weight = CW_Invalid;
12333 Value *CallOperandVal = info.CallOperandVal;
12334 // If we don't have a value, we can't do a match,
12335 // but allow it at the lowest weight.
12336 if (!CallOperandVal)
12338 Type *type = CallOperandVal->getType();
12339 // Look at the constraint type.
12340 switch (*constraint) {
12342 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
12345 if (type->isIntegerTy()) {
12346 if (Subtarget->isThumb())
12347 weight = CW_SpecificReg;
12349 weight = CW_Register;
12353 if (type->isFloatingPointTy())
12354 weight = CW_Register;
12360 typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
12361 RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
12362 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
12363 if (Constraint.size() == 1) {
12364 // GCC ARM Constraint Letters
12365 switch (Constraint[0]) {
12366 case 'l': // Low regs or general regs.
12367 if (Subtarget->isThumb())
12368 return RCPair(0U, &ARM::tGPRRegClass);
12369 return RCPair(0U, &ARM::GPRRegClass);
12370 case 'h': // High regs or no regs.
12371 if (Subtarget->isThumb())
12372 return RCPair(0U, &ARM::hGPRRegClass);
12375 if (Subtarget->isThumb1Only())
12376 return RCPair(0U, &ARM::tGPRRegClass);
12377 return RCPair(0U, &ARM::GPRRegClass);
12379 if (VT == MVT::Other)
12381 if (VT == MVT::f32)
12382 return RCPair(0U, &ARM::SPRRegClass);
12383 if (VT.getSizeInBits() == 64)
12384 return RCPair(0U, &ARM::DPRRegClass);
12385 if (VT.getSizeInBits() == 128)
12386 return RCPair(0U, &ARM::QPRRegClass);
12389 if (VT == MVT::Other)
12391 if (VT == MVT::f32)
12392 return RCPair(0U, &ARM::SPR_8RegClass);
12393 if (VT.getSizeInBits() == 64)
12394 return RCPair(0U, &ARM::DPR_8RegClass);
12395 if (VT.getSizeInBits() == 128)
12396 return RCPair(0U, &ARM::QPR_8RegClass);
12399 if (VT == MVT::f32)
12400 return RCPair(0U, &ARM::SPRRegClass);
12404 if (StringRef("{cc}").equals_lower(Constraint))
12405 return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);
12407 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
12410 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
12411 /// vector. If it is invalid, don't add anything to Ops.
12412 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
12413 std::string &Constraint,
12414 std::vector<SDValue> &Ops,
12415 SelectionDAG &DAG) const {
12418 // Currently only support length 1 constraints.
12419 if (Constraint.length() != 1) return;
12421 char ConstraintLetter = Constraint[0];
12422 switch (ConstraintLetter) {
12425 case 'I': case 'J': case 'K': case 'L':
12426 case 'M': case 'N': case 'O':
12427 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
12431 int64_t CVal64 = C->getSExtValue();
12432 int CVal = (int) CVal64;
12433 // None of these constraints allow values larger than 32 bits. Check
12434 // that the value fits in an int.
12435 if (CVal != CVal64)
12438 switch (ConstraintLetter) {
12440 // Constant suitable for movw, must be between 0 and
12441 // 65535.
12442 if (Subtarget->hasV6T2Ops())
12443 if (CVal >= 0 && CVal <= 65535)
12447 if (Subtarget->isThumb1Only()) {
12448 // This must be a constant between 0 and 255, for ADD
12449 // immediates.
12450 if (CVal >= 0 && CVal <= 255)
12452 } else if (Subtarget->isThumb2()) {
12453 // A constant that can be used as an immediate value in a
12454 // data-processing instruction.
12455 if (ARM_AM::getT2SOImmVal(CVal) != -1)
12458 // A constant that can be used as an immediate value in a
12459 // data-processing instruction.
12460 if (ARM_AM::getSOImmVal(CVal) != -1)
12466 if (Subtarget->isThumb1Only()) {
12467 // This must be a constant between -255 and -1, for negated ADD
12468 // immediates. This can be used in GCC with an "n" modifier that
12469 // prints the negated value, for use with SUB instructions. It is
12470 // not useful otherwise but is implemented for compatibility.
12471 if (CVal >= -255 && CVal <= -1)
12474 // This must be a constant between -4095 and 4095. It is not clear
12475 // what this constraint is intended for. Implemented for
12476 // compatibility with GCC.
12477 if (CVal >= -4095 && CVal <= 4095)
12483 if (Subtarget->isThumb1Only()) {
12484 // A 32-bit value where only one byte has a nonzero value. Exclude
12485 // zero to match GCC. This constraint is used by GCC internally for
12486 // constants that can be loaded with a move/shift combination.
12487 // It is not useful otherwise but is implemented for compatibility.
12488 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
12490 } else if (Subtarget->isThumb2()) {
12491 // A constant whose bitwise inverse can be used as an immediate
12492 // value in a data-processing instruction. This can be used in GCC
12493 // with a "B" modifier that prints the inverted value, for use with
12494 // BIC and MVN instructions. It is not useful otherwise but is
12495 // implemented for compatibility.
12496 if (ARM_AM::getT2SOImmVal(~CVal) != -1)
12499 // A constant whose bitwise inverse can be used as an immediate
12500 // value in a data-processing instruction. This can be used in GCC
12501 // with a "B" modifier that prints the inverted value, for use with
12502 // BIC and MVN instructions. It is not useful otherwise but is
12503 // implemented for compatibility.
12504 if (ARM_AM::getSOImmVal(~CVal) != -1)
12510 if (Subtarget->isThumb1Only()) {
12511 // This must be a constant between -7 and 7,
12512 // for 3-operand ADD/SUB immediate instructions.
12513 if (CVal >= -7 && CVal < 7)
12515 } else if (Subtarget->isThumb2()) {
12516 // A constant whose negation can be used as an immediate value in a
12517 // data-processing instruction. This can be used in GCC with an "n"
12518 // modifier that prints the negated value, for use with SUB
12519 // instructions. It is not useful otherwise but is implemented for
12520 // compatibility.
12521 if (ARM_AM::getT2SOImmVal(-CVal) != -1)
12524 // A constant whose negation can be used as an immediate value in a
12525 // data-processing instruction. This can be used in GCC with an "n"
12526 // modifier that prints the negated value, for use with SUB
12527 // instructions. It is not useful otherwise but is implemented for
12528 // compatibility.
12529 if (ARM_AM::getSOImmVal(-CVal) != -1)
12535 if (Subtarget->isThumb1Only()) {
12536 // This must be a multiple of 4 between 0 and 1020, for
12537 // ADD sp + immediate.
12538 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
12541 // A power of two or a constant between 0 and 32. This is used in
12542 // GCC for the shift amount on shifted register operands, but it is
12543 // useful in general for any shift amounts.
12544 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
12550 if (Subtarget->isThumb()) { // FIXME thumb2
12551 // This must be a constant between 0 and 31, for shift amounts.
12552 if (CVal >= 0 && CVal <= 31)
12558 if (Subtarget->isThumb()) { // FIXME thumb2
12559 // This must be a multiple of 4 between -508 and 508, for
12560 // ADD/SUB sp = sp + immediate.
12561 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
12566 Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
12570 if (Result.getNode()) {
12571 Ops.push_back(Result);
12574 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
12577 static RTLIB::Libcall getDivRemLibcall(
12578 const SDNode *N, MVT::SimpleValueType SVT) {
12579 assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
12580 N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) &&
12581 "Unhandled Opcode in getDivRemLibcall");
12582 bool isSigned = N->getOpcode() == ISD::SDIVREM ||
12583 N->getOpcode() == ISD::SREM;
12586 default: llvm_unreachable("Unexpected request for libcall!");
12587 case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
12588 case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
12589 case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
12590 case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
12595 static TargetLowering::ArgListTy getDivRemArgList(
12596 const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
12597 assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
12598 N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) &&
12599 "Unhandled Opcode in getDivRemArgList");
12600 bool isSigned = N->getOpcode() == ISD::SDIVREM ||
12601 N->getOpcode() == ISD::SREM;
12602 TargetLowering::ArgListTy Args;
12603 TargetLowering::ArgListEntry Entry;
12604 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
12605 EVT ArgVT = N->getOperand(i).getValueType();
12606 Type *ArgTy = ArgVT.getTypeForEVT(*Context);
12607 Entry.Node = N->getOperand(i);
12609 Entry.isSExt = isSigned;
12610 Entry.isZExt = !isSigned;
12611 Args.push_back(Entry);
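// The Windows runtime division helpers expect the divisor as their first
// argument, so swap the dividend and divisor operands for that target.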
12613 if (Subtarget->isTargetWindows() && Args.size() >= 2)
12614 std::swap(Args[0], Args[1]);
12618 SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
12619 assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
12620 Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
12621 Subtarget->isTargetWindows()) &&
12622 "Register-based DivRem lowering only");
12623 unsigned Opcode = Op->getOpcode();
12624 assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
12625 "Invalid opcode for Div/Rem lowering");
12626 bool isSigned = (Opcode == ISD::SDIVREM);
12627 EVT VT = Op->getValueType(0);
12628 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
12631 // If the target has hardware divide, use divide + multiply + subtract:
12632 // div = a / b
12633 // rem = a - b * div
12634 // return {div, rem}
12635 // This should be lowered into UDIV/SDIV + MLS later on.
12636 if (Subtarget->hasDivide() && Op->getValueType(0).isSimple() &&
12637 Op->getSimpleValueType(0) == MVT::i32) {
12638 unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
12639 const SDValue Dividend = Op->getOperand(0);
12640 const SDValue Divisor = Op->getOperand(1);
12641 SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
12642 SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
12643 SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
12645 SDValue Values[2] = {Div, Rem};
12646 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
12649 RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
12650 VT.getSimpleVT().SimpleTy);
12651 SDValue InChain = DAG.getEntryNode();
12653 TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
12657 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
12658 getPointerTy(DAG.getDataLayout()));
12660 Type *RetTy = (Type*)StructType::get(Ty, Ty, nullptr);
12662 if (Subtarget->isTargetWindows())
12663 InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);
12665 TargetLowering::CallLoweringInfo CLI(DAG);
12666 CLI.setDebugLoc(dl).setChain(InChain)
12667 .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
12668 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
12670 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
12671 return CallInfo.first;
12674 // Lowers REM using divmod helpers
12675 // see RTABI section 4.2/4.3
12676 SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
12677 // Build return types (div and rem)
12678 std::vector<Type*> RetTyParams;
12679 Type *RetTyElement;
12681 switch (N->getValueType(0).getSimpleVT().SimpleTy) {
12682 default: llvm_unreachable("Unexpected request for libcall!");
12683 case MVT::i8: RetTyElement = Type::getInt8Ty(*DAG.getContext()); break;
12684 case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
12685 case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
12686 case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
12689 RetTyParams.push_back(RetTyElement);
12690 RetTyParams.push_back(RetTyElement);
12691 ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams);
12692 Type *RetTy = StructType::get(*DAG.getContext(), ret);
12694 RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
12696 SDValue InChain = DAG.getEntryNode();
12697 TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
12699 bool isSigned = N->getOpcode() == ISD::SREM;
12700 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
12701 getPointerTy(DAG.getDataLayout()));
12703 if (Subtarget->isTargetWindows())
12704 InChain = WinDBZCheckDenominator(DAG, N, InChain);
12707 CallLoweringInfo CLI(DAG);
12708 CLI.setChain(InChain)
12709 .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
12710 .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
12711 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
12713 // Return second (rem) result operand (first contains div)
12714 SDNode *ResNode = CallResult.first.getNode();
12715 assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
12716 return ResNode->getOperand(1);
12720 ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
12721 assert(Subtarget->isTargetWindows() && "unsupported target platform");
12725 SDValue Chain = Op.getOperand(0);
12726 SDValue Size = Op.getOperand(1);
12728 SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
12729 DAG.getConstant(2, DL, MVT::i32));
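// Pass the allocation size to the stack probe as a count of 4-byte words
// in R4, then read the adjusted SP back as the result of this node.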
12732 Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
12733 Flag = Chain.getValue(1);
12735 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
12736 Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);
12738 SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
12739 Chain = NewSP.getValue(1);
12741 SDValue Ops[2] = { NewSP, Chain };
12742 return DAG.getMergeValues(Ops, DL);
12745 SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
12746 assert(Op.getValueType() == MVT::f64 && Subtarget->isFPOnlySP() &&
12747 "Unexpected type for custom-lowering FP_EXTEND");
12750 LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());
12752 SDValue SrcVal = Op.getOperand(0);
12753 return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false,
12757 SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
12758 assert(Op.getOperand(0).getValueType() == MVT::f64 &&
12759 Subtarget->isFPOnlySP() &&
12760 "Unexpected type for custom-lowering FP_ROUND");
12763 LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());
12765 SDValue SrcVal = Op.getOperand(0);
12766 return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false,
12771 ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
12772 // The ARM target isn't yet aware of offsets.
12776 bool ARM::isBitFieldInvertedMask(unsigned v) {
12777 if (v == 0xffffffff)
12780 // There can be 1's on either or both "outsides"; all the "inside"
12781 // bits must be 0's.
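// e.g. 0xff0000ff qualifies: ~v == 0x00ffff00 is a single contiguous run
// of ones, exactly the shape a BFC/BFI bitfield operation can clear or fill.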
12782 return isShiftedMask_32(~v);
12785 /// isFPImmLegal - Returns true if the target can instruction select the
12786 /// specified FP immediate natively. If false, the legalizer will
12787 /// materialize the FP immediate as a load from a constant pool.
12788 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
12789 if (!Subtarget->hasVFP3())
12791 if (VT == MVT::f32)
12792 return ARM_AM::getFP32Imm(Imm) != -1;
12793 if (VT == MVT::f64 && !Subtarget->isFPOnlySP())
12794 return ARM_AM::getFP64Imm(Imm) != -1;
12798 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
12799 /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
12800 /// specified in the intrinsic calls.
12801 bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
12803 unsigned Intrinsic) const {
12804 switch (Intrinsic) {
12805 case Intrinsic::arm_neon_vld1:
12806 case Intrinsic::arm_neon_vld2:
12807 case Intrinsic::arm_neon_vld3:
12808 case Intrinsic::arm_neon_vld4:
12809 case Intrinsic::arm_neon_vld2lane:
12810 case Intrinsic::arm_neon_vld3lane:
12811 case Intrinsic::arm_neon_vld4lane: {
12812 Info.opc = ISD::INTRINSIC_W_CHAIN;
12813 // Conservatively set memVT to the entire set of vectors loaded.
12814 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
12815 uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
12816 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
12817 Info.ptrVal = I.getArgOperand(0);
12819 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
12820 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
12821 Info.vol = false; // volatile loads with NEON intrinsics not supported
12822 Info.readMem = true;
12823 Info.writeMem = false;
12826 case Intrinsic::arm_neon_vst1:
12827 case Intrinsic::arm_neon_vst2:
12828 case Intrinsic::arm_neon_vst3:
12829 case Intrinsic::arm_neon_vst4:
12830 case Intrinsic::arm_neon_vst2lane:
12831 case Intrinsic::arm_neon_vst3lane:
12832 case Intrinsic::arm_neon_vst4lane: {
12833 Info.opc = ISD::INTRINSIC_VOID;
12834 // Conservatively set memVT to the entire set of vectors stored.
12835 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
12836 unsigned NumElts = 0;
12837 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
12838 Type *ArgTy = I.getArgOperand(ArgI)->getType();
12839 if (!ArgTy->isVectorTy())
12841 NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
12843 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
12844 Info.ptrVal = I.getArgOperand(0);
12846 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
12847 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
12848 Info.vol = false; // volatile stores with NEON intrinsics not supported
12849 Info.readMem = false;
12850 Info.writeMem = true;
12853 case Intrinsic::arm_ldaex:
12854 case Intrinsic::arm_ldrex: {
12855 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
12856 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
12857 Info.opc = ISD::INTRINSIC_W_CHAIN;
12858 Info.memVT = MVT::getVT(PtrTy->getElementType());
12859 Info.ptrVal = I.getArgOperand(0);
12861 Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
12863 Info.readMem = true;
12864 Info.writeMem = false;
12867 case Intrinsic::arm_stlex:
12868 case Intrinsic::arm_strex: {
12869 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
12870 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
12871 Info.opc = ISD::INTRINSIC_W_CHAIN;
12872 Info.memVT = MVT::getVT(PtrTy->getElementType());
12873 Info.ptrVal = I.getArgOperand(1);
12875 Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
12877 Info.readMem = false;
12878 Info.writeMem = true;
12881 case Intrinsic::arm_stlexd:
12882 case Intrinsic::arm_strexd: {
12883 Info.opc = ISD::INTRINSIC_W_CHAIN;
12884 Info.memVT = MVT::i64;
12885 Info.ptrVal = I.getArgOperand(2);
12889 Info.readMem = false;
12890 Info.writeMem = true;
12893 case Intrinsic::arm_ldaexd:
12894 case Intrinsic::arm_ldrexd: {
12895 Info.opc = ISD::INTRINSIC_W_CHAIN;
12896 Info.memVT = MVT::i64;
12897 Info.ptrVal = I.getArgOperand(0);
12901 Info.readMem = true;
12902 Info.writeMem = false;
12912 /// \brief Returns true if it is beneficial to convert a load of a constant
12913 /// to just the constant itself.
12914 bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
12916 assert(Ty->isIntegerTy());
12918 unsigned Bits = Ty->getPrimitiveSizeInBits();
12919 if (Bits == 0 || Bits > 32)
12924 bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT,
12925 unsigned Index) const {
12926 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
12929 return (Index == 0 || Index == ResVT.getVectorNumElements());
12932 Instruction* ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
12933 ARM_MB::MemBOpt Domain) const {
12934 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
12936 // First, if the target has no DMB, see what fallback we can use.
12937 if (!Subtarget->hasDataBarrier()) {
12938 // Some ARMv6 CPUs can support data barriers with an mcr instruction.
12939 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
12940 // here.
12941 if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
12942 Function *MCR = llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
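// The argument list below encodes "mcr p15, 0, <Rt>, c7, c10, 5", the
// ARMv6 CP15 data memory barrier operation.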
12943 Value* args[6] = {Builder.getInt32(15), Builder.getInt32(0),
12944 Builder.getInt32(0), Builder.getInt32(7),
12945 Builder.getInt32(10), Builder.getInt32(5)};
12946 return Builder.CreateCall(MCR, args);
12948 // Instead of using barriers, atomic accesses on these subtargets use
12949 // libcalls.
12950 llvm_unreachable("makeDMB on a target so old that it has no barriers");
12953 Function *DMB = llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
12954 // Only a full system barrier exists in the M-class architectures.
12955 Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
12956 Constant *CDomain = Builder.getInt32(Domain);
12957 return Builder.CreateCall(DMB, CDomain);
// Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction* ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 AtomicOrdering Ord, bool IsStore,
                                                 bool IsLoad) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/non-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Acquire:
    return nullptr; // Nothing to do
  case AtomicOrdering::SequentiallyConsistent:
    if (!IsStore)
      return nullptr; // Nothing to do
    LLVM_FALLTHROUGH;
  case AtomicOrdering::Release:
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->preferISHSTBarriers())
      return makeDMB(Builder, ARM_MB::ISHST);
    // FIXME: add a comment with a link to documentation justifying this.
    else
      return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitLeadingFence");
}

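// The trailing fence is the mirror image of the mapping above: orderings with
// acquire semantics (acquire, acq_rel, seq_cst) need a DMB ISH after the
// access, while monotonic and release-only orderings need nothing.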
Instruction* ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  AtomicOrdering Ord, bool IsStore,
                                                  bool IsLoad) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/not-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Release:
    return nullptr; // Nothing to do
  case AtomicOrdering::Acquire:
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitTrailingFence");
}

// Loads and stores less than 64-bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
  return (Size == 64) && !Subtarget->isMClass();
}

// Loads and stores less than 64-bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
// FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
// guarantee, see DDI0406C ARM architecture reference manual,
// sections A8.8.72-74 LDRD)
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  unsigned Size = LI->getType()->getPrimitiveSizeInBits();
  return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
                                                  : AtomicExpansionKind::None;
}

// For the real atomic operations, we have ldrex/strex up to 32 bits,
// and up to 64 bits on the non-M profiles.
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
  return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
             ? AtomicExpansionKind::LLSC
             : AtomicExpansionKind::None;
}

bool ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *AI) const {
  // At -O0, fast-regalloc cannot cope with the live vregs necessary to
  // implement cmpxchg without spilling. If the address being exchanged is also
  // on the stack and close enough to the spill slot, this can lead to a
  // situation where the monitor always gets cleared and the atomic operation
  // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
  bool hasAtomicCmpXchg =
      !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
  return getTargetMachine().getOptLevel() != 0 && hasAtomicCmpXchg;
}

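// InsertFencesForAtomic is a per-subtarget decision made once in the
// ARMTargetLowering constructor rather than per instruction, so simply return
// the cached value here.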
bool ARMTargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const {
  return InsertFencesForAtomic;
}

// This has so far only been implemented for MachO.
bool ARMTargetLowering::useLoadStackGuardNode() const {
  return Subtarget->isTargetMachO();
}

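// Decide whether a store fed by an extractelement can be folded into a single
// lane store. The extract itself then becomes free, so Cost is reported as 0.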
bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                                  unsigned &Cost) const {
  // If we do not have NEON, vector types are not natively supported.
  if (!Subtarget->hasNEON())
    return false;

  // Floating point values and vector values map to the same register file.
  // Therefore, although we could do a store extract of a vector type, this is
  // better to leave at float as we have more freedom in the addressing mode for
  // those operations.
  if (VectorTy->isFPOrFPVectorTy())
    return false;

  // If the index is unknown at compile time, this is very expensive to lower
  // and it is not possible to combine the store with the extract.
  if (!isa<ConstantInt>(Idx))
    return false;

  assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
  unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth();
  // We can do a store + vector extract on any vector that fits perfectly in a D
  // or Q register.
  if (BitWidth == 64 || BitWidth == 128) {
    Cost = 0;
    return true;
  }
  return false;
}

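// CTLZ and CTTZ lower to short branch-free sequences (CLZ, and RBIT followed
// by CLZ) on v6T2 and later, so speculating them is cheap.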
bool ARMTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget->hasV6T2Ops();
}

bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget->hasV6T2Ops();
}

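// Emit the load-linked half of an atomic expansion: ldrex/ldaex for types up
// to 32 bits, or ldrexd/ldaexd returning an {i32, i32} pair that is recombined
// into an i64 below.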
Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                         AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
  bool IsAcquire = isAcquireOrStronger(Ord);

  // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
  // intrinsic must return {i32, i32} and we have to recombine them into a
  // single i64 here.
  if (ValTy->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
    Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int);

    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");

    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
    if (!Subtarget->isLittle())
      std::swap (Lo, Hi);
    Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
    Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
    return Builder.CreateOr(
        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
  }

  Type *Tys[] = { Addr->getType() };
  Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
  Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateTruncOrBitCast(
      Builder.CreateCall(Ldrex, Addr),
      cast<PointerType>(Addr->getType())->getElementType());
}

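// If a cmpxchg bails out before reaching its store-conditional, release the
// exclusive monitor so it is not left dangling. CLREX only exists from v7
// onwards, so older targets simply skip this.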
void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
    IRBuilder<> &Builder) const {
  if (!Subtarget->hasV7Ops())
    return;
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Builder.CreateCall(llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
}

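// Emit the store-conditional half of an atomic expansion: strex/stlex, or
// strexd/stlexd with the i64 value split into an i32 pair. The call returns
// the i32 status value (0 on success, 1 if the exclusive monitor was lost).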
Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                               Value *Addr,
                                               AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  bool IsRelease = isReleaseOrStronger(Ord);

  // Since the intrinsics must have legal type, the i64 intrinsics take two
  // parameters: "i32, i32". We must marshal Val into the appropriate form
  // before the call.
  if (Val->getType()->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
    Function *Strex = Intrinsic::getDeclaration(M, Int);
    Type *Int32Ty = Type::getInt32Ty(M->getContext());

    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
    if (!Subtarget->isLittle())
      std::swap (Lo, Hi);
    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    return Builder.CreateCall(Strex, {Lo, Hi, Addr});
  }

  Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
  Type *Tys[] = { Addr->getType() };
  Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateCall(
      Strex, {Builder.CreateZExtOrBitCast(
                  Val, Strex->getFunctionType()->getParamType(0)),
              Addr});
}

/// \brief Lower an interleaved load into a vldN intrinsic.
///
/// E.g. Lower an interleaved load (Factor = 2):
///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
///
///      Into:
///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
bool ARMTargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  assert(!Shuffles.empty() && "Empty shufflevector input");
  assert(Shuffles.size() == Indices.size() &&
         "Unmatched number of shufflevectors and indices");

  VectorType *VecTy = Shuffles[0]->getType();
  Type *EltTy = VecTy->getVectorElementType();

  const DataLayout &DL = LI->getModule()->getDataLayout();
  unsigned VecSize = DL.getTypeSizeInBits(VecTy);
  bool EltIs64Bits = DL.getTypeSizeInBits(EltTy) == 64;

  // Skip if we do not have NEON and skip illegal vector types and vector types
  // with i64/f64 elements (vldN doesn't support i64/f64 elements).
  if (!Subtarget->hasNEON() || (VecSize != 64 && VecSize != 128) || EltIs64Bits)
    return false;

  // A pointer vector cannot be the return type of the ldN intrinsics. Need to
  // load integer vectors first and then convert to pointer vectors.
  if (EltTy->isPointerTy())
    VecTy =
        VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());

  static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
                                            Intrinsic::arm_neon_vld3,
                                            Intrinsic::arm_neon_vld4};

  IRBuilder<> Builder(LI);
  SmallVector<Value *, 2> Ops;

  Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
  Ops.push_back(Builder.CreateBitCast(LI->getPointerOperand(), Int8Ptr));
  Ops.push_back(Builder.getInt32(LI->getAlignment()));

  Type *Tys[] = { VecTy, Int8Ptr };
  Function *VldnFunc =
      Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);
  CallInst *VldN = Builder.CreateCall(VldnFunc, Ops, "vldN");

  // Replace uses of each shufflevector with the corresponding vector loaded
  // by ldN.
  for (unsigned i = 0; i < Shuffles.size(); i++) {
    ShuffleVectorInst *SV = Shuffles[i];
    unsigned Index = Indices[i];

    Value *SubVec = Builder.CreateExtractValue(VldN, Index);

    // Convert the integer vector to pointer vector if the element is pointer.
    if (EltTy->isPointerTy())
      SubVec = Builder.CreateIntToPtr(SubVec, SV->getType());

    SV->replaceAllUsesWith(SubVec);
  }

  return true;
}

/// \brief Get a mask consisting of sequential integers starting from \p Start.
///
/// I.e. <Start, Start + 1, ..., Start + NumElts - 1>
static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned Start,
                                   unsigned NumElts) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumElts; i++)
    Mask.push_back(Builder.getInt32(Start + i));

  return ConstantVector::get(Mask);
}

/// \brief Lower an interleaved store into a vstN intrinsic.
///
/// E.g. Lower an interleaved store (Factor = 3):
///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
///                                  <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
///        store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
///
///      Into:
///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
///
/// Note that the new shufflevectors will be removed and we'll only generate one
/// vst3 instruction in CodeGen.
///
/// Example for a more general valid mask (Factor 3). Lower:
///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
///        store <12 x i32> %i.vec, <12 x i32>* %ptr
///
///      Into:
///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
                                              ShuffleVectorInst *SVI,
                                              unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");

  VectorType *VecTy = SVI->getType();
  assert(VecTy->getVectorNumElements() % Factor == 0 &&
         "Invalid interleaved store");

  unsigned LaneLen = VecTy->getVectorNumElements() / Factor;
  Type *EltTy = VecTy->getVectorElementType();
  VectorType *SubVecTy = VectorType::get(EltTy, LaneLen);

  const DataLayout &DL = SI->getModule()->getDataLayout();
  unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy);
  bool EltIs64Bits = DL.getTypeSizeInBits(EltTy) == 64;

  // Skip if we do not have NEON and skip illegal vector types and vector types
  // with i64/f64 elements (vstN doesn't support i64/f64 elements).
  if (!Subtarget->hasNEON() || (SubVecSize != 64 && SubVecSize != 128) ||
      EltIs64Bits)
    return false;

  Value *Op0 = SVI->getOperand(0);
  Value *Op1 = SVI->getOperand(1);
  IRBuilder<> Builder(SI);

  // StN intrinsics don't support pointer vectors as arguments. Convert pointer
  // vectors to integer vectors.
  if (EltTy->isPointerTy()) {
    Type *IntTy = DL.getIntPtrType(EltTy);

    // Convert to the corresponding integer vector.
    Type *IntVecTy =
        VectorType::get(IntTy, Op0->getType()->getVectorNumElements());
    Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
    Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);

    SubVecTy = VectorType::get(IntTy, LaneLen);
  }

  static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
                                             Intrinsic::arm_neon_vst3,
                                             Intrinsic::arm_neon_vst4};
  SmallVector<Value *, 6> Ops;

  Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
  Ops.push_back(Builder.CreateBitCast(SI->getPointerOperand(), Int8Ptr));

  Type *Tys[] = { Int8Ptr, SubVecTy };
  Function *VstNFunc = Intrinsic::getDeclaration(
      SI->getModule(), StoreInts[Factor - 2], Tys);

  // Split the shufflevector operands into sub vectors for the new vstN call.
  auto Mask = SVI->getShuffleMask();
  for (unsigned i = 0; i < Factor; i++) {
    if (Mask[i] >= 0) {
      Ops.push_back(Builder.CreateShuffleVector(
          Op0, Op1, getSequentialMask(Builder, Mask[i], LaneLen)));
    } else {
      unsigned StartMask = 0;
      for (unsigned j = 1; j < LaneLen; j++) {
        if (Mask[j*Factor + i] >= 0) {
          StartMask = Mask[j*Factor + i] - j;
          break;
        }
      }
      // Note: If all elements in a chunk are undefs, StartMask=0!
      // Note: Filling undef gaps with random elements is ok, since
      // those elements were being written anyway (with undefs).
      // In the case of all undefs we're defaulting to using elems from 0
      // Note: StartMask cannot be negative, it's checked in isReInterleaveMask
      Ops.push_back(Builder.CreateShuffleVector(
          Op0, Op1, getSequentialMask(Builder, StartMask, LaneLen)));
    }
  }

  Ops.push_back(Builder.getInt32(SI->getAlignment()));
  Builder.CreateCall(VstNFunc, Ops);
  return true;
}

enum HABaseType {
  HA_UNKNOWN = 0,
  HA_FLOAT,
  HA_DOUBLE,
  HA_VECT64,
  HA_VECT128
};

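/// Return true if \p Ty is an AAPCS-VFP homogeneous aggregate: a struct or
/// array whose leaf members are all the same base type (float, double, or a
/// 64-/128-bit vector) and number at most four in total.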
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
                                   uint64_t &Members) {
  if (auto *ST = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0; i < ST->getNumElements(); ++i) {
      uint64_t SubMembers = 0;
      if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
        return false;
      Members += SubMembers;
    }
  } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
    uint64_t SubMembers = 0;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
      return false;
    Members += SubMembers * AT->getNumElements();
  } else if (Ty->isFloatTy()) {
    if (Base != HA_UNKNOWN && Base != HA_FLOAT)
      return false;
    Members = 1;
    Base = HA_FLOAT;
  } else if (Ty->isDoubleTy()) {
    if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
      return false;
    Members = 1;
    Base = HA_DOUBLE;
  } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
    Members = 1;
    switch (Base) {
    case HA_FLOAT:
    case HA_DOUBLE:
      return false;
    case HA_VECT64:
      return VT->getBitWidth() == 64;
    case HA_VECT128:
      return VT->getBitWidth() == 128;
    case HA_UNKNOWN:
      switch (VT->getBitWidth()) {
      case 64:  Base = HA_VECT64;  return true;
      case 128: Base = HA_VECT128; return true;
      default:  return false;
      }
    }
  }

  return (Members > 0 && Members <= 4);
}

/// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
/// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
/// passing according to AAPCS rules.
bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
  if (getEffectiveCallingConv(CallConv, isVarArg) !=
      CallingConv::ARM_AAPCS_VFP)
    return false;

  HABaseType Base = HA_UNKNOWN;
  uint64_t Members = 0;
  bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
  DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());

  bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
  return IsHA || IsIntArray;
}

unsigned ARMTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0;
}

unsigned ARMTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1;
}

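// The split-CSR machinery below is used for the CXX_FAST_TLS calling
// convention, where callee-saved registers are saved and restored through
// explicit COPYs instead of the usual prologue/epilogue spills.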
void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in ARMFunctionInfo.
  ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
  AFI->setIsSplitCSR(true);
}

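// Copy each split callee-saved register into a fresh virtual register in the
// entry block, and copy it back from that vreg immediately before every
// return, letting the register allocator place the actual saves and restores.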
void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (ARM::GPRRegClass.contains(*I))
      RC = &ARM::GPRRegClass;
    else if (ARM::DPRRegClass.contains(*I))
      RC = &ARM::DPRRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction()->hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)