1 //===-- ARMISelLowering.cpp - ARM DAG Lowering Implementation -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
13 //===----------------------------------------------------------------------===//
15 #include "ARMISelLowering.h"
16 #include "ARMCallingConv.h"
17 #include "ARMConstantPoolValue.h"
18 #include "ARMMachineFunctionInfo.h"
19 #include "ARMPerfectShuffle.h"
20 #include "ARMSubtarget.h"
21 #include "ARMTargetMachine.h"
22 #include "ARMTargetObjectFile.h"
23 #include "MCTargetDesc/ARMAddressingModes.h"
24 #include "llvm/ADT/Statistic.h"
25 #include "llvm/ADT/StringExtras.h"
26 #include "llvm/ADT/StringSwitch.h"
27 #include "llvm/CodeGen/CallingConvLower.h"
28 #include "llvm/CodeGen/IntrinsicLowering.h"
29 #include "llvm/CodeGen/MachineBasicBlock.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineJumpTableInfo.h"
34 #include "llvm/CodeGen/MachineModuleInfo.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/CodeGen/SelectionDAG.h"
37 #include "llvm/IR/CallingConv.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/DebugInfoMetadata.h"
41 #include "llvm/IR/GlobalValue.h"
42 #include "llvm/IR/IRBuilder.h"
43 #include "llvm/IR/Instruction.h"
44 #include "llvm/IR/Instructions.h"
45 #include "llvm/IR/IntrinsicInst.h"
46 #include "llvm/IR/Intrinsics.h"
47 #include "llvm/IR/Type.h"
48 #include "llvm/MC/MCSectionMachO.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Debug.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Support/raw_ostream.h"
54 #include "llvm/Target/TargetOptions.h"
58 #define DEBUG_TYPE "arm-isel"
60 STATISTIC(NumTailCalls, "Number of tail calls");
61 STATISTIC(NumMovwMovt, "Number of GAs materialized with movw + movt");
62 STATISTIC(NumLoopByVals, "Number of loops generated for byval arguments");
63 STATISTIC(NumConstpoolPromoted,
64 "Number of constants with their storage promoted into constant pools");
static cl::opt<bool>
ARMInterworking("arm-interworking", cl::Hidden,
  cl::desc("Enable / disable ARM interworking (for debugging only)"),
  cl::init(true));
71 static cl::opt<bool> EnableConstpoolPromotion(
72 "arm-promote-constant", cl::Hidden,
73 cl::desc("Enable / disable promotion of unnamed_addr constants into "
76 static cl::opt<unsigned> ConstpoolPromotionMaxSize(
77 "arm-promote-constant-max-size", cl::Hidden,
78 cl::desc("Maximum size of constant to promote into a constant pool"),
80 static cl::opt<unsigned> ConstpoolPromotionMaxTotal(
81 "arm-promote-constant-max-total", cl::Hidden,
82 cl::desc("Maximum size of ALL constants to promote into a constant pool"),
86 class ARMCCState : public CCState {
88 ARMCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
89 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
91 : CCState(CC, isVarArg, MF, locs, C) {
92 assert(((PC == Call) || (PC == Prologue)) &&
93 "ARMCCState users must specify whether their context is call"
94 "or prologue generation.");
100 void ARMTargetLowering::InitLibcallCallingConvs() {
// The builtins on ARM always use AAPCS, irrespective of whether C is AAPCS or
// APCS.
103 for (const auto LC : {
176 RTLIB::FPEXT_F64_F128,
177 RTLIB::FPEXT_F32_F128,
178 RTLIB::FPEXT_F32_F64,
179 RTLIB::FPEXT_F16_F32,
180 RTLIB::FPROUND_F32_F16,
181 RTLIB::FPROUND_F64_F16,
182 RTLIB::FPROUND_F80_F16,
183 RTLIB::FPROUND_F128_F16,
184 RTLIB::FPROUND_F64_F32,
185 RTLIB::FPROUND_F80_F32,
186 RTLIB::FPROUND_F128_F32,
187 RTLIB::FPROUND_F80_F64,
188 RTLIB::FPROUND_F128_F64,
189 RTLIB::FPTOSINT_F32_I32,
190 RTLIB::FPTOSINT_F32_I64,
191 RTLIB::FPTOSINT_F32_I128,
192 RTLIB::FPTOSINT_F64_I32,
193 RTLIB::FPTOSINT_F64_I64,
194 RTLIB::FPTOSINT_F64_I128,
195 RTLIB::FPTOSINT_F80_I32,
196 RTLIB::FPTOSINT_F80_I64,
197 RTLIB::FPTOSINT_F80_I128,
198 RTLIB::FPTOSINT_F128_I32,
199 RTLIB::FPTOSINT_F128_I64,
200 RTLIB::FPTOSINT_F128_I128,
201 RTLIB::FPTOUINT_F32_I32,
202 RTLIB::FPTOUINT_F32_I64,
203 RTLIB::FPTOUINT_F32_I128,
204 RTLIB::FPTOUINT_F64_I32,
205 RTLIB::FPTOUINT_F64_I64,
206 RTLIB::FPTOUINT_F64_I128,
207 RTLIB::FPTOUINT_F80_I32,
208 RTLIB::FPTOUINT_F80_I64,
209 RTLIB::FPTOUINT_F80_I128,
210 RTLIB::FPTOUINT_F128_I32,
211 RTLIB::FPTOUINT_F128_I64,
212 RTLIB::FPTOUINT_F128_I128,
213 RTLIB::SINTTOFP_I32_F32,
214 RTLIB::SINTTOFP_I32_F64,
215 RTLIB::SINTTOFP_I32_F80,
216 RTLIB::SINTTOFP_I32_F128,
217 RTLIB::SINTTOFP_I64_F32,
218 RTLIB::SINTTOFP_I64_F64,
219 RTLIB::SINTTOFP_I64_F80,
220 RTLIB::SINTTOFP_I64_F128,
221 RTLIB::SINTTOFP_I128_F32,
222 RTLIB::SINTTOFP_I128_F64,
223 RTLIB::SINTTOFP_I128_F80,
224 RTLIB::SINTTOFP_I128_F128,
225 RTLIB::UINTTOFP_I32_F32,
226 RTLIB::UINTTOFP_I32_F64,
227 RTLIB::UINTTOFP_I32_F80,
228 RTLIB::UINTTOFP_I32_F128,
229 RTLIB::UINTTOFP_I64_F32,
230 RTLIB::UINTTOFP_I64_F64,
231 RTLIB::UINTTOFP_I64_F80,
232 RTLIB::UINTTOFP_I64_F128,
233 RTLIB::UINTTOFP_I128_F32,
234 RTLIB::UINTTOFP_I128_F64,
235 RTLIB::UINTTOFP_I128_F80,
236 RTLIB::UINTTOFP_I128_F128,
262 setLibcallCallingConv(LC, CallingConv::ARM_AAPCS);
265 // The APCS parameter registers.
static const MCPhysReg GPRArgRegs[] = {
  ARM::R0, ARM::R1, ARM::R2, ARM::R3
};
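// addTypeForNEON installs the legalization actions shared by all NEON vector
// types: loads, stores and bitwise ops are promoted to the given "natural"
// types, operations NEON lacks (divide, remainder, select, SIGN_EXTEND_INREG)
// are expanded, and shifts, lane insert/extract, BUILD_VECTOR and
// VECTOR_SHUFFLE get custom lowering.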
270 void ARMTargetLowering::addTypeForNEON(MVT VT, MVT PromotedLdStVT,
271 MVT PromotedBitwiseVT) {
272 if (VT != PromotedLdStVT) {
273 setOperationAction(ISD::LOAD, VT, Promote);
274 AddPromotedToType (ISD::LOAD, VT, PromotedLdStVT);
276 setOperationAction(ISD::STORE, VT, Promote);
277 AddPromotedToType (ISD::STORE, VT, PromotedLdStVT);
280 MVT ElemTy = VT.getVectorElementType();
281 if (ElemTy != MVT::f64)
282 setOperationAction(ISD::SETCC, VT, Custom);
283 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
284 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
285 if (ElemTy == MVT::i32) {
286 setOperationAction(ISD::SINT_TO_FP, VT, Custom);
287 setOperationAction(ISD::UINT_TO_FP, VT, Custom);
288 setOperationAction(ISD::FP_TO_SINT, VT, Custom);
289 setOperationAction(ISD::FP_TO_UINT, VT, Custom);
291 setOperationAction(ISD::SINT_TO_FP, VT, Expand);
292 setOperationAction(ISD::UINT_TO_FP, VT, Expand);
293 setOperationAction(ISD::FP_TO_SINT, VT, Expand);
294 setOperationAction(ISD::FP_TO_UINT, VT, Expand);
296 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
297 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
298 setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);
299 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
300 setOperationAction(ISD::SELECT, VT, Expand);
301 setOperationAction(ISD::SELECT_CC, VT, Expand);
302 setOperationAction(ISD::VSELECT, VT, Expand);
303 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
304 if (VT.isInteger()) {
305 setOperationAction(ISD::SHL, VT, Custom);
306 setOperationAction(ISD::SRA, VT, Custom);
307 setOperationAction(ISD::SRL, VT, Custom);
310 // Promote all bit-wise operations.
311 if (VT.isInteger() && VT != PromotedBitwiseVT) {
312 setOperationAction(ISD::AND, VT, Promote);
313 AddPromotedToType (ISD::AND, VT, PromotedBitwiseVT);
314 setOperationAction(ISD::OR, VT, Promote);
315 AddPromotedToType (ISD::OR, VT, PromotedBitwiseVT);
316 setOperationAction(ISD::XOR, VT, Promote);
317 AddPromotedToType (ISD::XOR, VT, PromotedBitwiseVT);
320 // Neon does not support vector divide/remainder operations.
321 setOperationAction(ISD::SDIV, VT, Expand);
322 setOperationAction(ISD::UDIV, VT, Expand);
323 setOperationAction(ISD::FDIV, VT, Expand);
324 setOperationAction(ISD::SREM, VT, Expand);
325 setOperationAction(ISD::UREM, VT, Expand);
326 setOperationAction(ISD::FREM, VT, Expand);
328 if (!VT.isFloatingPoint() &&
329 VT != MVT::v2i64 && VT != MVT::v1i64)
330 for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
331 setOperationAction(Opcode, VT, Legal);
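// addDRTypeForNEON makes a vector type legal in the 64-bit D registers;
// addQRTypeForNEON does the same in the 128-bit Q registers (modelled as
// pairs of D registers). Both then apply the common NEON actions above.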
334 void ARMTargetLowering::addDRTypeForNEON(MVT VT) {
335 addRegisterClass(VT, &ARM::DPRRegClass);
336 addTypeForNEON(VT, MVT::f64, MVT::v2i32);
339 void ARMTargetLowering::addQRTypeForNEON(MVT VT) {
340 addRegisterClass(VT, &ARM::DPairRegClass);
341 addTypeForNEON(VT, MVT::v2f64, MVT::v4i32);
344 ARMTargetLowering::ARMTargetLowering(const TargetMachine &TM,
345 const ARMSubtarget &STI)
346 : TargetLowering(TM), Subtarget(&STI) {
347 RegInfo = Subtarget->getRegisterInfo();
348 Itins = Subtarget->getInstrItineraryData();
350 setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
352 InitLibcallCallingConvs();
354 if (Subtarget->isTargetMachO()) {
355 // Uses VFP for Thumb libfuncs if available.
356 if (Subtarget->isThumb() && Subtarget->hasVFP2() &&
357 Subtarget->hasARMOps() && !Subtarget->useSoftFloat()) {
358 static const struct {
359 const RTLIB::Libcall Op;
360 const char * const Name;
361 const ISD::CondCode Cond;
363 // Single-precision floating-point arithmetic.
364 { RTLIB::ADD_F32, "__addsf3vfp", ISD::SETCC_INVALID },
365 { RTLIB::SUB_F32, "__subsf3vfp", ISD::SETCC_INVALID },
366 { RTLIB::MUL_F32, "__mulsf3vfp", ISD::SETCC_INVALID },
367 { RTLIB::DIV_F32, "__divsf3vfp", ISD::SETCC_INVALID },
369 // Double-precision floating-point arithmetic.
370 { RTLIB::ADD_F64, "__adddf3vfp", ISD::SETCC_INVALID },
371 { RTLIB::SUB_F64, "__subdf3vfp", ISD::SETCC_INVALID },
372 { RTLIB::MUL_F64, "__muldf3vfp", ISD::SETCC_INVALID },
373 { RTLIB::DIV_F64, "__divdf3vfp", ISD::SETCC_INVALID },
375 // Single-precision comparisons.
376 { RTLIB::OEQ_F32, "__eqsf2vfp", ISD::SETNE },
377 { RTLIB::UNE_F32, "__nesf2vfp", ISD::SETNE },
378 { RTLIB::OLT_F32, "__ltsf2vfp", ISD::SETNE },
379 { RTLIB::OLE_F32, "__lesf2vfp", ISD::SETNE },
380 { RTLIB::OGE_F32, "__gesf2vfp", ISD::SETNE },
381 { RTLIB::OGT_F32, "__gtsf2vfp", ISD::SETNE },
382 { RTLIB::UO_F32, "__unordsf2vfp", ISD::SETNE },
383 { RTLIB::O_F32, "__unordsf2vfp", ISD::SETEQ },
385 // Double-precision comparisons.
386 { RTLIB::OEQ_F64, "__eqdf2vfp", ISD::SETNE },
387 { RTLIB::UNE_F64, "__nedf2vfp", ISD::SETNE },
388 { RTLIB::OLT_F64, "__ltdf2vfp", ISD::SETNE },
389 { RTLIB::OLE_F64, "__ledf2vfp", ISD::SETNE },
390 { RTLIB::OGE_F64, "__gedf2vfp", ISD::SETNE },
391 { RTLIB::OGT_F64, "__gtdf2vfp", ISD::SETNE },
392 { RTLIB::UO_F64, "__unorddf2vfp", ISD::SETNE },
393 { RTLIB::O_F64, "__unorddf2vfp", ISD::SETEQ },
395 // Floating-point to integer conversions.
396 // i64 conversions are done via library routines even when generating VFP
397 // instructions, so use the same ones.
398 { RTLIB::FPTOSINT_F64_I32, "__fixdfsivfp", ISD::SETCC_INVALID },
399 { RTLIB::FPTOUINT_F64_I32, "__fixunsdfsivfp", ISD::SETCC_INVALID },
400 { RTLIB::FPTOSINT_F32_I32, "__fixsfsivfp", ISD::SETCC_INVALID },
401 { RTLIB::FPTOUINT_F32_I32, "__fixunssfsivfp", ISD::SETCC_INVALID },
403 // Conversions between floating types.
404 { RTLIB::FPROUND_F64_F32, "__truncdfsf2vfp", ISD::SETCC_INVALID },
405 { RTLIB::FPEXT_F32_F64, "__extendsfdf2vfp", ISD::SETCC_INVALID },
407 // Integer to floating-point conversions.
408 // i64 conversions are done via library routines even when generating VFP
409 // instructions, so use the same ones.
410 // FIXME: There appears to be some naming inconsistency in ARM libgcc:
411 // e.g., __floatunsidf vs. __floatunssidfvfp.
412 { RTLIB::SINTTOFP_I32_F64, "__floatsidfvfp", ISD::SETCC_INVALID },
413 { RTLIB::UINTTOFP_I32_F64, "__floatunssidfvfp", ISD::SETCC_INVALID },
414 { RTLIB::SINTTOFP_I32_F32, "__floatsisfvfp", ISD::SETCC_INVALID },
415 { RTLIB::UINTTOFP_I32_F32, "__floatunssisfvfp", ISD::SETCC_INVALID },
418 for (const auto &LC : LibraryCalls) {
419 setLibcallName(LC.Op, LC.Name);
420 if (LC.Cond != ISD::SETCC_INVALID)
421 setCmpLibcallCC(LC.Op, LC.Cond);
// Set the correct calling convention for ARMv7k WatchOS. It's just
// AAPCS_VFP for everything, libcalls included.
427 if (Subtarget->isTargetWatchABI()) {
428 for (int i = 0; i < RTLIB::UNKNOWN_LIBCALL; ++i)
429 setLibcallCallingConv((RTLIB::Libcall)i, CallingConv::ARM_AAPCS_VFP);
433 // These libcalls are not available in 32-bit.
434 setLibcallName(RTLIB::SHL_I128, nullptr);
435 setLibcallName(RTLIB::SRL_I128, nullptr);
436 setLibcallName(RTLIB::SRA_I128, nullptr);
439 if (Subtarget->isAAPCS_ABI() &&
440 (Subtarget->isTargetAEABI() || Subtarget->isTargetGNUAEABI() ||
441 Subtarget->isTargetMuslAEABI() || Subtarget->isTargetAndroid())) {
442 static const struct {
443 const RTLIB::Libcall Op;
444 const char * const Name;
445 const CallingConv::ID CC;
446 const ISD::CondCode Cond;
448 // Double-precision floating-point arithmetic helper functions
449 // RTABI chapter 4.1.2, Table 2
450 { RTLIB::ADD_F64, "__aeabi_dadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
451 { RTLIB::DIV_F64, "__aeabi_ddiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
452 { RTLIB::MUL_F64, "__aeabi_dmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
453 { RTLIB::SUB_F64, "__aeabi_dsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
455 // Double-precision floating-point comparison helper functions
456 // RTABI chapter 4.1.2, Table 3
457 { RTLIB::OEQ_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
458 { RTLIB::UNE_F64, "__aeabi_dcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
459 { RTLIB::OLT_F64, "__aeabi_dcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
460 { RTLIB::OLE_F64, "__aeabi_dcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
461 { RTLIB::OGE_F64, "__aeabi_dcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
462 { RTLIB::OGT_F64, "__aeabi_dcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
463 { RTLIB::UO_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
464 { RTLIB::O_F64, "__aeabi_dcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
466 // Single-precision floating-point arithmetic helper functions
467 // RTABI chapter 4.1.2, Table 4
468 { RTLIB::ADD_F32, "__aeabi_fadd", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
469 { RTLIB::DIV_F32, "__aeabi_fdiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
470 { RTLIB::MUL_F32, "__aeabi_fmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
471 { RTLIB::SUB_F32, "__aeabi_fsub", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
473 // Single-precision floating-point comparison helper functions
474 // RTABI chapter 4.1.2, Table 5
475 { RTLIB::OEQ_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETNE },
476 { RTLIB::UNE_F32, "__aeabi_fcmpeq", CallingConv::ARM_AAPCS, ISD::SETEQ },
477 { RTLIB::OLT_F32, "__aeabi_fcmplt", CallingConv::ARM_AAPCS, ISD::SETNE },
478 { RTLIB::OLE_F32, "__aeabi_fcmple", CallingConv::ARM_AAPCS, ISD::SETNE },
479 { RTLIB::OGE_F32, "__aeabi_fcmpge", CallingConv::ARM_AAPCS, ISD::SETNE },
480 { RTLIB::OGT_F32, "__aeabi_fcmpgt", CallingConv::ARM_AAPCS, ISD::SETNE },
481 { RTLIB::UO_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETNE },
482 { RTLIB::O_F32, "__aeabi_fcmpun", CallingConv::ARM_AAPCS, ISD::SETEQ },
484 // Floating-point to integer conversions.
485 // RTABI chapter 4.1.2, Table 6
486 { RTLIB::FPTOSINT_F64_I32, "__aeabi_d2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
487 { RTLIB::FPTOUINT_F64_I32, "__aeabi_d2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
488 { RTLIB::FPTOSINT_F64_I64, "__aeabi_d2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
489 { RTLIB::FPTOUINT_F64_I64, "__aeabi_d2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
490 { RTLIB::FPTOSINT_F32_I32, "__aeabi_f2iz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
491 { RTLIB::FPTOUINT_F32_I32, "__aeabi_f2uiz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
492 { RTLIB::FPTOSINT_F32_I64, "__aeabi_f2lz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
493 { RTLIB::FPTOUINT_F32_I64, "__aeabi_f2ulz", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
495 // Conversions between floating types.
496 // RTABI chapter 4.1.2, Table 7
497 { RTLIB::FPROUND_F64_F32, "__aeabi_d2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
498 { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
499 { RTLIB::FPEXT_F32_F64, "__aeabi_f2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
501 // Integer to floating-point conversions.
502 // RTABI chapter 4.1.2, Table 8
503 { RTLIB::SINTTOFP_I32_F64, "__aeabi_i2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
504 { RTLIB::UINTTOFP_I32_F64, "__aeabi_ui2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
505 { RTLIB::SINTTOFP_I64_F64, "__aeabi_l2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
506 { RTLIB::UINTTOFP_I64_F64, "__aeabi_ul2d", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
507 { RTLIB::SINTTOFP_I32_F32, "__aeabi_i2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
508 { RTLIB::UINTTOFP_I32_F32, "__aeabi_ui2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
509 { RTLIB::SINTTOFP_I64_F32, "__aeabi_l2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
510 { RTLIB::UINTTOFP_I64_F32, "__aeabi_ul2f", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
512 // Long long helper functions
513 // RTABI chapter 4.2, Table 9
514 { RTLIB::MUL_I64, "__aeabi_lmul", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
515 { RTLIB::SHL_I64, "__aeabi_llsl", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
516 { RTLIB::SRL_I64, "__aeabi_llsr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
517 { RTLIB::SRA_I64, "__aeabi_lasr", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
519 // Integer division functions
520 // RTABI chapter 4.3.1
521 { RTLIB::SDIV_I8, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
522 { RTLIB::SDIV_I16, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
523 { RTLIB::SDIV_I32, "__aeabi_idiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
524 { RTLIB::SDIV_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
525 { RTLIB::UDIV_I8, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
526 { RTLIB::UDIV_I16, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
527 { RTLIB::UDIV_I32, "__aeabi_uidiv", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
528 { RTLIB::UDIV_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
531 for (const auto &LC : LibraryCalls) {
532 setLibcallName(LC.Op, LC.Name);
533 setLibcallCallingConv(LC.Op, LC.CC);
534 if (LC.Cond != ISD::SETCC_INVALID)
535 setCmpLibcallCC(LC.Op, LC.Cond);
538 // EABI dependent RTLIB
539 if (TM.Options.EABIVersion == EABI::EABI4 ||
540 TM.Options.EABIVersion == EABI::EABI5) {
541 static const struct {
542 const RTLIB::Libcall Op;
543 const char *const Name;
544 const CallingConv::ID CC;
545 const ISD::CondCode Cond;
546 } MemOpsLibraryCalls[] = {
548 // RTABI chapter 4.3.4
549 { RTLIB::MEMCPY, "__aeabi_memcpy", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
550 { RTLIB::MEMMOVE, "__aeabi_memmove", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
551 { RTLIB::MEMSET, "__aeabi_memset", CallingConv::ARM_AAPCS, ISD::SETCC_INVALID },
554 for (const auto &LC : MemOpsLibraryCalls) {
555 setLibcallName(LC.Op, LC.Name);
556 setLibcallCallingConv(LC.Op, LC.CC);
557 if (LC.Cond != ISD::SETCC_INVALID)
558 setCmpLibcallCC(LC.Op, LC.Cond);
563 if (Subtarget->isTargetWindows()) {
564 static const struct {
565 const RTLIB::Libcall Op;
566 const char * const Name;
567 const CallingConv::ID CC;
569 { RTLIB::FPTOSINT_F32_I64, "__stoi64", CallingConv::ARM_AAPCS_VFP },
570 { RTLIB::FPTOSINT_F64_I64, "__dtoi64", CallingConv::ARM_AAPCS_VFP },
571 { RTLIB::FPTOUINT_F32_I64, "__stou64", CallingConv::ARM_AAPCS_VFP },
572 { RTLIB::FPTOUINT_F64_I64, "__dtou64", CallingConv::ARM_AAPCS_VFP },
573 { RTLIB::SINTTOFP_I64_F32, "__i64tos", CallingConv::ARM_AAPCS_VFP },
574 { RTLIB::SINTTOFP_I64_F64, "__i64tod", CallingConv::ARM_AAPCS_VFP },
575 { RTLIB::UINTTOFP_I64_F32, "__u64tos", CallingConv::ARM_AAPCS_VFP },
576 { RTLIB::UINTTOFP_I64_F64, "__u64tod", CallingConv::ARM_AAPCS_VFP },
579 for (const auto &LC : LibraryCalls) {
580 setLibcallName(LC.Op, LC.Name);
581 setLibcallCallingConv(LC.Op, LC.CC);
585 // Use divmod compiler-rt calls for iOS 5.0 and later.
586 if (Subtarget->isTargetWatchOS() ||
587 (Subtarget->isTargetIOS() &&
588 !Subtarget->getTargetTriple().isOSVersionLT(5, 0))) {
589 setLibcallName(RTLIB::SDIVREM_I32, "__divmodsi4");
590 setLibcallName(RTLIB::UDIVREM_I32, "__udivmodsi4");
593 // The half <-> float conversion functions are always soft-float on
// non-WatchOS platforms, but are needed for some targets which use a
595 // hard-float calling convention by default.
596 if (!Subtarget->isTargetWatchABI()) {
597 if (Subtarget->isAAPCS_ABI()) {
598 setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_AAPCS);
599 setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_AAPCS);
setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_AAPCS);
} else {
setLibcallCallingConv(RTLIB::FPROUND_F32_F16, CallingConv::ARM_APCS);
setLibcallCallingConv(RTLIB::FPROUND_F64_F16, CallingConv::ARM_APCS);
setLibcallCallingConv(RTLIB::FPEXT_F16_F32, CallingConv::ARM_APCS);
}
608 // In EABI, these functions have an __aeabi_ prefix, but in GNUEABI they have
609 // a __gnu_ prefix (which is the default).
610 if (Subtarget->isTargetAEABI()) {
611 static const struct {
612 const RTLIB::Libcall Op;
613 const char * const Name;
614 const CallingConv::ID CC;
616 { RTLIB::FPROUND_F32_F16, "__aeabi_f2h", CallingConv::ARM_AAPCS },
617 { RTLIB::FPROUND_F64_F16, "__aeabi_d2h", CallingConv::ARM_AAPCS },
618 { RTLIB::FPEXT_F16_F32, "__aeabi_h2f", CallingConv::ARM_AAPCS },
621 for (const auto &LC : LibraryCalls) {
622 setLibcallName(LC.Op, LC.Name);
623 setLibcallCallingConv(LC.Op, LC.CC);
if (Subtarget->isThumb1Only())
  addRegisterClass(MVT::i32, &ARM::tGPRRegClass);
else
  addRegisterClass(MVT::i32, &ARM::GPRRegClass);
632 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
633 !Subtarget->isThumb1Only()) {
634 addRegisterClass(MVT::f32, &ARM::SPRRegClass);
635 addRegisterClass(MVT::f64, &ARM::DPRRegClass);
638 for (MVT VT : MVT::vector_valuetypes()) {
639 for (MVT InnerVT : MVT::vector_valuetypes()) {
640 setTruncStoreAction(VT, InnerVT, Expand);
641 setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
642 setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
643 setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
646 setOperationAction(ISD::MULHS, VT, Expand);
647 setOperationAction(ISD::SMUL_LOHI, VT, Expand);
648 setOperationAction(ISD::MULHU, VT, Expand);
649 setOperationAction(ISD::UMUL_LOHI, VT, Expand);
651 setOperationAction(ISD::BSWAP, VT, Expand);
654 setOperationAction(ISD::ConstantFP, MVT::f32, Custom);
655 setOperationAction(ISD::ConstantFP, MVT::f64, Custom);
657 setOperationAction(ISD::READ_REGISTER, MVT::i64, Custom);
658 setOperationAction(ISD::WRITE_REGISTER, MVT::i64, Custom);
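// NEON vector types: 64-bit vectors are legal in the D registers, 128-bit
// vectors in the Q registers (pairs of D registers).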
660 if (Subtarget->hasNEON()) {
661 addDRTypeForNEON(MVT::v2f32);
662 addDRTypeForNEON(MVT::v8i8);
663 addDRTypeForNEON(MVT::v4i16);
664 addDRTypeForNEON(MVT::v2i32);
665 addDRTypeForNEON(MVT::v1i64);
667 addQRTypeForNEON(MVT::v4f32);
668 addQRTypeForNEON(MVT::v2f64);
669 addQRTypeForNEON(MVT::v16i8);
670 addQRTypeForNEON(MVT::v8i16);
671 addQRTypeForNEON(MVT::v4i32);
672 addQRTypeForNEON(MVT::v2i64);
674 // v2f64 is legal so that QR subregs can be extracted as f64 elements, but
// neither NEON nor VFP supports any arithmetic operations on it.
// The same applies to v4f32, although vadd, vsub and vmul are natively
// supported for v4f32.
678 setOperationAction(ISD::FADD, MVT::v2f64, Expand);
679 setOperationAction(ISD::FSUB, MVT::v2f64, Expand);
680 setOperationAction(ISD::FMUL, MVT::v2f64, Expand);
681 // FIXME: Code duplication: FDIV and FREM are expanded always, see
682 // ARMTargetLowering::addTypeForNEON method for details.
683 setOperationAction(ISD::FDIV, MVT::v2f64, Expand);
684 setOperationAction(ISD::FREM, MVT::v2f64, Expand);
// FIXME: Create unittest.
// In other words, find a test case where "copysign" appears in the DAG with
// vector operands.
688 setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Expand);
689 // FIXME: Code duplication: SETCC has custom operation action, see
690 // ARMTargetLowering::addTypeForNEON method for details.
691 setOperationAction(ISD::SETCC, MVT::v2f64, Expand);
692 // FIXME: Create unittest for FNEG and for FABS.
693 setOperationAction(ISD::FNEG, MVT::v2f64, Expand);
694 setOperationAction(ISD::FABS, MVT::v2f64, Expand);
695 setOperationAction(ISD::FSQRT, MVT::v2f64, Expand);
696 setOperationAction(ISD::FSIN, MVT::v2f64, Expand);
697 setOperationAction(ISD::FCOS, MVT::v2f64, Expand);
698 setOperationAction(ISD::FPOWI, MVT::v2f64, Expand);
699 setOperationAction(ISD::FPOW, MVT::v2f64, Expand);
700 setOperationAction(ISD::FLOG, MVT::v2f64, Expand);
701 setOperationAction(ISD::FLOG2, MVT::v2f64, Expand);
702 setOperationAction(ISD::FLOG10, MVT::v2f64, Expand);
703 setOperationAction(ISD::FEXP, MVT::v2f64, Expand);
704 setOperationAction(ISD::FEXP2, MVT::v2f64, Expand);
705 // FIXME: Create unittest for FCEIL, FTRUNC, FRINT, FNEARBYINT, FFLOOR.
706 setOperationAction(ISD::FCEIL, MVT::v2f64, Expand);
707 setOperationAction(ISD::FTRUNC, MVT::v2f64, Expand);
708 setOperationAction(ISD::FRINT, MVT::v2f64, Expand);
709 setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Expand);
710 setOperationAction(ISD::FFLOOR, MVT::v2f64, Expand);
711 setOperationAction(ISD::FMA, MVT::v2f64, Expand);
713 setOperationAction(ISD::FSQRT, MVT::v4f32, Expand);
714 setOperationAction(ISD::FSIN, MVT::v4f32, Expand);
715 setOperationAction(ISD::FCOS, MVT::v4f32, Expand);
716 setOperationAction(ISD::FPOWI, MVT::v4f32, Expand);
717 setOperationAction(ISD::FPOW, MVT::v4f32, Expand);
718 setOperationAction(ISD::FLOG, MVT::v4f32, Expand);
719 setOperationAction(ISD::FLOG2, MVT::v4f32, Expand);
720 setOperationAction(ISD::FLOG10, MVT::v4f32, Expand);
721 setOperationAction(ISD::FEXP, MVT::v4f32, Expand);
722 setOperationAction(ISD::FEXP2, MVT::v4f32, Expand);
723 setOperationAction(ISD::FCEIL, MVT::v4f32, Expand);
724 setOperationAction(ISD::FTRUNC, MVT::v4f32, Expand);
725 setOperationAction(ISD::FRINT, MVT::v4f32, Expand);
726 setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Expand);
727 setOperationAction(ISD::FFLOOR, MVT::v4f32, Expand);
729 // Mark v2f32 intrinsics.
730 setOperationAction(ISD::FSQRT, MVT::v2f32, Expand);
731 setOperationAction(ISD::FSIN, MVT::v2f32, Expand);
732 setOperationAction(ISD::FCOS, MVT::v2f32, Expand);
733 setOperationAction(ISD::FPOWI, MVT::v2f32, Expand);
734 setOperationAction(ISD::FPOW, MVT::v2f32, Expand);
735 setOperationAction(ISD::FLOG, MVT::v2f32, Expand);
736 setOperationAction(ISD::FLOG2, MVT::v2f32, Expand);
737 setOperationAction(ISD::FLOG10, MVT::v2f32, Expand);
738 setOperationAction(ISD::FEXP, MVT::v2f32, Expand);
739 setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
740 setOperationAction(ISD::FCEIL, MVT::v2f32, Expand);
741 setOperationAction(ISD::FTRUNC, MVT::v2f32, Expand);
742 setOperationAction(ISD::FRINT, MVT::v2f32, Expand);
743 setOperationAction(ISD::FNEARBYINT, MVT::v2f32, Expand);
744 setOperationAction(ISD::FFLOOR, MVT::v2f32, Expand);
746 // Neon does not support some operations on v1i64 and v2i64 types.
747 setOperationAction(ISD::MUL, MVT::v1i64, Expand);
748 // Custom handling for some quad-vector types to detect VMULL.
749 setOperationAction(ISD::MUL, MVT::v8i16, Custom);
750 setOperationAction(ISD::MUL, MVT::v4i32, Custom);
751 setOperationAction(ISD::MUL, MVT::v2i64, Custom);
752 // Custom handling for some vector types to avoid expensive expansions
753 setOperationAction(ISD::SDIV, MVT::v4i16, Custom);
754 setOperationAction(ISD::SDIV, MVT::v8i8, Custom);
755 setOperationAction(ISD::UDIV, MVT::v4i16, Custom);
756 setOperationAction(ISD::UDIV, MVT::v8i8, Custom);
// NEON does not have single-instruction SINT_TO_FP and UINT_TO_FP with
// a destination type that is wider than the source, nor does it have a
// FP_TO_[SU]INT instruction with a destination narrower than the source.
761 setOperationAction(ISD::SINT_TO_FP, MVT::v4i16, Custom);
762 setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
763 setOperationAction(ISD::FP_TO_UINT, MVT::v4i16, Custom);
764 setOperationAction(ISD::FP_TO_SINT, MVT::v4i16, Custom);
766 setOperationAction(ISD::FP_ROUND, MVT::v2f32, Expand);
767 setOperationAction(ISD::FP_EXTEND, MVT::v2f64, Expand);
769 // NEON does not have single instruction CTPOP for vectors with element
770 // types wider than 8-bits. However, custom lowering can leverage the
771 // v8i8/v16i8 vcnt instruction.
772 setOperationAction(ISD::CTPOP, MVT::v2i32, Custom);
773 setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
774 setOperationAction(ISD::CTPOP, MVT::v4i16, Custom);
775 setOperationAction(ISD::CTPOP, MVT::v8i16, Custom);
776 setOperationAction(ISD::CTPOP, MVT::v1i64, Expand);
777 setOperationAction(ISD::CTPOP, MVT::v2i64, Expand);
779 setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
780 setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);
782 // NEON does not have single instruction CTTZ for vectors.
783 setOperationAction(ISD::CTTZ, MVT::v8i8, Custom);
784 setOperationAction(ISD::CTTZ, MVT::v4i16, Custom);
785 setOperationAction(ISD::CTTZ, MVT::v2i32, Custom);
786 setOperationAction(ISD::CTTZ, MVT::v1i64, Custom);
788 setOperationAction(ISD::CTTZ, MVT::v16i8, Custom);
789 setOperationAction(ISD::CTTZ, MVT::v8i16, Custom);
790 setOperationAction(ISD::CTTZ, MVT::v4i32, Custom);
791 setOperationAction(ISD::CTTZ, MVT::v2i64, Custom);
793 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i8, Custom);
794 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i16, Custom);
795 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i32, Custom);
796 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v1i64, Custom);
798 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v16i8, Custom);
799 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v8i16, Custom);
800 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v4i32, Custom);
801 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::v2i64, Custom);
803 // NEON only has FMA instructions as of VFP4.
804 if (!Subtarget->hasVFP4()) {
805 setOperationAction(ISD::FMA, MVT::v2f32, Expand);
806 setOperationAction(ISD::FMA, MVT::v4f32, Expand);
809 setTargetDAGCombine(ISD::INTRINSIC_VOID);
810 setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
811 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
812 setTargetDAGCombine(ISD::SHL);
813 setTargetDAGCombine(ISD::SRL);
814 setTargetDAGCombine(ISD::SRA);
815 setTargetDAGCombine(ISD::SIGN_EXTEND);
816 setTargetDAGCombine(ISD::ZERO_EXTEND);
817 setTargetDAGCombine(ISD::ANY_EXTEND);
818 setTargetDAGCombine(ISD::BUILD_VECTOR);
819 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
820 setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
821 setTargetDAGCombine(ISD::STORE);
822 setTargetDAGCombine(ISD::FP_TO_SINT);
823 setTargetDAGCombine(ISD::FP_TO_UINT);
824 setTargetDAGCombine(ISD::FDIV);
825 setTargetDAGCombine(ISD::LOAD);
827 // It is legal to extload from v4i8 to v4i16 or v4i32.
for (MVT Ty : {MVT::v8i8, MVT::v4i8, MVT::v2i8, MVT::v4i16, MVT::v2i16,
               MVT::v2i32}) {
830 for (MVT VT : MVT::integer_vector_valuetypes()) {
831 setLoadExtAction(ISD::EXTLOAD, VT, Ty, Legal);
832 setLoadExtAction(ISD::ZEXTLOAD, VT, Ty, Legal);
833 setLoadExtAction(ISD::SEXTLOAD, VT, Ty, Legal);
838 // ARM and Thumb2 support UMLAL/SMLAL.
839 if (!Subtarget->isThumb1Only())
840 setTargetDAGCombine(ISD::ADDC);
842 if (Subtarget->isFPOnlySP()) {
// When targeting a floating-point unit with only single-precision
// operations, f64 is legal for the few double-precision instructions which
// are present. However, no double-precision operations other than moves,
// loads and stores are provided by the hardware.
847 setOperationAction(ISD::FADD, MVT::f64, Expand);
848 setOperationAction(ISD::FSUB, MVT::f64, Expand);
849 setOperationAction(ISD::FMUL, MVT::f64, Expand);
850 setOperationAction(ISD::FMA, MVT::f64, Expand);
851 setOperationAction(ISD::FDIV, MVT::f64, Expand);
852 setOperationAction(ISD::FREM, MVT::f64, Expand);
853 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
854 setOperationAction(ISD::FGETSIGN, MVT::f64, Expand);
855 setOperationAction(ISD::FNEG, MVT::f64, Expand);
856 setOperationAction(ISD::FABS, MVT::f64, Expand);
857 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
858 setOperationAction(ISD::FSIN, MVT::f64, Expand);
859 setOperationAction(ISD::FCOS, MVT::f64, Expand);
860 setOperationAction(ISD::FPOWI, MVT::f64, Expand);
861 setOperationAction(ISD::FPOW, MVT::f64, Expand);
862 setOperationAction(ISD::FLOG, MVT::f64, Expand);
863 setOperationAction(ISD::FLOG2, MVT::f64, Expand);
864 setOperationAction(ISD::FLOG10, MVT::f64, Expand);
865 setOperationAction(ISD::FEXP, MVT::f64, Expand);
866 setOperationAction(ISD::FEXP2, MVT::f64, Expand);
867 setOperationAction(ISD::FCEIL, MVT::f64, Expand);
868 setOperationAction(ISD::FTRUNC, MVT::f64, Expand);
869 setOperationAction(ISD::FRINT, MVT::f64, Expand);
870 setOperationAction(ISD::FNEARBYINT, MVT::f64, Expand);
871 setOperationAction(ISD::FFLOOR, MVT::f64, Expand);
872 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
873 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
874 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
875 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
876 setOperationAction(ISD::FP_TO_SINT, MVT::f64, Custom);
877 setOperationAction(ISD::FP_TO_UINT, MVT::f64, Custom);
878 setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
879 setOperationAction(ISD::FP_EXTEND, MVT::f64, Custom);
882 computeRegisterProperties(Subtarget->getRegisterInfo());
884 // ARM does not have floating-point extending loads.
885 for (MVT VT : MVT::fp_valuetypes()) {
886 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
887 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
890 // ... or truncating stores
891 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
892 setTruncStoreAction(MVT::f32, MVT::f16, Expand);
893 setTruncStoreAction(MVT::f64, MVT::f16, Expand);
895 // ARM does not have i1 sign extending load.
896 for (MVT VT : MVT::integer_valuetypes())
897 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
899 // ARM supports all 4 flavors of integer indexed load / store.
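// That is, pre-increment, pre-decrement, post-increment and post-decrement
// addressing, for i1/i8/i16/i32 loads and stores.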
900 if (!Subtarget->isThumb1Only()) {
901 for (unsigned im = (unsigned)ISD::PRE_INC;
902 im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
903 setIndexedLoadAction(im, MVT::i1, Legal);
904 setIndexedLoadAction(im, MVT::i8, Legal);
905 setIndexedLoadAction(im, MVT::i16, Legal);
906 setIndexedLoadAction(im, MVT::i32, Legal);
907 setIndexedStoreAction(im, MVT::i1, Legal);
908 setIndexedStoreAction(im, MVT::i8, Legal);
909 setIndexedStoreAction(im, MVT::i16, Legal);
910 setIndexedStoreAction(im, MVT::i32, Legal);
913 // Thumb-1 has limited post-inc load/store support - LDM r0!, {r1}.
914 setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
915 setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
918 setOperationAction(ISD::SADDO, MVT::i32, Custom);
919 setOperationAction(ISD::UADDO, MVT::i32, Custom);
920 setOperationAction(ISD::SSUBO, MVT::i32, Custom);
921 setOperationAction(ISD::USUBO, MVT::i32, Custom);
923 // i64 operation support.
924 setOperationAction(ISD::MUL, MVT::i64, Expand);
925 setOperationAction(ISD::MULHU, MVT::i32, Expand);
926 if (Subtarget->isThumb1Only()) {
927 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
928 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
930 if (Subtarget->isThumb1Only() || !Subtarget->hasV6Ops()
931 || (Subtarget->isThumb2() && !Subtarget->hasDSP()))
932 setOperationAction(ISD::MULHS, MVT::i32, Expand);
934 setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
935 setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
936 setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
937 setOperationAction(ISD::SRL, MVT::i64, Custom);
938 setOperationAction(ISD::SRA, MVT::i64, Custom);
940 if (!Subtarget->isThumb1Only()) {
941 // FIXME: We should do this for Thumb1 as well.
942 setOperationAction(ISD::ADDC, MVT::i32, Custom);
943 setOperationAction(ISD::ADDE, MVT::i32, Custom);
944 setOperationAction(ISD::SUBC, MVT::i32, Custom);
945 setOperationAction(ISD::SUBE, MVT::i32, Custom);
948 if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops())
949 setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
951 // ARM does not have ROTL.
952 setOperationAction(ISD::ROTL, MVT::i32, Expand);
953 for (MVT VT : MVT::vector_valuetypes()) {
954 setOperationAction(ISD::ROTL, VT, Expand);
955 setOperationAction(ISD::ROTR, VT, Expand);
957 setOperationAction(ISD::CTTZ, MVT::i32, Custom);
958 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
959 if (!Subtarget->hasV5TOps() || Subtarget->isThumb1Only())
960 setOperationAction(ISD::CTLZ, MVT::i32, Expand);
962 // @llvm.readcyclecounter requires the Performance Monitors extension.
963 // Default to the 0 expansion on unsupported platforms.
964 // FIXME: Technically there are older ARM CPUs that have
965 // implementation-specific ways of obtaining this information.
966 if (Subtarget->hasPerfMon())
967 setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);
969 // Only ARMv6 has BSWAP.
970 if (!Subtarget->hasV6Ops())
971 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
973 bool hasDivide = Subtarget->isThumb() ? Subtarget->hasDivide()
974 : Subtarget->hasDivideInARMMode();
976 // These are expanded into libcalls if the cpu doesn't have HW divider.
977 setOperationAction(ISD::SDIV, MVT::i32, LibCall);
978 setOperationAction(ISD::UDIV, MVT::i32, LibCall);
981 if (Subtarget->isTargetWindows() && !Subtarget->hasDivide()) {
982 setOperationAction(ISD::SDIV, MVT::i32, Custom);
983 setOperationAction(ISD::UDIV, MVT::i32, Custom);
985 setOperationAction(ISD::SDIV, MVT::i64, Custom);
986 setOperationAction(ISD::UDIV, MVT::i64, Custom);
989 setOperationAction(ISD::SREM, MVT::i32, Expand);
990 setOperationAction(ISD::UREM, MVT::i32, Expand);
992 // Register based DivRem for AEABI (RTABI 4.2)
993 if (Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
994 Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
995 Subtarget->isTargetWindows()) {
996 setOperationAction(ISD::SREM, MVT::i64, Custom);
997 setOperationAction(ISD::UREM, MVT::i64, Custom);
998 HasStandaloneRem = false;
1000 if (Subtarget->isTargetWindows()) {
1002 const RTLIB::Libcall Op;
1003 const char * const Name;
1004 const CallingConv::ID CC;
1005 } LibraryCalls[] = {
1006 { RTLIB::SDIVREM_I8, "__rt_sdiv", CallingConv::ARM_AAPCS },
1007 { RTLIB::SDIVREM_I16, "__rt_sdiv", CallingConv::ARM_AAPCS },
1008 { RTLIB::SDIVREM_I32, "__rt_sdiv", CallingConv::ARM_AAPCS },
1009 { RTLIB::SDIVREM_I64, "__rt_sdiv64", CallingConv::ARM_AAPCS },
1011 { RTLIB::UDIVREM_I8, "__rt_udiv", CallingConv::ARM_AAPCS },
1012 { RTLIB::UDIVREM_I16, "__rt_udiv", CallingConv::ARM_AAPCS },
1013 { RTLIB::UDIVREM_I32, "__rt_udiv", CallingConv::ARM_AAPCS },
1014 { RTLIB::UDIVREM_I64, "__rt_udiv64", CallingConv::ARM_AAPCS },
1017 for (const auto &LC : LibraryCalls) {
1018 setLibcallName(LC.Op, LC.Name);
1019 setLibcallCallingConv(LC.Op, LC.CC);
1023 const RTLIB::Libcall Op;
1024 const char * const Name;
1025 const CallingConv::ID CC;
1026 } LibraryCalls[] = {
1027 { RTLIB::SDIVREM_I8, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1028 { RTLIB::SDIVREM_I16, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1029 { RTLIB::SDIVREM_I32, "__aeabi_idivmod", CallingConv::ARM_AAPCS },
1030 { RTLIB::SDIVREM_I64, "__aeabi_ldivmod", CallingConv::ARM_AAPCS },
1032 { RTLIB::UDIVREM_I8, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1033 { RTLIB::UDIVREM_I16, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1034 { RTLIB::UDIVREM_I32, "__aeabi_uidivmod", CallingConv::ARM_AAPCS },
1035 { RTLIB::UDIVREM_I64, "__aeabi_uldivmod", CallingConv::ARM_AAPCS },
1038 for (const auto &LC : LibraryCalls) {
1039 setLibcallName(LC.Op, LC.Name);
1040 setLibcallCallingConv(LC.Op, LC.CC);
1044 setOperationAction(ISD::SDIVREM, MVT::i32, Custom);
1045 setOperationAction(ISD::UDIVREM, MVT::i32, Custom);
1046 setOperationAction(ISD::SDIVREM, MVT::i64, Custom);
1047 setOperationAction(ISD::UDIVREM, MVT::i64, Custom);
1049 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1050 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1053 if (Subtarget->isTargetWindows() && Subtarget->getTargetTriple().isOSMSVCRT())
1054 for (auto &VT : {MVT::f32, MVT::f64})
1055 setOperationAction(ISD::FPOWI, VT, Custom);
1057 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
1058 setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
1059 setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
1060 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1062 setOperationAction(ISD::TRAP, MVT::Other, Legal);
1064 // Use the default implementation.
1065 setOperationAction(ISD::VASTART, MVT::Other, Custom);
1066 setOperationAction(ISD::VAARG, MVT::Other, Expand);
1067 setOperationAction(ISD::VACOPY, MVT::Other, Expand);
1068 setOperationAction(ISD::VAEND, MVT::Other, Expand);
1069 setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
1070 setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
else
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
1077 // ARMv6 Thumb1 (except for CPUs that support dmb / dsb) and earlier use
1078 // the default expansion.
1079 InsertFencesForAtomic = false;
1080 if (Subtarget->hasAnyDataBarrier() &&
1081 (!Subtarget->isThumb() || Subtarget->hasV8MBaselineOps())) {
1082 // ATOMIC_FENCE needs custom lowering; the others should have been expanded
1083 // to ldrex/strex loops already.
1084 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
1085 if (!Subtarget->isThumb() || !Subtarget->isMClass())
1086 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Custom);
1088 // On v8, we have particularly efficient implementations of atomic fences
1089 // if they can be combined with nearby atomic loads and stores.
1090 if (!Subtarget->hasV8Ops() || getTargetMachine().getOptLevel() == 0) {
1091 // Automatically insert fences (dmb ish) around ATOMIC_SWAP etc.
1092 InsertFencesForAtomic = true;
1095 // If there's anything we can use as a barrier, go through custom lowering
1096 // for ATOMIC_FENCE.
// If the target has DMB in Thumb mode, fences can be inserted.
1098 if (Subtarget->hasDataBarrier())
1099 InsertFencesForAtomic = true;
1101 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other,
1102 Subtarget->hasAnyDataBarrier() ? Custom : Expand);
1104 // Set them all for expansion, which will force libcalls.
1105 setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i32, Expand);
1106 setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Expand);
1107 setOperationAction(ISD::ATOMIC_LOAD_ADD, MVT::i32, Expand);
1108 setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Expand);
1109 setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Expand);
1110 setOperationAction(ISD::ATOMIC_LOAD_OR, MVT::i32, Expand);
1111 setOperationAction(ISD::ATOMIC_LOAD_XOR, MVT::i32, Expand);
1112 setOperationAction(ISD::ATOMIC_LOAD_NAND, MVT::i32, Expand);
1113 setOperationAction(ISD::ATOMIC_LOAD_MIN, MVT::i32, Expand);
1114 setOperationAction(ISD::ATOMIC_LOAD_MAX, MVT::i32, Expand);
1115 setOperationAction(ISD::ATOMIC_LOAD_UMIN, MVT::i32, Expand);
1116 setOperationAction(ISD::ATOMIC_LOAD_UMAX, MVT::i32, Expand);
1117 // Mark ATOMIC_LOAD and ATOMIC_STORE custom so we can handle the
1118 // Unordered/Monotonic case.
1119 if (!InsertFencesForAtomic) {
1120 setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
1121 setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
1125 setOperationAction(ISD::PREFETCH, MVT::Other, Custom);
1127 // Requires SXTB/SXTH, available on v6 and up in both ARM and Thumb modes.
1128 if (!Subtarget->hasV6Ops()) {
1129 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
1130 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
1132 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1134 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
1135 !Subtarget->isThumb1Only()) {
1136 // Turn f64->i64 into VMOVRRD, i64 -> f64 to VMOVDRR
1137 // iff target supports vfp2.
1138 setOperationAction(ISD::BITCAST, MVT::i64, Custom);
1139 setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);
1142 // We want to custom lower some of our intrinsics.
1143 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1144 setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
1145 setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
1146 setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
1147 if (Subtarget->useSjLjEH())
1148 setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");
1150 setOperationAction(ISD::SETCC, MVT::i32, Expand);
1151 setOperationAction(ISD::SETCC, MVT::f32, Expand);
1152 setOperationAction(ISD::SETCC, MVT::f64, Expand);
1153 setOperationAction(ISD::SELECT, MVT::i32, Custom);
1154 setOperationAction(ISD::SELECT, MVT::f32, Custom);
1155 setOperationAction(ISD::SELECT, MVT::f64, Custom);
1156 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1157 setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
1158 setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
1160 // Thumb-1 cannot currently select ARMISD::SUBE.
1161 if (!Subtarget->isThumb1Only())
1162 setOperationAction(ISD::SETCCE, MVT::i32, Custom);
1164 setOperationAction(ISD::BRCOND, MVT::Other, Expand);
1165 setOperationAction(ISD::BR_CC, MVT::i32, Custom);
1166 setOperationAction(ISD::BR_CC, MVT::f32, Custom);
1167 setOperationAction(ISD::BR_CC, MVT::f64, Custom);
1168 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
1170 // We don't support sin/cos/fmod/copysign/pow
1171 setOperationAction(ISD::FSIN, MVT::f64, Expand);
1172 setOperationAction(ISD::FSIN, MVT::f32, Expand);
1173 setOperationAction(ISD::FCOS, MVT::f32, Expand);
1174 setOperationAction(ISD::FCOS, MVT::f64, Expand);
1175 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1176 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1177 setOperationAction(ISD::FREM, MVT::f64, Expand);
1178 setOperationAction(ISD::FREM, MVT::f32, Expand);
1179 if (!Subtarget->useSoftFloat() && Subtarget->hasVFP2() &&
1180 !Subtarget->isThumb1Only()) {
1181 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
1182 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
1184 setOperationAction(ISD::FPOW, MVT::f64, Expand);
1185 setOperationAction(ISD::FPOW, MVT::f32, Expand);
1187 if (!Subtarget->hasVFP4()) {
1188 setOperationAction(ISD::FMA, MVT::f64, Expand);
1189 setOperationAction(ISD::FMA, MVT::f32, Expand);
1192 // Various VFP goodness
1193 if (!Subtarget->useSoftFloat() && !Subtarget->isThumb1Only()) {
1194 // FP-ARMv8 adds f64 <-> f16 conversion. Before that it should be expanded.
1195 if (!Subtarget->hasFPARMv8() || Subtarget->isFPOnlySP()) {
1196 setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
1197 setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
1200 // fp16 is a special v7 extension that adds f16 <-> f32 conversions.
1201 if (!Subtarget->hasFP16()) {
1202 setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
1203 setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
1207 // Combine sin / cos into one node or libcall if possible.
1208 if (Subtarget->hasSinCos()) {
1209 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1210 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1211 if (Subtarget->isTargetWatchABI()) {
1212 setLibcallCallingConv(RTLIB::SINCOS_F32, CallingConv::ARM_AAPCS_VFP);
1213 setLibcallCallingConv(RTLIB::SINCOS_F64, CallingConv::ARM_AAPCS_VFP);
1215 if (Subtarget->isTargetIOS() || Subtarget->isTargetWatchOS()) {
// For iOS, we don't want the normal expansion of a libcall to
// sincos; we want to issue a libcall to __sincos_stret instead.
1218 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1219 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1223 // FP-ARMv8 implements a lot of rounding-like FP operations.
1224 if (Subtarget->hasFPARMv8()) {
1225 setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
1226 setOperationAction(ISD::FCEIL, MVT::f32, Legal);
1227 setOperationAction(ISD::FROUND, MVT::f32, Legal);
1228 setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
1229 setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
1230 setOperationAction(ISD::FRINT, MVT::f32, Legal);
1231 setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
1232 setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
1233 setOperationAction(ISD::FMINNUM, MVT::v2f32, Legal);
1234 setOperationAction(ISD::FMAXNUM, MVT::v2f32, Legal);
1235 setOperationAction(ISD::FMINNUM, MVT::v4f32, Legal);
1236 setOperationAction(ISD::FMAXNUM, MVT::v4f32, Legal);
1238 if (!Subtarget->isFPOnlySP()) {
1239 setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
1240 setOperationAction(ISD::FCEIL, MVT::f64, Legal);
1241 setOperationAction(ISD::FROUND, MVT::f64, Legal);
1242 setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
1243 setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);
1244 setOperationAction(ISD::FRINT, MVT::f64, Legal);
1245 setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
1246 setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
1250 if (Subtarget->hasNEON()) {
1251 // vmin and vmax aren't available in a scalar form, so we use
1252 // a NEON instruction with an undef lane instead.
1253 setOperationAction(ISD::FMINNAN, MVT::f32, Legal);
1254 setOperationAction(ISD::FMAXNAN, MVT::f32, Legal);
1255 setOperationAction(ISD::FMINNAN, MVT::v2f32, Legal);
1256 setOperationAction(ISD::FMAXNAN, MVT::v2f32, Legal);
1257 setOperationAction(ISD::FMINNAN, MVT::v4f32, Legal);
1258 setOperationAction(ISD::FMAXNAN, MVT::v4f32, Legal);
1261 // We have target-specific dag combine patterns for the following nodes:
1262 // ARMISD::VMOVRRD - No need to call setTargetDAGCombine
1263 setTargetDAGCombine(ISD::ADD);
1264 setTargetDAGCombine(ISD::SUB);
1265 setTargetDAGCombine(ISD::MUL);
1266 setTargetDAGCombine(ISD::AND);
1267 setTargetDAGCombine(ISD::OR);
1268 setTargetDAGCombine(ISD::XOR);
1270 if (Subtarget->hasV6Ops())
1271 setTargetDAGCombine(ISD::SRL);
1273 setStackPointerRegisterToSaveRestore(ARM::SP);
if (Subtarget->useSoftFloat() || Subtarget->isThumb1Only() ||
    !Subtarget->hasVFP2())
  setSchedulingPreference(Sched::RegPressure);
else
  setSchedulingPreference(Sched::Hybrid);
1281 //// temporary - rewrite interface to use type
1282 MaxStoresPerMemset = 8;
1283 MaxStoresPerMemsetOptSize = 4;
1284 MaxStoresPerMemcpy = 4; // For @llvm.memcpy -> sequence of stores
1285 MaxStoresPerMemcpyOptSize = 2;
1286 MaxStoresPerMemmove = 4; // For @llvm.memmove -> sequence of stores
1287 MaxStoresPerMemmoveOptSize = 2;
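// These thresholds bound how many scalar stores SelectionDAG may emit inline
// for a memset/memcpy/memmove before it falls back to a library call.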
1289 // On ARM arguments smaller than 4 bytes are extended, so all arguments
1290 // are at least 4 bytes aligned.
1291 setMinStackArgumentAlignment(4);
1293 // Prefer likely predicted branches to selects on out-of-order cores.
1294 PredictableSelectIsExpensive = Subtarget->getSchedModel().isOutOfOrder();
1296 setMinFunctionAlignment(Subtarget->isThumb() ? 1 : 2);
1299 bool ARMTargetLowering::useSoftFloat() const {
1300 return Subtarget->useSoftFloat();
// FIXME: It might make sense to define the representative register class as the
// nearest super-register that has a non-null superset. For example, DPR_VFP2 is
// a super-register of SPR, and DPR is a superset of DPR_VFP2. Consequently,
// SPR's representative would be DPR_VFP2. This should work well if register
// pressure tracking were modified such that a register use would increment the
// pressure of the register class's representative and all of its super
// classes' representatives transitively. We have not implemented this because
// of the difficulty prior to coalescing of modeling operand register classes
// due to the common occurrence of cross-class copies and subregister insertions
// and extractions.
1313 std::pair<const TargetRegisterClass *, uint8_t>
1314 ARMTargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
1316 const TargetRegisterClass *RRC = nullptr;
1318 switch (VT.SimpleTy) {
1320 return TargetLowering::findRepresentativeClass(TRI, VT);
// Use DPR as the representative register class for all floating-point
// and vector types. Since there are 32 SPR registers and 32 DPR registers,
// the cost is 1 for both f32 and f64.
1324 case MVT::f32: case MVT::f64: case MVT::v8i8: case MVT::v4i16:
1325 case MVT::v2i32: case MVT::v1i64: case MVT::v2f32:
1326 RRC = &ARM::DPRRegClass;
1327 // When NEON is used for SP, only half of the register file is available
1328 // because operations that define both SP and DP results will be constrained
1329 // to the VFP2 class (D0-D15). We currently model this constraint prior to
1330 // coalescing by double-counting the SP regs. See the FIXME above.
1331 if (Subtarget->useNEONForSinglePrecisionFP())
1334 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1335 case MVT::v4f32: case MVT::v2f64:
1336 RRC = &ARM::DPRRegClass;
1340 RRC = &ARM::DPRRegClass;
1344 RRC = &ARM::DPRRegClass;
1348 return std::make_pair(RRC, Cost);
1351 const char *ARMTargetLowering::getTargetNodeName(unsigned Opcode) const {
1352 switch ((ARMISD::NodeType)Opcode) {
1353 case ARMISD::FIRST_NUMBER: break;
1354 case ARMISD::Wrapper: return "ARMISD::Wrapper";
1355 case ARMISD::WrapperPIC: return "ARMISD::WrapperPIC";
1356 case ARMISD::WrapperJT: return "ARMISD::WrapperJT";
1357 case ARMISD::COPY_STRUCT_BYVAL: return "ARMISD::COPY_STRUCT_BYVAL";
1358 case ARMISD::CALL: return "ARMISD::CALL";
1359 case ARMISD::CALL_PRED: return "ARMISD::CALL_PRED";
1360 case ARMISD::CALL_NOLINK: return "ARMISD::CALL_NOLINK";
1361 case ARMISD::BRCOND: return "ARMISD::BRCOND";
1362 case ARMISD::BR_JT: return "ARMISD::BR_JT";
1363 case ARMISD::BR2_JT: return "ARMISD::BR2_JT";
1364 case ARMISD::RET_FLAG: return "ARMISD::RET_FLAG";
1365 case ARMISD::INTRET_FLAG: return "ARMISD::INTRET_FLAG";
1366 case ARMISD::PIC_ADD: return "ARMISD::PIC_ADD";
1367 case ARMISD::CMP: return "ARMISD::CMP";
1368 case ARMISD::CMN: return "ARMISD::CMN";
1369 case ARMISD::CMPZ: return "ARMISD::CMPZ";
1370 case ARMISD::CMPFP: return "ARMISD::CMPFP";
1371 case ARMISD::CMPFPw0: return "ARMISD::CMPFPw0";
1372 case ARMISD::BCC_i64: return "ARMISD::BCC_i64";
1373 case ARMISD::FMSTAT: return "ARMISD::FMSTAT";
1375 case ARMISD::CMOV: return "ARMISD::CMOV";
1377 case ARMISD::SSAT: return "ARMISD::SSAT";
1379 case ARMISD::SRL_FLAG: return "ARMISD::SRL_FLAG";
1380 case ARMISD::SRA_FLAG: return "ARMISD::SRA_FLAG";
1381 case ARMISD::RRX: return "ARMISD::RRX";
1383 case ARMISD::ADDC: return "ARMISD::ADDC";
1384 case ARMISD::ADDE: return "ARMISD::ADDE";
1385 case ARMISD::SUBC: return "ARMISD::SUBC";
1386 case ARMISD::SUBE: return "ARMISD::SUBE";
1388 case ARMISD::VMOVRRD: return "ARMISD::VMOVRRD";
1389 case ARMISD::VMOVDRR: return "ARMISD::VMOVDRR";
1391 case ARMISD::EH_SJLJ_SETJMP: return "ARMISD::EH_SJLJ_SETJMP";
1392 case ARMISD::EH_SJLJ_LONGJMP: return "ARMISD::EH_SJLJ_LONGJMP";
1393 case ARMISD::EH_SJLJ_SETUP_DISPATCH: return "ARMISD::EH_SJLJ_SETUP_DISPATCH";
1395 case ARMISD::TC_RETURN: return "ARMISD::TC_RETURN";
1397 case ARMISD::THREAD_POINTER:return "ARMISD::THREAD_POINTER";
1399 case ARMISD::DYN_ALLOC: return "ARMISD::DYN_ALLOC";
1401 case ARMISD::MEMBARRIER_MCR: return "ARMISD::MEMBARRIER_MCR";
1403 case ARMISD::PRELOAD: return "ARMISD::PRELOAD";
1405 case ARMISD::WIN__CHKSTK: return "ARMISD::WIN__CHKSTK";
1406 case ARMISD::WIN__DBZCHK: return "ARMISD::WIN__DBZCHK";
1408 case ARMISD::VCEQ: return "ARMISD::VCEQ";
1409 case ARMISD::VCEQZ: return "ARMISD::VCEQZ";
1410 case ARMISD::VCGE: return "ARMISD::VCGE";
1411 case ARMISD::VCGEZ: return "ARMISD::VCGEZ";
1412 case ARMISD::VCLEZ: return "ARMISD::VCLEZ";
1413 case ARMISD::VCGEU: return "ARMISD::VCGEU";
1414 case ARMISD::VCGT: return "ARMISD::VCGT";
1415 case ARMISD::VCGTZ: return "ARMISD::VCGTZ";
1416 case ARMISD::VCLTZ: return "ARMISD::VCLTZ";
1417 case ARMISD::VCGTU: return "ARMISD::VCGTU";
1418 case ARMISD::VTST: return "ARMISD::VTST";
1420 case ARMISD::VSHL: return "ARMISD::VSHL";
1421 case ARMISD::VSHRs: return "ARMISD::VSHRs";
1422 case ARMISD::VSHRu: return "ARMISD::VSHRu";
1423 case ARMISD::VRSHRs: return "ARMISD::VRSHRs";
1424 case ARMISD::VRSHRu: return "ARMISD::VRSHRu";
1425 case ARMISD::VRSHRN: return "ARMISD::VRSHRN";
1426 case ARMISD::VQSHLs: return "ARMISD::VQSHLs";
1427 case ARMISD::VQSHLu: return "ARMISD::VQSHLu";
1428 case ARMISD::VQSHLsu: return "ARMISD::VQSHLsu";
1429 case ARMISD::VQSHRNs: return "ARMISD::VQSHRNs";
1430 case ARMISD::VQSHRNu: return "ARMISD::VQSHRNu";
1431 case ARMISD::VQSHRNsu: return "ARMISD::VQSHRNsu";
1432 case ARMISD::VQRSHRNs: return "ARMISD::VQRSHRNs";
1433 case ARMISD::VQRSHRNu: return "ARMISD::VQRSHRNu";
1434 case ARMISD::VQRSHRNsu: return "ARMISD::VQRSHRNsu";
1435 case ARMISD::VSLI: return "ARMISD::VSLI";
1436 case ARMISD::VSRI: return "ARMISD::VSRI";
1437 case ARMISD::VGETLANEu: return "ARMISD::VGETLANEu";
1438 case ARMISD::VGETLANEs: return "ARMISD::VGETLANEs";
1439 case ARMISD::VMOVIMM: return "ARMISD::VMOVIMM";
1440 case ARMISD::VMVNIMM: return "ARMISD::VMVNIMM";
1441 case ARMISD::VMOVFPIMM: return "ARMISD::VMOVFPIMM";
1442 case ARMISD::VDUP: return "ARMISD::VDUP";
1443 case ARMISD::VDUPLANE: return "ARMISD::VDUPLANE";
1444 case ARMISD::VEXT: return "ARMISD::VEXT";
1445 case ARMISD::VREV64: return "ARMISD::VREV64";
1446 case ARMISD::VREV32: return "ARMISD::VREV32";
1447 case ARMISD::VREV16: return "ARMISD::VREV16";
1448 case ARMISD::VZIP: return "ARMISD::VZIP";
1449 case ARMISD::VUZP: return "ARMISD::VUZP";
1450 case ARMISD::VTRN: return "ARMISD::VTRN";
1451 case ARMISD::VTBL1: return "ARMISD::VTBL1";
1452 case ARMISD::VTBL2: return "ARMISD::VTBL2";
1453 case ARMISD::VMULLs: return "ARMISD::VMULLs";
1454 case ARMISD::VMULLu: return "ARMISD::VMULLu";
1455 case ARMISD::UMAAL: return "ARMISD::UMAAL";
1456 case ARMISD::UMLAL: return "ARMISD::UMLAL";
1457 case ARMISD::SMLAL: return "ARMISD::SMLAL";
1458 case ARMISD::BUILD_VECTOR: return "ARMISD::BUILD_VECTOR";
1459 case ARMISD::BFI: return "ARMISD::BFI";
1460 case ARMISD::VORRIMM: return "ARMISD::VORRIMM";
1461 case ARMISD::VBICIMM: return "ARMISD::VBICIMM";
1462 case ARMISD::VBSL: return "ARMISD::VBSL";
1463 case ARMISD::MEMCPY: return "ARMISD::MEMCPY";
1464 case ARMISD::VLD1DUP: return "ARMISD::VLD1DUP";
1465 case ARMISD::VLD2DUP: return "ARMISD::VLD2DUP";
1466 case ARMISD::VLD3DUP: return "ARMISD::VLD3DUP";
1467 case ARMISD::VLD4DUP: return "ARMISD::VLD4DUP";
1468 case ARMISD::VLD1_UPD: return "ARMISD::VLD1_UPD";
1469 case ARMISD::VLD2_UPD: return "ARMISD::VLD2_UPD";
1470 case ARMISD::VLD3_UPD: return "ARMISD::VLD3_UPD";
1471 case ARMISD::VLD4_UPD: return "ARMISD::VLD4_UPD";
1472 case ARMISD::VLD2LN_UPD: return "ARMISD::VLD2LN_UPD";
1473 case ARMISD::VLD3LN_UPD: return "ARMISD::VLD3LN_UPD";
1474 case ARMISD::VLD4LN_UPD: return "ARMISD::VLD4LN_UPD";
1475 case ARMISD::VLD1DUP_UPD: return "ARMISD::VLD1DUP_UPD";
1476 case ARMISD::VLD2DUP_UPD: return "ARMISD::VLD2DUP_UPD";
1477 case ARMISD::VLD3DUP_UPD: return "ARMISD::VLD3DUP_UPD";
1478 case ARMISD::VLD4DUP_UPD: return "ARMISD::VLD4DUP_UPD";
1479 case ARMISD::VST1_UPD: return "ARMISD::VST1_UPD";
1480 case ARMISD::VST2_UPD: return "ARMISD::VST2_UPD";
1481 case ARMISD::VST3_UPD: return "ARMISD::VST3_UPD";
1482 case ARMISD::VST4_UPD: return "ARMISD::VST4_UPD";
1483 case ARMISD::VST2LN_UPD: return "ARMISD::VST2LN_UPD";
1484 case ARMISD::VST3LN_UPD: return "ARMISD::VST3LN_UPD";
1485 case ARMISD::VST4LN_UPD: return "ARMISD::VST4LN_UPD";
1490 EVT ARMTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
1493 return getPointerTy(DL);
1494 return VT.changeVectorElementTypeToInteger();
1497 /// getRegClassFor - Return the register class that should be used for the
1498 /// specified value type.
1499 const TargetRegisterClass *ARMTargetLowering::getRegClassFor(MVT VT) const {
1500 // Map v4i64 to QQ registers but do not make the type legal. Similarly map
1501 // v8i64 to QQQQ registers. v4i64 and v8i64 are only used for REG_SEQUENCE to
1502 // load / store 4 to 8 consecutive D registers.
1503 if (Subtarget->hasNEON()) {
1504 if (VT == MVT::v4i64)
1505 return &ARM::QQPRRegClass;
1506 if (VT == MVT::v8i64)
1507 return &ARM::QQQQPRRegClass;
1509 return TargetLowering::getRegClassFor(VT);
1512 // memcpy and other memory intrinsics typically try to use LDM/STM if the
1513 // source/dest is aligned and the copy size is large enough. We therefore want
1514 // to align such objects passed to memory intrinsics.
1515 bool ARMTargetLowering::shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
1516 unsigned &PrefAlign) const {
1517 if (!isa<MemIntrinsic>(CI))
1520 // On ARM11 onwards (excluding M class) 8-byte aligned LDM is typically 1
1521 // cycle faster than 4-byte aligned LDM.
1522 PrefAlign = (Subtarget->hasV6Ops() && !Subtarget->isMClass() ? 8 : 4);
1526 // Create a fast isel object.
1528 ARMTargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
1529 const TargetLibraryInfo *libInfo) const {
1530 return ARM::createFastISel(funcInfo, libInfo);
1533 Sched::Preference ARMTargetLowering::getSchedulingPreference(SDNode *N) const {
1534 unsigned NumVals = N->getNumValues();
1536 return Sched::RegPressure;
1538 for (unsigned i = 0; i != NumVals; ++i) {
1539 EVT VT = N->getValueType(i);
1540 if (VT == MVT::Glue || VT == MVT::Other)
1542 if (VT.isFloatingPoint() || VT.isVector())
1546 if (!N->isMachineOpcode())
1547 return Sched::RegPressure;
1549 // Loads are scheduled for latency even if the instruction itinerary
1550 // is not available.
1551 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
1552 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1554 if (MCID.getNumDefs() == 0)
1555 return Sched::RegPressure;
1556 if (!Itins->isEmpty() &&
1557 Itins->getOperandCycle(MCID.getSchedClass(), 0) > 2)
1560 return Sched::RegPressure;
1563 //===----------------------------------------------------------------------===//
1565 //===----------------------------------------------------------------------===//
1567 /// IntCCToARMCC - Convert a DAG integer condition code to an ARM CC
1568 static ARMCC::CondCodes IntCCToARMCC(ISD::CondCode CC) {
1570 default: llvm_unreachable("Unknown condition code!");
1571 case ISD::SETNE: return ARMCC::NE;
1572 case ISD::SETEQ: return ARMCC::EQ;
1573 case ISD::SETGT: return ARMCC::GT;
1574 case ISD::SETGE: return ARMCC::GE;
1575 case ISD::SETLT: return ARMCC::LT;
1576 case ISD::SETLE: return ARMCC::LE;
1577 case ISD::SETUGT: return ARMCC::HI;
1578 case ISD::SETUGE: return ARMCC::HS;
1579 case ISD::SETULT: return ARMCC::LO;
1580 case ISD::SETULE: return ARMCC::LS;
1584 /// FPCCToARMCC - Convert a DAG fp condition code to an ARM CC.
1585 static void FPCCToARMCC(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
1586 ARMCC::CondCodes &CondCode2) {
1587 CondCode2 = ARMCC::AL;
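// A second condition code of AL means "unused"; it is only overridden for FP
// conditions (e.g. SETONE, SETUEQ) that need two ARM condition checks.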
1589 default: llvm_unreachable("Unknown FP condition!");
1591 case ISD::SETOEQ: CondCode = ARMCC::EQ; break;
1593 case ISD::SETOGT: CondCode = ARMCC::GT; break;
1595 case ISD::SETOGE: CondCode = ARMCC::GE; break;
1596 case ISD::SETOLT: CondCode = ARMCC::MI; break;
1597 case ISD::SETOLE: CondCode = ARMCC::LS; break;
1598 case ISD::SETONE: CondCode = ARMCC::MI; CondCode2 = ARMCC::GT; break;
1599 case ISD::SETO: CondCode = ARMCC::VC; break;
1600 case ISD::SETUO: CondCode = ARMCC::VS; break;
1601 case ISD::SETUEQ: CondCode = ARMCC::EQ; CondCode2 = ARMCC::VS; break;
1602 case ISD::SETUGT: CondCode = ARMCC::HI; break;
1603 case ISD::SETUGE: CondCode = ARMCC::PL; break;
1605 case ISD::SETULT: CondCode = ARMCC::LT; break;
1607 case ISD::SETULE: CondCode = ARMCC::LE; break;
1609 case ISD::SETUNE: CondCode = ARMCC::NE; break;
1613 //===----------------------------------------------------------------------===//
1614 // Calling Convention Implementation
1615 //===----------------------------------------------------------------------===//
1617 #include "ARMGenCallingConv.inc"
1619 /// getEffectiveCallingConv - Get the effective calling convention, taking into
1620 /// account the presence of floating-point hardware and calling convention
1621 /// limitations, such as support for variadic functions.
1623 ARMTargetLowering::getEffectiveCallingConv(CallingConv::ID CC,
1624 bool isVarArg) const {
1627 llvm_unreachable("Unsupported calling convention");
1628 case CallingConv::ARM_AAPCS:
1629 case CallingConv::ARM_APCS:
1630 case CallingConv::GHC:
1632 case CallingConv::PreserveMost:
1633 return CallingConv::PreserveMost;
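// AAPCS-VFP and Swift calls fall back to plain AAPCS when variadic, because
// variadic arguments cannot be passed in VFP registers.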
1634 case CallingConv::ARM_AAPCS_VFP:
1635 case CallingConv::Swift:
1636 return isVarArg ? CallingConv::ARM_AAPCS : CallingConv::ARM_AAPCS_VFP;
1637 case CallingConv::C:
1638 if (!Subtarget->isAAPCS_ABI())
1639 return CallingConv::ARM_APCS;
1640 else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() &&
1641 getTargetMachine().Options.FloatABIType == FloatABI::Hard &&
1643 return CallingConv::ARM_AAPCS_VFP;
1645 return CallingConv::ARM_AAPCS;
1646 case CallingConv::Fast:
1647 case CallingConv::CXX_FAST_TLS:
1648 if (!Subtarget->isAAPCS_ABI()) {
1649 if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
1650 return CallingConv::Fast;
1651 return CallingConv::ARM_APCS;
1652 } else if (Subtarget->hasVFP2() && !Subtarget->isThumb1Only() && !isVarArg)
1653 return CallingConv::ARM_AAPCS_VFP;
1655 return CallingConv::ARM_AAPCS;
1659 CCAssignFn *ARMTargetLowering::CCAssignFnForCall(CallingConv::ID CC,
1660 bool isVarArg) const {
1661 return CCAssignFnForNode(CC, false, isVarArg);
1664 CCAssignFn *ARMTargetLowering::CCAssignFnForReturn(CallingConv::ID CC,
1665 bool isVarArg) const {
1666 return CCAssignFnForNode(CC, true, isVarArg);
1669 /// CCAssignFnForNode - Selects the correct CCAssignFn for the given
1670 /// CallingConvention.
1671 CCAssignFn *ARMTargetLowering::CCAssignFnForNode(CallingConv::ID CC,
1673 bool isVarArg) const {
1674 switch (getEffectiveCallingConv(CC, isVarArg)) {
1676 llvm_unreachable("Unsupported calling convention");
1677 case CallingConv::ARM_APCS:
1678 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS);
1679 case CallingConv::ARM_AAPCS:
1680 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1681 case CallingConv::ARM_AAPCS_VFP:
1682 return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
1683 case CallingConv::Fast:
1684 return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
1685 case CallingConv::GHC:
1686 return (Return ? RetCC_ARM_APCS : CC_ARM_APCS_GHC);
1687 case CallingConv::PreserveMost:
1688 return (Return ? RetCC_ARM_AAPCS : CC_ARM_AAPCS);
1692 /// LowerCallResult - Lower the result values of a call into the
1693 /// appropriate copies out of appropriate physical registers.
1694 SDValue ARMTargetLowering::LowerCallResult(
1695 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
1696 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
1697 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
1698 SDValue ThisVal) const {
1700 // Assign locations to each value returned by this call.
1701 SmallVector<CCValAssign, 16> RVLocs;
1702 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
1703 *DAG.getContext(), Call);
1704 CCInfo.AnalyzeCallResult(Ins, CCAssignFnForReturn(CallConv, isVarArg));
1706 // Copy all of the result registers out of their specified physreg.
1707 for (unsigned i = 0; i != RVLocs.size(); ++i) {
1708 CCValAssign VA = RVLocs[i];
1710 // Pass the 'this' value directly from the argument to the return value, to
1711 // avoid register unit interference.
1712 if (i == 0 && isThisReturn) {
1713 assert(!VA.needsCustom() && VA.getLocVT() == MVT::i32 &&
1714 "unexpected return calling convention register assignment");
1715 InVals.push_back(ThisVal);
1720 if (VA.needsCustom()) {
1721 // Handle f64 or half of a v2f64.
1722 SDValue Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
1724 Chain = Lo.getValue(1);
1725 InFlag = Lo.getValue(2);
1726 VA = RVLocs[++i]; // skip ahead to next loc
1727 SDValue Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32,
1729 Chain = Hi.getValue(1);
1730 InFlag = Hi.getValue(2);
1731 if (!Subtarget->isLittle())
1733 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
1735 if (VA.getLocVT() == MVT::v2f64) {
1736 SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
1737 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
1738 DAG.getConstant(0, dl, MVT::i32));
1740 VA = RVLocs[++i]; // skip ahead to next loc
1741 Lo = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
1742 Chain = Lo.getValue(1);
1743 InFlag = Lo.getValue(2);
1744 VA = RVLocs[++i]; // skip ahead to next loc
1745 Hi = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), MVT::i32, InFlag);
1746 Chain = Hi.getValue(1);
1747 InFlag = Hi.getValue(2);
1748 if (!Subtarget->isLittle())
1750 Val = DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
1751 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Vec, Val,
1752 DAG.getConstant(1, dl, MVT::i32));
1755 Val = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), VA.getLocVT(),
1757 Chain = Val.getValue(1);
1758 InFlag = Val.getValue(2);
1761 switch (VA.getLocInfo()) {
1762 default: llvm_unreachable("Unknown loc info!");
1763 case CCValAssign::Full: break;
1764 case CCValAssign::BCvt:
1765 Val = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), Val);
1769 InVals.push_back(Val);
1775 /// LowerMemOpCallTo - Store the argument to the stack.
1776 SDValue ARMTargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
1777 SDValue Arg, const SDLoc &dl,
1779 const CCValAssign &VA,
1780 ISD::ArgFlagsTy Flags) const {
1781 unsigned LocMemOffset = VA.getLocMemOffset();
1782 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
1783 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
1785 return DAG.getStore(
1786 Chain, dl, Arg, PtrOff,
1787 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
1790 void ARMTargetLowering::PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG,
1791 SDValue Chain, SDValue &Arg,
1792 RegsToPassVector &RegsToPass,
1793 CCValAssign &VA, CCValAssign &NextVA,
1795 SmallVectorImpl<SDValue> &MemOpChains,
1796 ISD::ArgFlagsTy Flags) const {
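// Split the f64 into two i32 halves with a VMOVRRD; on big-endian targets
// the order of the two halves is swapped.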
1798 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
1799 DAG.getVTList(MVT::i32, MVT::i32), Arg);
1800 unsigned id = Subtarget->isLittle() ? 0 : 1;
1801 RegsToPass.push_back(std::make_pair(VA.getLocReg(), fmrrd.getValue(id)));
1803 if (NextVA.isRegLoc())
1804 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), fmrrd.getValue(1-id)));
1806 assert(NextVA.isMemLoc());
1807 if (!StackPtr.getNode())
1808 StackPtr = DAG.getCopyFromReg(Chain, dl, ARM::SP,
1809 getPointerTy(DAG.getDataLayout()));
1811 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, fmrrd.getValue(1-id),
1817 /// LowerCall - Lower a call into a callseq_start <-
1818 /// ARMISD::CALL <- callseq_end chain. Also add input and output parameter
1821 ARMTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
1822 SmallVectorImpl<SDValue> &InVals) const {
1823 SelectionDAG &DAG = CLI.DAG;
1825 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
1826 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
1827 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
1828 SDValue Chain = CLI.Chain;
1829 SDValue Callee = CLI.Callee;
1830 bool &isTailCall = CLI.IsTailCall;
1831 CallingConv::ID CallConv = CLI.CallConv;
1832 bool doesNotRet = CLI.DoesNotReturn;
1833 bool isVarArg = CLI.IsVarArg;
1835 MachineFunction &MF = DAG.getMachineFunction();
1836 bool isStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
1837 bool isThisReturn = false;
1838 bool isSibCall = false;
1839 auto Attr = MF.getFunction()->getFnAttribute("disable-tail-calls");
1841 // Disable tail calls if they're not supported.
1842 if (!Subtarget->supportsTailCall() || Attr.getValueAsString() == "true")
1846 // Check if it's really possible to do a tail call.
1847 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
1848 isVarArg, isStructRet, MF.getFunction()->hasStructRetAttr(),
1849 Outs, OutVals, Ins, DAG);
1850 if (!isTailCall && CLI.CS && CLI.CS->isMustTailCall())
1851 report_fatal_error("failed to perform tail call elimination on a call "
1852 "site marked musttail");
1853 // We don't support GuaranteedTailCallOpt for ARM, only automatically
1854 // detected sibcalls.
1861 // Analyze operands of the call, assigning locations to each operand.
1862 SmallVector<CCValAssign, 16> ArgLocs;
1863 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
1864 *DAG.getContext(), Call);
1865 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CallConv, isVarArg));
1867 // Get a count of how many bytes are to be pushed on the stack.
1868 unsigned NumBytes = CCInfo.getNextStackOffset();
1870 // For tail calls, memory operands are available in our caller's stack.
1874 // Adjust the stack pointer for the new arguments...
1875 // These operations are automatically eliminated by the prolog/epilog pass
1877 Chain = DAG.getCALLSEQ_START(Chain,
1878 DAG.getIntPtrConstant(NumBytes, dl, true), dl);
1881 DAG.getCopyFromReg(Chain, dl, ARM::SP, getPointerTy(DAG.getDataLayout()));
1883 RegsToPassVector RegsToPass;
1884 SmallVector<SDValue, 8> MemOpChains;
1886 // Walk the register/memloc assignments, inserting copies/loads. In the case
1887 // of tail call optimization, arguments are handled later.
1888 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
1890 ++i, ++realArgIdx) {
1891 CCValAssign &VA = ArgLocs[i];
1892 SDValue Arg = OutVals[realArgIdx];
1893 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
1894 bool isByVal = Flags.isByVal();
1896 // Promote the value if needed.
1897 switch (VA.getLocInfo()) {
1898 default: llvm_unreachable("Unknown loc info!");
1899 case CCValAssign::Full: break;
1900 case CCValAssign::SExt:
1901 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
1903 case CCValAssign::ZExt:
1904 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
1906 case CCValAssign::AExt:
1907 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
1909 case CCValAssign::BCvt:
1910 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
1914 // f64 and v2f64 might be passed in i32 pairs and must be split into pieces
1915 if (VA.needsCustom()) {
1916 if (VA.getLocVT() == MVT::v2f64) {
1917 SDValue Op0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1918 DAG.getConstant(0, dl, MVT::i32));
1919 SDValue Op1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
1920 DAG.getConstant(1, dl, MVT::i32));
1922 PassF64ArgInRegs(dl, DAG, Chain, Op0, RegsToPass,
1923 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1925 VA = ArgLocs[++i]; // skip ahead to next loc
1926 if (VA.isRegLoc()) {
1927 PassF64ArgInRegs(dl, DAG, Chain, Op1, RegsToPass,
1928 VA, ArgLocs[++i], StackPtr, MemOpChains, Flags);
1930 assert(VA.isMemLoc());
1932 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Op1,
1933 dl, DAG, VA, Flags));
1936 PassF64ArgInRegs(dl, DAG, Chain, Arg, RegsToPass, VA, ArgLocs[++i],
1937 StackPtr, MemOpChains, Flags);
1939 } else if (VA.isRegLoc()) {
1940 if (realArgIdx == 0 && Flags.isReturned() && Outs[0].VT == MVT::i32) {
1941 assert(VA.getLocVT() == MVT::i32 &&
1942 "unexpected calling convention register assignment");
1943 assert(!Ins.empty() && Ins[0].VT == MVT::i32 &&
1944 "unexpected use of 'returned'");
1945 isThisReturn = true;
1947 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
1948 } else if (isByVal) {
1949 assert(VA.isMemLoc());
1950 unsigned offset = 0;
1952 // True if this byval aggregate will be split between registers
1954 unsigned ByValArgsCount = CCInfo.getInRegsParamsCount();
1955 unsigned CurByValIdx = CCInfo.getInRegsParamsProcessed();
1957 if (CurByValIdx < ByValArgsCount) {
1959 unsigned RegBegin, RegEnd;
1960 CCInfo.getInRegsParamInfo(CurByValIdx, RegBegin, RegEnd);
1963 DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
1965 for (i = 0, j = RegBegin; j < RegEnd; i++, j++) {
1966 SDValue Const = DAG.getConstant(4*i, dl, MVT::i32);
1967 SDValue AddArg = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, Const);
1968 SDValue Load = DAG.getLoad(PtrVT, dl, Chain, AddArg,
1969 MachinePointerInfo(),
1970 DAG.InferPtrAlignment(AddArg));
1971 MemOpChains.push_back(Load.getValue(1));
1972 RegsToPass.push_back(std::make_pair(j, Load));
1975 // If the parameter size exceeds the register area, the "offset" value
1976 // helps us calculate the stack slot for the remaining part properly.
1977 offset = RegEnd - RegBegin;
1979 CCInfo.nextInRegsParam();
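// If the byval argument does not fit entirely in registers, copy the part
// that spills over to its stack slot using a struct-copy pseudo
// (ARMISD::COPY_STRUCT_BYVAL) below.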
1982 if (Flags.getByValSize() > 4*offset) {
1983 auto PtrVT = getPointerTy(DAG.getDataLayout());
1984 unsigned LocMemOffset = VA.getLocMemOffset();
1985 SDValue StkPtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
1986 SDValue Dst = DAG.getNode(ISD::ADD, dl, PtrVT, StackPtr, StkPtrOff);
1987 SDValue SrcOffset = DAG.getIntPtrConstant(4*offset, dl);
1988 SDValue Src = DAG.getNode(ISD::ADD, dl, PtrVT, Arg, SrcOffset);
1989 SDValue SizeNode = DAG.getConstant(Flags.getByValSize() - 4*offset, dl,
1991 SDValue AlignNode = DAG.getConstant(Flags.getByValAlign(), dl,
1994 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
1995 SDValue Ops[] = { Chain, Dst, Src, SizeNode, AlignNode};
1996 MemOpChains.push_back(DAG.getNode(ARMISD::COPY_STRUCT_BYVAL, dl, VTs,
1999 } else if (!isSibCall) {
2000 assert(VA.isMemLoc());
2002 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2003 dl, DAG, VA, Flags));
2007 if (!MemOpChains.empty())
2008 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2010 // Build a sequence of copy-to-reg nodes chained together with token chain
2011 // and flag operands which copy the outgoing args into the appropriate regs.
2013 // Tail call byval lowering might overwrite argument registers, so in case of
2014 // tail call optimization the copies to registers are lowered later.
2016 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2017 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2018 RegsToPass[i].second, InFlag);
2019 InFlag = Chain.getValue(1);
2022 // For tail calls lower the arguments to the 'real' stack slot.
2024 // Force all the incoming stack arguments to be loaded from the stack
2025 // before any new outgoing arguments are stored to the stack, because the
2026 // outgoing stack slots may alias the incoming argument stack slots, and
2027 // the alias isn't otherwise explicit. This is slightly more conservative
2028 // than necessary, because it means that each store effectively depends
2029 // on every argument instead of just those arguments it would clobber.
2031 // Do not flag preceding copytoreg stuff together with the following stuff.
2033 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
2034 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
2035 RegsToPass[i].second, InFlag);
2036 InFlag = Chain.getValue(1);
2041 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
2042 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
2043 // node so that legalize doesn't hack it.
2044 bool isDirect = false;
2046 const TargetMachine &TM = getTargetMachine();
2047 const Module *Mod = MF.getFunction()->getParent();
2048 const GlobalValue *GV = nullptr;
2049 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
2050 GV = G->getGlobal();
2052 !TM.shouldAssumeDSOLocal(*Mod, GV) && Subtarget->isTargetMachO();
2054 bool isARMFunc = !Subtarget->isThumb() || (isStub && !Subtarget->isMClass());
2055 bool isLocalARMFunc = false;
2056 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2057 auto PtrVt = getPointerTy(DAG.getDataLayout());
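// With long calls enabled, load the callee's address from a constant pool
// entry and call indirectly, so the call can reach any address.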
2059 if (Subtarget->genLongCalls()) {
2060 assert((!isPositionIndependent() || Subtarget->isTargetWindows()) &&
2061 "long-calls codegen is not position independent!");
2062 // Handle a global address or an external symbol. If it's not one of
2063 // those, the target's already in a register, so we don't need to do
2065 if (isa<GlobalAddressSDNode>(Callee)) {
2066 // Create a constant pool entry for the callee address
2067 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2068 ARMConstantPoolValue *CPV =
2069 ARMConstantPoolConstant::Create(GV, ARMPCLabelIndex, ARMCP::CPValue, 0);
2071 // Get the address of the callee into a register
2072 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2073 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2074 Callee = DAG.getLoad(
2075 PtrVt, dl, DAG.getEntryNode(), CPAddr,
2076 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2077 } else if (ExternalSymbolSDNode *S=dyn_cast<ExternalSymbolSDNode>(Callee)) {
2078 const char *Sym = S->getSymbol();
2080 // Create a constant pool entry for the callee address
2081 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2082 ARMConstantPoolValue *CPV =
2083 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2084 ARMPCLabelIndex, 0);
2085 // Get the address of the callee into a register
2086 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2087 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2088 Callee = DAG.getLoad(
2089 PtrVt, dl, DAG.getEntryNode(), CPAddr,
2090 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2092 } else if (isa<GlobalAddressSDNode>(Callee)) {
2093 // If we're optimizing for minimum size and the function is called three or
2094 // more times in this block, we can improve codesize by calling indirectly
2095 // as BLXr has a 16-bit encoding.
2096 auto *GV = cast<GlobalAddressSDNode>(Callee)->getGlobal();
2097 auto *BB = CLI.CS->getParent();
2098 bool PreferIndirect =
2099 Subtarget->isThumb() && MF.getFunction()->optForMinSize() &&
2100 count_if(GV->users(), [&BB](const User *U) {
2101 return isa<Instruction>(U) && cast<Instruction>(U)->getParent() == BB;
2104 if (!PreferIndirect) {
2106 bool isDef = GV->isStrongDefinitionForLinker();
2108 // ARM call to a local ARM function is predicable.
2109 isLocalARMFunc = !Subtarget->isThumb() && (isDef || !ARMInterworking);
2110 // tBX takes a register source operand.
2111 if (isStub && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2112 assert(Subtarget->isTargetMachO() && "WrapperPIC use on non-MachO?");
2113 Callee = DAG.getNode(
2114 ARMISD::WrapperPIC, dl, PtrVt,
2115 DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, ARMII::MO_NONLAZY));
2116 Callee = DAG.getLoad(
2117 PtrVt, dl, DAG.getEntryNode(), Callee,
2118 MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2119 /* Alignment = */ 0, MachineMemOperand::MODereferenceable |
2120 MachineMemOperand::MOInvariant);
2121 } else if (Subtarget->isTargetCOFF()) {
2122 assert(Subtarget->isTargetWindows() &&
2123 "Windows is the only supported COFF target");
2124 unsigned TargetFlags = GV->hasDLLImportStorageClass()
2125 ? ARMII::MO_DLLIMPORT
2126 : ARMII::MO_NO_FLAG;
2127 Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, /*Offset=*/0,
2129 if (GV->hasDLLImportStorageClass())
2131 DAG.getLoad(PtrVt, dl, DAG.getEntryNode(),
2132 DAG.getNode(ARMISD::Wrapper, dl, PtrVt, Callee),
2133 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
2135 Callee = DAG.getTargetGlobalAddress(GV, dl, PtrVt, 0, 0);
2138 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
2140 // tBX takes a register source operand.
2141 const char *Sym = S->getSymbol();
2142 if (isARMFunc && Subtarget->isThumb1Only() && !Subtarget->hasV5TOps()) {
2143 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2144 ARMConstantPoolValue *CPV =
2145 ARMConstantPoolSymbol::Create(*DAG.getContext(), Sym,
2146 ARMPCLabelIndex, 4);
2147 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVt, 4);
2148 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
2149 Callee = DAG.getLoad(
2150 PtrVt, dl, DAG.getEntryNode(), CPAddr,
2151 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2152 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2153 Callee = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVt, Callee, PICLabel);
2155 Callee = DAG.getTargetExternalSymbol(Sym, PtrVt, 0);
2159 // FIXME: handle tail calls differently.
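// Select the call opcode. Indirect calls on cores without BLX (pre-v5T)
// cannot use a link-register-setting branch and go through CALL_NOLINK;
// direct ARM calls to local ARM functions may be predicated (CALL_PRED).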
2161 if (Subtarget->isThumb()) {
2162 if ((!isDirect || isARMFunc) && !Subtarget->hasV5TOps())
2163 CallOpc = ARMISD::CALL_NOLINK;
2165 CallOpc = ARMISD::CALL;
2167 if (!isDirect && !Subtarget->hasV5TOps())
2168 CallOpc = ARMISD::CALL_NOLINK;
2169 else if (doesNotRet && isDirect && Subtarget->hasRetAddrStack() &&
2170 // Emit regular call when code size is the priority
2171 !MF.getFunction()->optForMinSize())
2172 // "mov lr, pc; b _foo" to avoid confusing the RSP
2173 CallOpc = ARMISD::CALL_NOLINK;
2175 CallOpc = isLocalARMFunc ? ARMISD::CALL_PRED : ARMISD::CALL;
2178 std::vector<SDValue> Ops;
2179 Ops.push_back(Chain);
2180 Ops.push_back(Callee);
2182 // Add argument registers to the end of the list so that they are known live
2184 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
2185 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
2186 RegsToPass[i].second.getValueType()));
2188 // Add a register mask operand representing the call-preserved registers.
2190 const uint32_t *Mask;
2191 const ARMBaseRegisterInfo *ARI = Subtarget->getRegisterInfo();
2193 // For 'this' returns, use the R0-preserving mask if applicable
2194 Mask = ARI->getThisReturnPreservedMask(MF, CallConv);
2196 // Set isThisReturn to false if the calling convention is not one that
2197 // allows 'returned' to be modeled in this way, so LowerCallResult does
2198 // not try to pass 'this' straight through
2199 isThisReturn = false;
2200 Mask = ARI->getCallPreservedMask(MF, CallConv);
2203 Mask = ARI->getCallPreservedMask(MF, CallConv);
2205 assert(Mask && "Missing call preserved mask for calling convention");
2206 Ops.push_back(DAG.getRegisterMask(Mask));
2209 if (InFlag.getNode())
2210 Ops.push_back(InFlag);
2212 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
2214 MF.getFrameInfo().setHasTailCall();
2215 return DAG.getNode(ARMISD::TC_RETURN, dl, NodeTys, Ops);
2218 // Returns a chain and a flag for retval copy to use.
2219 Chain = DAG.getNode(CallOpc, dl, NodeTys, Ops);
2220 InFlag = Chain.getValue(1);
2222 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, dl, true),
2223 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
2225 InFlag = Chain.getValue(1);
2227 // Handle result values, copying them out of physregs into vregs that we
2229 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
2230 InVals, isThisReturn,
2231 isThisReturn ? OutVals[0] : SDValue());
2234 /// HandleByVal - Every parameter *after* a byval parameter is passed
2235 /// on the stack. Remember the next parameter register to allocate,
2236 /// and then confiscate the rest of the parameter registers to ensure
2238 void ARMTargetLowering::HandleByVal(CCState *State, unsigned &Size,
2239 unsigned Align) const {
2240 assert((State->getCallOrPrologue() == Prologue ||
2241 State->getCallOrPrologue() == Call) &&
2242 "unhandled ParmContext");
2244 // Byval (as with any stack) slots are always at least 4 byte aligned.
2245 Align = std::max(Align, 4U);
2247 unsigned Reg = State->AllocateReg(GPRArgRegs);
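// A byval argument with greater than 4-byte alignment must start in a
// suitably aligned register (e.g. an even-numbered register for 8-byte
// alignment), so waste registers until that boundary is reached.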
2251 unsigned AlignInRegs = Align / 4;
2252 unsigned Waste = (ARM::R4 - Reg) % AlignInRegs;
2253 for (unsigned i = 0; i < Waste; ++i)
2254 Reg = State->AllocateReg(GPRArgRegs);
2259 unsigned Excess = 4 * (ARM::R4 - Reg);
2261 // Special case when NSAA != SP and the parameter size is greater than the
2262 // size of all remaining GPR regs. In that case we can't split the parameter;
2263 // we must send it to the stack. We also must set NCRN to R4, so all
2264 // remaining registers are wasted.
2265 const unsigned NSAAOffset = State->getNextStackOffset();
2266 if (NSAAOffset != 0 && Size > Excess) {
2267 while (State->AllocateReg(GPRArgRegs))
2272 // The first register for the byval parameter is the first register that
2273 // wasn't allocated before this method call, so it would be "reg".
2274 // If the parameter is small enough to be saved in the range [reg, r4), then
2275 // the end (first after last) register would be reg + param-size-in-regs;
2276 // otherwise the parameter would be split between registers and stack,
2277 // and the end register would be r4 in this case.
2278 unsigned ByValRegBegin = Reg;
2279 unsigned ByValRegEnd = std::min<unsigned>(Reg + Size / 4, ARM::R4);
2280 State->addInRegsParamInfo(ByValRegBegin, ByValRegEnd);
2281 // Note, the first register has already been allocated at the beginning of
2282 // this function, so allocate the remaining registers we need.
2283 for (unsigned i = Reg + 1; i != ByValRegEnd; ++i)
2284 State->AllocateReg(GPRArgRegs);
2285 // A byval parameter that is split between registers and memory needs its
2286 // size truncated here.
2287 // In the case where the entire structure fits in registers, we set the
2288 // size in memory to zero.
2289 Size = std::max<int>(Size - Excess, 0);
2292 /// MatchingStackOffset - Return true if the given stack call argument is
2293 /// already available at the same (relative) position in the caller's
2294 /// incoming argument stack.
2296 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
2297 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
2298 const TargetInstrInfo *TII) {
2299 unsigned Bytes = Arg.getValueSizeInBits() / 8;
2301 if (Arg.getOpcode() == ISD::CopyFromReg) {
2302 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
2303 if (!TargetRegisterInfo::isVirtualRegister(VR))
2305 MachineInstr *Def = MRI->getVRegDef(VR);
2308 if (!Flags.isByVal()) {
2309 if (!TII->isLoadFromStackSlot(*Def, FI))
2314 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
2315 if (Flags.isByVal())
2316 // ByVal argument is passed in as a pointer but it's now being
2317 // dereferenced. e.g.
2318 // define @foo(%struct.X* %A) {
2319 // tail call @bar(%struct.X* byval %A)
2322 SDValue Ptr = Ld->getBasePtr();
2323 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
2326 FI = FINode->getIndex();
2330 assert(FI != INT_MAX);
2331 if (!MFI.isFixedObjectIndex(FI))
2333 return Offset == MFI.getObjectOffset(FI) && Bytes == MFI.getObjectSize(FI);
2336 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
2337 /// for tail call optimization. Targets which want to do tail call
2338 /// optimization should implement this function.
2340 ARMTargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
2341 CallingConv::ID CalleeCC,
2343 bool isCalleeStructRet,
2344 bool isCallerStructRet,
2345 const SmallVectorImpl<ISD::OutputArg> &Outs,
2346 const SmallVectorImpl<SDValue> &OutVals,
2347 const SmallVectorImpl<ISD::InputArg> &Ins,
2348 SelectionDAG& DAG) const {
2349 MachineFunction &MF = DAG.getMachineFunction();
2350 const Function *CallerF = MF.getFunction();
2351 CallingConv::ID CallerCC = CallerF->getCallingConv();
2353 assert(Subtarget->supportsTailCall());
2355 // Look for obvious safe cases to perform tail call optimization that do not
2356 // require ABI changes. This is what gcc calls sibcall.
2358 // Exception-handling functions need a special set of instructions to indicate
2359 // a return to the hardware. Tail-calling another function would probably
2361 if (CallerF->hasFnAttribute("interrupt"))
2364 // Also avoid sibcall optimization if either caller or callee uses struct
2365 // return semantics.
2366 if (isCalleeStructRet || isCallerStructRet)
2369 // Externally-defined functions with weak linkage should not be
2370 // tail-called on ARM when the OS does not support dynamic
2371 // pre-emption of symbols, as the AAELF spec requires normal calls
2372 // to undefined weak functions to be replaced with a NOP or jump to the
2373 // next instruction. The behaviour of branch instructions in this
2374 // situation (as used for tail calls) is implementation-defined, so we
2375 // cannot rely on the linker replacing the tail call with a return.
2376 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2377 const GlobalValue *GV = G->getGlobal();
2378 const Triple &TT = getTargetMachine().getTargetTriple();
2379 if (GV->hasExternalWeakLinkage() &&
2380 (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
2384 // Check that the call results are passed in the same way.
2385 LLVMContext &C = *DAG.getContext();
2386 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
2387 CCAssignFnForReturn(CalleeCC, isVarArg),
2388 CCAssignFnForReturn(CallerCC, isVarArg)))
2390 // The callee has to preserve all registers the caller needs to preserve.
2391 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2392 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2393 if (CalleeCC != CallerCC) {
2394 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2395 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2399 // If Caller's vararg or byval argument has been split between registers and
2400 // stack, do not perform tail call, since part of the argument is in caller's
2402 const ARMFunctionInfo *AFI_Caller = MF.getInfo<ARMFunctionInfo>();
2403 if (AFI_Caller->getArgRegsSaveSize())
2406 // If the callee takes no arguments then go on to check the results of the
2408 if (!Outs.empty()) {
2409 // Check if stack adjustment is needed. For now, do not do this if any
2410 // argument is passed on the stack.
2411 SmallVector<CCValAssign, 16> ArgLocs;
2412 ARMCCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C, Call);
2413 CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));
2414 if (CCInfo.getNextStackOffset()) {
2415 // Check if the arguments are already laid out in the right way as
2416 // the caller's fixed stack objects.
2417 MachineFrameInfo &MFI = MF.getFrameInfo();
2418 const MachineRegisterInfo *MRI = &MF.getRegInfo();
2419 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
2420 for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size();
2422 ++i, ++realArgIdx) {
2423 CCValAssign &VA = ArgLocs[i];
2424 EVT RegVT = VA.getLocVT();
2425 SDValue Arg = OutVals[realArgIdx];
2426 ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
2427 if (VA.getLocInfo() == CCValAssign::Indirect)
2429 if (VA.needsCustom()) {
2430 // f64 and vector types are split into multiple registers or
2431 // register/stack-slot combinations. The types will not match
2432 // the registers; give up on memory f64 refs until we figure
2433 // out what to do about this.
2436 if (!ArgLocs[++i].isRegLoc())
2438 if (RegVT == MVT::v2f64) {
2439 if (!ArgLocs[++i].isRegLoc())
2441 if (!ArgLocs[++i].isRegLoc())
2444 } else if (!VA.isRegLoc()) {
2445 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
2452 const MachineRegisterInfo &MRI = MF.getRegInfo();
2453 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
2461 ARMTargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2462 MachineFunction &MF, bool isVarArg,
2463 const SmallVectorImpl<ISD::OutputArg> &Outs,
2464 LLVMContext &Context) const {
2465 SmallVector<CCValAssign, 16> RVLocs;
2466 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2467 return CCInfo.CheckReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2470 static SDValue LowerInterruptReturn(SmallVectorImpl<SDValue> &RetOps,
2471 const SDLoc &DL, SelectionDAG &DAG) {
2472 const MachineFunction &MF = DAG.getMachineFunction();
2473 const Function *F = MF.getFunction();
2475 StringRef IntKind = F->getFnAttribute("interrupt").getValueAsString();
2477 // See ARM ARM v7 B1.8.3. On exception entry LR is set to a possibly offset
2478 // version of the "preferred return address". These offsets affect the return
2479 // instruction if this is a return from PL1 without hypervisor extensions.
2480 // IRQ/FIQ: +4 "subs pc, lr, #4"
2481 // SWI: 0 "subs pc, lr, #0"
2482 // ABORT: +4 "subs pc, lr, #4"
2483 // UNDEF: +4/+2 "subs pc, lr, #0"
2484 // UNDEF varies depending on whether the exception came from ARM or Thumb
2485 // mode. Alongside GCC, we throw our hands up in disgust and pretend it's 0.
2488 if (IntKind == "" || IntKind == "IRQ" || IntKind == "FIQ" ||
2491 else if (IntKind == "SWI" || IntKind == "UNDEF")
2494 report_fatal_error("Unsupported interrupt attribute. If present, value "
2495 "must be one of: IRQ, FIQ, SWI, ABORT or UNDEF");
2497 RetOps.insert(RetOps.begin() + 1,
2498 DAG.getConstant(LROffset, DL, MVT::i32, false));
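// The offset is carried as the second operand of the INTRET_FLAG node so the
// return can be emitted as "subs pc, lr, #Offset".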
2500 return DAG.getNode(ARMISD::INTRET_FLAG, DL, MVT::Other, RetOps);
2504 ARMTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2506 const SmallVectorImpl<ISD::OutputArg> &Outs,
2507 const SmallVectorImpl<SDValue> &OutVals,
2508 const SDLoc &dl, SelectionDAG &DAG) const {
2510 // CCValAssign - represent the assignment of the return value to a location.
2511 SmallVector<CCValAssign, 16> RVLocs;
2513 // CCState - Info about the registers and stack slots.
2514 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2515 *DAG.getContext(), Call);
2517 // Analyze outgoing return values.
2518 CCInfo.AnalyzeReturn(Outs, CCAssignFnForReturn(CallConv, isVarArg));
2521 SmallVector<SDValue, 4> RetOps;
2522 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2523 bool isLittleEndian = Subtarget->isLittle();
2525 MachineFunction &MF = DAG.getMachineFunction();
2526 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2527 AFI->setReturnRegsCount(RVLocs.size());
2529 // Copy the result values into the output registers.
2530 for (unsigned i = 0, realRVLocIdx = 0;
2532 ++i, ++realRVLocIdx) {
2533 CCValAssign &VA = RVLocs[i];
2534 assert(VA.isRegLoc() && "Can only return in registers!");
2536 SDValue Arg = OutVals[realRVLocIdx];
2538 switch (VA.getLocInfo()) {
2539 default: llvm_unreachable("Unknown loc info!");
2540 case CCValAssign::Full: break;
2541 case CCValAssign::BCvt:
2542 Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
2546 if (VA.needsCustom()) {
2547 if (VA.getLocVT() == MVT::v2f64) {
2548 // Extract the first half and return it in two registers.
2549 SDValue Half = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2550 DAG.getConstant(0, dl, MVT::i32));
2551 SDValue HalfGPRs = DAG.getNode(ARMISD::VMOVRRD, dl,
2552 DAG.getVTList(MVT::i32, MVT::i32), Half);
2554 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2555 HalfGPRs.getValue(isLittleEndian ? 0 : 1),
2557 Flag = Chain.getValue(1);
2558 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2559 VA = RVLocs[++i]; // skip ahead to next loc
2560 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2561 HalfGPRs.getValue(isLittleEndian ? 1 : 0),
2563 Flag = Chain.getValue(1);
2564 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2565 VA = RVLocs[++i]; // skip ahead to next loc
2567 // Extract the 2nd half and fall through to handle it as an f64 value.
2568 Arg = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Arg,
2569 DAG.getConstant(1, dl, MVT::i32));
2571 // Legalize ret f64 -> ret 2 x i32. We always have fmrrd if f64 is
2573 SDValue fmrrd = DAG.getNode(ARMISD::VMOVRRD, dl,
2574 DAG.getVTList(MVT::i32, MVT::i32), Arg);
2575 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2576 fmrrd.getValue(isLittleEndian ? 0 : 1),
2578 Flag = Chain.getValue(1);
2579 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2580 VA = RVLocs[++i]; // skip ahead to next loc
2581 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
2582 fmrrd.getValue(isLittleEndian ? 1 : 0),
2585 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), Arg, Flag);
2587 // Guarantee that all emitted copies are
2588 // stuck together, avoiding something bad.
2589 Flag = Chain.getValue(1);
2590 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2592 const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
2593 const MCPhysReg *I =
2594 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
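// If this function saves some callee-saved registers via copies (e.g. under
// the CXX_FAST_TLS convention), report them as additional return operands so
// they are treated as live-out.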
2597 if (ARM::GPRRegClass.contains(*I))
2598 RetOps.push_back(DAG.getRegister(*I, MVT::i32));
2599 else if (ARM::DPRRegClass.contains(*I))
2600 RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
2602 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2606 // Update chain and glue.
2609 RetOps.push_back(Flag);
2611 // CPUs which aren't M-class use a special sequence to return from
2612 // exceptions (roughly, any instruction setting pc and cpsr simultaneously,
2613 // though we use "subs pc, lr, #N").
2615 // M-class CPUs actually use a normal return sequence with a special
2616 // (hardware-provided) value in LR, so the normal code path works.
2617 if (DAG.getMachineFunction().getFunction()->hasFnAttribute("interrupt") &&
2618 !Subtarget->isMClass()) {
2619 if (Subtarget->isThumb1Only())
2620 report_fatal_error("interrupt attribute is not supported in Thumb1");
2621 return LowerInterruptReturn(RetOps, dl, DAG);
2624 return DAG.getNode(ARMISD::RET_FLAG, dl, MVT::Other, RetOps);
2627 bool ARMTargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2628 if (N->getNumValues() != 1)
2630 if (!N->hasNUsesOfValue(1, 0))
2633 SDValue TCChain = Chain;
2634 SDNode *Copy = *N->use_begin();
2635 if (Copy->getOpcode() == ISD::CopyToReg) {
2636 // If the copy has a glue operand, we conservatively assume it isn't safe to
2637 // perform a tail call.
2638 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2640 TCChain = Copy->getOperand(0);
2641 } else if (Copy->getOpcode() == ARMISD::VMOVRRD) {
2642 SDNode *VMov = Copy;
2643 // f64 returned in a pair of GPRs.
2644 SmallPtrSet<SDNode*, 2> Copies;
2645 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
2647 if (UI->getOpcode() != ISD::CopyToReg)
2651 if (Copies.size() > 2)
2654 for (SDNode::use_iterator UI = VMov->use_begin(), UE = VMov->use_end();
2656 SDValue UseChain = UI->getOperand(0);
2657 if (Copies.count(UseChain.getNode()))
2661 // We are at the top of this chain.
2662 // If the copy has a glue operand, we conservatively assume it
2663 // isn't safe to perform a tail call.
2664 if (UI->getOperand(UI->getNumOperands()-1).getValueType() == MVT::Glue)
2670 } else if (Copy->getOpcode() == ISD::BITCAST) {
2671 // f32 returned in a single GPR.
2672 if (!Copy->hasOneUse())
2674 Copy = *Copy->use_begin();
2675 if (Copy->getOpcode() != ISD::CopyToReg || !Copy->hasNUsesOfValue(1, 0))
2677 // If the copy has a glue operand, we conservatively assume it isn't safe to
2678 // perform a tail call.
2679 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2681 TCChain = Copy->getOperand(0);
2686 bool HasRet = false;
2687 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2689 if (UI->getOpcode() != ARMISD::RET_FLAG &&
2690 UI->getOpcode() != ARMISD::INTRET_FLAG)
2702 bool ARMTargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2703 if (!Subtarget->supportsTailCall())
2707 CI->getParent()->getParent()->getFnAttribute("disable-tail-calls");
2708 if (!CI->isTailCall() || Attr.getValueAsString() == "true")
2714 // We are trying to write a 64-bit value, so we need to split it into two
2715 // 32-bit values first and pass the low and high parts through.
2716 static SDValue LowerWRITE_REGISTER(SDValue Op, SelectionDAG &DAG) {
2718 SDValue WriteValue = Op->getOperand(2);
2720 // This function is only supposed to be called for an i64 type argument.
2721 assert(WriteValue.getValueType() == MVT::i64
2722 && "LowerWRITE_REGISTER called for non-i64 type argument.");
2724 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
2725 DAG.getConstant(0, DL, MVT::i32));
2726 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, WriteValue,
2727 DAG.getConstant(1, DL, MVT::i32));
2728 SDValue Ops[] = { Op->getOperand(0), Op->getOperand(1), Lo, Hi };
2729 return DAG.getNode(ISD::WRITE_REGISTER, DL, MVT::Other, Ops);
2732 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
2733 // their target counterparts wrapped in the ARMISD::Wrapper node. Suppose N is
2734 // one of the above-mentioned nodes. It has to be wrapped because otherwise
2735 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
2736 // be used to form an addressing mode. These wrapped nodes will be selected
2738 static SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
2739 EVT PtrVT = Op.getValueType();
2740 // FIXME there is no actual debug info here
2742 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
2744 if (CP->isMachineConstantPoolEntry())
2745 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
2746 CP->getAlignment());
2748 Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
2749 CP->getAlignment());
2750 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Res);
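// ARM emits its jump tables inline in the text section (as constant islands),
// which corresponds to the EK_Inline encoding returned below.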
2753 unsigned ARMTargetLowering::getJumpTableEncoding() const {
2754 return MachineJumpTableInfo::EK_Inline;
2757 SDValue ARMTargetLowering::LowerBlockAddress(SDValue Op,
2758 SelectionDAG &DAG) const {
2759 MachineFunction &MF = DAG.getMachineFunction();
2760 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2761 unsigned ARMPCLabelIndex = 0;
2763 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2764 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
2766 bool IsPositionIndependent = isPositionIndependent() || Subtarget->isROPI();
2767 if (!IsPositionIndependent) {
2768 CPAddr = DAG.getTargetConstantPool(BA, PtrVT, 4);
2770 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
2771 ARMPCLabelIndex = AFI->createPICLabelUId();
2772 ARMConstantPoolValue *CPV =
2773 ARMConstantPoolConstant::Create(BA, ARMPCLabelIndex,
2774 ARMCP::CPBlockAddress, PCAdj);
2775 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2777 CPAddr = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, CPAddr);
2778 SDValue Result = DAG.getLoad(
2779 PtrVT, DL, DAG.getEntryNode(), CPAddr,
2780 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2781 if (!IsPositionIndependent)
2783 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, DL, MVT::i32);
2784 return DAG.getNode(ARMISD::PIC_ADD, DL, PtrVT, Result, PICLabel);
2787 /// \brief Convert a TLS address reference into the correct sequence of loads
2788 /// and calls to compute the variable's address for Darwin, and return an
2789 /// SDValue containing the final node.
2791 /// Darwin only has one TLS scheme which must be capable of dealing with the
2792 /// fully general situation, in the worst case. This means:
2793 /// + "extern __thread" declaration.
2794 /// + Defined in a possibly unknown dynamic library.
2796 /// The general system is that each __thread variable has a [3 x i32] descriptor
2797 /// which contains information used by the runtime to calculate the address. The
2798 /// only part of this the compiler needs to know about is the first word, which
2799 /// contains a function pointer that must be called with the address of the
2800 /// entire descriptor in "r0".
2802 /// Since this descriptor may be in a different unit, in general access must
2803 /// proceed along the usual ARM rules. A common sequence to produce is:
2805 /// movw rT1, :lower16:_var$non_lazy_ptr
2806 /// movt rT1, :upper16:_var$non_lazy_ptr
2810 /// [...address now in r0...]
2812 ARMTargetLowering::LowerGlobalTLSAddressDarwin(SDValue Op,
2813 SelectionDAG &DAG) const {
2814 assert(Subtarget->isTargetDarwin() && "TLS only supported on Darwin");
2817 // The first step is to get the address of the actual global symbol. This is where
2818 // the TLS descriptor lives.
2819 SDValue DescAddr = LowerGlobalAddressDarwin(Op, DAG);
2821 // The first entry in the descriptor is a function pointer that we must call
2822 // to obtain the address of the variable.
2823 SDValue Chain = DAG.getEntryNode();
2824 SDValue FuncTLVGet = DAG.getLoad(
2825 MVT::i32, DL, Chain, DescAddr,
2826 MachinePointerInfo::getGOT(DAG.getMachineFunction()),
2827 /* Alignment = */ 4,
2828 MachineMemOperand::MONonTemporal | MachineMemOperand::MODereferenceable |
2829 MachineMemOperand::MOInvariant);
2830 Chain = FuncTLVGet.getValue(1);
2832 MachineFunction &F = DAG.getMachineFunction();
2833 MachineFrameInfo &MFI = F.getFrameInfo();
2834 MFI.setAdjustsStack(true);
2836 // TLS calls preserve all registers except those that absolutely must be
2837 // trashed: R0 (it takes an argument), LR (it's a call) and CPSR (let's not be
2840 getTargetMachine().getSubtargetImpl(*F.getFunction())->getRegisterInfo();
2841 auto ARI = static_cast<const ARMRegisterInfo *>(TRI);
2842 const uint32_t *Mask = ARI->getTLSCallPreservedMask(DAG.getMachineFunction());
2844 // Finally, we can make the call. This is just a degenerate version of a
2845 // normal ARM call node: r0 takes the address of the descriptor, and
2846 // returns the address of the variable in this thread.
2847 Chain = DAG.getCopyToReg(Chain, DL, ARM::R0, DescAddr, SDValue());
2849 DAG.getNode(ARMISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
2850 Chain, FuncTLVGet, DAG.getRegister(ARM::R0, MVT::i32),
2851 DAG.getRegisterMask(Mask), Chain.getValue(1));
2852 return DAG.getCopyFromReg(Chain, DL, ARM::R0, MVT::i32, Chain.getValue(1));
2856 ARMTargetLowering::LowerGlobalTLSAddressWindows(SDValue Op,
2857 SelectionDAG &DAG) const {
2858 assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");
2860 SDValue Chain = DAG.getEntryNode();
2861 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2864 // Load the current TEB (thread environment block)
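// This reads CP15 c13/c0/2 (the user read/write thread ID register, TPIDRURW)
// via the arm_mrc intrinsic below.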
2865 SDValue Ops[] = {Chain,
2866 DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
2867 DAG.getConstant(15, DL, MVT::i32),
2868 DAG.getConstant(0, DL, MVT::i32),
2869 DAG.getConstant(13, DL, MVT::i32),
2870 DAG.getConstant(0, DL, MVT::i32),
2871 DAG.getConstant(2, DL, MVT::i32)};
2872 SDValue CurrentTEB = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
2873 DAG.getVTList(MVT::i32, MVT::Other), Ops);
2875 SDValue TEB = CurrentTEB.getValue(0);
2876 Chain = CurrentTEB.getValue(1);
2878 // Load the ThreadLocalStoragePointer from the TEB
2879 // A pointer to the TLS array is located at offset 0x2c from the TEB.
2881 DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x2c, DL));
2882 TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
2884 // The pointer to the thread's TLS data area is found at the TLS index,
2885 // scaled by 4, as an offset into the TLS array.
2887 // Load the TLS index from the C runtime
2889 DAG.getTargetExternalSymbol("_tls_index", PtrVT, ARMII::MO_NO_FLAG);
2890 TLSIndex = DAG.getNode(ARMISD::Wrapper, DL, PtrVT, TLSIndex);
2891 TLSIndex = DAG.getLoad(PtrVT, DL, Chain, TLSIndex, MachinePointerInfo());
2893 SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
2894 DAG.getConstant(2, DL, MVT::i32));
2895 SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
2896 DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
2897 MachinePointerInfo());
2899 // Get the offset of the start of the .tls section (section base)
2900 const auto *GA = cast<GlobalAddressSDNode>(Op);
2901 auto *CPV = ARMConstantPoolConstant::Create(GA->getGlobal(), ARMCP::SECREL);
2902 SDValue Offset = DAG.getLoad(
2903 PtrVT, DL, Chain, DAG.getNode(ARMISD::Wrapper, DL, MVT::i32,
2904 DAG.getTargetConstantPool(CPV, PtrVT, 4)),
2905 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2907 return DAG.getNode(ISD::ADD, DL, PtrVT, TLS, Offset);
2910 // Lower ISD::GlobalTLSAddress using the "general dynamic" model
2912 ARMTargetLowering::LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
2913 SelectionDAG &DAG) const {
2915 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2916 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
2917 MachineFunction &MF = DAG.getMachineFunction();
2918 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2919 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2920 ARMConstantPoolValue *CPV =
2921 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
2922 ARMCP::CPValue, PCAdj, ARMCP::TLSGD, true);
2923 SDValue Argument = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2924 Argument = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Argument);
2925 Argument = DAG.getLoad(
2926 PtrVT, dl, DAG.getEntryNode(), Argument,
2927 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2928 SDValue Chain = Argument.getValue(1);
2930 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2931 Argument = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Argument, PICLabel);
2933 // call __tls_get_addr.
2936 Entry.Node = Argument;
2937 Entry.Ty = (Type *) Type::getInt32Ty(*DAG.getContext());
2938 Args.push_back(Entry);
2940 // FIXME: is there useful debug info available here?
2941 TargetLowering::CallLoweringInfo CLI(DAG);
2942 CLI.setDebugLoc(dl).setChain(Chain)
2943 .setCallee(CallingConv::C, Type::getInt32Ty(*DAG.getContext()),
2944 DAG.getExternalSymbol("__tls_get_addr", PtrVT), std::move(Args));
2946 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
2947 return CallResult.first;
2950 // Lower ISD::GlobalTLSAddress using the "initial exec" or
2951 // "local exec" model.
2953 ARMTargetLowering::LowerToTLSExecModels(GlobalAddressSDNode *GA,
2955 TLSModel::Model model) const {
2956 const GlobalValue *GV = GA->getGlobal();
2959 SDValue Chain = DAG.getEntryNode();
2960 EVT PtrVT = getPointerTy(DAG.getDataLayout());
2961 // Get the Thread Pointer
2962 SDValue ThreadPointer = DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
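// THREAD_POINTER is typically expanded to a read of the TPIDRURO thread ID
// register, or to a call to __aeabi_read_tp on cores without it.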
2964 if (model == TLSModel::InitialExec) {
2965 MachineFunction &MF = DAG.getMachineFunction();
2966 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
2967 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
2968 // Initial exec model.
2969 unsigned char PCAdj = Subtarget->isThumb() ? 4 : 8;
2970 ARMConstantPoolValue *CPV =
2971 ARMConstantPoolConstant::Create(GA->getGlobal(), ARMPCLabelIndex,
2972 ARMCP::CPValue, PCAdj, ARMCP::GOTTPOFF,
2974 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2975 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
2976 Offset = DAG.getLoad(
2977 PtrVT, dl, Chain, Offset,
2978 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2979 Chain = Offset.getValue(1);
2981 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
2982 Offset = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Offset, PICLabel);
2984 Offset = DAG.getLoad(
2985 PtrVT, dl, Chain, Offset,
2986 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2989 assert(model == TLSModel::LocalExec);
2990 ARMConstantPoolValue *CPV =
2991 ARMConstantPoolConstant::Create(GV, ARMCP::TPOFF);
2992 Offset = DAG.getTargetConstantPool(CPV, PtrVT, 4);
2993 Offset = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, Offset);
2994 Offset = DAG.getLoad(
2995 PtrVT, dl, Chain, Offset,
2996 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
2999 // The address of the thread local variable is the add of the thread
3000 // pointer with the offset of the variable.
3001 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
3005 ARMTargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
3006 if (Subtarget->isTargetDarwin())
3007 return LowerGlobalTLSAddressDarwin(Op, DAG);
3009 if (Subtarget->isTargetWindows())
3010 return LowerGlobalTLSAddressWindows(Op, DAG);
3012 // TODO: implement the "local dynamic" model
3013 assert(Subtarget->isTargetELF() && "Only ELF implemented here");
3014 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
3015 if (DAG.getTarget().Options.EmulatedTLS)
3016 return LowerToTLSEmulatedModel(GA, DAG);
3018 TLSModel::Model model = getTargetMachine().getTLSModel(GA->getGlobal());
3021 case TLSModel::GeneralDynamic:
3022 case TLSModel::LocalDynamic:
3023 return LowerToTLSGeneralDynamicModel(GA, DAG);
3024 case TLSModel::InitialExec:
3025 case TLSModel::LocalExec:
3026 return LowerToTLSExecModels(GA, DAG, model);
3028 llvm_unreachable("bogus TLS model");
3031 /// Return true if all users of V are within function F, looking through
3032 /// ConstantExprs.
3033 static bool allUsersAreInFunction(const Value *V, const Function *F) {
3034 SmallVector<const User*,4> Worklist;
3035 for (auto *U : V->users())
3036 Worklist.push_back(U);
3037 while (!Worklist.empty()) {
3038 auto *U = Worklist.pop_back_val();
3039 if (isa<ConstantExpr>(U)) {
3040 for (auto *UU : U->users())
3041 Worklist.push_back(UU);
3045 auto *I = dyn_cast<Instruction>(U);
3046 if (!I || I->getParent()->getParent() != F)
3052 /// Return true if all users of V are within some (any) function, looking through
3053 /// ConstantExprs. In other words, are there any global constant users?
3054 static bool allUsersAreInFunctions(const Value *V) {
3055 SmallVector<const User*,4> Worklist;
3056 for (auto *U : V->users())
3057 Worklist.push_back(U);
3058 while (!Worklist.empty()) {
3059 auto *U = Worklist.pop_back_val();
3060 if (isa<ConstantExpr>(U)) {
3061 for (auto *UU : U->users())
3062 Worklist.push_back(UU);
3066 if (!isa<Instruction>(U))
3072 // Return true if T is an integer, float or an array/vector of either.
3073 static bool isSimpleType(Type *T) {
3074 if (T->isIntegerTy() || T->isFloatingPointTy())
3076 Type *SubT = nullptr;
3078 SubT = T->getArrayElementType();
3079 else if (T->isVectorTy())
3080 SubT = T->getVectorElementType();
3083 return SubT->isIntegerTy() || SubT->isFloatingPointTy();
3086 static SDValue promoteToConstantPool(const GlobalValue *GV, SelectionDAG &DAG,
3087 EVT PtrVT, SDLoc dl) {
3088 // If we're creating a pool entry for a constant global with unnamed address,
3089 // and the global is small enough, we can emit it inline into the constant pool
3090 // to save ourselves an indirection.
3092 // This is a win if the constant is only used in one function (so it doesn't
3093 // need to be duplicated) or duplicating the constant wouldn't increase code
3094 // size (implying the constant is no larger than 4 bytes).
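// For example (a sketch, not tied to any particular input): a small
//   static const char Key[] = "abc";
// used only inside one function can be emitted directly into that function's
// constant island, so the access becomes a single pc-relative load instead of
// loading the global's address and then its contents.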
3095 const Function *F = DAG.getMachineFunction().getFunction();
3097 // We rely on this decision to inline being idempotent and unrelated to the
3098 // use-site. We know that if we inline a variable at one use site, we'll
3099 // inline it elsewhere too (and reuse the constant pool entry). Fast-isel
3100 // doesn't know about this optimization, so bail out if it's enabled, else
3101 // we could decide to inline here (and thus never emit the GV) but require
3102 // the GV from fast-isel generated code.
3103 if (!EnableConstpoolPromotion ||
3104 DAG.getMachineFunction().getTarget().Options.EnableFastISel)
3107 auto *GVar = dyn_cast<GlobalVariable>(GV);
3108 if (!GVar || !GVar->hasInitializer() ||
3109 !GVar->isConstant() || !GVar->hasGlobalUnnamedAddr() ||
3110 !GVar->hasLocalLinkage())
3113 // Ensure that we don't try to inline any type that contains pointers. If
3114 // we inline a value that contains relocations, we move the relocations from
3115 // .data to .text, which is not ideal.
3116 auto *Init = GVar->getInitializer();
3117 if (!isSimpleType(Init->getType()))
3120 // The constant islands pass can only really deal with alignment requests
3121 // <= 4 bytes and cannot pad constants itself. Therefore we cannot promote
3122 // any type wanting greater alignment requirements than 4 bytes. We also
3123 // can only promote constants that are multiples of 4 bytes in size or
3124 // are paddable to a multiple of 4. Currently we only try to pad constants
3125 // that are strings for simplicity.
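// For example, a 6-byte string would be padded with two NUL bytes up to 8
// bytes so its constant-island entry stays a multiple of 4 bytes.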
3126 auto *CDAInit = dyn_cast<ConstantDataArray>(Init);
3127 unsigned Size = DAG.getDataLayout().getTypeAllocSize(Init->getType());
3128 unsigned Align = GVar->getAlignment();
3129 unsigned RequiredPadding = 4 - (Size % 4);
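// Note: RequiredPadding == 4 means Size is already a multiple of 4 and no
// padding is actually needed.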
3130 bool PaddingPossible =
3131 RequiredPadding == 4 || (CDAInit && CDAInit->isString());
3132 if (!PaddingPossible || Align > 4 || Size > ConstpoolPromotionMaxSize)
3135 unsigned PaddedSize = Size + ((RequiredPadding == 4) ? 0 : RequiredPadding);
3136 MachineFunction &MF = DAG.getMachineFunction();
3137 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3139 // We can't bloat the constant pool too much, else the ConstantIslands pass
3140 // may fail to converge. If we haven't promoted this global yet (it may have
3141 // multiple uses), and promoting it would increase the constant pool size (Sz
3142 // > 4), ensure we have space to do so up to MaxTotal.
3143 if (!AFI->getGlobalsPromotedToConstantPool().count(GVar) && Size > 4)
3144 if (AFI->getPromotedConstpoolIncrease() + PaddedSize - 4 >=
3145 ConstpoolPromotionMaxTotal)
3148 // This is only valid if all users are in a single function OR it has users
3149 // in multiple functions but it is no larger than a pointer. We also check if
3150 // GVar has constant (non-ConstantExpr) users. If so, it essentially has its
3151 // address taken.
3152 if (!allUsersAreInFunction(GVar, F) &&
3153 !(Size <= 4 && allUsersAreInFunctions(GVar)))
3156 // We're going to inline this global. Pad it out if needed.
3157 if (RequiredPadding != 4) {
3158 StringRef S = CDAInit->getAsString();
3160 SmallVector<uint8_t,16> V(S.size());
3161 std::copy(S.bytes_begin(), S.bytes_end(), V.begin());
3162 while (RequiredPadding--)
3163 V.push_back(0);
3164 Init = ConstantDataArray::get(*DAG.getContext(), V);
3167 auto CPVal = ARMConstantPoolConstant::Create(GVar, Init);
3169 DAG.getTargetConstantPool(CPVal, PtrVT, /*Align=*/4);
3170 if (!AFI->getGlobalsPromotedToConstantPool().count(GVar)) {
3171 AFI->markGlobalAsPromotedToConstantPool(GVar);
3172 AFI->setPromotedConstpoolIncrease(AFI->getPromotedConstpoolIncrease() +
3175 ++NumConstpoolPromoted;
3176 return DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3179 SDValue ARMTargetLowering::LowerGlobalAddressELF(SDValue Op,
3180 SelectionDAG &DAG) const {
3181 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3183 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3184 const TargetMachine &TM = getTargetMachine();
3185 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
3186 GV = GA->getBaseObject();
3188 (isa<GlobalVariable>(GV) && cast<GlobalVariable>(GV)->isConstant()) ||
3191 // Only call promoteToConstantPool when not generating an execute-only (XO) text section.
3192 if (TM.shouldAssumeDSOLocal(*GV->getParent(), GV) && !Subtarget->genExecuteOnly())
3193 if (SDValue V = promoteToConstantPool(GV, DAG, PtrVT, dl))
3196 if (isPositionIndependent()) {
3197 bool UseGOT_PREL = !TM.shouldAssumeDSOLocal(*GV->getParent(), GV);
3199 MachineFunction &MF = DAG.getMachineFunction();
3200 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3201 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3202 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3204 unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
3205 ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
3206 GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
3207 UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
3208 /*AddCurrentAddress=*/UseGOT_PREL);
3209 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3210 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3211 SDValue Result = DAG.getLoad(
3212 PtrVT, dl, DAG.getEntryNode(), CPAddr,
3213 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3214 SDValue Chain = Result.getValue(1);
3215 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3216 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
3219 DAG.getLoad(PtrVT, dl, Chain, Result,
3220 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3222 } else if (Subtarget->isROPI() && IsRO) {
3224 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT);
3225 SDValue Result = DAG.getNode(ARMISD::WrapperPIC, dl, PtrVT, G);
3227 } else if (Subtarget->isRWPI() && !IsRO) {
3229 ARMConstantPoolValue *CPV =
3230 ARMConstantPoolConstant::Create(GV, ARMCP::SBREL);
3231 SDValue CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3232 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3233 SDValue G = DAG.getLoad(
3234 PtrVT, dl, DAG.getEntryNode(), CPAddr,
3235 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3236 SDValue SB = DAG.getCopyFromReg(DAG.getEntryNode(), dl, ARM::R9, PtrVT);
3237 SDValue Result = DAG.getNode(ISD::ADD, dl, PtrVT, SB, G);
3241 // If we have T2 ops, we can materialize the address directly via movt/movw
3242 // pair. This is always cheaper.
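// e.g. the address is built as an immediate pair, roughly:
//   movw r0, :lower16:sym
//   movt r0, :upper16:sym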
3243 if (Subtarget->useMovt(DAG.getMachineFunction())) {
3245 // FIXME: Once remat is capable of dealing with instructions with register
3246 // operands, expand this into two nodes.
3247 return DAG.getNode(ARMISD::Wrapper, dl, PtrVT,
3248 DAG.getTargetGlobalAddress(GV, dl, PtrVT));
3250 SDValue CPAddr = DAG.getTargetConstantPool(GV, PtrVT, 4);
3251 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3253 PtrVT, dl, DAG.getEntryNode(), CPAddr,
3254 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3258 SDValue ARMTargetLowering::LowerGlobalAddressDarwin(SDValue Op,
3259 SelectionDAG &DAG) const {
3260 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3261 "ROPI/RWPI not currently supported for Darwin");
3262 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3264 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3266 if (Subtarget->useMovt(DAG.getMachineFunction()))
3269 // FIXME: Once remat is capable of dealing with instructions with register
3270 // operands, expand this into multiple nodes
3272 isPositionIndependent() ? ARMISD::WrapperPIC : ARMISD::Wrapper;
3274 SDValue G = DAG.getTargetGlobalAddress(GV, dl, PtrVT, 0, ARMII::MO_NONLAZY);
3275 SDValue Result = DAG.getNode(Wrapper, dl, PtrVT, G);
3277 if (Subtarget->isGVIndirectSymbol(GV))
3278 Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
3279 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3283 SDValue ARMTargetLowering::LowerGlobalAddressWindows(SDValue Op,
3284 SelectionDAG &DAG) const {
3285 assert(Subtarget->isTargetWindows() && "non-Windows COFF is not supported");
3286 assert(Subtarget->useMovt(DAG.getMachineFunction()) &&
3287 "Windows on ARM expects to use movw/movt");
3288 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
3289 "ROPI/RWPI not currently supported for Windows");
3291 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
3292 const ARMII::TOF TargetFlags =
3293 (GV->hasDLLImportStorageClass() ? ARMII::MO_DLLIMPORT : ARMII::MO_NO_FLAG);
3294 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3300 // FIXME: Once remat is capable of dealing with instructions with register
3301 // operands, expand this into two nodes.
3302 Result = DAG.getNode(ARMISD::Wrapper, DL, PtrVT,
3303 DAG.getTargetGlobalAddress(GV, DL, PtrVT, /*Offset=*/0,
3305 if (GV->hasDLLImportStorageClass())
3306 Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
3307 MachinePointerInfo::getGOT(DAG.getMachineFunction()));
3312 ARMTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const {
3314 SDValue Val = DAG.getConstant(0, dl, MVT::i32);
3315 return DAG.getNode(ARMISD::EH_SJLJ_SETJMP, dl,
3316 DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0),
3317 Op.getOperand(1), Val);
3321 ARMTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const {
3323 return DAG.getNode(ARMISD::EH_SJLJ_LONGJMP, dl, MVT::Other, Op.getOperand(0),
3324 Op.getOperand(1), DAG.getConstant(0, dl, MVT::i32));
3327 SDValue ARMTargetLowering::LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
3328 SelectionDAG &DAG) const {
3330 return DAG.getNode(ARMISD::EH_SJLJ_SETUP_DISPATCH, dl, MVT::Other,
3335 ARMTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
3336 const ARMSubtarget *Subtarget) const {
3337 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3340 default: return SDValue(); // Don't custom lower most intrinsics.
3341 case Intrinsic::thread_pointer: {
3342 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3343 return DAG.getNode(ARMISD::THREAD_POINTER, dl, PtrVT);
3345 case Intrinsic::eh_sjlj_lsda: {
3346 MachineFunction &MF = DAG.getMachineFunction();
3347 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3348 unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
3349 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3351 bool IsPositionIndependent = isPositionIndependent();
3352 unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
3353 ARMConstantPoolValue *CPV =
3354 ARMConstantPoolConstant::Create(MF.getFunction(), ARMPCLabelIndex,
3355 ARMCP::CPLSDA, PCAdj);
3356 CPAddr = DAG.getTargetConstantPool(CPV, PtrVT, 4);
3357 CPAddr = DAG.getNode(ARMISD::Wrapper, dl, MVT::i32, CPAddr);
3358 SDValue Result = DAG.getLoad(
3359 PtrVT, dl, DAG.getEntryNode(), CPAddr,
3360 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()));
3362 if (IsPositionIndependent) {
3363 SDValue PICLabel = DAG.getConstant(ARMPCLabelIndex, dl, MVT::i32);
3364 Result = DAG.getNode(ARMISD::PIC_ADD, dl, PtrVT, Result, PICLabel);
3368 case Intrinsic::arm_neon_vmulls:
3369 case Intrinsic::arm_neon_vmullu: {
3370 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmulls)
3371 ? ARMISD::VMULLs : ARMISD::VMULLu;
3372 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3373 Op.getOperand(1), Op.getOperand(2));
3375 case Intrinsic::arm_neon_vminnm:
3376 case Intrinsic::arm_neon_vmaxnm: {
3377 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminnm)
3378 ? ISD::FMINNUM : ISD::FMAXNUM;
3379 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3380 Op.getOperand(1), Op.getOperand(2));
3382 case Intrinsic::arm_neon_vminu:
3383 case Intrinsic::arm_neon_vmaxu: {
3384 if (Op.getValueType().isFloatingPoint())
3386 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vminu)
3387 ? ISD::UMIN : ISD::UMAX;
3388 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3389 Op.getOperand(1), Op.getOperand(2));
3391 case Intrinsic::arm_neon_vmins:
3392 case Intrinsic::arm_neon_vmaxs: {
3393 // v{min,max}s is overloaded between signed integers and floats.
3394 if (!Op.getValueType().isFloatingPoint()) {
3395 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3396 ? ISD::SMIN : ISD::SMAX;
3397 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3398 Op.getOperand(1), Op.getOperand(2));
3400 unsigned NewOpc = (IntNo == Intrinsic::arm_neon_vmins)
3401 ? ISD::FMINNAN : ISD::FMAXNAN;
3402 return DAG.getNode(NewOpc, SDLoc(Op), Op.getValueType(),
3403 Op.getOperand(1), Op.getOperand(2));
3408 static SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG &DAG,
3409 const ARMSubtarget *Subtarget) {
3410 // FIXME: handle "fence singlethread" more efficiently.
3412 if (!Subtarget->hasDataBarrier()) {
3413 // Some ARMv6 cpus can support data barriers with an mcr instruction.
3414 // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
3415 // here.
3416 assert(Subtarget->hasV6Ops() && !Subtarget->isThumb() &&
3417 "Unexpected ISD::ATOMIC_FENCE encountered. Should be libcall!");
3418 return DAG.getNode(ARMISD::MEMBARRIER_MCR, dl, MVT::Other, Op.getOperand(0),
3419 DAG.getConstant(0, dl, MVT::i32));
3422 ConstantSDNode *OrdN = cast<ConstantSDNode>(Op.getOperand(1));
3423 AtomicOrdering Ord = static_cast<AtomicOrdering>(OrdN->getZExtValue());
3424 ARM_MB::MemBOpt Domain = ARM_MB::ISH;
3425 if (Subtarget->isMClass()) {
3426 // Only a full system barrier exists in the M-class architectures.
3427 Domain = ARM_MB::SY;
3428 } else if (Subtarget->preferISHSTBarriers() &&
3429 Ord == AtomicOrdering::Release) {
3430 // Swift happens to implement ISHST barriers in a way that's compatible with
3431 // Release semantics but weaker than ISH so we'd be fools not to use
3432 // it. Beware: other processors probably don't!
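// e.g. a release fence then becomes "dmb ishst" instead of the default
// "dmb ish".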
3433 Domain = ARM_MB::ISHST;
3436 return DAG.getNode(ISD::INTRINSIC_VOID, dl, MVT::Other, Op.getOperand(0),
3437 DAG.getConstant(Intrinsic::arm_dmb, dl, MVT::i32),
3438 DAG.getConstant(Domain, dl, MVT::i32));
3441 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG,
3442 const ARMSubtarget *Subtarget) {
3443 // ARM pre-v5TE and Thumb1 do not have preload instructions.
3444 if (!(Subtarget->isThumb2() ||
3445 (!Subtarget->isThumb1Only() && Subtarget->hasV5TEOps())))
3446 // Just preserve the chain.
3447 return Op.getOperand(0);
3450 unsigned isRead = ~cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue() & 1;
3452 (!Subtarget->hasV7Ops() || !Subtarget->hasMPExtension()))
3453 // ARMv7 with MP extension has PLDW.
3454 return Op.getOperand(0);
3456 unsigned isData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
3457 if (Subtarget->isThumb()) {
3459 isRead = ~isRead & 1;
3460 isData = ~isData & 1;
3463 return DAG.getNode(ARMISD::PRELOAD, dl, MVT::Other, Op.getOperand(0),
3464 Op.getOperand(1), DAG.getConstant(isRead, dl, MVT::i32),
3465 DAG.getConstant(isData, dl, MVT::i32));
3468 static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) {
3469 MachineFunction &MF = DAG.getMachineFunction();
3470 ARMFunctionInfo *FuncInfo = MF.getInfo<ARMFunctionInfo>();
3472 // vastart just stores the address of the VarArgsFrameIndex slot into the
3473 // memory location argument.
3475 EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
3476 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
3477 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3478 return DAG.getStore(Op.getOperand(0), dl, FR, Op.getOperand(1),
3479 MachinePointerInfo(SV));
3482 SDValue ARMTargetLowering::GetF64FormalArgument(CCValAssign &VA,
3483 CCValAssign &NextVA,
3486 const SDLoc &dl) const {
3487 MachineFunction &MF = DAG.getMachineFunction();
3488 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3490 const TargetRegisterClass *RC;
3491 if (AFI->isThumb1OnlyFunction())
3492 RC = &ARM::tGPRRegClass;
3494 RC = &ARM::GPRRegClass;
3496 // Transform the arguments stored in physical registers into virtual ones.
3497 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3498 SDValue ArgValue = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
3501 if (NextVA.isMemLoc()) {
3502 MachineFrameInfo &MFI = MF.getFrameInfo();
3503 int FI = MFI.CreateFixedObject(4, NextVA.getLocMemOffset(), true);
3505 // Create load node to retrieve arguments from the stack.
3506 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
3507 ArgValue2 = DAG.getLoad(
3508 MVT::i32, dl, Root, FIN,
3509 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3511 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
3512 ArgValue2 = DAG.getCopyFromReg(Root, dl, Reg, MVT::i32);
3514 if (!Subtarget->isLittle())
3515 std::swap (ArgValue, ArgValue2);
3516 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, ArgValue, ArgValue2);
3519 // The remaining GPRs hold either the beginning of variable-argument
3520 // data, or the beginning of an aggregate passed by value (usually
3521 // byval). Either way, we allocate stack slots adjacent to the data
3522 // provided by our caller, and store the unallocated registers there.
3523 // If this is a variadic function, the va_list pointer will begin with
3524 // these values; otherwise, this reassembles a (byval) structure that
3525 // was split between registers and memory.
3526 // Return: The frame index the registers were stored into.
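// For example, a 12-byte byval argument split as { r2, r3 } plus 4 bytes on
// the stack gets r2 and r3 stored into fixed slots immediately below the
// caller-provided 4 bytes, so the aggregate is contiguous in memory again.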
3527 int ARMTargetLowering::StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG,
3528 const SDLoc &dl, SDValue &Chain,
3529 const Value *OrigArg,
3530 unsigned InRegsParamRecordIdx,
3531 int ArgOffset, unsigned ArgSize) const {
3532 // Currently, two use-cases are possible:
3533 // Case #1. Non-var-args function, and we meet the first byval parameter.
3534 // Set up the first unallocated register as the first byval register;
3535 // eat all remaining registers
3536 // (these two actions are performed by the HandleByVal method).
3537 // Then, here, we initialize the stack frame with
3538 // "store-reg" instructions.
3539 // Case #2. Var-args function that doesn't contain byval parameters.
3540 // The same: eat all remaining unallocated registers and
3541 // initialize the stack frame.
3543 MachineFunction &MF = DAG.getMachineFunction();
3544 MachineFrameInfo &MFI = MF.getFrameInfo();
3545 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3546 unsigned RBegin, REnd;
3547 if (InRegsParamRecordIdx < CCInfo.getInRegsParamsCount()) {
3548 CCInfo.getInRegsParamInfo(InRegsParamRecordIdx, RBegin, REnd);
3550 unsigned RBeginIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
3551 RBegin = RBeginIdx == 4 ? (unsigned)ARM::R4 : GPRArgRegs[RBeginIdx];
3556 ArgOffset = -4 * (ARM::R4 - RBegin);
3558 auto PtrVT = getPointerTy(DAG.getDataLayout());
3559 int FrameIndex = MFI.CreateFixedObject(ArgSize, ArgOffset, false);
3560 SDValue FIN = DAG.getFrameIndex(FrameIndex, PtrVT);
3562 SmallVector<SDValue, 4> MemOps;
3563 const TargetRegisterClass *RC =
3564 AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
3566 for (unsigned Reg = RBegin, i = 0; Reg < REnd; ++Reg, ++i) {
3567 unsigned VReg = MF.addLiveIn(Reg, RC);
3568 SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
3569 SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
3570 MachinePointerInfo(OrigArg, 4 * i));
3571 MemOps.push_back(Store);
3572 FIN = DAG.getNode(ISD::ADD, dl, PtrVT, FIN, DAG.getConstant(4, dl, PtrVT));
3575 if (!MemOps.empty())
3576 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
3580 // Set up the stack frame that the va_list pointer will start from.
3581 void ARMTargetLowering::VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
3582 const SDLoc &dl, SDValue &Chain,
3584 unsigned TotalArgRegsSaveSize,
3585 bool ForceMutable) const {
3586 MachineFunction &MF = DAG.getMachineFunction();
3587 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3589 // Try to store any remaining integer argument regs
3590 // to their spots on the stack so that they may be loaded by dereferencing
3591 // the result of va_next.
3592 // If there are no regs to be stored, just point the address after the last
3593 // argument passed via the stack.
3594 int FrameIndex = StoreByValRegs(CCInfo, DAG, dl, Chain, nullptr,
3595 CCInfo.getInRegsParamsCount(),
3596 CCInfo.getNextStackOffset(), 4);
3597 AFI->setVarArgsFrameIndex(FrameIndex);
3600 SDValue ARMTargetLowering::LowerFormalArguments(
3601 SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
3602 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3603 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3604 MachineFunction &MF = DAG.getMachineFunction();
3605 MachineFrameInfo &MFI = MF.getFrameInfo();
3607 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
3609 // Assign locations to all of the incoming arguments.
3610 SmallVector<CCValAssign, 16> ArgLocs;
3611 ARMCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
3612 *DAG.getContext(), Prologue);
3613 CCInfo.AnalyzeFormalArguments(Ins, CCAssignFnForCall(CallConv, isVarArg));
3615 SmallVector<SDValue, 16> ArgValues;
3617 Function::const_arg_iterator CurOrigArg = MF.getFunction()->arg_begin();
3618 unsigned CurArgIdx = 0;
3620 // Initially ArgRegsSaveSize is zero.
3621 // Then we increase this value each time we meet a byval parameter.
3622 // We also increase this value in the case of a varargs function.
3623 AFI->setArgRegsSaveSize(0);
3625 // Calculate the amount of stack space that we need to allocate to store
3626 // byval and variadic arguments that are passed in registers.
3627 // We need to know this before we allocate the first byval or variadic
3628 // argument, as they will be allocated a stack slot below the CFA (Canonical
3629 // Frame Address, the stack pointer at entry to the function).
3630 unsigned ArgRegBegin = ARM::R4;
3631 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3632 if (CCInfo.getInRegsParamsProcessed() >= CCInfo.getInRegsParamsCount())
3635 CCValAssign &VA = ArgLocs[i];
3636 unsigned Index = VA.getValNo();
3637 ISD::ArgFlagsTy Flags = Ins[Index].Flags;
3638 if (!Flags.isByVal())
3641 assert(VA.isMemLoc() && "unexpected byval pointer in reg");
3642 unsigned RBegin, REnd;
3643 CCInfo.getInRegsParamInfo(CCInfo.getInRegsParamsProcessed(), RBegin, REnd);
3644 ArgRegBegin = std::min(ArgRegBegin, RBegin);
3646 CCInfo.nextInRegsParam();
3648 CCInfo.rewindByValRegsInfo();
3650 int lastInsIndex = -1;
3651 if (isVarArg && MFI.hasVAStart()) {
3652 unsigned RegIdx = CCInfo.getFirstUnallocated(GPRArgRegs);
3653 if (RegIdx != array_lengthof(GPRArgRegs))
3654 ArgRegBegin = std::min(ArgRegBegin, (unsigned)GPRArgRegs[RegIdx]);
3657 unsigned TotalArgRegsSaveSize = 4 * (ARM::R4 - ArgRegBegin);
3658 AFI->setArgRegsSaveSize(TotalArgRegsSaveSize);
3659 auto PtrVT = getPointerTy(DAG.getDataLayout());
3661 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3662 CCValAssign &VA = ArgLocs[i];
3663 if (Ins[VA.getValNo()].isOrigArg()) {
3664 std::advance(CurOrigArg,
3665 Ins[VA.getValNo()].getOrigArgIndex() - CurArgIdx);
3666 CurArgIdx = Ins[VA.getValNo()].getOrigArgIndex();
3668 // Arguments stored in registers.
3669 if (VA.isRegLoc()) {
3670 EVT RegVT = VA.getLocVT();
3672 if (VA.needsCustom()) {
3673 // f64 and vector types are split up into multiple registers or
3674 // combinations of registers and stack slots.
3675 if (VA.getLocVT() == MVT::v2f64) {
3676 SDValue ArgValue1 = GetF64FormalArgument(VA, ArgLocs[++i],
3678 VA = ArgLocs[++i]; // skip ahead to next loc
3680 if (VA.isMemLoc()) {
3681 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), true);
3682 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3683 ArgValue2 = DAG.getLoad(MVT::f64, dl, Chain, FIN,
3684 MachinePointerInfo::getFixedStack(
3685 DAG.getMachineFunction(), FI));
3687 ArgValue2 = GetF64FormalArgument(VA, ArgLocs[++i],
3690 ArgValue = DAG.getNode(ISD::UNDEF, dl, MVT::v2f64);
3691 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
3692 ArgValue, ArgValue1,
3693 DAG.getIntPtrConstant(0, dl));
3694 ArgValue = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64,
3695 ArgValue, ArgValue2,
3696 DAG.getIntPtrConstant(1, dl));
3698 ArgValue = GetF64FormalArgument(VA, ArgLocs[++i], Chain, DAG, dl);
3701 const TargetRegisterClass *RC;
3703 if (RegVT == MVT::f32)
3704 RC = &ARM::SPRRegClass;
3705 else if (RegVT == MVT::f64)
3706 RC = &ARM::DPRRegClass;
3707 else if (RegVT == MVT::v2f64)
3708 RC = &ARM::QPRRegClass;
3709 else if (RegVT == MVT::i32)
3710 RC = AFI->isThumb1OnlyFunction() ? &ARM::tGPRRegClass
3711 : &ARM::GPRRegClass;
3713 llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");
3715 // Transform the arguments in physical registers into virtual ones.
3716 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
3717 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3720 // If this is an 8 or 16-bit value, it is really passed promoted
3721 // to 32 bits. Insert an assert[sz]ext to capture this, then
3722 // truncate to the right size.
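// For example, an i8 argument arrives in the low bits of an i32 register;
// with SExt lowering we emit AssertSext(i32, i8) followed by a truncate back
// down to i8.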
3723 switch (VA.getLocInfo()) {
3724 default: llvm_unreachable("Unknown loc info!");
3725 case CCValAssign::Full: break;
3726 case CCValAssign::BCvt:
3727 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
3729 case CCValAssign::SExt:
3730 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3731 DAG.getValueType(VA.getValVT()));
3732 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3734 case CCValAssign::ZExt:
3735 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3736 DAG.getValueType(VA.getValVT()));
3737 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3741 InVals.push_back(ArgValue);
3743 } else { // VA.isRegLoc()
3746 assert(VA.isMemLoc());
3747 assert(VA.getValVT() != MVT::i64 && "i64 should already be lowered");
3749 int index = VA.getValNo();
3751 // Some Ins[] entries become multiple ArgLoc[] entries.
3752 // Process them only once.
3753 if (index != lastInsIndex)
3755 ISD::ArgFlagsTy Flags = Ins[index].Flags;
3756 // FIXME: For now, all byval parameter objects are marked mutable.
3757 // This can be changed with more analysis.
3758 // In case of tail call optimization mark all arguments mutable.
3759 // Since they could be overwritten by lowering of arguments in case of
3760 // a tail call.
3761 if (Flags.isByVal()) {
3762 assert(Ins[index].isOrigArg() &&
3763 "Byval arguments cannot be implicit");
3764 unsigned CurByValIndex = CCInfo.getInRegsParamsProcessed();
3766 int FrameIndex = StoreByValRegs(
3767 CCInfo, DAG, dl, Chain, &*CurOrigArg, CurByValIndex,
3768 VA.getLocMemOffset(), Flags.getByValSize());
3769 InVals.push_back(DAG.getFrameIndex(FrameIndex, PtrVT));
3770 CCInfo.nextInRegsParam();
3772 unsigned FIOffset = VA.getLocMemOffset();
3773 int FI = MFI.CreateFixedObject(VA.getLocVT().getSizeInBits()/8,
3776 // Create load nodes to retrieve arguments from the stack.
3777 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3778 InVals.push_back(DAG.getLoad(VA.getValVT(), dl, Chain, FIN,
3779 MachinePointerInfo::getFixedStack(
3780 DAG.getMachineFunction(), FI)));
3782 lastInsIndex = index;
3788 if (isVarArg && MFI.hasVAStart())
3789 VarArgStyleRegisters(CCInfo, DAG, dl, Chain,
3790 CCInfo.getNextStackOffset(),
3791 TotalArgRegsSaveSize);
3793 AFI->setArgumentStackSize(CCInfo.getNextStackOffset());
3798 /// isFloatingPointZero - Return true if this is +0.0.
3799 static bool isFloatingPointZero(SDValue Op) {
3800 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Op))
3801 return CFP->getValueAPF().isPosZero();
3802 else if (ISD::isEXTLoad(Op.getNode()) || ISD::isNON_EXTLoad(Op.getNode())) {
3803 // Maybe this has already been legalized into the constant pool?
3804 if (Op.getOperand(1).getOpcode() == ARMISD::Wrapper) {
3805 SDValue WrapperOp = Op.getOperand(1).getOperand(0);
3806 if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(WrapperOp))
3807 if (const ConstantFP *CFP = dyn_cast<ConstantFP>(CP->getConstVal()))
3808 return CFP->getValueAPF().isPosZero();
3810 } else if (Op->getOpcode() == ISD::BITCAST &&
3811 Op->getValueType(0) == MVT::f64) {
3812 // Handle (ISD::BITCAST (ARMISD::VMOVIMM (ISD::TargetConstant 0)) MVT::f64)
3813 // created by LowerConstantFP().
3814 SDValue BitcastOp = Op->getOperand(0);
3815 if (BitcastOp->getOpcode() == ARMISD::VMOVIMM &&
3816 isNullConstant(BitcastOp->getOperand(0)))
3822 /// Returns appropriate ARM CMP (cmp) and corresponding condition code for
3823 /// the given operands.
3824 SDValue ARMTargetLowering::getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
3825 SDValue &ARMcc, SelectionDAG &DAG,
3826 const SDLoc &dl) const {
3827 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) {
3828 unsigned C = RHSC->getZExtValue();
3829 if (!isLegalICmpImmediate(C)) {
3830 // Constant does not fit, try adjusting it by one?
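// e.g. (x < 0x101), where 0x101 is not encodable as an ARM immediate, can be
// rewritten as (x <= 0x100), which is.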
3835 if (C != 0x80000000 && isLegalICmpImmediate(C-1)) {
3836 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT;
3837 RHS = DAG.getConstant(C - 1, dl, MVT::i32);
3842 if (C != 0 && isLegalICmpImmediate(C-1)) {
3843 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT;
3844 RHS = DAG.getConstant(C - 1, dl, MVT::i32);
3849 if (C != 0x7fffffff && isLegalICmpImmediate(C+1)) {
3850 CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
3851 RHS = DAG.getConstant(C + 1, dl, MVT::i32);
3856 if (C != 0xffffffff && isLegalICmpImmediate(C+1)) {
3857 CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
3858 RHS = DAG.getConstant(C + 1, dl, MVT::i32);
3865 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
3866 ARMISD::NodeType CompareType;
3869 CompareType = ARMISD::CMP;
3874 CompareType = ARMISD::CMPZ;
3877 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
3878 return DAG.getNode(CompareType, dl, MVT::Glue, LHS, RHS);
3881 /// Returns an appropriate VFP CMP (fcmp{s|d}+fmstat) for the given operands.
3882 SDValue ARMTargetLowering::getVFPCmp(SDValue LHS, SDValue RHS,
3883 SelectionDAG &DAG, const SDLoc &dl) const {
3884 assert(!Subtarget->isFPOnlySP() || RHS.getValueType() != MVT::f64);
3886 if (!isFloatingPointZero(RHS))
3887 Cmp = DAG.getNode(ARMISD::CMPFP, dl, MVT::Glue, LHS, RHS);
3889 Cmp = DAG.getNode(ARMISD::CMPFPw0, dl, MVT::Glue, LHS);
3890 return DAG.getNode(ARMISD::FMSTAT, dl, MVT::Glue, Cmp);
3893 /// duplicateCmp - Glue values can have only one use, so this function
3894 /// duplicates a comparison node.
3896 ARMTargetLowering::duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const {
3897 unsigned Opc = Cmp.getOpcode();
3899 if (Opc == ARMISD::CMP || Opc == ARMISD::CMPZ)
3900 return DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
3902 assert(Opc == ARMISD::FMSTAT && "unexpected comparison operation");
3903 Cmp = Cmp.getOperand(0);
3904 Opc = Cmp.getOpcode();
3905 if (Opc == ARMISD::CMPFP)
3906 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0),Cmp.getOperand(1));
3908 assert(Opc == ARMISD::CMPFPw0 && "unexpected operand of FMSTAT");
3909 Cmp = DAG.getNode(Opc, DL, MVT::Glue, Cmp.getOperand(0));
3911 return DAG.getNode(ARMISD::FMSTAT, DL, MVT::Glue, Cmp);
3914 std::pair<SDValue, SDValue>
3915 ARMTargetLowering::getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
3916 SDValue &ARMcc) const {
3917 assert(Op.getValueType() == MVT::i32 && "Unsupported value type");
3919 SDValue Value, OverflowCmp;
3920 SDValue LHS = Op.getOperand(0);
3921 SDValue RHS = Op.getOperand(1);
3924 // FIXME: We are currently always generating CMPs because we don't support
3925 // generating CMN through the backend. This is not as good as the natural
3926 // CMP case because it causes a register dependency and cannot be folded
3929 switch (Op.getOpcode()) {
3931 llvm_unreachable("Unknown overflow instruction!");
3933 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
3934 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
3935 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
3938 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
3939 Value = DAG.getNode(ISD::ADD, dl, Op.getValueType(), LHS, RHS);
3940 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, Value, LHS);
3943 ARMcc = DAG.getConstant(ARMCC::VC, dl, MVT::i32);
3944 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
3945 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
3948 ARMcc = DAG.getConstant(ARMCC::HS, dl, MVT::i32);
3949 Value = DAG.getNode(ISD::SUB, dl, Op.getValueType(), LHS, RHS);
3950 OverflowCmp = DAG.getNode(ARMISD::CMP, dl, MVT::Glue, LHS, RHS);
3954 return std::make_pair(Value, OverflowCmp);
3959 ARMTargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const {
3960 // Let legalize expand this if it isn't a legal type yet.
3961 if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
3964 SDValue Value, OverflowCmp;
3966 std::tie(Value, OverflowCmp) = getARMXALUOOp(Op, DAG, ARMcc);
3967 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3969 // We use 0 and 1 as false and true values.
3970 SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
3971 SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
3972 EVT VT = Op.getValueType();
3974 SDValue Overflow = DAG.getNode(ARMISD::CMOV, dl, VT, TVal, FVal,
3975 ARMcc, CCR, OverflowCmp);
3977 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
3978 return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
3982 SDValue ARMTargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3983 SDValue Cond = Op.getOperand(0);
3984 SDValue SelectTrue = Op.getOperand(1);
3985 SDValue SelectFalse = Op.getOperand(2);
3987 unsigned Opc = Cond.getOpcode();
3989 if (Cond.getResNo() == 1 &&
3990 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO ||
3991 Opc == ISD::USUBO)) {
3992 if (!DAG.getTargetLoweringInfo().isTypeLegal(Cond->getValueType(0)))
3995 SDValue Value, OverflowCmp;
3997 std::tie(Value, OverflowCmp) = getARMXALUOOp(Cond, DAG, ARMcc);
3998 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
3999 EVT VT = Op.getValueType();
4001 return getCMOV(dl, VT, SelectTrue, SelectFalse, ARMcc, CCR,
4007 // (select (cmov 1, 0, cond), t, f) -> (cmov t, f, cond)
4008 // (select (cmov 0, 1, cond), t, f) -> (cmov f, t, cond)
4010 if (Cond.getOpcode() == ARMISD::CMOV && Cond.hasOneUse()) {
4011 const ConstantSDNode *CMOVTrue =
4012 dyn_cast<ConstantSDNode>(Cond.getOperand(0));
4013 const ConstantSDNode *CMOVFalse =
4014 dyn_cast<ConstantSDNode>(Cond.getOperand(1));
4016 if (CMOVTrue && CMOVFalse) {
4017 unsigned CMOVTrueVal = CMOVTrue->getZExtValue();
4018 unsigned CMOVFalseVal = CMOVFalse->getZExtValue();
4022 if (CMOVTrueVal == 1 && CMOVFalseVal == 0) {
4024 False = SelectFalse;
4025 } else if (CMOVTrueVal == 0 && CMOVFalseVal == 1) {
4030 if (True.getNode() && False.getNode()) {
4031 EVT VT = Op.getValueType();
4032 SDValue ARMcc = Cond.getOperand(2);
4033 SDValue CCR = Cond.getOperand(3);
4034 SDValue Cmp = duplicateCmp(Cond.getOperand(4), DAG);
4035 assert(True.getValueType() == VT);
4036 return getCMOV(dl, VT, True, False, ARMcc, CCR, Cmp, DAG);
4041 // ARM's BooleanContents value is UndefinedBooleanContent. Mask out the
4042 // undefined bits before doing a full-word comparison with zero.
4043 Cond = DAG.getNode(ISD::AND, dl, Cond.getValueType(), Cond,
4044 DAG.getConstant(1, dl, Cond.getValueType()));
4046 return DAG.getSelectCC(dl, Cond,
4047 DAG.getConstant(0, dl, Cond.getValueType()),
4048 SelectTrue, SelectFalse, ISD::SETNE);
4051 static void checkVSELConstraints(ISD::CondCode CC, ARMCC::CondCodes &CondCode,
4052 bool &swpCmpOps, bool &swpVselOps) {
4053 // Start by selecting the GE condition code for opcodes that return true for
4054 // 'equality'.
4055 if (CC == ISD::SETUGE || CC == ISD::SETOGE || CC == ISD::SETOLE ||
4057 CondCode = ARMCC::GE;
4059 // and GT for opcodes that return false for 'equality'.
4060 else if (CC == ISD::SETUGT || CC == ISD::SETOGT || CC == ISD::SETOLT ||
4062 CondCode = ARMCC::GT;
4064 // Since we are constrained to GE/GT, if the opcode contains 'less', we need
4065 // to swap the compare operands.
4066 if (CC == ISD::SETOLE || CC == ISD::SETULE || CC == ISD::SETOLT ||
4070 // Both GT and GE are ordered comparisons, and return false for 'unordered'.
4071 // If we have an unordered opcode, we need to swap the operands to the VSEL
4072 // instruction (effectively negating the condition).
4074 // This also has the effect of swapping which one of 'less' or 'greater'
4075 // returns true, so we also swap the compare operands. It also switches
4076 // whether we return true for 'equality', so we compensate by picking the
4077 // opposite condition code to our original choice.
4078 if (CC == ISD::SETULE || CC == ISD::SETULT || CC == ISD::SETUGE ||
4079 CC == ISD::SETUGT) {
4080 swpCmpOps = !swpCmpOps;
4081 swpVselOps = !swpVselOps;
4082 CondCode = CondCode == ARMCC::GT ? ARMCC::GE : ARMCC::GT;
4085 // 'ordered' is 'anything but unordered', so use the VS condition code and
4086 // swap the VSEL operands.
4087 if (CC == ISD::SETO) {
4088 CondCode = ARMCC::VS;
4092 // 'unordered or not equal' is 'anything but equal', so use the EQ condition
4093 // code and swap the VSEL operands.
4094 if (CC == ISD::SETUNE) {
4095 CondCode = ARMCC::EQ;
4100 SDValue ARMTargetLowering::getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal,
4101 SDValue TrueVal, SDValue ARMcc, SDValue CCR,
4102 SDValue Cmp, SelectionDAG &DAG) const {
4103 if (Subtarget->isFPOnlySP() && VT == MVT::f64) {
4104 FalseVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4105 DAG.getVTList(MVT::i32, MVT::i32), FalseVal);
4106 TrueVal = DAG.getNode(ARMISD::VMOVRRD, dl,
4107 DAG.getVTList(MVT::i32, MVT::i32), TrueVal);
4109 SDValue TrueLow = TrueVal.getValue(0);
4110 SDValue TrueHigh = TrueVal.getValue(1);
4111 SDValue FalseLow = FalseVal.getValue(0);
4112 SDValue FalseHigh = FalseVal.getValue(1);
4114 SDValue Low = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseLow, TrueLow,
4116 SDValue High = DAG.getNode(ARMISD::CMOV, dl, MVT::i32, FalseHigh, TrueHigh,
4117 ARMcc, CCR, duplicateCmp(Cmp, DAG));
4119 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Low, High);
4121 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal, ARMcc, CCR,
4126 static bool isGTorGE(ISD::CondCode CC) {
4127 return CC == ISD::SETGT || CC == ISD::SETGE;
4130 static bool isLTorLE(ISD::CondCode CC) {
4131 return CC == ISD::SETLT || CC == ISD::SETLE;
4134 // See if a conditional (LHS CC RHS ? TrueVal : FalseVal) is lower-saturating.
4135 // All of these conditions (and their <= and >= counterparts) will do:
4136 //   x < k ? k : x
4137 //   x > k ? x : k
4138 //   k < x ? x : k
4139 //   k > x ? k : x
4140 static bool isLowerSaturate(const SDValue LHS, const SDValue RHS,
4141 const SDValue TrueVal, const SDValue FalseVal,
4142 const ISD::CondCode CC, const SDValue K) {
4143 return (isGTorGE(CC) &&
4144 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal))) ||
4146 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal)));
4149 // Similar to isLowerSaturate(), but checks for upper-saturating conditions.
4150 static bool isUpperSaturate(const SDValue LHS, const SDValue RHS,
4151 const SDValue TrueVal, const SDValue FalseVal,
4152 const ISD::CondCode CC, const SDValue K) {
4153 return (isGTorGE(CC) &&
4154 ((K == RHS && K == TrueVal) || (K == LHS && K == FalseVal))) ||
4156 ((K == LHS && K == TrueVal) || (K == RHS && K == FalseVal)));
4159 // Check if two chained conditionals could be converted into SSAT.
4161 // SSAT can replace a set of two conditional selectors that bound a number to an
4162 // interval of type [k, ~k] when k + 1 is a power of 2. Here are some examples:
4164 // x < -k ? -k : (x > k ? k : x)
4165 // x < -k ? -k : (x < k ? x : k)
4166 // x > -k ? (x > k ? k : x) : -k
4167 // x < k ? (x < -k ? -k : x) : k
4170 // It returns true if the conversion can be done, false otherwise.
4171 // Additionally, the variable is returned in parameter V and the constant in K.
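// For example, clamping x to [-128, 127] (k = 127, k + 1 = 2^7) matches this
// pattern and can be implemented with a single "ssat Rd, #8, Rn".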
4172 static bool isSaturatingConditional(const SDValue &Op, SDValue &V,
4175 SDValue LHS1 = Op.getOperand(0);
4176 SDValue RHS1 = Op.getOperand(1);
4177 SDValue TrueVal1 = Op.getOperand(2);
4178 SDValue FalseVal1 = Op.getOperand(3);
4179 ISD::CondCode CC1 = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4181 const SDValue Op2 = isa<ConstantSDNode>(TrueVal1) ? FalseVal1 : TrueVal1;
4182 if (Op2.getOpcode() != ISD::SELECT_CC)
4185 SDValue LHS2 = Op2.getOperand(0);
4186 SDValue RHS2 = Op2.getOperand(1);
4187 SDValue TrueVal2 = Op2.getOperand(2);
4188 SDValue FalseVal2 = Op2.getOperand(3);
4189 ISD::CondCode CC2 = cast<CondCodeSDNode>(Op2.getOperand(4))->get();
4191 // Find out which are the constants and which are the variables
4192 // in each conditional
4193 SDValue *K1 = isa<ConstantSDNode>(LHS1) ? &LHS1 : isa<ConstantSDNode>(RHS1)
4196 SDValue *K2 = isa<ConstantSDNode>(LHS2) ? &LHS2 : isa<ConstantSDNode>(RHS2)
4199 SDValue K2Tmp = isa<ConstantSDNode>(TrueVal2) ? TrueVal2 : FalseVal2;
4200 SDValue V1Tmp = (K1 && *K1 == LHS1) ? RHS1 : LHS1;
4201 SDValue V2Tmp = (K2 && *K2 == LHS2) ? RHS2 : LHS2;
4202 SDValue V2 = (K2Tmp == TrueVal2) ? FalseVal2 : TrueVal2;
4204 // We must detect cases where the original operations worked with 16- or
4205 // 8-bit values. In such a case, V2Tmp != V2 because the comparison operations
4206 // must work with sign-extended values but the select operations return
4207 // the original non-extended value.
4208 SDValue V2TmpReg = V2Tmp;
4209 if (V2Tmp->getOpcode() == ISD::SIGN_EXTEND_INREG)
4210 V2TmpReg = V2Tmp->getOperand(0);
4212 // Check that the registers and the constants have the correct values
4213 // in both conditionals
4214 if (!K1 || !K2 || *K1 == Op2 || *K2 != K2Tmp || V1Tmp != V2Tmp ||
4218 // Figure out which conditional is saturating the lower/upper bound.
4219 const SDValue *LowerCheckOp =
4220 isLowerSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
4222 : isLowerSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) ? &Op2
4224 const SDValue *UpperCheckOp =
4225 isUpperSaturate(LHS1, RHS1, TrueVal1, FalseVal1, CC1, *K1)
4227 : isUpperSaturate(LHS2, RHS2, TrueVal2, FalseVal2, CC2, *K2) ? &Op2
4230 if (!UpperCheckOp || !LowerCheckOp || LowerCheckOp == UpperCheckOp)
4233 // Check that the constant in the lower-bound check is
4234 // the opposite of the constant in the upper-bound check
4235 // in 1's complement.
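// e.g. 127 (0x0000007F) and -128 (0xFFFFFF80) are bitwise complements of each
// other.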
4236 int64_t Val1 = cast<ConstantSDNode>(*K1)->getSExtValue();
4237 int64_t Val2 = cast<ConstantSDNode>(*K2)->getSExtValue();
4238 int64_t PosVal = std::max(Val1, Val2);
4240 if (((Val1 > Val2 && UpperCheckOp == &Op) ||
4241 (Val1 < Val2 && UpperCheckOp == &Op2)) &&
4242 Val1 == ~Val2 && isPowerOf2_64(PosVal + 1)) {
4245 K = (uint64_t)PosVal; // At this point, PosVal is guaranteed to be positive
4252 SDValue ARMTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
4254 EVT VT = Op.getValueType();
4257 // Try to convert two saturating conditional selects into a single SSAT
4259 uint64_t SatConstant;
4260 if (((!Subtarget->isThumb() && Subtarget->hasV6Ops()) || Subtarget->isThumb2()) &&
4261 isSaturatingConditional(Op, SatValue, SatConstant))
4262 return DAG.getNode(ARMISD::SSAT, dl, VT, SatValue,
4263 DAG.getConstant(countTrailingOnes(SatConstant), dl, VT));
4265 SDValue LHS = Op.getOperand(0);
4266 SDValue RHS = Op.getOperand(1);
4267 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
4268 SDValue TrueVal = Op.getOperand(2);
4269 SDValue FalseVal = Op.getOperand(3);
4271 if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
4272 DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
4275 // If softenSetCCOperands only returned one value, we should compare it to
4277 if (!RHS.getNode()) {
4278 RHS = DAG.getConstant(0, dl, LHS.getValueType());
4283 if (LHS.getValueType() == MVT::i32) {
4284 // Try to generate VSEL on ARMv8.
4285 // The VSEL instruction can't use all the usual ARM condition
4286 // codes: it only has two bits to select the condition code, so it's
4287 // constrained to use only GE, GT, VS and EQ.
4289 // To implement all the various ISD::SETXXX opcodes, we sometimes need to
4290 // swap the operands of the previous compare instruction (effectively
4291 // inverting the compare condition, swapping 'less' and 'greater') and
4292 // sometimes need to swap the operands to the VSEL (which inverts the
4293 // condition in the sense of firing whenever the previous condition didn't)
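// e.g. select(a < b, x, y) is rewritten below as select(a >= b, y, x) so that
// the GE condition, which VSEL does support, can be used.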
4294 if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
4295 TrueVal.getValueType() == MVT::f64)) {
4296 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4297 if (CondCode == ARMCC::LT || CondCode == ARMCC::LE ||
4298 CondCode == ARMCC::VC || CondCode == ARMCC::NE) {
4299 CC = ISD::getSetCCInverse(CC, true);
4300 std::swap(TrueVal, FalseVal);
4305 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4306 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4307 return getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4310 ARMCC::CondCodes CondCode, CondCode2;
4311 FPCCToARMCC(CC, CondCode, CondCode2);
4313 // Try to generate VMAXNM/VMINNM on ARMv8.
4314 if (Subtarget->hasFPARMv8() && (TrueVal.getValueType() == MVT::f32 ||
4315 TrueVal.getValueType() == MVT::f64)) {
4316 bool swpCmpOps = false;
4317 bool swpVselOps = false;
4318 checkVSELConstraints(CC, CondCode, swpCmpOps, swpVselOps);
4320 if (CondCode == ARMCC::GT || CondCode == ARMCC::GE ||
4321 CondCode == ARMCC::VS || CondCode == ARMCC::EQ) {
4323 std::swap(LHS, RHS);
4325 std::swap(TrueVal, FalseVal);
4329 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4330 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
4331 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4332 SDValue Result = getCMOV(dl, VT, FalseVal, TrueVal, ARMcc, CCR, Cmp, DAG);
4333 if (CondCode2 != ARMCC::AL) {
4334 SDValue ARMcc2 = DAG.getConstant(CondCode2, dl, MVT::i32);
4335 // FIXME: Needs another CMP because flag can have but one use.
4336 SDValue Cmp2 = getVFPCmp(LHS, RHS, DAG, dl);
4337 Result = getCMOV(dl, VT, Result, TrueVal, ARMcc2, CCR, Cmp2, DAG);
4342 /// canChangeToInt - Given the fp compare operand, return true if it is suitable
4343 /// to morph to an integer compare sequence.
4344 static bool canChangeToInt(SDValue Op, bool &SeenZero,
4345 const ARMSubtarget *Subtarget) {
4346 SDNode *N = Op.getNode();
4347 if (!N->hasOneUse())
4348 // Otherwise it requires moving the value from fp to integer registers.
4350 if (!N->getNumValues())
4352 EVT VT = Op.getValueType();
4353 if (VT != MVT::f32 && !Subtarget->isFPBrccSlow())
4354 // f32 case is generally profitable. f64 case only makes sense when vcmpe +
4355 // vmrs are very slow, e.g. cortex-a8.
4358 if (isFloatingPointZero(Op)) {
4362 return ISD::isNormalLoad(N);
4365 static SDValue bitcastf32Toi32(SDValue Op, SelectionDAG &DAG) {
4366 if (isFloatingPointZero(Op))
4367 return DAG.getConstant(0, SDLoc(Op), MVT::i32);
4369 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op))
4370 return DAG.getLoad(MVT::i32, SDLoc(Op), Ld->getChain(), Ld->getBasePtr(),
4371 Ld->getPointerInfo(), Ld->getAlignment(),
4372 Ld->getMemOperand()->getFlags());
4374 llvm_unreachable("Unknown VFP cmp argument!");
4377 static void expandf64Toi32(SDValue Op, SelectionDAG &DAG,
4378 SDValue &RetVal1, SDValue &RetVal2) {
4381 if (isFloatingPointZero(Op)) {
4382 RetVal1 = DAG.getConstant(0, dl, MVT::i32);
4383 RetVal2 = DAG.getConstant(0, dl, MVT::i32);
4387 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Op)) {
4388 SDValue Ptr = Ld->getBasePtr();
4390 DAG.getLoad(MVT::i32, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
4391 Ld->getAlignment(), Ld->getMemOperand()->getFlags());
4393 EVT PtrType = Ptr.getValueType();
4394 unsigned NewAlign = MinAlign(Ld->getAlignment(), 4);
4395 SDValue NewPtr = DAG.getNode(ISD::ADD, dl,
4396 PtrType, Ptr, DAG.getConstant(4, dl, PtrType));
4397 RetVal2 = DAG.getLoad(MVT::i32, dl, Ld->getChain(), NewPtr,
4398 Ld->getPointerInfo().getWithOffset(4), NewAlign,
4399 Ld->getMemOperand()->getFlags());
4403 llvm_unreachable("Unknown VFP cmp argument!");
4406 /// OptimizeVFPBrcond - With -enable-unsafe-fp-math, it's legal to optimize some
4407 /// f32 and even f64 comparisons to integer ones.
4409 ARMTargetLowering::OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const {
4410 SDValue Chain = Op.getOperand(0);
4411 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
4412 SDValue LHS = Op.getOperand(2);
4413 SDValue RHS = Op.getOperand(3);
4414 SDValue Dest = Op.getOperand(4);
4417 bool LHSSeenZero = false;
4418 bool LHSOk = canChangeToInt(LHS, LHSSeenZero, Subtarget);
4419 bool RHSSeenZero = false;
4420 bool RHSOk = canChangeToInt(RHS, RHSSeenZero, Subtarget);
4421 if (LHSOk && RHSOk && (LHSSeenZero || RHSSeenZero)) {
4422 // If unsafe fp math optimization is enabled and there are no other uses of
4423 // the CMP operands, and the condition code is EQ or NE, we can optimize it
4424 // to an integer comparison.
4425 if (CC == ISD::SETOEQ)
4427 else if (CC == ISD::SETUNE)
4430 SDValue Mask = DAG.getConstant(0x7fffffff, dl, MVT::i32);
4432 if (LHS.getValueType() == MVT::f32) {
4433 LHS = DAG.getNode(ISD::AND, dl, MVT::i32,
4434 bitcastf32Toi32(LHS, DAG), Mask);
4435 RHS = DAG.getNode(ISD::AND, dl, MVT::i32,
4436 bitcastf32Toi32(RHS, DAG), Mask);
4437 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4438 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4439 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
4440 Chain, Dest, ARMcc, CCR, Cmp);
4445 expandf64Toi32(LHS, DAG, LHS1, LHS2);
4446 expandf64Toi32(RHS, DAG, RHS1, RHS2);
4447 LHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, LHS2, Mask);
4448 RHS2 = DAG.getNode(ISD::AND, dl, MVT::i32, RHS2, Mask);
4449 ARMCC::CondCodes CondCode = IntCCToARMCC(CC);
4450 ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4451 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
4452 SDValue Ops[] = { Chain, ARMcc, LHS1, LHS2, RHS1, RHS2, Dest };
4453 return DAG.getNode(ARMISD::BCC_i64, dl, VTList, Ops);
4459 SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
4460 SDValue Chain = Op.getOperand(0);
4461 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
4462 SDValue LHS = Op.getOperand(2);
4463 SDValue RHS = Op.getOperand(3);
4464 SDValue Dest = Op.getOperand(4);
4467 if (Subtarget->isFPOnlySP() && LHS.getValueType() == MVT::f64) {
4468 DAG.getTargetLoweringInfo().softenSetCCOperands(DAG, MVT::f64, LHS, RHS, CC,
4471 // If softenSetCCOperands only returned one value, we should compare it to
4473 if (!RHS.getNode()) {
4474 RHS = DAG.getConstant(0, dl, LHS.getValueType());
4479 if (LHS.getValueType() == MVT::i32) {
4481 SDValue Cmp = getARMCmp(LHS, RHS, CC, ARMcc, DAG, dl);
4482 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4483 return DAG.getNode(ARMISD::BRCOND, dl, MVT::Other,
4484 Chain, Dest, ARMcc, CCR, Cmp);
4487 assert(LHS.getValueType() == MVT::f32 || LHS.getValueType() == MVT::f64);
4489 if (getTargetMachine().Options.UnsafeFPMath &&
4490 (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
4491 CC == ISD::SETNE || CC == ISD::SETUNE)) {
4492 if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
4496 ARMCC::CondCodes CondCode, CondCode2;
4497 FPCCToARMCC(CC, CondCode, CondCode2);
4499 SDValue ARMcc = DAG.getConstant(CondCode, dl, MVT::i32);
4500 SDValue Cmp = getVFPCmp(LHS, RHS, DAG, dl);
4501 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4502 SDVTList VTList = DAG.getVTList(MVT::Other, MVT::Glue);
4503 SDValue Ops[] = { Chain, Dest, ARMcc, CCR, Cmp };
4504 SDValue Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
4505 if (CondCode2 != ARMCC::AL) {
4506 ARMcc = DAG.getConstant(CondCode2, dl, MVT::i32);
4507 SDValue Ops[] = { Res, Dest, ARMcc, CCR, Res.getValue(1) };
4508 Res = DAG.getNode(ARMISD::BRCOND, dl, VTList, Ops);
4513 SDValue ARMTargetLowering::LowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
4514 SDValue Chain = Op.getOperand(0);
4515 SDValue Table = Op.getOperand(1);
4516 SDValue Index = Op.getOperand(2);
4519 EVT PTy = getPointerTy(DAG.getDataLayout());
4520 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
4521 SDValue JTI = DAG.getTargetJumpTable(JT->getIndex(), PTy);
4522 Table = DAG.getNode(ARMISD::WrapperJT, dl, MVT::i32, JTI);
4523 Index = DAG.getNode(ISD::MUL, dl, PTy, Index, DAG.getConstant(4, dl, PTy));
4524 SDValue Addr = DAG.getNode(ISD::ADD, dl, PTy, Index, Table);
4525 if (Subtarget->isThumb2() || (Subtarget->hasV8MBaselineOps() && Subtarget->isThumb())) {
4526 // Thumb2 and ARMv8-M use a two-level jump. That is, they jump into the jump
4527 // table, which does another jump to the destination. This also makes it easier
4528 // to translate it to TBB / TBH later (Thumb2 only).
4529 // FIXME: This might not work if the function is extremely large.
4530 return DAG.getNode(ARMISD::BR2_JT, dl, MVT::Other, Chain,
4531 Addr, Op.getOperand(2), JTI);
4533 if (isPositionIndependent() || Subtarget->isROPI()) {
4535 DAG.getLoad((EVT)MVT::i32, dl, Chain, Addr,
4536 MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
4537 Chain = Addr.getValue(1);
4538 Addr = DAG.getNode(ISD::ADD, dl, PTy, Addr, Table);
4539 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
4542 DAG.getLoad(PTy, dl, Chain, Addr,
4543 MachinePointerInfo::getJumpTable(DAG.getMachineFunction()));
4544 Chain = Addr.getValue(1);
4545 return DAG.getNode(ARMISD::BR_JT, dl, MVT::Other, Chain, Addr, JTI);
4549 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) {
4550 EVT VT = Op.getValueType();
4553 if (Op.getValueType().getVectorElementType() == MVT::i32) {
4554 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::f32)
4556 return DAG.UnrollVectorOp(Op.getNode());
4559 assert(Op.getOperand(0).getValueType() == MVT::v4f32 &&
4560 "Invalid type for custom lowering!");
4561 if (VT != MVT::v4i16)
4562 return DAG.UnrollVectorOp(Op.getNode());
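// v4f32 -> v4i16 is handled by converting to v4i32 first and then truncating
// each lane down to 16 bits.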
4564 Op = DAG.getNode(Op.getOpcode(), dl, MVT::v4i32, Op.getOperand(0));
4565 return DAG.getNode(ISD::TRUNCATE, dl, VT, Op);
4568 SDValue ARMTargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
4569 EVT VT = Op.getValueType();
4571 return LowerVectorFP_TO_INT(Op, DAG);
4572 if (Subtarget->isFPOnlySP() && Op.getOperand(0).getValueType() == MVT::f64) {
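// With a single-precision-only FPU, f64 -> integer conversions are lowered to
// a runtime library call.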
4574 if (Op.getOpcode() == ISD::FP_TO_SINT)
4575 LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(),
4578 LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(),
4580 return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
4581 /*isSigned*/ false, SDLoc(Op)).first;
4587 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) {
4588 EVT VT = Op.getValueType();
4591 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i32) {
4592 if (VT.getVectorElementType() == MVT::f32)
4594 return DAG.UnrollVectorOp(Op.getNode());
4597 assert(Op.getOperand(0).getValueType() == MVT::v4i16 &&
4598 "Invalid type for custom lowering!");
4599 if (VT != MVT::v4f32)
4600 return DAG.UnrollVectorOp(Op.getNode());
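// v4i16 -> v4f32 is handled by widening to v4i32 first (sign- or
// zero-extending depending on the opcode) and then converting.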
4604 switch (Op.getOpcode()) {
4605 default: llvm_unreachable("Invalid opcode!");
4606 case ISD::SINT_TO_FP:
4607 CastOpc = ISD::SIGN_EXTEND;
4608 Opc = ISD::SINT_TO_FP;
4610 case ISD::UINT_TO_FP:
4611 CastOpc = ISD::ZERO_EXTEND;
4612 Opc = ISD::UINT_TO_FP;
4616 Op = DAG.getNode(CastOpc, dl, MVT::v4i32, Op.getOperand(0));
4617 return DAG.getNode(Opc, dl, VT, Op);
4620 SDValue ARMTargetLowering::LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const {
4621 EVT VT = Op.getValueType();
4623 return LowerVectorINT_TO_FP(Op, DAG);
4624 if (Subtarget->isFPOnlySP() && Op.getValueType() == MVT::f64) {
4626 if (Op.getOpcode() == ISD::SINT_TO_FP)
4627 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(),
4630 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(),
4632 return makeLibCall(DAG, LC, Op.getValueType(), Op.getOperand(0),
4633 /*isSigned*/ false, SDLoc(Op)).first;
4639 SDValue ARMTargetLowering::LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const {
4640 // Implement fcopysign with a fabs and a conditional fneg.
4641 SDValue Tmp0 = Op.getOperand(0);
4642 SDValue Tmp1 = Op.getOperand(1);
4644 EVT VT = Op.getValueType();
4645 EVT SrcVT = Tmp1.getValueType();
4646 bool InGPR = Tmp0.getOpcode() == ISD::BITCAST ||
4647 Tmp0.getOpcode() == ARMISD::VMOVDRR;
4648 bool UseNEON = !InGPR && Subtarget->hasNEON();
4651 // Use VBSL to copy the sign bit.
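// The mask built below is a v2i32 splat of 0x80000000, i.e. the f32 sign bit;
// for an f64 result it is shifted left by 32 further down so that it covers
// the f64 sign bit instead.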
4652 unsigned EncodedVal = ARM_AM::createNEONModImm(0x6, 0x80);
4653 SDValue Mask = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v2i32,
4654 DAG.getTargetConstant(EncodedVal, dl, MVT::i32));
4655 EVT OpVT = (VT == MVT::f32) ? MVT::v2i32 : MVT::v1i64;
4657 Mask = DAG.getNode(ARMISD::VSHL, dl, OpVT,
4658 DAG.getNode(ISD::BITCAST, dl, OpVT, Mask),
4659 DAG.getConstant(32, dl, MVT::i32));
4660 else /*if (VT == MVT::f32)*/
4661 Tmp0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp0);
4662 if (SrcVT == MVT::f32) {
4663 Tmp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f32, Tmp1);
4665 Tmp1 = DAG.getNode(ARMISD::VSHL, dl, OpVT,
4666 DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1),
4667 DAG.getConstant(32, dl, MVT::i32));
4668 } else if (VT == MVT::f32)
4669 Tmp1 = DAG.getNode(ARMISD::VSHRu, dl, MVT::v1i64,
4670 DAG.getNode(ISD::BITCAST, dl, MVT::v1i64, Tmp1),
4671 DAG.getConstant(32, dl, MVT::i32));
4672 Tmp0 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp0);
4673 Tmp1 = DAG.getNode(ISD::BITCAST, dl, OpVT, Tmp1);
4675 SDValue AllOnes = DAG.getTargetConstant(ARM_AM::createNEONModImm(0xe, 0xff),
4677 AllOnes = DAG.getNode(ARMISD::VMOVIMM, dl, MVT::v8i8, AllOnes);
4678 SDValue MaskNot = DAG.getNode(ISD::XOR, dl, OpVT, Mask,
4679 DAG.getNode(ISD::BITCAST, dl, OpVT, AllOnes));
4681 SDValue Res = DAG.getNode(ISD::OR, dl, OpVT,
4682 DAG.getNode(ISD::AND, dl, OpVT, Tmp1, Mask),
4683 DAG.getNode(ISD::AND, dl, OpVT, Tmp0, MaskNot));
4684 if (VT == MVT::f32) {
4685 Res = DAG.getNode(ISD::BITCAST, dl, MVT::v2f32, Res);
4686 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
4687 DAG.getConstant(0, dl, MVT::i32));
4689 Res = DAG.getNode(ISD::BITCAST, dl, MVT::f64, Res);
4695 // Bitcast operand 1 to i32.
4696 if (SrcVT == MVT::f64)
4697 Tmp1 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
4699 Tmp1 = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp1);
4701 // Or in the signbit with integer operations.
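// In effect, for f32 this computes (in i32 arithmetic):
//   result = (bits(Tmp0) & 0x7fffffff) | (bits(Tmp1) & 0x80000000)
// For f64, the same masking is applied to the high word only.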
4702 SDValue Mask1 = DAG.getConstant(0x80000000, dl, MVT::i32);
4703 SDValue Mask2 = DAG.getConstant(0x7fffffff, dl, MVT::i32);
4704 Tmp1 = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp1, Mask1);
4705 if (VT == MVT::f32) {
4706 Tmp0 = DAG.getNode(ISD::AND, dl, MVT::i32,
4707 DAG.getNode(ISD::BITCAST, dl, MVT::i32, Tmp0), Mask2);
4708 return DAG.getNode(ISD::BITCAST, dl, MVT::f32,
4709 DAG.getNode(ISD::OR, dl, MVT::i32, Tmp0, Tmp1));
4712 // f64: OR the high part with the sign bit, then combine the two parts.
4713 Tmp0 = DAG.getNode(ARMISD::VMOVRRD, dl, DAG.getVTList(MVT::i32, MVT::i32),
4715 SDValue Lo = Tmp0.getValue(0);
4716 SDValue Hi = DAG.getNode(ISD::AND, dl, MVT::i32, Tmp0.getValue(1), Mask2);
4717 Hi = DAG.getNode(ISD::OR, dl, MVT::i32, Hi, Tmp1);
4718 return DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi);
4721 SDValue ARMTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const{
4722 MachineFunction &MF = DAG.getMachineFunction();
4723 MachineFrameInfo &MFI = MF.getFrameInfo();
4724 MFI.setReturnAddressIsTaken(true);
4726 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
4729 EVT VT = Op.getValueType();
4731 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4733 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
4734 SDValue Offset = DAG.getConstant(4, dl, MVT::i32);
4735 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
4736 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
4737 MachinePointerInfo());
4740 // Return LR, which contains the return address. Mark it an implicit live-in.
4741 unsigned Reg = MF.addLiveIn(ARM::LR, getRegClassFor(MVT::i32));
4742 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
4745 SDValue ARMTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
4746 const ARMBaseRegisterInfo &ARI =
4747 *static_cast<const ARMBaseRegisterInfo*>(RegInfo);
4748 MachineFunction &MF = DAG.getMachineFunction();
4749 MachineFrameInfo &MFI = MF.getFrameInfo();
4750 MFI.setFrameAddressIsTaken(true);
4752 EVT VT = Op.getValueType();
4753 SDLoc dl(Op); // FIXME probably not meaningful
4754 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
4755 unsigned FrameReg = ARI.getFrameRegister(MF);
4756 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
4758 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
4759 MachinePointerInfo());
4763 // FIXME? Maybe this could be a TableGen attribute on some registers and
4764 // this table could be generated automatically from RegInfo.
4765 unsigned ARMTargetLowering::getRegisterByName(const char* RegName, EVT VT,
4766 SelectionDAG &DAG) const {
4767 unsigned Reg = StringSwitch<unsigned>(RegName)
4768 .Case("sp", ARM::SP)
4772 report_fatal_error(Twine("Invalid register name \""
4773 + StringRef(RegName) + "\"."));
4776 // The result is a 64-bit value, so split it into two 32-bit values and return them as a pair.
4778 static void ExpandREAD_REGISTER(SDNode *N, SmallVectorImpl<SDValue> &Results,
4779 SelectionDAG &DAG) {
4782 // This function is only supposed to be called for an i64 destination type.
4783 assert(N->getValueType(0) == MVT::i64
4784 && "ExpandREAD_REGISTER called for non-i64 type result.");
4786 SDValue Read = DAG.getNode(ISD::READ_REGISTER, DL,
4787 DAG.getVTList(MVT::i32, MVT::i32, MVT::Other),
4791 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Read.getValue(0),
4793 Results.push_back(Read.getOperand(0));
4796 /// \p BC is a bitcast that is about to be turned into a VMOVDRR.
4797 /// When \p DstVT, the destination type of \p BC, is on the vector
4798 /// register bank and the source of bitcast, \p Op, operates on the same bank,
4799 /// it might be possible to combine them, such that everything stays on the
4800 /// vector register bank.
4801 /// \return The node that would replace \p BC if the combine succeeds, or an empty SDValue if it does not happen.
4803 static SDValue CombineVMOVDRRCandidateWithVecOp(const SDNode *BC,
4804 SelectionDAG &DAG) {
4805 SDValue Op = BC->getOperand(0);
4806 EVT DstVT = BC->getValueType(0);
4808 // The only vector instruction that can produce a scalar (remember,
4809 // since the bitcast was about to be turned into VMOVDRR, the source
4810 // type is i64) from a vector is EXTRACT_VECTOR_ELT.
4811 // Moreover, we can do this combine only if there is one use.
4812 // Finally, if the destination type is not a vector, there is not
4813 // much point in forcing everything onto the vector bank.
4814 if (!DstVT.isVector() || Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
4818 // If the index is not constant, we will introduce an additional
4819 // multiply that will stick.
4820 // Give up in that case.
4821 ConstantSDNode *Index = dyn_cast<ConstantSDNode>(Op.getOperand(1));
4824 unsigned DstNumElt = DstVT.getVectorNumElements();
4826 // Compute the new index.
4827 const APInt &APIntIndex = Index->getAPIntValue();
4828 APInt NewIndex(APIntIndex.getBitWidth(), DstNumElt);
4829 NewIndex *= APIntIndex;
4830 // Check if the new constant index fits into i32.
4831 if (NewIndex.getBitWidth() > 32)
4834 // vMTy bitcast(i64 extractelt vNi64 src, i32 index) ->
4835 // vMTy extractsubvector vNxMTy (bitcast vNi64 src), i32 index*M)
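// For example: v2f32 (bitcast (i64 extractelt v2i64 %src, 1))
//           ->  v2f32 (extract_subvector (v4f32 (bitcast %src)), 2)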
4837 SDValue ExtractSrc = Op.getOperand(0);
4838 EVT VecVT = EVT::getVectorVT(
4839 *DAG.getContext(), DstVT.getScalarType(),
4840 ExtractSrc.getValueType().getVectorNumElements() * DstNumElt);
4841 SDValue BitCast = DAG.getNode(ISD::BITCAST, dl, VecVT, ExtractSrc);
4842 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DstVT, BitCast,
4843 DAG.getConstant(NewIndex.getZExtValue(), dl, MVT::i32));
4846 /// ExpandBITCAST - If the target supports VFP, this function is called to
4847 /// expand a bit convert where either the source or destination type is i64 to
4848 /// use a VMOVDRR or VMOVRRD node. This should not be done when the non-i64
4849 /// operand type is illegal (e.g., v2f32 for a target that doesn't support
4850 /// vectors), since the legalizer won't know what to do with that.
4851 static SDValue ExpandBITCAST(SDNode *N, SelectionDAG &DAG) {
4852 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
4854 SDValue Op = N->getOperand(0);
4856 // This function is only supposed to be called for i64 types, either as the
4857 // source or destination of the bit convert.
4858 EVT SrcVT = Op.getValueType();
4859 EVT DstVT = N->getValueType(0);
4860 assert((SrcVT == MVT::i64 || DstVT == MVT::i64) &&
4861 "ExpandBITCAST called for non-i64 type");
4863 // Turn i64->f64 into VMOVDRR.
4864 if (SrcVT == MVT::i64 && TLI.isTypeLegal(DstVT)) {
4865 // Do not force values to GPRs (this is what VMOVDRR does for the inputs)
4866 // if we can combine the bitcast with its source.
4867 if (SDValue Val = CombineVMOVDRRCandidateWithVecOp(N, DAG))
4870 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
4871 DAG.getConstant(0, dl, MVT::i32));
4872 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Op,
4873 DAG.getConstant(1, dl, MVT::i32));
4874 return DAG.getNode(ISD::BITCAST, dl, DstVT,
4875 DAG.getNode(ARMISD::VMOVDRR, dl, MVT::f64, Lo, Hi));
4878 // Turn f64->i64 into VMOVRRD.
4879 if (DstVT == MVT::i64 && TLI.isTypeLegal(SrcVT)) {
4881 if (DAG.getDataLayout().isBigEndian() && SrcVT.isVector() &&
4882 SrcVT.getVectorNumElements() > 1)
4883 Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
4884 DAG.getVTList(MVT::i32, MVT::i32),
4885 DAG.getNode(ARMISD::VREV64, dl, SrcVT, Op));
4887 Cvt = DAG.getNode(ARMISD::VMOVRRD, dl,
4888 DAG.getVTList(MVT::i32, MVT::i32), Op);
4889 // Merge the pieces into a single i64 value.
4890 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Cvt, Cvt.getValue(1));
4896 /// getZeroVector - Returns a vector of specified type with all zero elements.
4897 /// Zero vectors are used to represent vector negation and in those cases
4898 /// will be implemented with the NEON VNEG instruction. However, VNEG does
4899 /// not support i64 elements, so sometimes the zero vectors will need to be
4900 /// explicitly constructed. Regardless, use a canonical VMOV to create the zero vector.
4902 static SDValue getZeroVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
4903 assert(VT.isVector() && "Expected a vector type");
4904 // The canonical modified immediate encoding of a zero vector is....0!
4905 SDValue EncodedVal = DAG.getTargetConstant(0, dl, MVT::i32);
4906 EVT VmovVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
4907 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, EncodedVal);
4908 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
4911 /// LowerShiftRightParts - Lower SRA_PARTS and SRL_PARTS, which return two
4912 /// i32 values and take a 2 x i32 value to shift plus a shift amount.
4913 SDValue ARMTargetLowering::LowerShiftRightParts(SDValue Op,
4914 SelectionDAG &DAG) const {
4915 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
4916 EVT VT = Op.getValueType();
4917 unsigned VTBits = VT.getSizeInBits();
4919 SDValue ShOpLo = Op.getOperand(0);
4920 SDValue ShOpHi = Op.getOperand(1);
4921 SDValue ShAmt = Op.getOperand(2);
4923 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4924 unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;
4926 assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);
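// Compute both candidate results: the "small shift" value, valid when the
// shift amount is < VTBits (lo = (lo >> amt) | (hi << (VTBits - amt))), and
// the "big shift" value, valid when the amount is >= VTBits
// (lo = hi >> (amt - VTBits)), then select between them with a CMOV keyed on
// the sign of (amt - VTBits).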
4928 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
4929 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
4930 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
4931 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
4932 DAG.getConstant(VTBits, dl, MVT::i32));
4933 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);
4934 SDValue LoSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
4935 SDValue LoBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
4936 SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4937 ISD::SETGE, ARMcc, DAG, dl);
4938 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift, LoBigShift,
4942 SDValue HiSmallShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
4943 SDValue HiBigShift = Opc == ISD::SRA
4944 ? DAG.getNode(Opc, dl, VT, ShOpHi,
4945 DAG.getConstant(VTBits - 1, dl, VT))
4946 : DAG.getConstant(0, dl, VT);
4947 SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4948 ISD::SETGE, ARMcc, DAG, dl);
4949 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
4952 SDValue Ops[2] = { Lo, Hi };
4953 return DAG.getMergeValues(Ops, dl);
4956 /// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
4957 /// i32 values and takes a 2 x i32 value to shift plus a shift amount.
4958 SDValue ARMTargetLowering::LowerShiftLeftParts(SDValue Op,
4959 SelectionDAG &DAG) const {
4960 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
4961 EVT VT = Op.getValueType();
4962 unsigned VTBits = VT.getSizeInBits();
4964 SDValue ShOpLo = Op.getOperand(0);
4965 SDValue ShOpHi = Op.getOperand(1);
4966 SDValue ShAmt = Op.getOperand(2);
4968 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
4970 assert(Op.getOpcode() == ISD::SHL_PARTS);
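// Mirror image of LowerShiftRightParts: compute the small-shift and big-shift
// results for each half and select between them with CMOVs on (amt - VTBits).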
4971 SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32,
4972 DAG.getConstant(VTBits, dl, MVT::i32), ShAmt);
4973 SDValue Tmp1 = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);
4974 SDValue Tmp2 = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
4975 SDValue HiSmallShift = DAG.getNode(ISD::OR, dl, VT, Tmp1, Tmp2);
4977 SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i32, ShAmt,
4978 DAG.getConstant(VTBits, dl, MVT::i32));
4979 SDValue HiBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);
4980 SDValue CmpHi = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4981 ISD::SETGE, ARMcc, DAG, dl);
4982 SDValue Hi = DAG.getNode(ARMISD::CMOV, dl, VT, HiSmallShift, HiBigShift,
4985 SDValue CmpLo = getARMCmp(ExtraShAmt, DAG.getConstant(0, dl, MVT::i32),
4986 ISD::SETGE, ARMcc, DAG, dl);
4987 SDValue LoSmallShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
4988 SDValue Lo = DAG.getNode(ARMISD::CMOV, dl, VT, LoSmallShift,
4989 DAG.getConstant(0, dl, VT), ARMcc, CCR, CmpLo);
4991 SDValue Ops[2] = { Lo, Hi };
4992 return DAG.getMergeValues(Ops, dl);
4995 SDValue ARMTargetLowering::LowerFLT_ROUNDS_(SDValue Op,
4996 SelectionDAG &DAG) const {
4997 // The rounding mode is in bits 23:22 of the FPSCR.
4998 // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
4999 // The formula we use to implement this is ((FPSCR + (1 << 22)) >> 22) & 3,
5000 // so that the shift and the AND get folded into a bitfield extract.
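// For example, with FPSCR = 0x00800000 (RMode bits 23:22 == 0b10, round
// toward minus infinity): ((0x00800000 + 0x00400000) >> 22) & 3 == 3, which is
// the FLT_ROUNDS encoding for round-toward-negative-infinity.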
5002 SDValue FPSCR = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
5003 DAG.getConstant(Intrinsic::arm_get_fpscr, dl,
5005 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPSCR,
5006 DAG.getConstant(1U << 22, dl, MVT::i32));
5007 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds,
5008 DAG.getConstant(22, dl, MVT::i32));
5009 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE,
5010 DAG.getConstant(3, dl, MVT::i32));
5013 static SDValue LowerCTTZ(SDNode *N, SelectionDAG &DAG,
5014 const ARMSubtarget *ST) {
5016 EVT VT = N->getValueType(0);
5017 if (VT.isVector()) {
5018 assert(ST->hasNEON());
5020 // Compute the least significant set bit: LSB = X & -X
5021 SDValue X = N->getOperand(0);
5022 SDValue NX = DAG.getNode(ISD::SUB, dl, VT, getZeroVector(VT, DAG, dl), X);
5023 SDValue LSB = DAG.getNode(ISD::AND, dl, VT, X, NX);
5025 EVT ElemTy = VT.getVectorElementType();
5027 if (ElemTy == MVT::i8) {
5028 // Compute with: cttz(x) = ctpop(lsb - 1)
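// e.g. x = 0b00101000: lsb = 0b00001000, lsb - 1 = 0b00000111,
// ctpop(lsb - 1) = 3 = cttz(x).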
5029 SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5030 DAG.getTargetConstant(1, dl, ElemTy));
5031 SDValue Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
5032 return DAG.getNode(ISD::CTPOP, dl, VT, Bits);
5035 if ((ElemTy == MVT::i16 || ElemTy == MVT::i32) &&
5036 (N->getOpcode() == ISD::CTTZ_ZERO_UNDEF)) {
5037 // Compute with: cttz(x) = (width - 1) - ctlz(lsb), if x != 0
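// e.g. for 16-bit x = 0x0008: lsb = 0x0008, ctlz(lsb) = 12, so
// cttz(x) = 15 - 12 = 3.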
5038 unsigned NumBits = ElemTy.getSizeInBits();
5039 SDValue WidthMinus1 =
5040 DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5041 DAG.getTargetConstant(NumBits - 1, dl, ElemTy));
5042 SDValue CTLZ = DAG.getNode(ISD::CTLZ, dl, VT, LSB);
5043 return DAG.getNode(ISD::SUB, dl, VT, WidthMinus1, CTLZ);
5046 // Compute with: cttz(x) = ctpop(lsb - 1)
5048 // Since we can only compute the number of bits in a byte with vcnt.8, we
5049 // have to gather the result with pairwise addition (vpaddl) for i16, i32, and i64 elements.
5054 if (ElemTy == MVT::i64) {
5055 // Load constant 0xffff'ffff'ffff'ffff to register.
5056 SDValue FF = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5057 DAG.getTargetConstant(0x1eff, dl, MVT::i32));
5058 Bits = DAG.getNode(ISD::ADD, dl, VT, LSB, FF);
5060 SDValue One = DAG.getNode(ARMISD::VMOVIMM, dl, VT,
5061 DAG.getTargetConstant(1, dl, ElemTy));
5062 Bits = DAG.getNode(ISD::SUB, dl, VT, LSB, One);
5065 // Count #bits with vcnt.8.
5066 EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
5067 SDValue BitsVT8 = DAG.getNode(ISD::BITCAST, dl, VT8Bit, Bits);
5068 SDValue Cnt8 = DAG.getNode(ISD::CTPOP, dl, VT8Bit, BitsVT8);
5070 // Gather the #bits with vpaddl (pairwise add.)
5071 EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
5072 SDValue Cnt16 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT16Bit,
5073 DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
5075 if (ElemTy == MVT::i16)
5078 EVT VT32Bit = VT.is64BitVector() ? MVT::v2i32 : MVT::v4i32;
5079 SDValue Cnt32 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT32Bit,
5080 DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
5082 if (ElemTy == MVT::i32)
5085 assert(ElemTy == MVT::i64);
5086 SDValue Cnt64 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5087 DAG.getTargetConstant(Intrinsic::arm_neon_vpaddlu, dl, MVT::i32),
5092 if (!ST->hasV6T2Ops())
5095 SDValue rbit = DAG.getNode(ISD::BITREVERSE, dl, VT, N->getOperand(0));
5096 return DAG.getNode(ISD::CTLZ, dl, VT, rbit);
5099 /// getCTPOP16BitCounts - Returns a v8i8/v16i8 vector containing the bit-count
5100 /// for each 16-bit element of the operand, repeated. The basic idea is to
5101 /// leverage vcnt to get the 8-bit counts, gather and add the results.
5103 /// Trace for v4i16:
5104 /// input = [v0 v1 v2 v3 ] (vi 16-bit element)
5105 /// cast: N0 = [w0 w1 w2 w3 w4 w5 w6 w7] (v0 = [w0 w1], wi 8-bit element)
5106 /// vcnt: N1 = [b0 b1 b2 b3 b4 b5 b6 b7] (bi = bit-count of 8-bit element wi)
5107 /// vrev: N2 = [b1 b0 b3 b2 b5 b4 b7 b6]
5108 /// [b0 b1 b2 b3 b4 b5 b6 b7]
5109 /// +[b1 b0 b3 b2 b5 b4 b7 b6]
5110 /// N3=N1+N2 = [k0 k0 k1 k1 k2 k2 k3 k3] (k0 = b0+b1 = bit-count of 16-bit v0,
5111 /// vuzp: = [k0 k1 k2 k3 k0 k1 k2 k3] each ki is 8-bits)
5112 static SDValue getCTPOP16BitCounts(SDNode *N, SelectionDAG &DAG) {
5113 EVT VT = N->getValueType(0);
5116 EVT VT8Bit = VT.is64BitVector() ? MVT::v8i8 : MVT::v16i8;
5117 SDValue N0 = DAG.getNode(ISD::BITCAST, DL, VT8Bit, N->getOperand(0));
5118 SDValue N1 = DAG.getNode(ISD::CTPOP, DL, VT8Bit, N0);
5119 SDValue N2 = DAG.getNode(ARMISD::VREV16, DL, VT8Bit, N1);
5120 SDValue N3 = DAG.getNode(ISD::ADD, DL, VT8Bit, N1, N2);
5121 return DAG.getNode(ARMISD::VUZP, DL, VT8Bit, N3, N3);
5124 /// lowerCTPOP16BitElements - Returns a v4i16/v8i16 vector containing the
5125 /// bit-count for each 16-bit element from the operand. We need slightly
5126 /// different sequencing for v4i16 and v8i16 to stay within NEON's available
5127 /// 64/128-bit registers.
5129 /// Trace for v4i16:
5130 /// input = [v0 v1 v2 v3 ] (vi 16-bit element)
5131 /// v8i8: BitCounts = [k0 k1 k2 k3 k0 k1 k2 k3 ] (ki is the bit-count of vi)
5132 /// v8i16:Extended = [k0 k1 k2 k3 k0 k1 k2 k3 ]
5133 /// v4i16:Extracted = [k0 k1 k2 k3 ]
5134 static SDValue lowerCTPOP16BitElements(SDNode *N, SelectionDAG &DAG) {
5135 EVT VT = N->getValueType(0);
5138 SDValue BitCounts = getCTPOP16BitCounts(N, DAG);
5139 if (VT.is64BitVector()) {
5140 SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, BitCounts);
5141 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, Extended,
5142 DAG.getIntPtrConstant(0, DL));
5144 SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i8,
5145 BitCounts, DAG.getIntPtrConstant(0, DL));
5146 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v8i16, Extracted);
5150 /// lowerCTPOP32BitElements - Returns a v2i32/v4i32 vector containing the
5151 /// bit-count for each 32-bit element from the operand. The idea here is
5152 /// to split the vector into 16-bit elements, leverage the 16-bit count
5153 /// routine, and then combine the results.
5155 /// Trace for v2i32 (v4i32 similar with Extracted/Extended exchanged):
5156 /// input = [v0 v1 ] (vi: 32-bit elements)
5157 /// Bitcast = [w0 w1 w2 w3 ] (wi: 16-bit elements, v0 = [w0 w1])
5158 /// Counts16 = [k0 k1 k2 k3 ] (ki: 16-bit elements, bit-count of wi)
5159 /// vrev: N0 = [k1 k0 k3 k2 ]
5161 /// N1 =+[k1 k0 k3 k2 ]
5163 /// N2 =+[k1 k3 k0 k2 ]
5165 /// Extended =+[k1 k3 k0 k2 ]
5167 /// Extracted=+[k1 k3 ]
5169 static SDValue lowerCTPOP32BitElements(SDNode *N, SelectionDAG &DAG) {
5170 EVT VT = N->getValueType(0);
5173 EVT VT16Bit = VT.is64BitVector() ? MVT::v4i16 : MVT::v8i16;
5175 SDValue Bitcast = DAG.getNode(ISD::BITCAST, DL, VT16Bit, N->getOperand(0));
5176 SDValue Counts16 = lowerCTPOP16BitElements(Bitcast.getNode(), DAG);
5177 SDValue N0 = DAG.getNode(ARMISD::VREV32, DL, VT16Bit, Counts16);
5178 SDValue N1 = DAG.getNode(ISD::ADD, DL, VT16Bit, Counts16, N0);
5179 SDValue N2 = DAG.getNode(ARMISD::VUZP, DL, VT16Bit, N1, N1);
5181 if (VT.is64BitVector()) {
5182 SDValue Extended = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, N2);
5183 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i32, Extended,
5184 DAG.getIntPtrConstant(0, DL));
5186 SDValue Extracted = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i16, N2,
5187 DAG.getIntPtrConstant(0, DL));
5188 return DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i32, Extracted);
5192 static SDValue LowerCTPOP(SDNode *N, SelectionDAG &DAG,
5193 const ARMSubtarget *ST) {
5194 EVT VT = N->getValueType(0);
5196 assert(ST->hasNEON() && "Custom ctpop lowering requires NEON.");
5197 assert((VT == MVT::v2i32 || VT == MVT::v4i32 ||
5198 VT == MVT::v4i16 || VT == MVT::v8i16) &&
5199 "Unexpected type for custom ctpop lowering");
5201 if (VT.getVectorElementType() == MVT::i32)
5202 return lowerCTPOP32BitElements(N, DAG);
5204 return lowerCTPOP16BitElements(N, DAG);
5207 static SDValue LowerShift(SDNode *N, SelectionDAG &DAG,
5208 const ARMSubtarget *ST) {
5209 EVT VT = N->getValueType(0);
5215 // Lower vector shifts on NEON to use VSHL.
5216 assert(ST->hasNEON() && "unexpected vector shift");
5218 // Left shifts translate directly to the vshiftu intrinsic.
5219 if (N->getOpcode() == ISD::SHL)
5220 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5221 DAG.getConstant(Intrinsic::arm_neon_vshiftu, dl,
5223 N->getOperand(0), N->getOperand(1));
5225 assert((N->getOpcode() == ISD::SRA ||
5226 N->getOpcode() == ISD::SRL) && "unexpected vector shift opcode");
5228 // NEON uses the same intrinsics for both left and right shifts. For
5229 // right shifts, the shift amounts are negative, so negate the vector of shift amounts.
5231 EVT ShiftVT = N->getOperand(1).getValueType();
5232 SDValue NegatedCount = DAG.getNode(ISD::SUB, dl, ShiftVT,
5233 getZeroVector(ShiftVT, DAG, dl),
5235 Intrinsic::ID vshiftInt = (N->getOpcode() == ISD::SRA ?
5236 Intrinsic::arm_neon_vshifts :
5237 Intrinsic::arm_neon_vshiftu);
5238 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT,
5239 DAG.getConstant(vshiftInt, dl, MVT::i32),
5240 N->getOperand(0), NegatedCount);
5243 static SDValue Expand64BitShift(SDNode *N, SelectionDAG &DAG,
5244 const ARMSubtarget *ST) {
5245 EVT VT = N->getValueType(0);
5248 // We can get here for a node like i32 = ISD::SHL i32, i64
5252 assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
5253 "Unknown shift to lower!");
5255 // We only lower SRA and SRL by 1 here; all others use generic lowering.
5256 if (!isOneConstant(N->getOperand(1)))
5259 // If we are in Thumb1 mode, we don't have RRX.
5260 if (ST->isThumb1Only()) return SDValue();
5262 // Okay, we have a 64-bit SRA or SRL of 1. Lower this to an RRX expr.
5263 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
5264 DAG.getConstant(0, dl, MVT::i32));
5265 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, N->getOperand(0),
5266 DAG.getConstant(1, dl, MVT::i32));
5268 // First, build a SRA_FLAG/SRL_FLAG op, which shifts the top part by one and
5269 // captures the result into a carry flag.
5270 unsigned Opc = N->getOpcode() == ISD::SRL ? ARMISD::SRL_FLAG:ARMISD::SRA_FLAG;
5271 Hi = DAG.getNode(Opc, dl, DAG.getVTList(MVT::i32, MVT::Glue), Hi);
5273 // The low part is an ARMISD::RRX operand, which shifts the carry in.
5274 Lo = DAG.getNode(ARMISD::RRX, dl, MVT::i32, Lo, Hi.getValue(1));
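// Roughly, for an i64 logical shift right by one this ends up as something
// like "lsrs r1, r1, #1; rrx r0, r0" (with r1:r0 holding the value): the low
// word receives the bit shifted out of the high word via the carry flag.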
5276 // Merge the pieces into a single i64 value.
5277 return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
5280 static SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) {
5281 SDValue TmpOp0, TmpOp1;
5282 bool Invert = false;
5286 SDValue Op0 = Op.getOperand(0);
5287 SDValue Op1 = Op.getOperand(1);
5288 SDValue CC = Op.getOperand(2);
5289 EVT CmpVT = Op0.getValueType().changeVectorElementTypeToInteger();
5290 EVT VT = Op.getValueType();
5291 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
5294 if (Op0.getValueType().getVectorElementType() == MVT::i64 &&
5295 (SetCCOpcode == ISD::SETEQ || SetCCOpcode == ISD::SETNE)) {
5296 // Special-case integer 64-bit equality comparisons. They aren't legal,
5297 // but they can be lowered with a few vector instructions.
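// The idea: compare the operands as vectors of 32-bit lanes, then AND the
// result with a 64-bit-reversed copy of itself, so that each 64-bit lane is
// all-ones only if both of its 32-bit halves compared equal.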
5298 unsigned CmpElements = CmpVT.getVectorNumElements() * 2;
5299 EVT SplitVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, CmpElements);
5300 SDValue CastOp0 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op0);
5301 SDValue CastOp1 = DAG.getNode(ISD::BITCAST, dl, SplitVT, Op1);
5302 SDValue Cmp = DAG.getNode(ISD::SETCC, dl, SplitVT, CastOp0, CastOp1,
5303 DAG.getCondCode(ISD::SETEQ));
5304 SDValue Reversed = DAG.getNode(ARMISD::VREV64, dl, SplitVT, Cmp);
5305 SDValue Merged = DAG.getNode(ISD::AND, dl, SplitVT, Cmp, Reversed);
5306 Merged = DAG.getNode(ISD::BITCAST, dl, CmpVT, Merged);
5307 if (SetCCOpcode == ISD::SETNE)
5308 Merged = DAG.getNOT(dl, Merged, CmpVT);
5309 Merged = DAG.getSExtOrTrunc(Merged, dl, VT);
5313 if (CmpVT.getVectorElementType() == MVT::i64)
5314 // 64-bit comparisons are not legal in general.
5317 if (Op1.getValueType().isFloatingPoint()) {
5318 switch (SetCCOpcode) {
5319 default: llvm_unreachable("Illegal FP comparison");
5321 case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH;
5323 case ISD::SETEQ: Opc = ARMISD::VCEQ; break;
5325 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
5327 case ISD::SETGT: Opc = ARMISD::VCGT; break;
5329 case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH;
5331 case ISD::SETGE: Opc = ARMISD::VCGE; break;
5332 case ISD::SETUGE: Swap = true; LLVM_FALLTHROUGH;
5333 case ISD::SETULE: Invert = true; Opc = ARMISD::VCGT; break;
5334 case ISD::SETUGT: Swap = true; LLVM_FALLTHROUGH;
5335 case ISD::SETULT: Invert = true; Opc = ARMISD::VCGE; break;
5336 case ISD::SETUEQ: Invert = true; LLVM_FALLTHROUGH;
5338 // Expand this to (OLT | OGT).
5342 Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
5343 Op1 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp0, TmpOp1);
5349 // Expand this to (OLT | OGE).
5353 Op0 = DAG.getNode(ARMISD::VCGT, dl, CmpVT, TmpOp1, TmpOp0);
5354 Op1 = DAG.getNode(ARMISD::VCGE, dl, CmpVT, TmpOp0, TmpOp1);
5358 // Integer comparisons.
5359 switch (SetCCOpcode) {
5360 default: llvm_unreachable("Illegal integer comparison");
5361 case ISD::SETNE: Invert = true; LLVM_FALLTHROUGH;
5362 case ISD::SETEQ: Opc = ARMISD::VCEQ; break;
5363 case ISD::SETLT: Swap = true; LLVM_FALLTHROUGH;
5364 case ISD::SETGT: Opc = ARMISD::VCGT; break;
5365 case ISD::SETLE: Swap = true; LLVM_FALLTHROUGH;
5366 case ISD::SETGE: Opc = ARMISD::VCGE; break;
5367 case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
5368 case ISD::SETUGT: Opc = ARMISD::VCGTU; break;
5369 case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
5370 case ISD::SETUGE: Opc = ARMISD::VCGEU; break;
5373 // Detect VTST (Vector Test Bits) = icmp ne (and (op0, op1), zero).
5374 if (Opc == ARMISD::VCEQ) {
5377 if (ISD::isBuildVectorAllZeros(Op1.getNode()))
5379 else if (ISD::isBuildVectorAllZeros(Op0.getNode()))
5382 // Ignore bitconvert.
5383 if (AndOp.getNode() && AndOp.getOpcode() == ISD::BITCAST)
5384 AndOp = AndOp.getOperand(0);
5386 if (AndOp.getNode() && AndOp.getOpcode() == ISD::AND) {
5388 Op0 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(0));
5389 Op1 = DAG.getNode(ISD::BITCAST, dl, CmpVT, AndOp.getOperand(1));
5396 std::swap(Op0, Op1);
5398 // If one of the operands is a constant vector zero, attempt to fold the
5399 // comparison to a specialized compare-against-zero form.
5401 if (ISD::isBuildVectorAllZeros(Op1.getNode()))
5403 else if (ISD::isBuildVectorAllZeros(Op0.getNode())) {
5404 if (Opc == ARMISD::VCGE)
5405 Opc = ARMISD::VCLEZ;
5406 else if (Opc == ARMISD::VCGT)
5407 Opc = ARMISD::VCLTZ;
5412 if (SingleOp.getNode()) {
5415 Result = DAG.getNode(ARMISD::VCEQZ, dl, CmpVT, SingleOp); break;
5417 Result = DAG.getNode(ARMISD::VCGEZ, dl, CmpVT, SingleOp); break;
5419 Result = DAG.getNode(ARMISD::VCLEZ, dl, CmpVT, SingleOp); break;
5421 Result = DAG.getNode(ARMISD::VCGTZ, dl, CmpVT, SingleOp); break;
5423 Result = DAG.getNode(ARMISD::VCLTZ, dl, CmpVT, SingleOp); break;
5425 Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
5428 Result = DAG.getNode(Opc, dl, CmpVT, Op0, Op1);
5431 Result = DAG.getSExtOrTrunc(Result, dl, VT);
5434 Result = DAG.getNOT(dl, Result, VT);
5439 static SDValue LowerSETCCE(SDValue Op, SelectionDAG &DAG) {
5440 SDValue LHS = Op.getOperand(0);
5441 SDValue RHS = Op.getOperand(1);
5442 SDValue Carry = Op.getOperand(2);
5443 SDValue Cond = Op.getOperand(3);
5446 assert(LHS.getSimpleValueType().isInteger() && "SETCCE is integer only.");
5448 assert(Carry.getOpcode() != ISD::CARRY_FALSE);
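// SETCCE is a setcc that also consumes an incoming carry. Lower it as a
// subtract-with-carry (ARMISD::SUBE) that produces the flags, followed by a
// CMOV that materializes 1 or 0 for the requested condition.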
5449 SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
5450 SDValue Cmp = DAG.getNode(ARMISD::SUBE, DL, VTs, LHS, RHS, Carry);
5452 SDValue FVal = DAG.getConstant(0, DL, MVT::i32);
5453 SDValue TVal = DAG.getConstant(1, DL, MVT::i32);
5454 SDValue ARMcc = DAG.getConstant(
5455 IntCCToARMCC(cast<CondCodeSDNode>(Cond)->get()), DL, MVT::i32);
5456 SDValue CCR = DAG.getRegister(ARM::CPSR, MVT::i32);
5457 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), DL, ARM::CPSR,
5458 Cmp.getValue(1), SDValue());
5459 return DAG.getNode(ARMISD::CMOV, DL, Op.getValueType(), FVal, TVal, ARMcc,
5460 CCR, Chain.getValue(1));
5463 /// isNEONModifiedImm - Check if the specified splat value corresponds to a
5464 /// valid vector constant for a NEON instruction with a "modified immediate"
5465 /// operand (e.g., VMOV). If so, return the encoded value.
5466 static SDValue isNEONModifiedImm(uint64_t SplatBits, uint64_t SplatUndef,
5467 unsigned SplatBitSize, SelectionDAG &DAG,
5468 const SDLoc &dl, EVT &VT, bool is128Bits,
5469 NEONModImmType type) {
5470 unsigned OpCmode, Imm;
5472 // SplatBitSize is set to the smallest size that splats the vector, so a
5473 // zero vector will always have SplatBitSize == 8. However, NEON modified
5474 // immediate instructions other than VMOV do not support the 8-bit encoding
5475 // of a zero vector, and the default encoding of zero is supposed to be the 32-bit version.
5480 switch (SplatBitSize) {
5482 if (type != VMOVModImm)
5484 // Any 1-byte value is OK. Op=0, Cmode=1110.
5485 assert((SplatBits & ~0xff) == 0 && "one byte splat value is too big");
5488 VT = is128Bits ? MVT::v16i8 : MVT::v8i8;
5492 // NEON's 16-bit VMOV supports splat values where only one byte is nonzero.
5493 VT = is128Bits ? MVT::v8i16 : MVT::v4i16;
5494 if ((SplatBits & ~0xff) == 0) {
5495 // Value = 0x00nn: Op=x, Cmode=100x.
5500 if ((SplatBits & ~0xff00) == 0) {
5501 // Value = 0xnn00: Op=x, Cmode=101x.
5503 Imm = SplatBits >> 8;
5509 // NEON's 32-bit VMOV supports splat values where:
5510 // * only one byte is nonzero, or
5511 // * the least significant byte is 0xff and the second byte is nonzero, or
5512 // * the least significant 2 bytes are 0xff and the third is nonzero.
5513 VT = is128Bits ? MVT::v4i32 : MVT::v2i32;
5514 if ((SplatBits & ~0xff) == 0) {
5515 // Value = 0x000000nn: Op=x, Cmode=000x.
5520 if ((SplatBits & ~0xff00) == 0) {
5521 // Value = 0x0000nn00: Op=x, Cmode=001x.
5523 Imm = SplatBits >> 8;
5526 if ((SplatBits & ~0xff0000) == 0) {
5527 // Value = 0x00nn0000: Op=x, Cmode=010x.
5529 Imm = SplatBits >> 16;
5532 if ((SplatBits & ~0xff000000) == 0) {
5533 // Value = 0xnn000000: Op=x, Cmode=011x.
5535 Imm = SplatBits >> 24;
5539 // cmode == 0b1100 and cmode == 0b1101 are not supported for VORR or VBIC
5540 if (type == OtherModImm) return SDValue();
5542 if ((SplatBits & ~0xffff) == 0 &&
5543 ((SplatBits | SplatUndef) & 0xff) == 0xff) {
5544 // Value = 0x0000nnff: Op=x, Cmode=1100.
5546 Imm = SplatBits >> 8;
5550 if ((SplatBits & ~0xffffff) == 0 &&
5551 ((SplatBits | SplatUndef) & 0xffff) == 0xffff) {
5552 // Value = 0x00nnffff: Op=x, Cmode=1101.
5554 Imm = SplatBits >> 16;
5558 // Note: there are a few 32-bit splat values (specifically: 00ffff00,
5559 // ff000000, ff0000ff, and ffff00ff) that are valid for VMOV.I64 but not
5560 // VMOV.I32. A (very) minor optimization would be to replicate the value
5561 // and fall through here to test for a valid 64-bit splat. But, then the
5562 // caller would also need to check and handle the change in size.
5566 if (type != VMOVModImm)
5568 // NEON has a 64-bit VMOV splat where each byte is either 0 or 0xff.
5569 uint64_t BitMask = 0xff;
5571 unsigned ImmMask = 1;
5573 for (int ByteNum = 0; ByteNum < 8; ++ByteNum) {
5574 if (((SplatBits | SplatUndef) & BitMask) == BitMask) {
5577 } else if ((SplatBits & BitMask) != 0) {
5584 if (DAG.getDataLayout().isBigEndian())
5585 // Swap the higher and lower 32-bit words.
5586 Imm = ((Imm & 0xf) << 4) | ((Imm & 0xf0) >> 4);
5588 // Op=1, Cmode=1110.
5590 VT = is128Bits ? MVT::v2i64 : MVT::v1i64;
5595 llvm_unreachable("unexpected size for isNEONModifiedImm");
5598 unsigned EncodedVal = ARM_AM::createNEONModImm(OpCmode, Imm);
5599 return DAG.getTargetConstant(EncodedVal, dl, MVT::i32);
5602 SDValue ARMTargetLowering::LowerConstantFP(SDValue Op, SelectionDAG &DAG,
5603 const ARMSubtarget *ST) const {
5604 bool IsDouble = Op.getValueType() == MVT::f64;
5605 ConstantFPSDNode *CFP = cast<ConstantFPSDNode>(Op);
5606 const APFloat &FPVal = CFP->getValueAPF();
5608 // Prevent floating-point constants from using literal loads
5609 // when execute-only is enabled.
5610 if (ST->genExecuteOnly()) {
5611 APInt INTVal = FPVal.bitcastToAPInt();
5614 SDValue Lo = DAG.getConstant(INTVal.trunc(32), DL, MVT::i32);
5615 SDValue Hi = DAG.getConstant(INTVal.lshr(32).trunc(32), DL, MVT::i32);
5616 if (!ST->isLittle())
5618 return DAG.getNode(ARMISD::VMOVDRR, DL, MVT::f64, Lo, Hi);
5620 return DAG.getConstant(INTVal, DL, MVT::i32);
5627 // Use the default (constant pool) lowering for double constants when we have an FPU that only supports single precision.
5629 if (IsDouble && Subtarget->isFPOnlySP())
5632 // Try splatting with a VMOV.f32...
5633 int ImmVal = IsDouble ? ARM_AM::getFP64Imm(FPVal) : ARM_AM::getFP32Imm(FPVal);
5636 if (IsDouble || !ST->useNEONForSinglePrecisionFP()) {
5637 // We have code in place to select a valid ConstantFP already, no need to do any more work.
5642 // It's a float and we are trying to use NEON operations where
5643 // possible. Lower it to a splat followed by an extract.
5645 SDValue NewVal = DAG.getTargetConstant(ImmVal, DL, MVT::i32);
5646 SDValue VecConstant = DAG.getNode(ARMISD::VMOVFPIMM, DL, MVT::v2f32,
5648 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecConstant,
5649 DAG.getConstant(0, DL, MVT::i32));
5652 // The rest of our options are NEON only, make sure that's allowed before proceeding.
5654 if (!ST->hasNEON() || (!IsDouble && !ST->useNEONForSinglePrecisionFP()))
5658 uint64_t iVal = FPVal.bitcastToAPInt().getZExtValue();
5660 // It wouldn't really be worth bothering for doubles except for one very
5661 // important value, which does happen to match: 0.0. So make sure we don't do anything unless both 32-bit halves of the double are identical.
5663 if (IsDouble && (iVal & 0xffffffff) != (iVal >> 32))
5666 // Try a VMOV.i32 (FIXME: i8, i16, or i64 could work too).
5667 SDValue NewVal = isNEONModifiedImm(iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op),
5668 VMovVT, false, VMOVModImm);
5669 if (NewVal != SDValue()) {
5671 SDValue VecConstant = DAG.getNode(ARMISD::VMOVIMM, DL, VMovVT,
5674 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
5676 // It's a float: cast and extract a vector element.
5677 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
5679 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
5680 DAG.getConstant(0, DL, MVT::i32));
5683 // Finally, try a VMVN.i32
5684 NewVal = isNEONModifiedImm(~iVal & 0xffffffffU, 0, 32, DAG, SDLoc(Op), VMovVT,
5686 if (NewVal != SDValue()) {
5688 SDValue VecConstant = DAG.getNode(ARMISD::VMVNIMM, DL, VMovVT, NewVal);
5691 return DAG.getNode(ISD::BITCAST, DL, MVT::f64, VecConstant);
5693 // It's a float: cast and extract a vector element.
5694 SDValue VecFConstant = DAG.getNode(ISD::BITCAST, DL, MVT::v2f32,
5696 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32, VecFConstant,
5697 DAG.getConstant(0, DL, MVT::i32));
5703 // Check whether a VEXT instruction can handle the shuffle mask when the
5704 // vector sources of the shuffle are the same.
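// For example, for v8i8 the mask <1, 2, 3, 4, 5, 6, 7, 0> is a single-source
// VEXT with Imm = 1: the result starts at element 1 and wraps around.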
5705 static bool isSingletonVEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
5706 unsigned NumElts = VT.getVectorNumElements();
5708 // Assume that the first shuffle index is not UNDEF. Fail if it is.
5714 // If this is a VEXT shuffle, the immediate value is the index of the first
5715 // element. The other shuffle indices must be the successive elements after the first one.
5717 unsigned ExpectedElt = Imm;
5718 for (unsigned i = 1; i < NumElts; ++i) {
5719 // Increment the expected index. If it wraps around, just follow it
5720 // back to index zero and keep going.
5722 if (ExpectedElt == NumElts)
5725 if (M[i] < 0) continue; // ignore UNDEF indices
5726 if (ExpectedElt != static_cast<unsigned>(M[i]))
5734 static bool isVEXTMask(ArrayRef<int> M, EVT VT,
5735 bool &ReverseVEXT, unsigned &Imm) {
5736 unsigned NumElts = VT.getVectorNumElements();
5737 ReverseVEXT = false;
5739 // Assume that the first shuffle index is not UNDEF. Fail if it is.
5745 // If this is a VEXT shuffle, the immediate value is the index of the first
5746 // element. The other shuffle indices must be the successive elements after the first one.
5748 unsigned ExpectedElt = Imm;
5749 for (unsigned i = 1; i < NumElts; ++i) {
5750 // Increment the expected index. If it wraps around, it may still be
5751 // a VEXT but the source vectors must be swapped.
5753 if (ExpectedElt == NumElts * 2) {
5758 if (M[i] < 0) continue; // ignore UNDEF indices
5759 if (ExpectedElt != static_cast<unsigned>(M[i]))
5763 // Adjust the index value if the source operands will be swapped.
5770 /// isVREVMask - Check if a vector shuffle corresponds to a VREV
5771 /// instruction with the specified blocksize. (The order of the elements
5772 /// within each block of the vector is reversed.)
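/// For example, a VREV32 mask for v8i16 is <1, 0, 3, 2, 5, 4, 7, 6>: the
/// 16-bit elements are swapped within each 32-bit block.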
5773 static bool isVREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
5774 assert((BlockSize==16 || BlockSize==32 || BlockSize==64) &&
5775 "Only possible block sizes for VREV are: 16, 32, 64");
5777 unsigned EltSz = VT.getScalarSizeInBits();
5781 unsigned NumElts = VT.getVectorNumElements();
5782 unsigned BlockElts = M[0] + 1;
5783 // If the first shuffle index is UNDEF, be optimistic.
5785 BlockElts = BlockSize / EltSz;
5787 if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
5790 for (unsigned i = 0; i < NumElts; ++i) {
5791 if (M[i] < 0) continue; // ignore UNDEF indices
5792 if ((unsigned) M[i] != (i - i%BlockElts) + (BlockElts - 1 - i%BlockElts))
5799 static bool isVTBLMask(ArrayRef<int> M, EVT VT) {
5800 // We can handle <8 x i8> vector shuffles. If the index in the mask is out of
5801 // range, then 0 is placed into the resulting vector. So pretty much any mask
5802 // of 8 elements can work here.
5803 return VT == MVT::v8i8 && M.size() == 8;
5806 // Checks whether the shuffle mask represents a vector transpose (VTRN) by
5807 // checking that pairs of elements in the shuffle mask represent the same index
5808 // in each vector, incrementing the expected index by 2 at each step.
5809 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 2, 6]
5810 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,c,g}
5812 // WhichResult gives the offset for each element in the mask based on which
5813 // of the two results it belongs to.
5815 // The transpose can be represented either as:
5816 // result1 = shufflevector v1, v2, result1_shuffle_mask
5817 // result2 = shufflevector v1, v2, result2_shuffle_mask
5818 // where v1/v2 and the shuffle masks have the same number of elements
5819 // (here WhichResult (see below) indicates which result is being checked)
5822 // results = shufflevector v1, v2, shuffle_mask
5823 // where both results are returned in one vector and the shuffle mask has twice
5824 // as many elements as v1/v2 (here WhichResult will always be 0 if true); here we
5825 // want to check the low half and the high half of the shuffle mask as if each were a separate mask.
5827 static bool isVTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
5828 unsigned EltSz = VT.getScalarSizeInBits();
5832 unsigned NumElts = VT.getVectorNumElements();
5833 if (M.size() != NumElts && M.size() != NumElts*2)
5836 // If the mask is twice as long as the input vector then we need to check the
5837 // upper and lower parts of the mask with a matching value for WhichResult
5838 // FIXME: A mask with only even values will be rejected in case the first
5839 // element is undefined, e.g. [-1, 4, 2, 6] will be rejected, because only
5840 // M[0] is used to determine WhichResult
5841 for (unsigned i = 0; i < M.size(); i += NumElts) {
5842 if (M.size() == NumElts * 2)
5843 WhichResult = i / NumElts;
5845 WhichResult = M[i] == 0 ? 0 : 1;
5846 for (unsigned j = 0; j < NumElts; j += 2) {
5847 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
5848 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + NumElts + WhichResult))
5853 if (M.size() == NumElts*2)
5859 /// isVTRN_v_undef_Mask - Special case of isVTRNMask for canonical form of
5860 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
5861 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>.
5862 static bool isVTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
5863 unsigned EltSz = VT.getScalarSizeInBits();
5867 unsigned NumElts = VT.getVectorNumElements();
5868 if (M.size() != NumElts && M.size() != NumElts*2)
5871 for (unsigned i = 0; i < M.size(); i += NumElts) {
5872 if (M.size() == NumElts * 2)
5873 WhichResult = i / NumElts;
5875 WhichResult = M[i] == 0 ? 0 : 1;
5876 for (unsigned j = 0; j < NumElts; j += 2) {
5877 if ((M[i+j] >= 0 && (unsigned) M[i+j] != j + WhichResult) ||
5878 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != j + WhichResult))
5883 if (M.size() == NumElts*2)
5889 // Checks whether the shuffle mask represents a vector unzip (VUZP) by checking
5890 // that the mask elements are either all even and in steps of size 2 or all odd
5891 // and in steps of size 2.
5892 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 2, 4, 6]
5893 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,c,e,g}
5895 // Requires checks similar to those of isVTRNMask with
5896 // respect to how the results are returned.
5897 static bool isVUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
5898 unsigned EltSz = VT.getScalarSizeInBits();
5902 unsigned NumElts = VT.getVectorNumElements();
5903 if (M.size() != NumElts && M.size() != NumElts*2)
5906 for (unsigned i = 0; i < M.size(); i += NumElts) {
5907 WhichResult = M[i] == 0 ? 0 : 1;
5908 for (unsigned j = 0; j < NumElts; ++j) {
5909 if (M[i+j] >= 0 && (unsigned) M[i+j] != 2 * j + WhichResult)
5914 if (M.size() == NumElts*2)
5917 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5918 if (VT.is64BitVector() && EltSz == 32)
5924 /// isVUZP_v_undef_Mask - Special case of isVUZPMask for canonical form of
5925 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
5926 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>.
5927 static bool isVUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
5928 unsigned EltSz = VT.getScalarSizeInBits();
5932 unsigned NumElts = VT.getVectorNumElements();
5933 if (M.size() != NumElts && M.size() != NumElts*2)
5936 unsigned Half = NumElts / 2;
5937 for (unsigned i = 0; i < M.size(); i += NumElts) {
5938 WhichResult = M[i] == 0 ? 0 : 1;
5939 for (unsigned j = 0; j < NumElts; j += Half) {
5940 unsigned Idx = WhichResult;
5941 for (unsigned k = 0; k < Half; ++k) {
5942 int MIdx = M[i + j + k];
5943 if (MIdx >= 0 && (unsigned) MIdx != Idx)
5950 if (M.size() == NumElts*2)
5953 // VUZP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5954 if (VT.is64BitVector() && EltSz == 32)
5960 // Checks whether the shuffle mask represents a vector zip (VZIP) by checking
5961 // that pairs of elements of the shufflemask represent the same index in each
5962 // vector incrementing sequentially through the vectors.
5963 // e.g. For v1,v2 of type v4i32 a valid shuffle mask is: [0, 4, 1, 5]
5964 // v1={a,b,c,d} => x=shufflevector v1, v2 shufflemask => x={a,e,b,f}
5966 // Requires checks similar to those of isVTRNMask with respect to how the results are returned.
5968 static bool isVZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
5969 unsigned EltSz = VT.getScalarSizeInBits();
5973 unsigned NumElts = VT.getVectorNumElements();
5974 if (M.size() != NumElts && M.size() != NumElts*2)
5977 for (unsigned i = 0; i < M.size(); i += NumElts) {
5978 WhichResult = M[i] == 0 ? 0 : 1;
5979 unsigned Idx = WhichResult * NumElts / 2;
5980 for (unsigned j = 0; j < NumElts; j += 2) {
5981 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
5982 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx + NumElts))
5988 if (M.size() == NumElts*2)
5991 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
5992 if (VT.is64BitVector() && EltSz == 32)
5998 /// isVZIP_v_undef_Mask - Special case of isVZIPMask for canonical form of
5999 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef".
6000 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>.
6001 static bool isVZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult){
6002 unsigned EltSz = VT.getScalarSizeInBits();
6006 unsigned NumElts = VT.getVectorNumElements();
6007 if (M.size() != NumElts && M.size() != NumElts*2)
6010 for (unsigned i = 0; i < M.size(); i += NumElts) {
6011 WhichResult = M[i] == 0 ? 0 : 1;
6012 unsigned Idx = WhichResult * NumElts / 2;
6013 for (unsigned j = 0; j < NumElts; j += 2) {
6014 if ((M[i+j] >= 0 && (unsigned) M[i+j] != Idx) ||
6015 (M[i+j+1] >= 0 && (unsigned) M[i+j+1] != Idx))
6021 if (M.size() == NumElts*2)
6024 // VZIP.32 for 64-bit vectors is a pseudo-instruction alias for VTRN.32.
6025 if (VT.is64BitVector() && EltSz == 32)
6031 /// Check if \p ShuffleMask is a NEON two-result shuffle (VZIP, VUZP, VTRN),
6032 /// and return the corresponding ARMISD opcode if it is, or 0 if it isn't.
6033 static unsigned isNEONTwoResultShuffleMask(ArrayRef<int> ShuffleMask, EVT VT,
6034 unsigned &WhichResult,
6037 if (isVTRNMask(ShuffleMask, VT, WhichResult))
6038 return ARMISD::VTRN;
6039 if (isVUZPMask(ShuffleMask, VT, WhichResult))
6040 return ARMISD::VUZP;
6041 if (isVZIPMask(ShuffleMask, VT, WhichResult))
6042 return ARMISD::VZIP;
6045 if (isVTRN_v_undef_Mask(ShuffleMask, VT, WhichResult))
6046 return ARMISD::VTRN;
6047 if (isVUZP_v_undef_Mask(ShuffleMask, VT, WhichResult))
6048 return ARMISD::VUZP;
6049 if (isVZIP_v_undef_Mask(ShuffleMask, VT, WhichResult))
6050 return ARMISD::VZIP;
6055 /// \return true if this is a reverse operation on a vector.
6056 static bool isReverseMask(ArrayRef<int> M, EVT VT) {
6057 unsigned NumElts = VT.getVectorNumElements();
6058 // Make sure the mask has the right size.
6059 if (NumElts != M.size())
6062 // Look for <15, ..., 3, -1, 1, 0>.
6063 for (unsigned i = 0; i != NumElts; ++i)
6064 if (M[i] >= 0 && M[i] != (int) (NumElts - 1 - i))
6070 // If N is an integer constant that can be moved into a register in one
6071 // instruction, return an SDValue of such a constant (will become a MOV
6072 // instruction). Otherwise return null.
6073 static SDValue IsSingleInstrConstant(SDValue N, SelectionDAG &DAG,
6074 const ARMSubtarget *ST, const SDLoc &dl) {
6076 if (!isa<ConstantSDNode>(N))
6078 Val = cast<ConstantSDNode>(N)->getZExtValue();
6080 if (ST->isThumb1Only()) {
6081 if (Val <= 255 || ~Val <= 255)
6082 return DAG.getConstant(Val, dl, MVT::i32);
6084 if (ARM_AM::getSOImmVal(Val) != -1 || ARM_AM::getSOImmVal(~Val) != -1)
6085 return DAG.getConstant(Val, dl, MVT::i32);
6090 // If this is a case we can't handle, return null and let the default
6091 // expansion code take care of it.
6092 SDValue ARMTargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
6093 const ARMSubtarget *ST) const {
6094 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode());
6096 EVT VT = Op.getValueType();
6098 APInt SplatBits, SplatUndef;
6099 unsigned SplatBitSize;
6101 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
6102 if (SplatUndef.isAllOnesValue())
6103 return DAG.getUNDEF(VT);
6105 if (SplatBitSize <= 64) {
6106 // Check if an immediate VMOV works.
6108 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
6109 SplatUndef.getZExtValue(), SplatBitSize,
6110 DAG, dl, VmovVT, VT.is128BitVector(),
6112 if (Val.getNode()) {
6113 SDValue Vmov = DAG.getNode(ARMISD::VMOVIMM, dl, VmovVT, Val);
6114 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
6117 // Try an immediate VMVN.
6118 uint64_t NegatedImm = (~SplatBits).getZExtValue();
6119 Val = isNEONModifiedImm(NegatedImm,
6120 SplatUndef.getZExtValue(), SplatBitSize,
6121 DAG, dl, VmovVT, VT.is128BitVector(),
6123 if (Val.getNode()) {
6124 SDValue Vmov = DAG.getNode(ARMISD::VMVNIMM, dl, VmovVT, Val);
6125 return DAG.getNode(ISD::BITCAST, dl, VT, Vmov);
6128 // Use vmov.f32 to materialize other v2f32 and v4f32 splats.
6129 if ((VT == MVT::v2f32 || VT == MVT::v4f32) && SplatBitSize == 32) {
6130 int ImmVal = ARM_AM::getFP32Imm(SplatBits);
6132 SDValue Val = DAG.getTargetConstant(ImmVal, dl, MVT::i32);
6133 return DAG.getNode(ARMISD::VMOVFPIMM, dl, VT, Val);
6139 // Scan through the operands to see if only one value is used.
6141 // As an optimisation, even if more than one value is used it may be more
6142 // profitable to splat with one value and then change some lanes.
6144 // Heuristically we decide to do this if the vector has a "dominant" value,
6145 // defined as splatted to more than half of the lanes.
6146 unsigned NumElts = VT.getVectorNumElements();
6147 bool isOnlyLowElement = true;
6148 bool usesOnlyOneValue = true;
6149 bool hasDominantValue = false;
6150 bool isConstant = true;
6152 // Map of the number of times a particular SDValue appears in the BUILD_VECTOR.
6154 DenseMap<SDValue, unsigned> ValueCounts;
6156 for (unsigned i = 0; i < NumElts; ++i) {
6157 SDValue V = Op.getOperand(i);
6161 isOnlyLowElement = false;
6162 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V))
6165 ValueCounts.insert(std::make_pair(V, 0));
6166 unsigned &Count = ValueCounts[V];
6168 // Is this value dominant? (takes up more than half of the lanes)
6169 if (++Count > (NumElts / 2)) {
6170 hasDominantValue = true;
6174 if (ValueCounts.size() != 1)
6175 usesOnlyOneValue = false;
6176 if (!Value.getNode() && ValueCounts.size() > 0)
6177 Value = ValueCounts.begin()->first;
6179 if (ValueCounts.size() == 0)
6180 return DAG.getUNDEF(VT);
6182 // Loads are better lowered with insert_vector_elt/ARMISD::BUILD_VECTOR.
6183 // Keep going if we are hitting this case.
6184 if (isOnlyLowElement && !ISD::isNormalLoad(Value.getNode()))
6185 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value);
6187 unsigned EltSize = VT.getScalarSizeInBits();
6189 // Use VDUP for non-constant splats. For f32 constant splats, reduce to
6190 // i32 and try again.
6191 if (hasDominantValue && EltSize <= 32) {
6195 // If we are VDUPing a value that comes directly from a vector, that will
6196 // cause an unnecessary move to and from a GPR, where instead we could
6197 // just use VDUPLANE. We can only do this if the lane being extracted
6198 // is at a constant index, as the VDUP from lane instructions only have
6199 // constant-index forms.
6200 ConstantSDNode *constIndex;
6201 if (Value->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6202 (constIndex = dyn_cast<ConstantSDNode>(Value->getOperand(1)))) {
6203 // We need to create a new undef vector to use for the VDUPLANE if the
6204 // size of the vector from which we get the value is different than the
6205 // size of the vector that we need to create. We will insert the element
6206 // such that the register coalescer will remove unnecessary copies.
6207 if (VT != Value->getOperand(0).getValueType()) {
6208 unsigned index = constIndex->getAPIntValue().getLimitedValue() %
6209 VT.getVectorNumElements();
6210 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
6211 DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DAG.getUNDEF(VT),
6212 Value, DAG.getConstant(index, dl, MVT::i32)),
6213 DAG.getConstant(index, dl, MVT::i32));
6215 N = DAG.getNode(ARMISD::VDUPLANE, dl, VT,
6216 Value->getOperand(0), Value->getOperand(1));
6218 N = DAG.getNode(ARMISD::VDUP, dl, VT, Value);
6220 if (!usesOnlyOneValue) {
6221 // The dominant value was splatted as 'N', but we now have to insert
6222 // all differing elements.
6223 for (unsigned I = 0; I < NumElts; ++I) {
6224 if (Op.getOperand(I) == Value)
6226 SmallVector<SDValue, 3> Ops;
6228 Ops.push_back(Op.getOperand(I));
6229 Ops.push_back(DAG.getConstant(I, dl, MVT::i32));
6230 N = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ops);
6235 if (VT.getVectorElementType().isFloatingPoint()) {
6236 SmallVector<SDValue, 8> Ops;
6237 for (unsigned i = 0; i < NumElts; ++i)
6238 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, MVT::i32,
6240 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
6241 SDValue Val = DAG.getBuildVector(VecVT, dl, Ops);
6242 Val = LowerBUILD_VECTOR(Val, DAG, ST);
6244 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
6246 if (usesOnlyOneValue) {
6247 SDValue Val = IsSingleInstrConstant(Value, DAG, ST, dl);
6248 if (isConstant && Val.getNode())
6249 return DAG.getNode(ARMISD::VDUP, dl, VT, Val);
6253 // If all elements are constants and the case above didn't get hit, fall back
6254   // to the default expansion, which will generate a load from the constant pool.
6259 // Empirical tests suggest this is rarely worth it for vectors of length <= 2.
6261 SDValue shuffle = ReconstructShuffle(Op, DAG);
6262   if (shuffle != SDValue())
6263     return shuffle;
6266 if (VT.is128BitVector() && VT != MVT::v2f64 && VT != MVT::v4f32) {
6267 // If we haven't found an efficient lowering, try splitting a 128-bit vector
6268 // into two 64-bit vectors; we might discover a better way to lower it.
6269 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElts);
6270 EVT ExtVT = VT.getVectorElementType();
6271 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElts / 2);
6272     SDValue Lower =
6273         DAG.getBuildVector(HVT, dl, makeArrayRef(&Ops[0], NumElts / 2));
6274 if (Lower.getOpcode() == ISD::BUILD_VECTOR)
6275 Lower = LowerBUILD_VECTOR(Lower, DAG, ST);
6276 SDValue Upper = DAG.getBuildVector(
6277 HVT, dl, makeArrayRef(&Ops[NumElts / 2], NumElts / 2));
6278 if (Upper.getOpcode() == ISD::BUILD_VECTOR)
6279 Upper = LowerBUILD_VECTOR(Upper, DAG, ST);
6281 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lower, Upper);
6284 // Vectors with 32- or 64-bit elements can be built by directly assigning
6285 // the subregisters. Lower it to an ARMISD::BUILD_VECTOR so the operands
6286 // will be legalized.
6287 if (EltSize >= 32) {
6288 // Do the expansion with floating-point types, since that is what the VFP
6289 // registers are defined to use, and since i64 is not legal.
6290 EVT EltVT = EVT::getFloatingPointVT(EltSize);
6291 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
6292 SmallVector<SDValue, 8> Ops;
6293 for (unsigned i = 0; i < NumElts; ++i)
6294 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, EltVT, Op.getOperand(i)));
6295 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
6296 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
6299 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we
6300 // know the default expansion would otherwise fall back on something even
6301 // worse. For a vector with one or two non-undef values, that's
6302 // scalar_to_vector for the elements followed by a shuffle (provided the
6303 // shuffle is valid for the target) and materialization element by element
6304 // on the stack followed by a load for everything else.
6305 if (!isConstant && !usesOnlyOneValue) {
6306 SDValue Vec = DAG.getUNDEF(VT);
6307 for (unsigned i = 0 ; i < NumElts; ++i) {
6308 SDValue V = Op.getOperand(i);
6311 SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i32);
6312 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx);
6320 // Gather data to see if the operation can be modelled as a
6321 // shuffle in combination with VEXTs.
6322 SDValue ARMTargetLowering::ReconstructShuffle(SDValue Op,
6323 SelectionDAG &DAG) const {
6324 assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
6326 EVT VT = Op.getValueType();
6327 unsigned NumElts = VT.getVectorNumElements();
6329 struct ShuffleSourceInfo {
6334 // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
6335 // be compatible with the shuffle we intend to construct. As a result
6336 // ShuffleVec will be some sliding window into the original Vec.
6339   // Code should guarantee that element i in Vec starts at element
6340   // "WindowBase + i * WindowScale" in ShuffleVec.
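    // For example (hypothetical types): if Vec is a v4i16 source that later
    // gets bitcast to a v8i8 ShuffleVec, then WindowScale == 2 and element i
    // of Vec occupies ShuffleVec lanes WindowBase + 2*i and WindowBase + 2*i + 1.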
6344 bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
6345     ShuffleSourceInfo(SDValue Vec)
6346       : Vec(Vec), MinElt(UINT_MAX), MaxElt(0), ShuffleVec(Vec), WindowBase(0),
6347         WindowScale(1) {}
6348   };
6350   // First gather all vectors used as an immediate source for this BUILD_VECTOR node.
6352 SmallVector<ShuffleSourceInfo, 2> Sources;
6353 for (unsigned i = 0; i < NumElts; ++i) {
6354     SDValue V = Op.getOperand(i);
6355     if (V.isUndef())
6356       continue;
6357     else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) {
6358 // A shuffle can only come from building a vector from various
6359       // elements of other vectors.
6360       return SDValue();
6361 } else if (!isa<ConstantSDNode>(V.getOperand(1))) {
6362 // Furthermore, shuffles require a constant mask, whereas extractelts
6363       // accept variable indices.
6364       return SDValue();
6365     }
6367     // Add this element source to the list if it's not already there.
6368 SDValue SourceVec = V.getOperand(0);
6369 auto Source = find(Sources, SourceVec);
6370 if (Source == Sources.end())
6371 Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));
6373 // Update the minimum and maximum lane number seen.
6374 unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
6375 Source->MinElt = std::min(Source->MinElt, EltNo);
6376 Source->MaxElt = std::max(Source->MaxElt, EltNo);
6379   // Currently only do something sane when at most two source vectors are involved.
6381   if (Sources.size() > 2)
6382     return SDValue();
6384   // Find out the smallest element size among the result and the two sources,
6385   // and use it as the element size to build the shuffle_vector.
6386 EVT SmallestEltTy = VT.getVectorElementType();
6387 for (auto &Source : Sources) {
6388 EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
6389 if (SrcEltTy.bitsLT(SmallestEltTy))
6390 SmallestEltTy = SrcEltTy;
6392 unsigned ResMultiplier =
6393 VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
6394 NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
6395 EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);
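  // For example (hypothetical case): for a v4i32 BUILD_VECTOR fed by a v8i16
  // source, SmallestEltTy is i16, so ResMultiplier == 32/16 == 2 and the
  // shuffle is built over ShuffleVT == v8i16 (NumElts == 128/16 == 8).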
6397 // If the source vector is too wide or too narrow, we may nevertheless be able
6398 // to construct a compatible shuffle either by concatenating it with UNDEF or
6399 // extracting a suitable range of elements.
6400 for (auto &Src : Sources) {
6401 EVT SrcVT = Src.ShuffleVec.getValueType();
6403     if (SrcVT.getSizeInBits() == VT.getSizeInBits())
6404       continue;
6406 // This stage of the search produces a source with the same element type as
6407 // the original, but with a total width matching the BUILD_VECTOR output.
6408 EVT EltVT = SrcVT.getVectorElementType();
6409 unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
6410 EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);
6412 if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
6413       if (2 * SrcVT.getSizeInBits() != VT.getSizeInBits())
6414         return SDValue();
6415       // We can pad out the smaller vector for free by concatenating it with
6416       // UNDEF, so just use the padded version as the shuffle source.
6417       Src.ShuffleVec =
6418           DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
6419 DAG.getUNDEF(Src.ShuffleVec.getValueType()));
6423     if (SrcVT.getSizeInBits() != 2 * VT.getSizeInBits())
6424       return SDValue();
6426     if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
6427       // Span too large for a VEXT to cope
6428       return SDValue();
6429     }
6431 if (Src.MinElt >= NumSrcElts) {
6432       // The extraction can just take the second half
6433       Src.ShuffleVec =
6434           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6435 DAG.getConstant(NumSrcElts, dl, MVT::i32));
6436 Src.WindowBase = -NumSrcElts;
6437 } else if (Src.MaxElt < NumSrcElts) {
6438       // The extraction can just take the first half
6439       Src.ShuffleVec =
6440           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6441 DAG.getConstant(0, dl, MVT::i32));
6443       // An actual VEXT is needed
6444       SDValue VEXTSrc1 =
6445           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6446                       DAG.getConstant(0, dl, MVT::i32));
6447       SDValue VEXTSrc2 =
6448           DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
6449                       DAG.getConstant(NumSrcElts, dl, MVT::i32));
6451       Src.ShuffleVec = DAG.getNode(ARMISD::VEXT, dl, DestVT, VEXTSrc1,
6452                                    VEXTSrc2,
6453                                    DAG.getConstant(Src.MinElt, dl, MVT::i32));
6454 Src.WindowBase = -Src.MinElt;
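      // For example (hypothetical lanes): a source twice as wide as VT whose
      // used elements span MinElt == 5 .. MaxElt == 11 with NumSrcElts == 8
      // cannot use either half directly, so a VEXT starting at lane 5 is
      // emitted and WindowBase becomes -5.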
6458 // Another possible incompatibility occurs from the vector element types. We
6459   // can fix this by bitcasting the source vectors to the same element type
6460   // we intend to use for the shuffle.
6461 for (auto &Src : Sources) {
6462 EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
6463 if (SrcEltTy == SmallestEltTy)
6465 assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
6466 Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
6467 Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
6468 Src.WindowBase *= Src.WindowScale;
6471 // Final sanity check before we try to actually produce a shuffle.
6473 for (auto Src : Sources)
6474 assert(Src.ShuffleVec.getValueType() == ShuffleVT);
6477   // The stars all align; our next step is to produce the mask for the shuffle.
6478 SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
6479 int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
6480 for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
6481 SDValue Entry = Op.getOperand(i);
6482     if (Entry.isUndef())
6483       continue;
6485 auto Src = find(Sources, Entry.getOperand(0));
6486 int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();
6488 // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
6489     // trunc. So only std::min(SrcBits, DestBits) bits actually get defined in
6490     // this process.
6491 EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
6492 int BitsDefined = std::min(OrigEltTy.getSizeInBits(),
6493 VT.getScalarSizeInBits());
6494 int LanesDefined = BitsDefined / BitsPerShuffleLane;
6496 // This source is expected to fill ResMultiplier lanes of the final shuffle,
6497 // starting at the appropriate offset.
6498 int *LaneMask = &Mask[i * ResMultiplier];
6500 int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
6501 ExtractBase += NumElts * (Src - Sources.begin());
6502 for (int j = 0; j < LanesDefined; ++j)
6503 LaneMask[j] = ExtractBase + j;
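    // For example (hypothetical values): with WindowScale == 2, WindowBase == 0
    // and EltNo == 3, ExtractBase is lane 6 of that source; if the element came
    // from the second source, NumElts is added so the mask indexes into the
    // concatenated pair of shuffle operands.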
6506 // Final check before we try to produce nonsense...
6507   if (!isShuffleMaskLegal(Mask, ShuffleVT))
6508     return SDValue();
6510 // We can't handle more than two sources. This should have already
6511 // been checked before this point.
6512 assert(Sources.size() <= 2 && "Too many sources!");
6514 SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
6515 for (unsigned i = 0; i < Sources.size(); ++i)
6516 ShuffleOps[i] = Sources[i].ShuffleVec;
6518 SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
6519 ShuffleOps[1], Mask);
6520 return DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);
6523 /// isShuffleMaskLegal - Targets can use this to indicate that they only
6524 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
6525 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
6526 /// are assumed to be legal.
6528 ARMTargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
6530 if (VT.getVectorNumElements() == 4 &&
6531 (VT.is128BitVector() || VT.is64BitVector())) {
6532 unsigned PFIndexes[4];
6533 for (unsigned i = 0; i != 4; ++i) {
6534       if (M[i] < 0) PFIndexes[i] = 8;
6535       else PFIndexes[i] = M[i];
6540 // Compute the index in the perfect shuffle table.
6541 unsigned PFTableIndex =
6542 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
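    // Each index is a base-9 digit: values 0-7 select an element of the two
    // concatenated 4-element inputs and 8 stands for an undef lane, so e.g.
    // the mask <2, u, 0, 1> becomes {2, 8, 0, 1} and indexes entry
    // 2*729 + 8*81 + 0*9 + 1 == 2107 of PerfectShuffleTable.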
6543     unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
6544     unsigned Cost = (PFEntry >> 30);
6546     if (Cost <= 4)
6547       return true;
6550 bool ReverseVEXT, isV_UNDEF;
6551 unsigned Imm, WhichResult;
6553 unsigned EltSize = VT.getScalarSizeInBits();
6554 return (EltSize >= 32 ||
6555 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
6556 isVREVMask(M, VT, 64) ||
6557 isVREVMask(M, VT, 32) ||
6558 isVREVMask(M, VT, 16) ||
6559 isVEXTMask(M, VT, ReverseVEXT, Imm) ||
6560 isVTBLMask(M, VT) ||
6561 isNEONTwoResultShuffleMask(M, VT, WhichResult, isV_UNDEF) ||
6562 ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(M, VT)));
6565 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit
6566 /// the specified operations to build the shuffle.
6567 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS,
6568 SDValue RHS, SelectionDAG &DAG,
6570 unsigned OpNum = (PFEntry >> 26) & 0x0F;
6571 unsigned LHSID = (PFEntry >> 13) & ((1 << 13)-1);
6572 unsigned RHSID = (PFEntry >> 0) & ((1 << 13)-1);
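  // As decoded above, each 32-bit PFEntry packs: bits [31:30] the cost,
  // bits [29:26] the operation, bits [25:13] the table id of the LHS
  // sub-shuffle and bits [12:0] the table id of the RHS sub-shuffle.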
6575 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3>
6584 OP_VUZPL, // VUZP, left result
6585 OP_VUZPR, // VUZP, right result
6586 OP_VZIPL, // VZIP, left result
6587 OP_VZIPR, // VZIP, right result
6588 OP_VTRNL, // VTRN, left result
6589 OP_VTRNR // VTRN, right result
6592 if (OpNum == OP_COPY) {
6593 if (LHSID == (1*9+2)*9+3) return LHS;
6594 assert(LHSID == ((4*9+5)*9+6)*9+7 && "Illegal OP_COPY!");
6598 SDValue OpLHS, OpRHS;
6599 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl);
6600 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl);
6601 EVT VT = OpLHS.getValueType();
6604 default: llvm_unreachable("Unknown shuffle opcode!");
6606 // VREV divides the vector in half and swaps within the half.
6607 if (VT.getVectorElementType() == MVT::i32 ||
6608 VT.getVectorElementType() == MVT::f32)
6609 return DAG.getNode(ARMISD::VREV64, dl, VT, OpLHS);
6610 // vrev <4 x i16> -> VREV32
6611 if (VT.getVectorElementType() == MVT::i16)
6612 return DAG.getNode(ARMISD::VREV32, dl, VT, OpLHS);
6613 // vrev <4 x i8> -> VREV16
6614 assert(VT.getVectorElementType() == MVT::i8);
6615 return DAG.getNode(ARMISD::VREV16, dl, VT, OpLHS);
6620 return DAG.getNode(ARMISD::VDUPLANE, dl, VT,
6621 OpLHS, DAG.getConstant(OpNum-OP_VDUP0, dl, MVT::i32));
6625 return DAG.getNode(ARMISD::VEXT, dl, VT,
6627 DAG.getConstant(OpNum - OP_VEXT1 + 1, dl, MVT::i32));
6630 return DAG.getNode(ARMISD::VUZP, dl, DAG.getVTList(VT, VT),
6631 OpLHS, OpRHS).getValue(OpNum-OP_VUZPL);
6634 return DAG.getNode(ARMISD::VZIP, dl, DAG.getVTList(VT, VT),
6635 OpLHS, OpRHS).getValue(OpNum-OP_VZIPL);
6638 return DAG.getNode(ARMISD::VTRN, dl, DAG.getVTList(VT, VT),
6639 OpLHS, OpRHS).getValue(OpNum-OP_VTRNL);
6643 static SDValue LowerVECTOR_SHUFFLEv8i8(SDValue Op,
6644 ArrayRef<int> ShuffleMask,
6645 SelectionDAG &DAG) {
6646 // Check to see if we can use the VTBL instruction.
6647 SDValue V1 = Op.getOperand(0);
6648 SDValue V2 = Op.getOperand(1);
6651 SmallVector<SDValue, 8> VTBLMask;
6652 for (ArrayRef<int>::iterator
6653 I = ShuffleMask.begin(), E = ShuffleMask.end(); I != E; ++I)
6654 VTBLMask.push_back(DAG.getConstant(*I, DL, MVT::i32));
6656 if (V2.getNode()->isUndef())
6657 return DAG.getNode(ARMISD::VTBL1, DL, MVT::v8i8, V1,
6658 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
6660 return DAG.getNode(ARMISD::VTBL2, DL, MVT::v8i8, V1, V2,
6661 DAG.getBuildVector(MVT::v8i8, DL, VTBLMask));
6664 static SDValue LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(SDValue Op,
6665 SelectionDAG &DAG) {
6667 SDValue OpLHS = Op.getOperand(0);
6668 EVT VT = OpLHS.getValueType();
6670 assert((VT == MVT::v8i16 || VT == MVT::v16i8) &&
6671 "Expect an v8i16/v16i8 type");
6672 OpLHS = DAG.getNode(ARMISD::VREV64, DL, VT, OpLHS);
6673   // For a v16i8 type: after the VREV we have <7, ..., 0, 15, ..., 8>. Now,
6674 // extract the first 8 bytes into the top double word and the last 8 bytes
6675 // into the bottom double word. The v8i16 case is similar.
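  // For the v8i16 case, for example: VREV64 turns <0,1,2,3,4,5,6,7> into
  // <3,2,1,0,7,6,5,4>, and the VEXT #4 emitted below rotates that to the fully
  // reversed <7,6,5,4,3,2,1,0>.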
6676 unsigned ExtractNum = (VT == MVT::v16i8) ? 8 : 4;
6677 return DAG.getNode(ARMISD::VEXT, DL, VT, OpLHS, OpLHS,
6678 DAG.getConstant(ExtractNum, DL, MVT::i32));
6681 static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
6682 SDValue V1 = Op.getOperand(0);
6683 SDValue V2 = Op.getOperand(1);
6685 EVT VT = Op.getValueType();
6686 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());
6688 // Convert shuffles that are directly supported on NEON to target-specific
6689 // DAG nodes, instead of keeping them as shuffles and matching them again
6690 // during code selection. This is more efficient and avoids the possibility
6691 // of inconsistencies between legalization and selection.
6692 // FIXME: floating-point vectors should be canonicalized to integer vectors
6693   // of the same type so that they get CSEd properly.
6694 ArrayRef<int> ShuffleMask = SVN->getMask();
6696 unsigned EltSize = VT.getScalarSizeInBits();
6697 if (EltSize <= 32) {
6698 if (SVN->isSplat()) {
6699 int Lane = SVN->getSplatIndex();
6700       // If this is an undef splat, generate it via a plain VDUP, if possible.
6701 if (Lane == -1) Lane = 0;
6703 // Test if V1 is a SCALAR_TO_VECTOR.
6704 if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR) {
6705 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
6707 // Test if V1 is a BUILD_VECTOR which is equivalent to a SCALAR_TO_VECTOR
6708       // (and probably will turn into a SCALAR_TO_VECTOR once legalization
6709       // reaches it).
6710 if (Lane == 0 && V1.getOpcode() == ISD::BUILD_VECTOR &&
6711 !isa<ConstantSDNode>(V1.getOperand(0))) {
6712 bool IsScalarToVector = true;
6713 for (unsigned i = 1, e = V1.getNumOperands(); i != e; ++i)
6714           if (!V1.getOperand(i).isUndef()) {
6715             IsScalarToVector = false;
6716             break;
6717           }
6718 if (IsScalarToVector)
6719 return DAG.getNode(ARMISD::VDUP, dl, VT, V1.getOperand(0));
6721 return DAG.getNode(ARMISD::VDUPLANE, dl, VT, V1,
6722 DAG.getConstant(Lane, dl, MVT::i32));
6727     if (isVEXTMask(ShuffleMask, VT, ReverseVEXT, Imm)) {
6728       if (ReverseVEXT)
6729         std::swap(V1, V2);
6730       return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V2,
6731 DAG.getConstant(Imm, dl, MVT::i32));
6734 if (isVREVMask(ShuffleMask, VT, 64))
6735 return DAG.getNode(ARMISD::VREV64, dl, VT, V1);
6736 if (isVREVMask(ShuffleMask, VT, 32))
6737 return DAG.getNode(ARMISD::VREV32, dl, VT, V1);
6738 if (isVREVMask(ShuffleMask, VT, 16))
6739 return DAG.getNode(ARMISD::VREV16, dl, VT, V1);
6741 if (V2->isUndef() && isSingletonVEXTMask(ShuffleMask, VT, Imm)) {
6742 return DAG.getNode(ARMISD::VEXT, dl, VT, V1, V1,
6743 DAG.getConstant(Imm, dl, MVT::i32));
6746 // Check for Neon shuffles that modify both input vectors in place.
6747 // If both results are used, i.e., if there are two shuffles with the same
6748 // source operands and with masks corresponding to both results of one of
6749 // these operations, DAG memoization will ensure that a single node is
6750 // used for both shuffles.
6751 unsigned WhichResult;
6753     if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
6754             ShuffleMask, VT, WhichResult, isV_UNDEF)) {
6755       if (isV_UNDEF)
6756         V2 = V1;
6757 return DAG.getNode(ShuffleOpc, dl, DAG.getVTList(VT, VT), V1, V2)
6758 .getValue(WhichResult);
6761 // Also check for these shuffles through CONCAT_VECTORS: we canonicalize
6762 // shuffles that produce a result larger than their operands with:
6763     //   shuffle(concat(v1, undef), concat(v2, undef))
6764     // ->
6765     //   shuffle(concat(v1, v2), undef)
6766 // because we can access quad vectors (see PerformVECTOR_SHUFFLECombine).
6768 // This is useful in the general case, but there are special cases where
6769 // native shuffles produce larger results: the two-result ops.
6771 // Look through the concat when lowering them:
6772     //   shuffle(concat(v1, v2), undef)
6773     // ->
6774     //   concat(VZIP(v1, v2):0, :1)
6776 if (V1->getOpcode() == ISD::CONCAT_VECTORS && V2->isUndef()) {
6777 SDValue SubV1 = V1->getOperand(0);
6778 SDValue SubV2 = V1->getOperand(1);
6779 EVT SubVT = SubV1.getValueType();
6781 // We expect these to have been canonicalized to -1.
6782 assert(all_of(ShuffleMask, [&](int i) {
6783 return i < (int)VT.getVectorNumElements();
6784 }) && "Unexpected shuffle index into UNDEF operand!");
6786 if (unsigned ShuffleOpc = isNEONTwoResultShuffleMask(
6787 ShuffleMask, SubVT, WhichResult, isV_UNDEF)) {
6790 assert((WhichResult == 0) &&
6791 "In-place shuffle of concat can only have one result!");
6792       SDValue Res = DAG.getNode(ShuffleOpc, dl, DAG.getVTList(SubVT, SubVT),
6793                                 SubV1, SubV2);
6794       return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Res.getValue(0),
6795                          Res.getValue(1));
6800 // If the shuffle is not directly supported and it has 4 elements, use
6801 // the PerfectShuffle-generated table to synthesize it from other shuffles.
6802 unsigned NumElts = VT.getVectorNumElements();
6804 unsigned PFIndexes[4];
6805 for (unsigned i = 0; i != 4; ++i) {
6806       if (ShuffleMask[i] < 0)
6807         PFIndexes[i] = 8;
6808       else
6809         PFIndexes[i] = ShuffleMask[i];
6812 // Compute the index in the perfect shuffle table.
6813 unsigned PFTableIndex =
6814 PFIndexes[0]*9*9*9+PFIndexes[1]*9*9+PFIndexes[2]*9+PFIndexes[3];
6815 unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
6816 unsigned Cost = (PFEntry >> 30);
6818     if (Cost <= 4)
6819       return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl);
6822 // Implement shuffles with 32- or 64-bit elements as ARMISD::BUILD_VECTORs.
6823 if (EltSize >= 32) {
6824 // Do the expansion with floating-point types, since that is what the VFP
6825 // registers are defined to use, and since i64 is not legal.
6826 EVT EltVT = EVT::getFloatingPointVT(EltSize);
6827 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumElts);
6828 V1 = DAG.getNode(ISD::BITCAST, dl, VecVT, V1);
6829 V2 = DAG.getNode(ISD::BITCAST, dl, VecVT, V2);
6830 SmallVector<SDValue, 8> Ops;
6831 for (unsigned i = 0; i < NumElts; ++i) {
6832 if (ShuffleMask[i] < 0)
6833         Ops.push_back(DAG.getUNDEF(EltVT));
6834       else
6835         Ops.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
6836 ShuffleMask[i] < (int)NumElts ? V1 : V2,
6837                                   DAG.getConstant(ShuffleMask[i] & (NumElts-1),
6838                                                   dl, MVT::i32)));
6840 SDValue Val = DAG.getNode(ARMISD::BUILD_VECTOR, dl, VecVT, Ops);
6841 return DAG.getNode(ISD::BITCAST, dl, VT, Val);
6844 if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT))
6845 return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);
6847 if (VT == MVT::v8i8)
6848     if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
6849       return NewOp;
6854 static SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
6855 // INSERT_VECTOR_ELT is legal only for immediate indexes.
6856   SDValue Lane = Op.getOperand(2);
6857   if (!isa<ConstantSDNode>(Lane))
6858     return SDValue();
6860   return Op;
6863 static SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) {
6864 // EXTRACT_VECTOR_ELT is legal only for immediate indexes.
6865 SDValue Lane = Op.getOperand(1);
6866   if (!isa<ConstantSDNode>(Lane))
6867     return SDValue();
6869 SDValue Vec = Op.getOperand(0);
6870 if (Op.getValueType() == MVT::i32 && Vec.getScalarValueSizeInBits() < 32) {
6872 return DAG.getNode(ARMISD::VGETLANEu, dl, MVT::i32, Vec, Lane);
6878 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
6879 // The only time a CONCAT_VECTORS operation can have legal types is when
6880 // two 64-bit vectors are concatenated to a 128-bit vector.
6881 assert(Op.getValueType().is128BitVector() && Op.getNumOperands() == 2 &&
6882 "unexpected CONCAT_VECTORS");
6884 SDValue Val = DAG.getUNDEF(MVT::v2f64);
6885 SDValue Op0 = Op.getOperand(0);
6886 SDValue Op1 = Op.getOperand(1);
6888 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
6889 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op0),
6890 DAG.getIntPtrConstant(0, dl));
6892 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2f64, Val,
6893 DAG.getNode(ISD::BITCAST, dl, MVT::f64, Op1),
6894 DAG.getIntPtrConstant(1, dl));
6895 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Val);
6898 /// isExtendedBUILD_VECTOR - Check if N is a constant BUILD_VECTOR where each
6899 /// element has been zero/sign-extended, depending on the isSigned parameter,
6900 /// from an integer type half its size.
6901 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
6903 // A v2i64 BUILD_VECTOR will have been legalized to a BITCAST from v4i32.
6904 EVT VT = N->getValueType(0);
6905 if (VT == MVT::v2i64 && N->getOpcode() == ISD::BITCAST) {
6906 SDNode *BVN = N->getOperand(0).getNode();
6907 if (BVN->getValueType(0) != MVT::v4i32 ||
6908         BVN->getOpcode() != ISD::BUILD_VECTOR)
6909       return false;
6910 unsigned LoElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
6911 unsigned HiElt = 1 - LoElt;
6912 ConstantSDNode *Lo0 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt));
6913 ConstantSDNode *Hi0 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt));
6914 ConstantSDNode *Lo1 = dyn_cast<ConstantSDNode>(BVN->getOperand(LoElt+2));
6915 ConstantSDNode *Hi1 = dyn_cast<ConstantSDNode>(BVN->getOperand(HiElt+2));
6916     if (!Lo0 || !Hi0 || !Lo1 || !Hi1)
6917       return false;
6919 if (Hi0->getSExtValue() == Lo0->getSExtValue() >> 32 &&
6920         Hi1->getSExtValue() == Lo1->getSExtValue() >> 32)
6921       return true;
6923     if (Hi0->isNullValue() && Hi1->isNullValue())
6924       return true;
6929   if (N->getOpcode() != ISD::BUILD_VECTOR)
6930     return false;
6932 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
6933 SDNode *Elt = N->getOperand(i).getNode();
6934 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
6935 unsigned EltSize = VT.getScalarSizeInBits();
6936 unsigned HalfSize = EltSize / 2;
6938 if (!isIntN(HalfSize, C->getSExtValue()))
6941 if (!isUIntN(HalfSize, C->getZExtValue()))
6952 /// isSignExtended - Check if a node is a vector value that is sign-extended
6953 /// or a constant BUILD_VECTOR with sign-extended elements.
6954 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
6955   if (N->getOpcode() == ISD::SIGN_EXTEND || ISD::isSEXTLoad(N))
6956     return true;
6957   if (isExtendedBUILD_VECTOR(N, DAG, true))
6958     return true;
6962 /// isZeroExtended - Check if a node is a vector value that is zero-extended
6963 /// or a constant BUILD_VECTOR with zero-extended elements.
6964 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
6965   if (N->getOpcode() == ISD::ZERO_EXTEND || ISD::isZEXTLoad(N))
6966     return true;
6967   if (isExtendedBUILD_VECTOR(N, DAG, false))
6968     return true;
6972 static EVT getExtensionTo64Bits(const EVT &OrigVT) {
6973   if (OrigVT.getSizeInBits() >= 64)
6974     return OrigVT;
6976 assert(OrigVT.isSimple() && "Expecting a simple value type");
6978 MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy;
6979 switch (OrigSimpleTy) {
6980 default: llvm_unreachable("Unexpected Vector Type");
6989 /// AddRequiredExtensionForVMULL - Add a sign/zero extension to extend the total
6990 /// value size to 64 bits. We need a 64-bit D register as an operand to VMULL.
6991 /// We insert the required extension here to get the vector to fill a D register.
6992 static SDValue AddRequiredExtensionForVMULL(SDValue N, SelectionDAG &DAG,
6995 unsigned ExtOpcode) {
6996 // The vector originally had a size of OrigTy. It was then extended to ExtTy.
6997   // We expect ExtTy to be 128 bits total. If OrigTy is less than
6998   // 64 bits, we need to insert a new extension so that it will be 64 bits.
6999 assert(ExtTy.is128BitVector() && "Unexpected extension size");
7000   if (OrigTy.getSizeInBits() >= 64)
7001     return N;
7003 // Must extend size to at least 64 bits to be used as an operand for VMULL.
7004 EVT NewVT = getExtensionTo64Bits(OrigTy);
7006 return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
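  // For example (hypothetical types): a v4i8 operand that was extended to
  // v4i32 for the multiply is widened here to v4i16 (via getExtensionTo64Bits),
  // so that VMULL sees a full 64-bit D-register input.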
7009 /// SkipLoadExtensionForVMULL - return a load of the original vector size that
7010 /// does not do any sign/zero extension. If the original vector is less
7011 /// than 64 bits, an appropriate extension will be added after the load to
7012 /// reach a total size of 64 bits. We have to add the extension separately
7013 /// because ARM does not have a sign/zero extending load for vectors.
7014 static SDValue SkipLoadExtensionForVMULL(LoadSDNode *LD, SelectionDAG& DAG) {
7015 EVT ExtendedTy = getExtensionTo64Bits(LD->getMemoryVT());
7017 // The load already has the right type.
7018 if (ExtendedTy == LD->getMemoryVT())
7019 return DAG.getLoad(LD->getMemoryVT(), SDLoc(LD), LD->getChain(),
7020 LD->getBasePtr(), LD->getPointerInfo(),
7021 LD->getAlignment(), LD->getMemOperand()->getFlags());
7023 // We need to create a zextload/sextload. We cannot just create a load
7024   // followed by a sext/zext node because LowerMUL is also run during normal
7025 // operation legalization where we can't create illegal types.
7026 return DAG.getExtLoad(LD->getExtensionType(), SDLoc(LD), ExtendedTy,
7027 LD->getChain(), LD->getBasePtr(), LD->getPointerInfo(),
7028 LD->getMemoryVT(), LD->getAlignment(),
7029 LD->getMemOperand()->getFlags());
7032 /// SkipExtensionForVMULL - For a node that is a SIGN_EXTEND, ZERO_EXTEND,
7033 /// extending load, or BUILD_VECTOR with extended elements, return the
7034 /// unextended value. The unextended vector should be 64 bits so that it can
7035 /// be used as an operand to a VMULL instruction. If the original vector size
7036 /// before extension is less than 64 bits, we add an extension to resize
7037 /// the vector to 64 bits.
7038 static SDValue SkipExtensionForVMULL(SDNode *N, SelectionDAG &DAG) {
7039 if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
7040 return AddRequiredExtensionForVMULL(N->getOperand(0), DAG,
7041 N->getOperand(0)->getValueType(0),
7045 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
7046 return SkipLoadExtensionForVMULL(LD, DAG);
7048 // Otherwise, the value must be a BUILD_VECTOR. For v2i64, it will
7049 // have been legalized as a BITCAST from v4i32.
7050 if (N->getOpcode() == ISD::BITCAST) {
7051 SDNode *BVN = N->getOperand(0).getNode();
7052 assert(BVN->getOpcode() == ISD::BUILD_VECTOR &&
7053 BVN->getValueType(0) == MVT::v4i32 && "expected v4i32 BUILD_VECTOR");
7054 unsigned LowElt = DAG.getDataLayout().isBigEndian() ? 1 : 0;
7055 return DAG.getBuildVector(
7056 MVT::v2i32, SDLoc(N),
7057 {BVN->getOperand(LowElt), BVN->getOperand(LowElt + 2)});
7059 // Construct a new BUILD_VECTOR with elements truncated to half the size.
7060 assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
7061 EVT VT = N->getValueType(0);
7062 unsigned EltSize = VT.getScalarSizeInBits() / 2;
7063 unsigned NumElts = VT.getVectorNumElements();
7064 MVT TruncVT = MVT::getIntegerVT(EltSize);
7065 SmallVector<SDValue, 8> Ops;
7067 for (unsigned i = 0; i != NumElts; ++i) {
7068 ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
7069 const APInt &CInt = C->getAPIntValue();
7070 // Element types smaller than 32 bits are not legal, so use i32 elements.
7071 // The values are implicitly truncated so sext vs. zext doesn't matter.
7072 Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
7074 return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
7077 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
7078 unsigned Opcode = N->getOpcode();
7079 if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
7080 SDNode *N0 = N->getOperand(0).getNode();
7081 SDNode *N1 = N->getOperand(1).getNode();
7082 return N0->hasOneUse() && N1->hasOneUse() &&
7083 isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
7088 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
7089 unsigned Opcode = N->getOpcode();
7090 if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
7091 SDNode *N0 = N->getOperand(0).getNode();
7092 SDNode *N1 = N->getOperand(1).getNode();
7093 return N0->hasOneUse() && N1->hasOneUse() &&
7094 isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
7099 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) {
7100 // Multiplications are only custom-lowered for 128-bit vectors so that
7101 // VMULL can be detected. Otherwise v2i64 multiplications are not legal.
7102 EVT VT = Op.getValueType();
7103 assert(VT.is128BitVector() && VT.isInteger() &&
7104 "unexpected type for custom-lowering ISD::MUL");
7105 SDNode *N0 = Op.getOperand(0).getNode();
7106 SDNode *N1 = Op.getOperand(1).getNode();
7107 unsigned NewOpc = 0;
7109 bool isN0SExt = isSignExtended(N0, DAG);
7110 bool isN1SExt = isSignExtended(N1, DAG);
7111 if (isN0SExt && isN1SExt)
7112 NewOpc = ARMISD::VMULLs;
7114 bool isN0ZExt = isZeroExtended(N0, DAG);
7115 bool isN1ZExt = isZeroExtended(N1, DAG);
7116 if (isN0ZExt && isN1ZExt)
7117 NewOpc = ARMISD::VMULLu;
7118 else if (isN1SExt || isN1ZExt) {
7119 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these
7120 // into (s/zext A * s/zext C) + (s/zext B * s/zext C)
7121 if (isN1SExt && isAddSubSExt(N0, DAG)) {
7122 NewOpc = ARMISD::VMULLs;
7124 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) {
7125 NewOpc = ARMISD::VMULLu;
7127 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) {
7129 NewOpc = ARMISD::VMULLu;
7135   if (VT == MVT::v2i64)
7136     // Fall through to expand this. It is not legal.
7137     return SDValue();
7139   // Other vector multiplications are legal.
7140   return Op;
7144 // Legalize to a VMULL instruction.
7147 SDValue Op1 = SkipExtensionForVMULL(N1, DAG);
7149 Op0 = SkipExtensionForVMULL(N0, DAG);
7150 assert(Op0.getValueType().is64BitVector() &&
7151 Op1.getValueType().is64BitVector() &&
7152 "unexpected types for extended operands to VMULL");
7153 return DAG.getNode(NewOpc, DL, VT, Op0, Op1);
7156   // Optimize (zext A + zext B) * C to (VMULL A, C) + (VMULL B, C) during
7157   // isel lowering to take advantage of no-stall back-to-back vmul + vmla.
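  // For example (illustrative assembly, register names arbitrary):
  //   vmull.u16 q0, d4, d6
  //   vmlal.u16 q0, d5, d6
  // tends to be faster than materializing the add first with
  //   vaddl.u16 q0, d4, d5
  //   vmovl.u16 q1, d6
  //   vmul.i32  q0, q0, q1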
7164 SDValue N00 = SkipExtensionForVMULL(N0->getOperand(0).getNode(), DAG);
7165 SDValue N01 = SkipExtensionForVMULL(N0->getOperand(1).getNode(), DAG);
7166 EVT Op1VT = Op1.getValueType();
7167 return DAG.getNode(N0->getOpcode(), DL, VT,
7168 DAG.getNode(NewOpc, DL, VT,
7169 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1),
7170 DAG.getNode(NewOpc, DL, VT,
7171 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1));
7174 static SDValue LowerSDIV_v4i8(SDValue X, SDValue Y, const SDLoc &dl,
7175 SelectionDAG &DAG) {
7176 // TODO: Should this propagate fast-math-flags?
7179 // float4 xf = vcvt_f32_s32(vmovl_s16(a.lo));
7180 // float4 yf = vcvt_f32_s32(vmovl_s16(b.lo));
7181 X = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, X);
7182 Y = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Y);
7183 X = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, X);
7184 Y = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, Y);
7185 // Get reciprocal estimate.
7186 // float4 recip = vrecpeq_f32(yf);
7187 Y = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7188 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7190 // Because char has a smaller range than uchar, we can actually get away
7191 // without any newton steps. This requires that we use a weird bias
7192 // of 0xb000, however (again, this has been exhaustively tested).
7193 // float4 result = as_float4(as_int4(xf*recip) + 0xb000);
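  // (Likely rationale: the quotient of two i8 values has magnitude at most
  // 127, so the roughly 8 bits of precision provided by the raw VRECPE
  // estimate, nudged by the bias above, are already enough; the wider
  // divisions below need one or two Newton-Raphson refinement steps on top.)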
7194 X = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, X, Y);
7195 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, X);
7196 Y = DAG.getConstant(0xb000, dl, MVT::v4i32);
7197 X = DAG.getNode(ISD::ADD, dl, MVT::v4i32, X, Y);
7198 X = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, X);
7199 // Convert back to short.
7200 X = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, X);
7201 X = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, X);
7205 static SDValue LowerSDIV_v4i16(SDValue N0, SDValue N1, const SDLoc &dl,
7206 SelectionDAG &DAG) {
7207 // TODO: Should this propagate fast-math-flags?
7210 // Convert to float.
7211 // float4 yf = vcvt_f32_s32(vmovl_s16(y));
7212 // float4 xf = vcvt_f32_s32(vmovl_s16(x));
7213 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N0);
7214 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, N1);
7215 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
7216 N1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
7218 // Use reciprocal estimate and one refinement step.
7219 // float4 recip = vrecpeq_f32(yf);
7220 // recip *= vrecpsq_f32(yf, recip);
7221 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7222 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7224 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7225 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7227 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7228 // Because short has a smaller range than ushort, we can actually get away
7229   // with only a single Newton step. This requires that we use a weird bias
7230   // of 0x89, however (again, this has been exhaustively tested).
7231 // float4 result = as_float4(as_int4(xf*recip) + 0x89);
7232 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
7233 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
7234 N1 = DAG.getConstant(0x89, dl, MVT::v4i32);
7235 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
7236 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
7237 // Convert back to integer and return.
7238 // return vmovn_s32(vcvt_s32_f32(result));
7239 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
7240 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
7244 static SDValue LowerSDIV(SDValue Op, SelectionDAG &DAG) {
7245 EVT VT = Op.getValueType();
7246 assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
7247 "unexpected type for custom-lowering ISD::SDIV");
7250 SDValue N0 = Op.getOperand(0);
7251 SDValue N1 = Op.getOperand(1);
7254 if (VT == MVT::v8i8) {
7255 N0 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N0);
7256 N1 = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v8i16, N1);
7258 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7259 DAG.getIntPtrConstant(4, dl));
7260 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7261 DAG.getIntPtrConstant(4, dl));
7262 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7263 DAG.getIntPtrConstant(0, dl));
7264 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7265 DAG.getIntPtrConstant(0, dl));
7267 N0 = LowerSDIV_v4i8(N0, N1, dl, DAG); // v4i16
7268 N2 = LowerSDIV_v4i8(N2, N3, dl, DAG); // v4i16
7270 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
7271 N0 = LowerCONCAT_VECTORS(N0, DAG);
7273 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v8i8, N0);
7276 return LowerSDIV_v4i16(N0, N1, dl, DAG);
7279 static SDValue LowerUDIV(SDValue Op, SelectionDAG &DAG) {
7280 // TODO: Should this propagate fast-math-flags?
7281 EVT VT = Op.getValueType();
7282 assert((VT == MVT::v4i16 || VT == MVT::v8i8) &&
7283 "unexpected type for custom-lowering ISD::UDIV");
7286 SDValue N0 = Op.getOperand(0);
7287 SDValue N1 = Op.getOperand(1);
7290 if (VT == MVT::v8i8) {
7291 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N0);
7292 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v8i16, N1);
7294 N2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7295 DAG.getIntPtrConstant(4, dl));
7296 N3 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7297 DAG.getIntPtrConstant(4, dl));
7298 N0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N0,
7299 DAG.getIntPtrConstant(0, dl));
7300 N1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v4i16, N1,
7301 DAG.getIntPtrConstant(0, dl));
7303 N0 = LowerSDIV_v4i16(N0, N1, dl, DAG); // v4i16
7304 N2 = LowerSDIV_v4i16(N2, N3, dl, DAG); // v4i16
7306 N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, N0, N2);
7307 N0 = LowerCONCAT_VECTORS(N0, DAG);
7309 N0 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v8i8,
7310 DAG.getConstant(Intrinsic::arm_neon_vqmovnsu, dl,
7316 // v4i16 sdiv ... Convert to float.
7317 // float4 yf = vcvt_f32_s32(vmovl_u16(y));
7318 // float4 xf = vcvt_f32_s32(vmovl_u16(x));
7319 N0 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N0);
7320 N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v4i32, N1);
7321 N0 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N0);
7322 SDValue BN1 = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::v4f32, N1);
7324 // Use reciprocal estimate and two refinement steps.
7325 // float4 recip = vrecpeq_f32(yf);
7326 // recip *= vrecpsq_f32(yf, recip);
7327 // recip *= vrecpsq_f32(yf, recip);
7328 N2 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7329 DAG.getConstant(Intrinsic::arm_neon_vrecpe, dl, MVT::i32),
7331 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7332 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7334 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7335 N1 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::v4f32,
7336 DAG.getConstant(Intrinsic::arm_neon_vrecps, dl, MVT::i32),
7338 N2 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N1, N2);
7339 // Simply multiplying by the reciprocal estimate can leave us a few ulps
7340 // too low, so we add 2 ulps (exhaustive testing shows that this is enough,
7341 // and that it will never cause us to return an answer too large).
7342 // float4 result = as_float4(as_int4(xf*recip) + 2);
7343 N0 = DAG.getNode(ISD::FMUL, dl, MVT::v4f32, N0, N2);
7344 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, N0);
7345 N1 = DAG.getConstant(2, dl, MVT::v4i32);
7346 N0 = DAG.getNode(ISD::ADD, dl, MVT::v4i32, N0, N1);
7347 N0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, N0);
7348 // Convert back to integer and return.
7349 // return vmovn_u32(vcvt_s32_f32(result));
7350 N0 = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::v4i32, N0);
7351 N0 = DAG.getNode(ISD::TRUNCATE, dl, MVT::v4i16, N0);
7355 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
7356 EVT VT = Op.getNode()->getValueType(0);
7357 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
7360 bool ExtraOp = false;
7361   switch (Op.getOpcode()) {
7362   default: llvm_unreachable("Invalid code");
7363   case ISD::ADDC: Opc = ARMISD::ADDC; break;
7364   case ISD::ADDE: Opc = ARMISD::ADDE; ExtraOp = true; break;
7365   case ISD::SUBC: Opc = ARMISD::SUBC; break;
7366   case ISD::SUBE: Opc = ARMISD::SUBE; ExtraOp = true; break;
7367   }
7369   if (!ExtraOp)
7370     return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
7371                        Op.getOperand(1));
7372 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
7373 Op.getOperand(1), Op.getOperand(2));
7376 SDValue ARMTargetLowering::LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const {
7377 assert(Subtarget->isTargetDarwin());
7379 // For iOS, we want to call an alternative entry point: __sincos_stret,
7380   // whose return values are passed via sret.
7382 SDValue Arg = Op.getOperand(0);
7383 EVT ArgVT = Arg.getValueType();
7384 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
7385 auto PtrVT = getPointerTy(DAG.getDataLayout());
7387 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
7388 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
7390 // Pair of floats / doubles used to pass the result.
7391 Type *RetTy = StructType::get(ArgTy, ArgTy, nullptr);
7392 auto &DL = DAG.getDataLayout();
7395 bool ShouldUseSRet = Subtarget->isAPCS_ABI();
7397 if (ShouldUseSRet) {
7398 // Create stack object for sret.
7399 const uint64_t ByteSize = DL.getTypeAllocSize(RetTy);
7400 const unsigned StackAlign = DL.getPrefTypeAlignment(RetTy);
7401 int FrameIdx = MFI.CreateStackObject(ByteSize, StackAlign, false);
7402 SRet = DAG.getFrameIndex(FrameIdx, TLI.getPointerTy(DL));
7406 Entry.Ty = RetTy->getPointerTo();
7407 Entry.isSExt = false;
7408 Entry.isZExt = false;
7409 Entry.isSRet = true;
7410 Args.push_back(Entry);
7411 RetTy = Type::getVoidTy(*DAG.getContext());
7417 Entry.isSExt = false;
7418 Entry.isZExt = false;
7419 Args.push_back(Entry);
7421 const char *LibcallName =
7422 (ArgVT == MVT::f64) ? "__sincos_stret" : "__sincosf_stret";
7424 (ArgVT == MVT::f64) ? RTLIB::SINCOS_F64 : RTLIB::SINCOS_F32;
7425 CallingConv::ID CC = getLibcallCallingConv(LC);
7426 SDValue Callee = DAG.getExternalSymbol(LibcallName, getPointerTy(DL));
7428 TargetLowering::CallLoweringInfo CLI(DAG);
7430 .setChain(DAG.getEntryNode())
7431 .setCallee(CC, RetTy, Callee, std::move(Args))
7432 .setDiscardResult(ShouldUseSRet);
7433 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
7436 return CallResult.first;
7439 DAG.getLoad(ArgVT, dl, CallResult.second, SRet, MachinePointerInfo());
7441 // Address of cos field.
7442 SDValue Add = DAG.getNode(ISD::ADD, dl, PtrVT, SRet,
7443 DAG.getIntPtrConstant(ArgVT.getStoreSize(), dl));
7445 DAG.getLoad(ArgVT, dl, LoadSin.getValue(1), Add, MachinePointerInfo());
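  // With the sret convention used here, the struct returned by __sincos_stret
  // is laid out as { sin, cos }: the sin value sits at offset 0 of SRet and
  // the cos value follows at ArgVT.getStoreSize() bytes, which is what the two
  // loads above rely on.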
7447 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
7448 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys,
7449 LoadSin.getValue(0), LoadCos.getValue(0));
7452 SDValue ARMTargetLowering::LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG,
7454 SDValue &Chain) const {
7455 EVT VT = Op.getValueType();
7456 assert((VT == MVT::i32 || VT == MVT::i64) &&
7457 "unexpected type for custom lowering DIV");
7460 const auto &DL = DAG.getDataLayout();
7461 const auto &TLI = DAG.getTargetLoweringInfo();
7463 const char *Name = nullptr;
7465 Name = (VT == MVT::i32) ? "__rt_sdiv" : "__rt_sdiv64";
7467 Name = (VT == MVT::i32) ? "__rt_udiv" : "__rt_udiv64";
7469 SDValue ES = DAG.getExternalSymbol(Name, TLI.getPointerTy(DL));
7471 ARMTargetLowering::ArgListTy Args;
7473 for (auto AI : {1, 0}) {
7475 Arg.Node = Op.getOperand(AI);
7476 Arg.Ty = Arg.Node.getValueType().getTypeForEVT(*DAG.getContext());
7477 Args.push_back(Arg);
7480 CallLoweringInfo CLI(DAG);
7483 .setCallee(CallingConv::ARM_AAPCS_VFP, VT.getTypeForEVT(*DAG.getContext()),
7484 ES, std::move(Args));
7486 return LowerCallTo(CLI).first;
7489 SDValue ARMTargetLowering::LowerDIV_Windows(SDValue Op, SelectionDAG &DAG,
7490 bool Signed) const {
7491 assert(Op.getValueType() == MVT::i32 &&
7492 "unexpected type for custom lowering DIV");
7495 SDValue DBZCHK = DAG.getNode(ARMISD::WIN__DBZCHK, dl, MVT::Other,
7496 DAG.getEntryNode(), Op.getOperand(1));
7498 return LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
7501 static SDValue WinDBZCheckDenominator(SelectionDAG &DAG, SDNode *N, SDValue InChain) {
7503 SDValue Op = N->getOperand(1);
7504 if (N->getValueType(0) == MVT::i32)
7505 return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain, Op);
7506 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
7507 DAG.getConstant(0, DL, MVT::i32));
7508 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Op,
7509 DAG.getConstant(1, DL, MVT::i32));
7510 return DAG.getNode(ARMISD::WIN__DBZCHK, DL, MVT::Other, InChain,
7511 DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi));
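// For an i64 denominator the two 32-bit halves are ORed together first: the
// combined value is zero exactly when the full 64-bit divisor is zero, so a
// single WIN__DBZCHK on the OR result is sufficient.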
7514 void ARMTargetLowering::ExpandDIV_Windows(
7515 SDValue Op, SelectionDAG &DAG, bool Signed,
7516 SmallVectorImpl<SDValue> &Results) const {
7517 const auto &DL = DAG.getDataLayout();
7518 const auto &TLI = DAG.getTargetLoweringInfo();
7520 assert(Op.getValueType() == MVT::i64 &&
7521 "unexpected type for custom lowering DIV");
7524 SDValue DBZCHK = WinDBZCheckDenominator(DAG, Op.getNode(), DAG.getEntryNode());
7526 SDValue Result = LowerWindowsDIVLibCall(Op, DAG, Signed, DBZCHK);
7528 SDValue Lower = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Result);
7529 SDValue Upper = DAG.getNode(ISD::SRL, dl, MVT::i64, Result,
7530 DAG.getConstant(32, dl, TLI.getPointerTy(DL)));
7531 Upper = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Upper);
7533 Results.push_back(Lower);
7534 Results.push_back(Upper);
7537 static SDValue LowerAtomicLoadStore(SDValue Op, SelectionDAG &DAG) {
7538 if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
7539 // Acquire/Release load/store is not legal for targets without a dmb or
7540 // equivalent available.
7543 // Monotonic load/store is legal for all targets.
7547 static void ReplaceREADCYCLECOUNTER(SDNode *N,
7548 SmallVectorImpl<SDValue> &Results,
7550 const ARMSubtarget *Subtarget) {
7552 // Under Power Management extensions, the cycle-count is:
7553 // mrc p15, #0, <Rt>, c9, c13, #0
7554 SDValue Ops[] = { N->getOperand(0), // Chain
7555 DAG.getConstant(Intrinsic::arm_mrc, DL, MVT::i32),
7556 DAG.getConstant(15, DL, MVT::i32),
7557 DAG.getConstant(0, DL, MVT::i32),
7558 DAG.getConstant(9, DL, MVT::i32),
7559 DAG.getConstant(13, DL, MVT::i32),
7560 DAG.getConstant(0, DL, MVT::i32)
7563 SDValue Cycles32 = DAG.getNode(ISD::INTRINSIC_W_CHAIN, DL,
7564 DAG.getVTList(MVT::i32, MVT::Other), Ops);
7565 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Cycles32,
7566 DAG.getConstant(0, DL, MVT::i32)));
7567 Results.push_back(Cycles32.getValue(1));
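  // Only the 32-bit cycle counter (PMCCNTR) is read here, so the i64 result is
  // formed as BUILD_PAIR(Cycles32, 0); the high word is simply zero rather
  // than a true 64-bit cycle count.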
7570 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) {
7571 SDLoc dl(V.getNode());
7572 SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i32);
7573 SDValue VHi = DAG.getAnyExtOrTrunc(
7574 DAG.getNode(ISD::SRL, dl, MVT::i64, V, DAG.getConstant(32, dl, MVT::i32)),
7577 DAG.getTargetConstant(ARM::GPRPairRegClassID, dl, MVT::i32);
7578 SDValue SubReg0 = DAG.getTargetConstant(ARM::gsub_0, dl, MVT::i32);
7579 SDValue SubReg1 = DAG.getTargetConstant(ARM::gsub_1, dl, MVT::i32);
7580 const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 };
7582 DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0);
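// For example, an i64 value V is split into VLo/VHi above and glued into a
// GPRPair with gsub_0 holding the low word and gsub_1 the high word; the
// CMP_SWAP_64 pseudo below consumes its operands in this paired form.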
7585 static void ReplaceCMP_SWAP_64Results(SDNode *N,
7586 SmallVectorImpl<SDValue> & Results,
7587 SelectionDAG &DAG) {
7588 assert(N->getValueType(0) == MVT::i64 &&
7589 "AtomicCmpSwap on types less than 64 should be legal");
7590 SDValue Ops[] = {N->getOperand(1),
7591 createGPRPairNode(DAG, N->getOperand(2)),
7592 createGPRPairNode(DAG, N->getOperand(3)),
7594 SDNode *CmpSwap = DAG.getMachineNode(
7595 ARM::CMP_SWAP_64, SDLoc(N),
7596 DAG.getVTList(MVT::Untyped, MVT::i32, MVT::Other), Ops);
7598 MachineFunction &MF = DAG.getMachineFunction();
7599 MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1);
7600 MemOp[0] = cast<MemSDNode>(N)->getMemOperand();
7601 cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1);
7603 Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_0, SDLoc(N), MVT::i32,
7604 SDValue(CmpSwap, 0)));
7605 Results.push_back(DAG.getTargetExtractSubreg(ARM::gsub_1, SDLoc(N), MVT::i32,
7606 SDValue(CmpSwap, 0)));
7607 Results.push_back(SDValue(CmpSwap, 2));
7610 static SDValue LowerFPOWI(SDValue Op, const ARMSubtarget &Subtarget,
7611 SelectionDAG &DAG) {
7612 const auto &TLI = DAG.getTargetLoweringInfo();
7614 assert(Subtarget.getTargetTriple().isOSMSVCRT() &&
7615 "Custom lowering is MSVCRT specific!");
7618 SDValue Val = Op.getOperand(0);
7619 MVT Ty = Val->getSimpleValueType(0);
7620 SDValue Exponent = DAG.getNode(ISD::SINT_TO_FP, dl, Ty, Op.getOperand(1));
7621 SDValue Callee = DAG.getExternalSymbol(Ty == MVT::f32 ? "powf" : "pow",
7622 TLI.getPointerTy(DAG.getDataLayout()));
7624 TargetLowering::ArgListTy Args;
7625 TargetLowering::ArgListEntry Entry;
7628 Entry.Ty = Val.getValueType().getTypeForEVT(*DAG.getContext());
7629 Entry.isZExt = true;
7630 Args.push_back(Entry);
7632 Entry.Node = Exponent;
7633 Entry.Ty = Exponent.getValueType().getTypeForEVT(*DAG.getContext());
7634 Entry.isZExt = true;
7635 Args.push_back(Entry);
7637 Type *LCRTy = Val.getValueType().getTypeForEVT(*DAG.getContext());
7639   // The in-chain to the call is the entry node. If we are emitting a
7640   // tail call, the chain will be mutated if the node has a non-entry input
7641   // chain.
7642 SDValue InChain = DAG.getEntryNode();
7643 SDValue TCChain = InChain;
7645 const auto *F = DAG.getMachineFunction().getFunction();
7646 bool IsTC = TLI.isInTailCallPosition(DAG, Op.getNode(), TCChain) &&
7647 F->getReturnType() == LCRTy;
7651 TargetLowering::CallLoweringInfo CLI(DAG);
7654 .setCallee(CallingConv::ARM_AAPCS_VFP, LCRTy, Callee, std::move(Args))
7656 std::pair<SDValue, SDValue> CI = TLI.LowerCallTo(CLI);
7658 // Return the chain (the DAG root) if it is a tail call
7659 return !CI.second.getNode() ? DAG.getRoot() : CI.first;
7662 SDValue ARMTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
7663 switch (Op.getOpcode()) {
7664 default: llvm_unreachable("Don't know how to custom lower this!");
7665 case ISD::WRITE_REGISTER: return LowerWRITE_REGISTER(Op, DAG);
7666 case ISD::ConstantPool:
7667 if (Subtarget->genExecuteOnly())
7668 llvm_unreachable("execute-only should not generate constant pools");
7669 return LowerConstantPool(Op, DAG);
7670 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
7671 case ISD::GlobalAddress:
7672 switch (Subtarget->getTargetTriple().getObjectFormat()) {
7673 default: llvm_unreachable("unknown object format");
7675 return LowerGlobalAddressWindows(Op, DAG);
7677 return LowerGlobalAddressELF(Op, DAG);
7679 return LowerGlobalAddressDarwin(Op, DAG);
7681 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
7682 case ISD::SELECT: return LowerSELECT(Op, DAG);
7683 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
7684 case ISD::BR_CC: return LowerBR_CC(Op, DAG);
7685 case ISD::BR_JT: return LowerBR_JT(Op, DAG);
7686 case ISD::VASTART: return LowerVASTART(Op, DAG);
7687 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG, Subtarget);
7688 case ISD::PREFETCH: return LowerPREFETCH(Op, DAG, Subtarget);
7689 case ISD::SINT_TO_FP:
7690 case ISD::UINT_TO_FP: return LowerINT_TO_FP(Op, DAG);
7691 case ISD::FP_TO_SINT:
7692 case ISD::FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
7693 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
7694 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
7695 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
7696 case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG);
7697 case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG);
7698 case ISD::EH_SJLJ_SETUP_DISPATCH: return LowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
7699 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG,
7701 case ISD::BITCAST: return ExpandBITCAST(Op.getNode(), DAG);
7704 case ISD::SRA: return LowerShift(Op.getNode(), DAG, Subtarget);
7705 case ISD::SREM: return LowerREM(Op.getNode(), DAG);
7706 case ISD::UREM: return LowerREM(Op.getNode(), DAG);
7707 case ISD::SHL_PARTS: return LowerShiftLeftParts(Op, DAG);
7708 case ISD::SRL_PARTS:
7709 case ISD::SRA_PARTS: return LowerShiftRightParts(Op, DAG);
7711 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op.getNode(), DAG, Subtarget);
7712 case ISD::CTPOP: return LowerCTPOP(Op.getNode(), DAG, Subtarget);
7713 case ISD::SETCC: return LowerVSETCC(Op, DAG);
7714 case ISD::SETCCE: return LowerSETCCE(Op, DAG);
7715 case ISD::ConstantFP: return LowerConstantFP(Op, DAG, Subtarget);
7716 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG, Subtarget);
7717 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
7718 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
7719 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
7720 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
7721 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
7722 case ISD::MUL: return LowerMUL(Op, DAG);
7723   case ISD::SDIV:
7724     if (Subtarget->isTargetWindows())
7725 return LowerDIV_Windows(Op, DAG, /* Signed */ true);
7726 return LowerSDIV(Op, DAG);
7727   case ISD::UDIV:
7728     if (Subtarget->isTargetWindows())
7729 return LowerDIV_Windows(Op, DAG, /* Signed */ false);
7730 return LowerUDIV(Op, DAG);
7734 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
7739 return LowerXALUO(Op, DAG);
7740 case ISD::ATOMIC_LOAD:
7741 case ISD::ATOMIC_STORE: return LowerAtomicLoadStore(Op, DAG);
7742 case ISD::FSINCOS: return LowerFSINCOS(Op, DAG);
7743   case ISD::SDIVREM:
7744   case ISD::UDIVREM:       return LowerDivRem(Op, DAG);
7745 case ISD::DYNAMIC_STACKALLOC:
7746 if (Subtarget->getTargetTriple().isWindowsItaniumEnvironment())
7747 return LowerDYNAMIC_STACKALLOC(Op, DAG);
7748 llvm_unreachable("Don't know how to custom lower this!");
7749 case ISD::FP_ROUND: return LowerFP_ROUND(Op, DAG);
7750 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
7751 case ISD::FPOWI: return LowerFPOWI(Op, *Subtarget, DAG);
7752 case ARMISD::WIN__DBZCHK: return SDValue();
7756 /// ReplaceNodeResults - Replace the results of node with an illegal result
7757 /// type with new values built out of custom code.
7758 void ARMTargetLowering::ReplaceNodeResults(SDNode *N,
7759 SmallVectorImpl<SDValue> &Results,
7760 SelectionDAG &DAG) const {
7762 switch (N->getOpcode()) {
7764 llvm_unreachable("Don't know how to custom expand this!");
7765 case ISD::READ_REGISTER:
7766 ExpandREAD_REGISTER(N, Results, DAG);
7769 Res = ExpandBITCAST(N, DAG);
7773 Res = Expand64BitShift(N, DAG, Subtarget);
7777 Res = LowerREM(N, DAG);
7781 Res = LowerDivRem(SDValue(N, 0), DAG);
7782 assert(Res.getNumOperands() == 2 && "DivRem needs two values");
7783 Results.push_back(Res.getValue(0));
7784 Results.push_back(Res.getValue(1));
7786 case ISD::READCYCLECOUNTER:
7787 ReplaceREADCYCLECOUNTER(N, Results, DAG, Subtarget);
7791 assert(Subtarget->isTargetWindows() && "can only expand DIV on Windows");
7792 return ExpandDIV_Windows(SDValue(N, 0), DAG, N->getOpcode() == ISD::SDIV,
7794 case ISD::ATOMIC_CMP_SWAP:
7795 ReplaceCMP_SWAP_64Results(N, Results, DAG);
7799 Results.push_back(Res);
7802 //===----------------------------------------------------------------------===//
7803 // ARM Scheduler Hooks
7804 //===----------------------------------------------------------------------===//
7806 /// SetupEntryBlockForSjLj - Insert code into the entry block that creates and
7807 /// registers the function context.
7808 void ARMTargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
7809 MachineBasicBlock *MBB,
7810 MachineBasicBlock *DispatchBB,
7812 assert(!Subtarget->isROPI() && !Subtarget->isRWPI() &&
7813 "ROPI/RWPI not currently supported with SjLj");
7814 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
7815 DebugLoc dl = MI.getDebugLoc();
7816 MachineFunction *MF = MBB->getParent();
7817 MachineRegisterInfo *MRI = &MF->getRegInfo();
7818 MachineConstantPool *MCP = MF->getConstantPool();
7819 ARMFunctionInfo *AFI = MF->getInfo<ARMFunctionInfo>();
7820 const Function *F = MF->getFunction();
7822 bool isThumb = Subtarget->isThumb();
7823 bool isThumb2 = Subtarget->isThumb2();
7825 unsigned PCLabelId = AFI->createPICLabelUId();
7826 unsigned PCAdj = (isThumb || isThumb2) ? 4 : 8;
7827 ARMConstantPoolValue *CPV =
7828 ARMConstantPoolMBB::Create(F->getContext(), DispatchBB, PCLabelId, PCAdj);
7829 unsigned CPI = MCP->getConstantPoolIndex(CPV, 4);
7831 const TargetRegisterClass *TRC = isThumb ? &ARM::tGPRRegClass
7832 : &ARM::GPRRegClass;
7834 // Grab constant pool and fixed stack memory operands.
7835 MachineMemOperand *CPMMO =
7836 MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
7837 MachineMemOperand::MOLoad, 4, 4);
7839 MachineMemOperand *FIMMOSt =
7840 MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
7841 MachineMemOperand::MOStore, 4, 4);
7843   // Load the address of the dispatch MBB into the jump buffer.
7844   if (isThumb2) {
7845     // Incoming value: jbuf
7846     //   ldr.n  r5, LCPI1_1
7847     //   orr    r5, r5, #1
7848     //   add    r5, pc
7849     //   str    r5, [$jbuf, #+4] ; &jbuf[1]
7850 unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
7851 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2LDRpci), NewVReg1)
7852 .addConstantPoolIndex(CPI)
7853 .addMemOperand(CPMMO));
7854 // Set the low bit because of thumb mode.
7855 unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
7857 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2ORRri), NewVReg2)
7858 .addReg(NewVReg1, RegState::Kill)
7860 unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
7861 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg3)
7862 .addReg(NewVReg2, RegState::Kill)
7864 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::t2STRi12))
7865 .addReg(NewVReg3, RegState::Kill)
7867 .addImm(36) // &jbuf[1] :: pc
7868 .addMemOperand(FIMMOSt));
7869 } else if (isThumb) {
7870 // Incoming value: jbuf
7871 // ldr.n r1, LCPI1_4
7875 // add r2, $jbuf, #+4 ; &jbuf[1]
7877 unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
7878 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tLDRpci), NewVReg1)
7879 .addConstantPoolIndex(CPI)
7880 .addMemOperand(CPMMO));
7881 unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
7882 BuildMI(*MBB, MI, dl, TII->get(ARM::tPICADD), NewVReg2)
7883 .addReg(NewVReg1, RegState::Kill)
7885 // Set the low bit because of thumb mode.
7886 unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
7887 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tMOVi8), NewVReg3)
7888 .addReg(ARM::CPSR, RegState::Define)
7890 unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
7891 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tORR), NewVReg4)
7892 .addReg(ARM::CPSR, RegState::Define)
7893 .addReg(NewVReg2, RegState::Kill)
7894 .addReg(NewVReg3, RegState::Kill));
7895 unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
7896 BuildMI(*MBB, MI, dl, TII->get(ARM::tADDframe), NewVReg5)
7898 .addImm(36); // &jbuf[1] :: pc
7899 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::tSTRi))
7900 .addReg(NewVReg4, RegState::Kill)
7901 .addReg(NewVReg5, RegState::Kill)
7903 .addMemOperand(FIMMOSt));
7905 // Incoming value: jbuf
7908 // str r1, [$jbuf, #+4] ; &jbuf[1]
7909 unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
7910 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::LDRi12), NewVReg1)
7911 .addConstantPoolIndex(CPI)
7913 .addMemOperand(CPMMO));
7914 unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
7915 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::PICADD), NewVReg2)
7916 .addReg(NewVReg1, RegState::Kill)
7917 .addImm(PCLabelId));
7918 AddDefaultPred(BuildMI(*MBB, MI, dl, TII->get(ARM::STRi12))
7919 .addReg(NewVReg2, RegState::Kill)
7921 .addImm(36) // &jbuf[1] :: pc
7922 .addMemOperand(FIMMOSt));
7926 void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
7927 MachineBasicBlock *MBB) const {
7928 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
7929 DebugLoc dl = MI.getDebugLoc();
7930 MachineFunction *MF = MBB->getParent();
7931 MachineRegisterInfo *MRI = &MF->getRegInfo();
7932 MachineFrameInfo &MFI = MF->getFrameInfo();
7933 int FI = MFI.getFunctionContextIndex();
7935 const TargetRegisterClass *TRC = Subtarget->isThumb() ? &ARM::tGPRRegClass
7936 : &ARM::GPRnopcRegClass;
7938 // Get a mapping of the call site numbers to all of the landing pads they're associated with.
7940 DenseMap<unsigned, SmallVector<MachineBasicBlock*, 2> > CallSiteNumToLPad;
7941 unsigned MaxCSNum = 0;
7942 for (MachineFunction::iterator BB = MF->begin(), E = MF->end(); BB != E;
7944 if (!BB->isEHPad()) continue;
7946 // FIXME: We should assert that the EH_LABEL is the first MI in the landing pad.
7948 for (MachineBasicBlock::iterator
7949 II = BB->begin(), IE = BB->end(); II != IE; ++II) {
7950 if (!II->isEHLabel()) continue;
7952 MCSymbol *Sym = II->getOperand(0).getMCSymbol();
7953 if (!MF->hasCallSiteLandingPad(Sym)) continue;
7955 SmallVectorImpl<unsigned> &CallSiteIdxs = MF->getCallSiteLandingPad(Sym);
7956 for (SmallVectorImpl<unsigned>::iterator
7957 CSI = CallSiteIdxs.begin(), CSE = CallSiteIdxs.end();
7958 CSI != CSE; ++CSI) {
7959 CallSiteNumToLPad[*CSI].push_back(&*BB);
7960 MaxCSNum = std::max(MaxCSNum, *CSI);
7966 // Get an ordered list of the machine basic blocks for the jump table.
7967 std::vector<MachineBasicBlock*> LPadList;
7968 SmallPtrSet<MachineBasicBlock*, 32> InvokeBBs;
7969 LPadList.reserve(CallSiteNumToLPad.size());
7970 for (unsigned I = 1; I <= MaxCSNum; ++I) {
7971 SmallVectorImpl<MachineBasicBlock*> &MBBList = CallSiteNumToLPad[I];
7972 for (SmallVectorImpl<MachineBasicBlock*>::iterator
7973 II = MBBList.begin(), IE = MBBList.end(); II != IE; ++II) {
7974 LPadList.push_back(*II);
7975 InvokeBBs.insert((*II)->pred_begin(), (*II)->pred_end());
7979 assert(!LPadList.empty() &&
7980 "No landing pad destinations for the dispatch jump table!");
7982 // Create the jump table and associated information.
7983 MachineJumpTableInfo *JTI =
7984 MF->getOrCreateJumpTableInfo(MachineJumpTableInfo::EK_Inline);
7985 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
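// (LPadList was built in call-site order above; the dispatch code below
//  reloads the call-site value from the function context, range-checks it
//  against the number of landing pads, and uses it to index this table.)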
7987 // Create the MBBs for the dispatch code.
7989 // Shove the dispatch's address into the return slot in the function context.
7990 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
7991 DispatchBB->setIsEHPad();
7993 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
7994 unsigned trap_opcode;
7995 if (Subtarget->isThumb())
7996 trap_opcode = ARM::tTRAP;
7998 trap_opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
8000 BuildMI(TrapBB, dl, TII->get(trap_opcode));
8001 DispatchBB->addSuccessor(TrapBB);
8003 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
8004 DispatchBB->addSuccessor(DispContBB);
8007 MF->insert(MF->end(), DispatchBB);
8008 MF->insert(MF->end(), DispContBB);
8009 MF->insert(MF->end(), TrapBB);
8011 // Insert code into the entry block that creates and registers the function
8013 SetupEntryBlockForSjLj(MI, MBB, DispatchBB, FI);
8015 MachineMemOperand *FIMMOLd = MF->getMachineMemOperand(
8016 MachinePointerInfo::getFixedStack(*MF, FI),
8017 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile, 4, 4);
8019 MachineInstrBuilder MIB;
8020 MIB = BuildMI(DispatchBB, dl, TII->get(ARM::Int_eh_sjlj_dispatchsetup));
8022 const ARMBaseInstrInfo *AII = static_cast<const ARMBaseInstrInfo*>(TII);
8023 const ARMBaseRegisterInfo &RI = AII->getRegisterInfo();
8025 // Add a register mask with no preserved registers. This results in all
8026 // registers being marked as clobbered. This can't work if the dispatch block
8027 // is in a Thumb1 function and is linked with ARM code which uses the FP
8028 // registers, as there is no way to preserve the FP registers in Thumb1 mode.
8029 MIB.addRegMask(RI.getSjLjDispatchPreservedMask(*MF));
8031 bool IsPositionIndependent = isPositionIndependent();
8032 unsigned NumLPads = LPadList.size();
8033 if (Subtarget->isThumb2()) {
8034 unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
8035 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2LDRi12), NewVReg1)
8038 .addMemOperand(FIMMOLd));
8040 if (NumLPads < 256) {
8041 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPri))
8043 .addImm(LPadList.size()));
8045 unsigned VReg1 = MRI->createVirtualRegister(TRC);
8046 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVi16), VReg1)
8047 .addImm(NumLPads & 0xFFFF));
8049 unsigned VReg2 = VReg1;
8050 if ((NumLPads & 0xFFFF0000) != 0) {
8051 VReg2 = MRI->createVirtualRegister(TRC);
8052 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2MOVTi16), VReg2)
8054 .addImm(NumLPads >> 16));
8057 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::t2CMPrr))
8062 BuildMI(DispatchBB, dl, TII->get(ARM::t2Bcc))
8067 unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
8068 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::t2LEApcrelJT),NewVReg3)
8069 .addJumpTableIndex(MJTI));
8071 unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
8074 BuildMI(DispContBB, dl, TII->get(ARM::t2ADDrs), NewVReg4)
8075 .addReg(NewVReg3, RegState::Kill)
8077 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))));
8079 BuildMI(DispContBB, dl, TII->get(ARM::t2BR_JT))
8080 .addReg(NewVReg4, RegState::Kill)
8082 .addJumpTableIndex(MJTI);
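// (Illustrative sketch of the Thumb2 dispatch emitted above; register names
//  are placeholders, not the actual virtual registers:
//    ldr   rA, [ctx]         ; reload the call-site value from the stack slot
//    cmp   rA, #NumLPads     ; range check (movw/movt-materialized if large)
//    b<cc> TrapBB            ; out-of-range call-site values trap
//    adr   rB, .LJTI         ; t2LEApcrelJT
//    add   rB, rB, rA, lsl #2
//    <t2BR_JT through the jump table>)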
8083 } else if (Subtarget->isThumb()) {
8084 unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
8085 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRspi), NewVReg1)
8088 .addMemOperand(FIMMOLd));
8090 if (NumLPads < 256) {
8091 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPi8))
8095 MachineConstantPool *ConstantPool = MF->getConstantPool();
8096 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
8097 const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
8099 // MachineConstantPool wants an explicit alignment.
8100 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8102 Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8103 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8105 unsigned VReg1 = MRI->createVirtualRegister(TRC);
8106 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tLDRpci))
8107 .addReg(VReg1, RegState::Define)
8108 .addConstantPoolIndex(Idx));
8109 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::tCMPr))
8114 BuildMI(DispatchBB, dl, TII->get(ARM::tBcc))
8119 unsigned NewVReg2 = MRI->createVirtualRegister(TRC);
8120 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLSLri), NewVReg2)
8121 .addReg(ARM::CPSR, RegState::Define)
8125 unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
8126 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLEApcrelJT), NewVReg3)
8127 .addJumpTableIndex(MJTI));
8129 unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
8130 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg4)
8131 .addReg(ARM::CPSR, RegState::Define)
8132 .addReg(NewVReg2, RegState::Kill)
8135 MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
8136 MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);
8138 unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
8139 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tLDRi), NewVReg5)
8140 .addReg(NewVReg4, RegState::Kill)
8142 .addMemOperand(JTMMOLd));
8144 unsigned NewVReg6 = NewVReg5;
8145 if (IsPositionIndependent) {
8146 NewVReg6 = MRI->createVirtualRegister(TRC);
8147 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::tADDrr), NewVReg6)
8148 .addReg(ARM::CPSR, RegState::Define)
8149 .addReg(NewVReg5, RegState::Kill)
8153 BuildMI(DispContBB, dl, TII->get(ARM::tBR_JTr))
8154 .addReg(NewVReg6, RegState::Kill)
8155 .addJumpTableIndex(MJTI);
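// (Thumb1 has no reg+shifted-reg addressing, so the index is scaled with an
//  explicit tLSLri, added to the table address with tADDrr, and, for PIC, the
//  loaded entry gets one more tADDrr before the tBR_JTr above.)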
8157 unsigned NewVReg1 = MRI->createVirtualRegister(TRC);
8158 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRi12), NewVReg1)
8161 .addMemOperand(FIMMOLd));
8163 if (NumLPads < 256) {
8164 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPri))
8167 } else if (Subtarget->hasV6T2Ops() && isUInt<16>(NumLPads)) {
8168 unsigned VReg1 = MRI->createVirtualRegister(TRC);
8169 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVi16), VReg1)
8170 .addImm(NumLPads & 0xFFFF));
8172 unsigned VReg2 = VReg1;
8173 if ((NumLPads & 0xFFFF0000) != 0) {
8174 VReg2 = MRI->createVirtualRegister(TRC);
8175 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::MOVTi16), VReg2)
8177 .addImm(NumLPads >> 16));
8180 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
8184 MachineConstantPool *ConstantPool = MF->getConstantPool();
8185 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
8186 const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
8188 // MachineConstantPool wants an explicit alignment.
8189 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8191 Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8192 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8194 unsigned VReg1 = MRI->createVirtualRegister(TRC);
8195 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::LDRcp))
8196 .addReg(VReg1, RegState::Define)
8197 .addConstantPoolIndex(Idx)
8199 AddDefaultPred(BuildMI(DispatchBB, dl, TII->get(ARM::CMPrr))
8201 .addReg(VReg1, RegState::Kill));
8204 BuildMI(DispatchBB, dl, TII->get(ARM::Bcc))
8209 unsigned NewVReg3 = MRI->createVirtualRegister(TRC);
8211 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::MOVsi), NewVReg3)
8213 .addImm(ARM_AM::getSORegOpc(ARM_AM::lsl, 2))));
8214 unsigned NewVReg4 = MRI->createVirtualRegister(TRC);
8215 AddDefaultPred(BuildMI(DispContBB, dl, TII->get(ARM::LEApcrelJT), NewVReg4)
8216 .addJumpTableIndex(MJTI));
8218 MachineMemOperand *JTMMOLd = MF->getMachineMemOperand(
8219 MachinePointerInfo::getJumpTable(*MF), MachineMemOperand::MOLoad, 4, 4);
8220 unsigned NewVReg5 = MRI->createVirtualRegister(TRC);
8222 BuildMI(DispContBB, dl, TII->get(ARM::LDRrs), NewVReg5)
8223 .addReg(NewVReg3, RegState::Kill)
8226 .addMemOperand(JTMMOLd));
8228 if (IsPositionIndependent) {
8229 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTadd))
8230 .addReg(NewVReg5, RegState::Kill)
8232 .addJumpTableIndex(MJTI);
8234 BuildMI(DispContBB, dl, TII->get(ARM::BR_JTr))
8235 .addReg(NewVReg5, RegState::Kill)
8236 .addJumpTableIndex(MJTI);
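// (In ARM mode the scaled index comes from MOVsi with lsl #2, the table entry
//  is loaded with LDRrs, and the BR_JTadd vs. BR_JTr choice above either adds
//  the entry to the table base for PIC or jumps to it directly.)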
8240 // Add the jump table entries as successors to the MBB.
8241 SmallPtrSet<MachineBasicBlock*, 8> SeenMBBs;
8242 for (std::vector<MachineBasicBlock*>::iterator
8243 I = LPadList.begin(), E = LPadList.end(); I != E; ++I) {
8244 MachineBasicBlock *CurMBB = *I;
8245 if (SeenMBBs.insert(CurMBB).second)
8246 DispContBB->addSuccessor(CurMBB);
8249 // N.B. the order the invoke BBs are processed in doesn't matter here.
8250 const MCPhysReg *SavedRegs = RI.getCalleeSavedRegs(MF);
8251 SmallVector<MachineBasicBlock*, 64> MBBLPads;
8252 for (MachineBasicBlock *BB : InvokeBBs) {
8254 // Remove the landing pad successor from the invoke block and replace it
8255 // with the new dispatch block.
8256 SmallVector<MachineBasicBlock*, 4> Successors(BB->succ_begin(),
8258 while (!Successors.empty()) {
8259 MachineBasicBlock *SMBB = Successors.pop_back_val();
8260 if (SMBB->isEHPad()) {
8261 BB->removeSuccessor(SMBB);
8262 MBBLPads.push_back(SMBB);
8266 BB->addSuccessor(DispatchBB, BranchProbability::getZero());
8267 BB->normalizeSuccProbs();
8269 // Find the invoke call and mark all of the callee-saved registers as
8270 // 'implicit defined' so that they're spilled. This prevents code from
8271 // moving instructions to before the EH block, where they will never be
8273 for (MachineBasicBlock::reverse_iterator
8274 II = BB->rbegin(), IE = BB->rend(); II != IE; ++II) {
8275 if (!II->isCall()) continue;
8277 DenseMap<unsigned, bool> DefRegs;
8278 for (MachineInstr::mop_iterator
8279 OI = II->operands_begin(), OE = II->operands_end();
8281 if (!OI->isReg()) continue;
8282 DefRegs[OI->getReg()] = true;
8285 MachineInstrBuilder MIB(*MF, &*II);
8287 for (unsigned i = 0; SavedRegs[i] != 0; ++i) {
8288 unsigned Reg = SavedRegs[i];
8289 if (Subtarget->isThumb2() &&
8290 !ARM::tGPRRegClass.contains(Reg) &&
8291 !ARM::hGPRRegClass.contains(Reg))
8293 if (Subtarget->isThumb1Only() && !ARM::tGPRRegClass.contains(Reg))
8295 if (!Subtarget->isThumb() && !ARM::GPRRegClass.contains(Reg))
8298 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
8305 // Mark all former landing pads as non-landing pads. The dispatch is the only landing pad now.
8307 for (SmallVectorImpl<MachineBasicBlock*>::iterator
8308 I = MBBLPads.begin(), E = MBBLPads.end(); I != E; ++I)
8309 (*I)->setIsEHPad(false);
8311 // The instruction is gone now.
8312 MI.eraseFromParent();
8316 MachineBasicBlock *OtherSucc(MachineBasicBlock *MBB, MachineBasicBlock *Succ) {
8317 for (MachineBasicBlock::succ_iterator I = MBB->succ_begin(),
8318 E = MBB->succ_end(); I != E; ++I)
8321 llvm_unreachable("Expecting a BB with two successors!");
8324 /// Return the load opcode for a given load size. If the load size is >= 8,
8325 /// a NEON opcode will be returned.
8326 static unsigned getLdOpcode(unsigned LdSize, bool IsThumb1, bool IsThumb2) {
8328 return LdSize == 16 ? ARM::VLD1q32wb_fixed
8329 : LdSize == 8 ? ARM::VLD1d32wb_fixed : 0;
8331 return LdSize == 4 ? ARM::tLDRi
8332 : LdSize == 2 ? ARM::tLDRHi
8333 : LdSize == 1 ? ARM::tLDRBi : 0;
8335 return LdSize == 4 ? ARM::t2LDR_POST
8336 : LdSize == 2 ? ARM::t2LDRH_POST
8337 : LdSize == 1 ? ARM::t2LDRB_POST : 0;
8338 return LdSize == 4 ? ARM::LDR_POST_IMM
8339 : LdSize == 2 ? ARM::LDRH_POST
8340 : LdSize == 1 ? ARM::LDRB_POST_IMM : 0;
8343 /// Return the store opcode for a given store size. If the store size is >= 8,
8344 /// a NEON opcode will be returned.
8345 static unsigned getStOpcode(unsigned StSize, bool IsThumb1, bool IsThumb2) {
8347 return StSize == 16 ? ARM::VST1q32wb_fixed
8348 : StSize == 8 ? ARM::VST1d32wb_fixed : 0;
8350 return StSize == 4 ? ARM::tSTRi
8351 : StSize == 2 ? ARM::tSTRHi
8352 : StSize == 1 ? ARM::tSTRBi : 0;
8354 return StSize == 4 ? ARM::t2STR_POST
8355 : StSize == 2 ? ARM::t2STRH_POST
8356 : StSize == 1 ? ARM::t2STRB_POST : 0;
8357 return StSize == 4 ? ARM::STR_POST_IMM
8358 : StSize == 2 ? ARM::STRH_POST
8359 : StSize == 1 ? ARM::STRB_POST_IMM : 0;
8362 /// Emit a post-increment load operation with given size. The instructions
8363 /// will be added to BB at Pos.
8364 static void emitPostLd(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
8365 const TargetInstrInfo *TII, const DebugLoc &dl,
8366 unsigned LdSize, unsigned Data, unsigned AddrIn,
8367 unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
8368 unsigned LdOpc = getLdOpcode(LdSize, IsThumb1, IsThumb2);
8369 assert(LdOpc != 0 && "Should have a load opcode");
8371 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8372 .addReg(AddrOut, RegState::Define).addReg(AddrIn)
8374 } else if (IsThumb1) {
8375 // load + update AddrIn
8376 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8377 .addReg(AddrIn).addImm(0));
8378 MachineInstrBuilder MIB =
8379 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
8380 MIB = AddDefaultT1CC(MIB);
8381 MIB.addReg(AddrIn).addImm(LdSize);
8382 AddDefaultPred(MIB);
8383 } else if (IsThumb2) {
8384 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8385 .addReg(AddrOut, RegState::Define).addReg(AddrIn)
8388 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(LdOpc), Data)
8389 .addReg(AddrOut, RegState::Define).addReg(AddrIn)
8390 .addReg(0).addImm(LdSize));
8394 /// Emit a post-increment store operation with given size. The instructions
8395 /// will be added to BB at Pos.
8396 static void emitPostSt(MachineBasicBlock *BB, MachineBasicBlock::iterator Pos,
8397 const TargetInstrInfo *TII, const DebugLoc &dl,
8398 unsigned StSize, unsigned Data, unsigned AddrIn,
8399 unsigned AddrOut, bool IsThumb1, bool IsThumb2) {
8400 unsigned StOpc = getStOpcode(StSize, IsThumb1, IsThumb2);
8401 assert(StOpc != 0 && "Should have a store opcode");
8403 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
8404 .addReg(AddrIn).addImm(0).addReg(Data));
8405 } else if (IsThumb1) {
8406 // store + update AddrIn
8407 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc)).addReg(Data)
8408 .addReg(AddrIn).addImm(0));
8409 MachineInstrBuilder MIB =
8410 BuildMI(*BB, Pos, dl, TII->get(ARM::tADDi8), AddrOut);
8411 MIB = AddDefaultT1CC(MIB);
8412 MIB.addReg(AddrIn).addImm(StSize);
8413 AddDefaultPred(MIB);
8414 } else if (IsThumb2) {
8415 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
8416 .addReg(Data).addReg(AddrIn).addImm(StSize));
8418 AddDefaultPred(BuildMI(*BB, Pos, dl, TII->get(StOpc), AddrOut)
8419 .addReg(Data).addReg(AddrIn).addReg(0)
8425 ARMTargetLowering::EmitStructByval(MachineInstr &MI,
8426 MachineBasicBlock *BB) const {
8427 // This pseudo instruction has 3 operands: dst, src, size
8428 // We expand it to a loop if size > Subtarget->getMaxInlineSizeThreshold().
8429 // Otherwise, we will generate unrolled scalar copies.
8430 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
8431 const BasicBlock *LLVM_BB = BB->getBasicBlock();
8432 MachineFunction::iterator It = ++BB->getIterator();
8434 unsigned dest = MI.getOperand(0).getReg();
8435 unsigned src = MI.getOperand(1).getReg();
8436 unsigned SizeVal = MI.getOperand(2).getImm();
8437 unsigned Align = MI.getOperand(3).getImm();
8438 DebugLoc dl = MI.getDebugLoc();
8440 MachineFunction *MF = BB->getParent();
8441 MachineRegisterInfo &MRI = MF->getRegInfo();
8442 unsigned UnitSize = 0;
8443 const TargetRegisterClass *TRC = nullptr;
8444 const TargetRegisterClass *VecTRC = nullptr;
8446 bool IsThumb1 = Subtarget->isThumb1Only();
8447 bool IsThumb2 = Subtarget->isThumb2();
8448 bool IsThumb = Subtarget->isThumb();
8452 } else if (Align & 2) {
8455 // Check whether we can use NEON instructions.
8456 if (!MF->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat) &&
8457 Subtarget->hasNEON()) {
8458 if ((Align % 16 == 0) && SizeVal >= 16)
8460 else if ((Align % 8 == 0) && SizeVal >= 8)
8463 // Can't use NEON instructions.
8468 // Select the correct opcode and register class for unit size load/store
8469 bool IsNeon = UnitSize >= 8;
8470 TRC = IsThumb ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
8472 VecTRC = UnitSize == 16 ? &ARM::DPairRegClass
8473 : UnitSize == 8 ? &ARM::DPRRegClass
8476 unsigned BytesLeft = SizeVal % UnitSize;
8477 unsigned LoopSize = SizeVal - BytesLeft;
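// (For example, SizeVal == 37 with UnitSize == 16 gives BytesLeft == 5 and
//  LoopSize == 32: two 16-byte copies followed by five single-byte copies.)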
8479 if (SizeVal <= Subtarget->getMaxInlineSizeThreshold()) {
8480 // Use LDR and STR to copy.
8481 // [scratch, srcOut] = LDR_POST(srcIn, UnitSize)
8482 // [destOut] = STR_POST(scratch, destIn, UnitSize)
8483 unsigned srcIn = src;
8484 unsigned destIn = dest;
8485 for (unsigned i = 0; i < LoopSize; i+=UnitSize) {
8486 unsigned srcOut = MRI.createVirtualRegister(TRC);
8487 unsigned destOut = MRI.createVirtualRegister(TRC);
8488 unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
8489 emitPostLd(BB, MI, TII, dl, UnitSize, scratch, srcIn, srcOut,
8490 IsThumb1, IsThumb2);
8491 emitPostSt(BB, MI, TII, dl, UnitSize, scratch, destIn, destOut,
8492 IsThumb1, IsThumb2);
8497 // Handle the leftover bytes with LDRB and STRB.
8498 // [scratch, srcOut] = LDRB_POST(srcIn, 1)
8499 // [destOut] = STRB_POST(scratch, destIn, 1)
8500 for (unsigned i = 0; i < BytesLeft; i++) {
8501 unsigned srcOut = MRI.createVirtualRegister(TRC);
8502 unsigned destOut = MRI.createVirtualRegister(TRC);
8503 unsigned scratch = MRI.createVirtualRegister(TRC);
8504 emitPostLd(BB, MI, TII, dl, 1, scratch, srcIn, srcOut,
8505 IsThumb1, IsThumb2);
8506 emitPostSt(BB, MI, TII, dl, 1, scratch, destIn, destOut,
8507 IsThumb1, IsThumb2);
8511 MI.eraseFromParent(); // The instruction is gone now.
8515 // Expand the pseudo op to a loop.
8518 // movw varEnd, # --> with thumb2
8520 // ldrcp varEnd, idx --> without thumb2
8521 // fallthrough --> loopMBB
8523 // PHI varPhi, varEnd, varLoop
8524 // PHI srcPhi, src, srcLoop
8525 // PHI destPhi, dst, destLoop
8526 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
8527 // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
8528 // subs varLoop, varPhi, #UnitSize
8530 // fallthrough --> exitMBB
8532 // epilogue to handle left-over bytes
8533 // [scratch, srcOut] = LDRB_POST(srcLoop, 1)
8534 // [destOut] = STRB_POST(scratch, destLoop, 1)
8535 MachineBasicBlock *loopMBB = MF->CreateMachineBasicBlock(LLVM_BB);
8536 MachineBasicBlock *exitMBB = MF->CreateMachineBasicBlock(LLVM_BB);
8537 MF->insert(It, loopMBB);
8538 MF->insert(It, exitMBB);
8540 // Transfer the remainder of BB and its successor edges to exitMBB.
8541 exitMBB->splice(exitMBB->begin(), BB,
8542 std::next(MachineBasicBlock::iterator(MI)), BB->end());
8543 exitMBB->transferSuccessorsAndUpdatePHIs(BB);
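// (BB now ends right before MI and acts as the loop preheader: it
//  materializes varEnd below and falls through into loopMBB.)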
8545 // Load an immediate to varEnd.
8546 unsigned varEnd = MRI.createVirtualRegister(TRC);
8547 if (Subtarget->useMovt(*MF)) {
8548 unsigned Vtmp = varEnd;
8549 if ((LoopSize & 0xFFFF0000) != 0)
8550 Vtmp = MRI.createVirtualRegister(TRC);
8551 AddDefaultPred(BuildMI(BB, dl,
8552 TII->get(IsThumb ? ARM::t2MOVi16 : ARM::MOVi16),
8553 Vtmp).addImm(LoopSize & 0xFFFF));
8555 if ((LoopSize & 0xFFFF0000) != 0)
8556 AddDefaultPred(BuildMI(BB, dl,
8557 TII->get(IsThumb ? ARM::t2MOVTi16 : ARM::MOVTi16),
8560 .addImm(LoopSize >> 16));
8562 MachineConstantPool *ConstantPool = MF->getConstantPool();
8563 Type *Int32Ty = Type::getInt32Ty(MF->getFunction()->getContext());
8564 const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
8566 // MachineConstantPool wants an explicit alignment.
8567 unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
8569 Align = MF->getDataLayout().getTypeAllocSize(C->getType());
8570 unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
8573 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::tLDRpci)).addReg(
8574 varEnd, RegState::Define).addConstantPoolIndex(Idx));
8576 AddDefaultPred(BuildMI(*BB, MI, dl, TII->get(ARM::LDRcp)).addReg(
8577 varEnd, RegState::Define).addConstantPoolIndex(Idx).addImm(0));
8579 BB->addSuccessor(loopMBB);
8581 // Generate the loop body:
8582 // varPhi = PHI(varLoop, varEnd)
8583 // srcPhi = PHI(srcLoop, src)
8584 // destPhi = PHI(destLoop, dst)
8585 MachineBasicBlock *entryBB = BB;
8587 unsigned varLoop = MRI.createVirtualRegister(TRC);
8588 unsigned varPhi = MRI.createVirtualRegister(TRC);
8589 unsigned srcLoop = MRI.createVirtualRegister(TRC);
8590 unsigned srcPhi = MRI.createVirtualRegister(TRC);
8591 unsigned destLoop = MRI.createVirtualRegister(TRC);
8592 unsigned destPhi = MRI.createVirtualRegister(TRC);
8594 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), varPhi)
8595 .addReg(varLoop).addMBB(loopMBB)
8596 .addReg(varEnd).addMBB(entryBB);
8597 BuildMI(BB, dl, TII->get(ARM::PHI), srcPhi)
8598 .addReg(srcLoop).addMBB(loopMBB)
8599 .addReg(src).addMBB(entryBB);
8600 BuildMI(BB, dl, TII->get(ARM::PHI), destPhi)
8601 .addReg(destLoop).addMBB(loopMBB)
8602 .addReg(dest).addMBB(entryBB);
8604 // [scratch, srcLoop] = LDR_POST(srcPhi, UnitSize)
8605 // [destLoop] = STR_POST(scratch, destPhi, UnitSize)
8606 unsigned scratch = MRI.createVirtualRegister(IsNeon ? VecTRC : TRC);
8607 emitPostLd(BB, BB->end(), TII, dl, UnitSize, scratch, srcPhi, srcLoop,
8608 IsThumb1, IsThumb2);
8609 emitPostSt(BB, BB->end(), TII, dl, UnitSize, scratch, destPhi, destLoop,
8610 IsThumb1, IsThumb2);
8612 // Decrement loop variable by UnitSize.
8614 MachineInstrBuilder MIB =
8615 BuildMI(*BB, BB->end(), dl, TII->get(ARM::tSUBi8), varLoop);
8616 MIB = AddDefaultT1CC(MIB);
8617 MIB.addReg(varPhi).addImm(UnitSize);
8618 AddDefaultPred(MIB);
8620 MachineInstrBuilder MIB =
8621 BuildMI(*BB, BB->end(), dl,
8622 TII->get(IsThumb2 ? ARM::t2SUBri : ARM::SUBri), varLoop);
8623 AddDefaultCC(AddDefaultPred(MIB.addReg(varPhi).addImm(UnitSize)));
8624 MIB->getOperand(5).setReg(ARM::CPSR);
8625 MIB->getOperand(5).setIsDef(true);
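// (AddDefaultCC added the optional cc_out operand as noreg; pointing it at
//  CPSR and marking it a def in effect turns the SUB into a flag-setting SUBS
//  so the conditional branch below can test the remaining count.)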
8627 BuildMI(*BB, BB->end(), dl,
8628 TII->get(IsThumb1 ? ARM::tBcc : IsThumb2 ? ARM::t2Bcc : ARM::Bcc))
8629 .addMBB(loopMBB).addImm(ARMCC::NE).addReg(ARM::CPSR);
8631 // loopMBB can loop back to loopMBB or fall through to exitMBB.
8632 BB->addSuccessor(loopMBB);
8633 BB->addSuccessor(exitMBB);
8635 // Add epilogue to handle BytesLeft.
8637 auto StartOfExit = exitMBB->begin();
8639 // [scratch, srcOut] = LDRB_POST(srcLoop, 1)
8640 // [destOut] = STRB_POST(scratch, destLoop, 1)
8641 unsigned srcIn = srcLoop;
8642 unsigned destIn = destLoop;
8643 for (unsigned i = 0; i < BytesLeft; i++) {
8644 unsigned srcOut = MRI.createVirtualRegister(TRC);
8645 unsigned destOut = MRI.createVirtualRegister(TRC);
8646 unsigned scratch = MRI.createVirtualRegister(TRC);
8647 emitPostLd(BB, StartOfExit, TII, dl, 1, scratch, srcIn, srcOut,
8648 IsThumb1, IsThumb2);
8649 emitPostSt(BB, StartOfExit, TII, dl, 1, scratch, destIn, destOut,
8650 IsThumb1, IsThumb2);
8655 MI.eraseFromParent(); // The instruction is gone now.
8660 ARMTargetLowering::EmitLowered__chkstk(MachineInstr &MI,
8661 MachineBasicBlock *MBB) const {
8662 const TargetMachine &TM = getTargetMachine();
8663 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
8664 DebugLoc DL = MI.getDebugLoc();
8666 assert(Subtarget->isTargetWindows() &&
8667 "__chkstk is only supported on Windows");
8668 assert(Subtarget->isThumb2() && "Windows on ARM requires Thumb-2 mode");
8670 // __chkstk takes the number of words to allocate on the stack in R4, and
8671 // returns the stack adjustment in number of bytes in R4. This will not
8672 // clobber any other registers (other than the obvious lr).
8674 // Although, technically, IP should be considered a register which may be
8675 // clobbered, the call itself will not touch it. Windows on ARM is a pure
8676 // thumb-2 environment, so there is no interworking required. As a result, we
8677 // do not expect a veneer to be emitted by the linker, clobbering IP.
8679 // Each module receives its own copy of __chkstk, so no import thunk is
8680 // required, again, ensuring that IP is not clobbered.
8682 // Finally, although some linkers may theoretically provide a trampoline for
8683 // out of range calls (which is quite common due to a 32M range limitation of
8684 // branches for Thumb), we can generate the long-call version via
8685 // -mcmodel=large, alleviating the need for the trampoline which may clobber
8688 switch (TM.getCodeModel()) {
8689 case CodeModel::Small:
8690 case CodeModel::Medium:
8691 case CodeModel::Default:
8692 case CodeModel::Kernel:
8693 BuildMI(*MBB, MI, DL, TII.get(ARM::tBL))
8694 .addImm((unsigned)ARMCC::AL).addReg(0)
8695 .addExternalSymbol("__chkstk")
8696 .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
8697 .addReg(ARM::R4, RegState::Implicit | RegState::Define)
8698 .addReg(ARM::R12, RegState::Implicit | RegState::Define | RegState::Dead);
8700 case CodeModel::Large:
8701 case CodeModel::JITDefault: {
8702 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
8703 unsigned Reg = MRI.createVirtualRegister(&ARM::rGPRRegClass);
8705 BuildMI(*MBB, MI, DL, TII.get(ARM::t2MOVi32imm), Reg)
8706 .addExternalSymbol("__chkstk");
8707 BuildMI(*MBB, MI, DL, TII.get(ARM::tBLXr))
8708 .addImm((unsigned)ARMCC::AL).addReg(0)
8709 .addReg(Reg, RegState::Kill)
8710 .addReg(ARM::R4, RegState::Implicit | RegState::Kill)
8711 .addReg(ARM::R4, RegState::Implicit | RegState::Define)
8712 .addReg(ARM::R12, RegState::Implicit | RegState::Define | RegState::Dead);
8717 AddDefaultCC(AddDefaultPred(BuildMI(*MBB, MI, DL, TII.get(ARM::t2SUBrr),
8719 .addReg(ARM::SP, RegState::Kill)
8720 .addReg(ARM::R4, RegState::Kill)
8721 .setMIFlags(MachineInstr::FrameSetup)));
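// (R4 holds the byte adjustment returned by __chkstk, so the t2SUBrr above is
//  what actually moves SP down to carve out the allocation.)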
8723 MI.eraseFromParent();
8728 ARMTargetLowering::EmitLowered__dbzchk(MachineInstr &MI,
8729 MachineBasicBlock *MBB) const {
8730 DebugLoc DL = MI.getDebugLoc();
8731 MachineFunction *MF = MBB->getParent();
8732 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
8734 MachineBasicBlock *ContBB = MF->CreateMachineBasicBlock();
8735 MF->insert(++MBB->getIterator(), ContBB);
8736 ContBB->splice(ContBB->begin(), MBB,
8737 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
8738 ContBB->transferSuccessorsAndUpdatePHIs(MBB);
8739 MBB->addSuccessor(ContBB);
8741 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
8742 BuildMI(TrapBB, DL, TII->get(ARM::t__brkdiv0));
8743 MF->push_back(TrapBB);
8744 MBB->addSuccessor(TrapBB);
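// (The compare/branch emitted next tests the divisor in operand 0 against
//  zero and sends the zero case to TrapBB, which raises the Windows
//  divide-by-zero trap via __brkdiv0; otherwise execution continues in ContBB.)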
8746 AddDefaultPred(BuildMI(*MBB, MI, DL, TII->get(ARM::tCMPi8))
8747 .addReg(MI.getOperand(0).getReg())
8749 BuildMI(*MBB, MI, DL, TII->get(ARM::t2Bcc))
8754 MI.eraseFromParent();
8759 ARMTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
8760 MachineBasicBlock *BB) const {
8761 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
8762 DebugLoc dl = MI.getDebugLoc();
8763 bool isThumb2 = Subtarget->isThumb2();
8764 switch (MI.getOpcode()) {
8767 llvm_unreachable("Unexpected instr type to insert");
8770 // Thumb1 post-indexed loads are really just single-register LDMs.
8771 case ARM::tLDR_postidx: {
8772 BuildMI(*BB, MI, dl, TII->get(ARM::tLDMIA_UPD))
8773 .addOperand(MI.getOperand(1)) // Rn_wb
8774 .addOperand(MI.getOperand(2)) // Rn
8775 .addOperand(MI.getOperand(3)) // PredImm
8776 .addOperand(MI.getOperand(4)) // PredReg
8777 .addOperand(MI.getOperand(0)); // Rt
8778 MI.eraseFromParent();
8782 // The Thumb2 pre-indexed stores have the same MI operands, they just
8783 // define them differently in the .td files from the isel patterns, so
8784 // they need pseudos.
8785 case ARM::t2STR_preidx:
8786 MI.setDesc(TII->get(ARM::t2STR_PRE));
8788 case ARM::t2STRB_preidx:
8789 MI.setDesc(TII->get(ARM::t2STRB_PRE));
8791 case ARM::t2STRH_preidx:
8792 MI.setDesc(TII->get(ARM::t2STRH_PRE));
8795 case ARM::STRi_preidx:
8796 case ARM::STRBi_preidx: {
8797 unsigned NewOpc = MI.getOpcode() == ARM::STRi_preidx ? ARM::STR_PRE_IMM
8798 : ARM::STRB_PRE_IMM;
8799 // Decode the offset.
8800 unsigned Offset = MI.getOperand(4).getImm();
8801 bool isSub = ARM_AM::getAM2Op(Offset) == ARM_AM::sub;
8802 Offset = ARM_AM::getAM2Offset(Offset);
8806 MachineMemOperand *MMO = *MI.memoperands_begin();
8807 BuildMI(*BB, MI, dl, TII->get(NewOpc))
8808 .addOperand(MI.getOperand(0)) // Rn_wb
8809 .addOperand(MI.getOperand(1)) // Rt
8810 .addOperand(MI.getOperand(2)) // Rn
8811 .addImm(Offset) // offset (skip GPR==zero_reg)
8812 .addOperand(MI.getOperand(5)) // pred
8813 .addOperand(MI.getOperand(6))
8814 .addMemOperand(MMO);
8815 MI.eraseFromParent();
8818 case ARM::STRr_preidx:
8819 case ARM::STRBr_preidx:
8820 case ARM::STRH_preidx: {
8822 switch (MI.getOpcode()) {
8823 default: llvm_unreachable("unexpected opcode!");
8824 case ARM::STRr_preidx: NewOpc = ARM::STR_PRE_REG; break;
8825 case ARM::STRBr_preidx: NewOpc = ARM::STRB_PRE_REG; break;
8826 case ARM::STRH_preidx: NewOpc = ARM::STRH_PRE; break;
8828 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(NewOpc));
8829 for (unsigned i = 0; i < MI.getNumOperands(); ++i)
8830 MIB.addOperand(MI.getOperand(i));
8831 MI.eraseFromParent();
8835 case ARM::tMOVCCr_pseudo: {
8836 // To "insert" a SELECT_CC instruction, we actually have to insert the
8837 // diamond control-flow pattern. The incoming instruction knows the
8838 // destination vreg to set, the condition code register to branch on, the
8839 // true/false values to select between, and a branch opcode to use.
8840 const BasicBlock *LLVM_BB = BB->getBasicBlock();
8841 MachineFunction::iterator It = ++BB->getIterator();
8846 // cmpTY ccX, r1, r2
8848 // fallthrough --> copy0MBB
8849 MachineBasicBlock *thisMBB = BB;
8850 MachineFunction *F = BB->getParent();
8851 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
8852 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
8853 F->insert(It, copy0MBB);
8854 F->insert(It, sinkMBB);
8856 // Transfer the remainder of BB and its successor edges to sinkMBB.
8857 sinkMBB->splice(sinkMBB->begin(), BB,
8858 std::next(MachineBasicBlock::iterator(MI)), BB->end());
8859 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
8861 BB->addSuccessor(copy0MBB);
8862 BB->addSuccessor(sinkMBB);
8864 BuildMI(BB, dl, TII->get(ARM::tBcc))
8866 .addImm(MI.getOperand(3).getImm())
8867 .addReg(MI.getOperand(4).getReg());
8870 // %FalseValue = ...
8871 // # fallthrough to sinkMBB
8874 // Update machine-CFG edges
8875 BB->addSuccessor(sinkMBB);
8878 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
8881 BuildMI(*BB, BB->begin(), dl, TII->get(ARM::PHI), MI.getOperand(0).getReg())
8882 .addReg(MI.getOperand(1).getReg())
8884 .addReg(MI.getOperand(2).getReg())
8887 MI.eraseFromParent(); // The pseudo instruction is gone now.
8892 case ARM::BCCZi64: {
8893 // If there is an unconditional branch to the other successor, remove it.
8894 BB->erase(std::next(MachineBasicBlock::iterator(MI)), BB->end());
8896 // Compare both parts that make up the double comparison separately for
8898 bool RHSisZero = MI.getOpcode() == ARM::BCCZi64;
8900 unsigned LHS1 = MI.getOperand(1).getReg();
8901 unsigned LHS2 = MI.getOperand(2).getReg();
8903 AddDefaultPred(BuildMI(BB, dl,
8904 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
8905 .addReg(LHS1).addImm(0));
8906 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
8907 .addReg(LHS2).addImm(0)
8908 .addImm(ARMCC::EQ).addReg(ARM::CPSR);
8910 unsigned RHS1 = MI.getOperand(3).getReg();
8911 unsigned RHS2 = MI.getOperand(4).getReg();
8912 AddDefaultPred(BuildMI(BB, dl,
8913 TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
8914 .addReg(LHS1).addReg(RHS1));
8915 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2CMPrr : ARM::CMPrr))
8916 .addReg(LHS2).addReg(RHS2)
8917 .addImm(ARMCC::EQ).addReg(ARM::CPSR);
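// (Each second CMP above is predicated on EQ from the first, so after the
//  pair CPSR reads "equal" only when both 32-bit halves compare equal.)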
8920 MachineBasicBlock *destMBB = MI.getOperand(RHSisZero ? 3 : 5).getMBB();
8921 MachineBasicBlock *exitMBB = OtherSucc(BB, destMBB);
8922 if (MI.getOperand(0).getImm() == ARMCC::NE)
8923 std::swap(destMBB, exitMBB);
8925 BuildMI(BB, dl, TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc))
8926 .addMBB(destMBB).addImm(ARMCC::EQ).addReg(ARM::CPSR);
8928 AddDefaultPred(BuildMI(BB, dl, TII->get(ARM::t2B)).addMBB(exitMBB));
8930 BuildMI(BB, dl, TII->get(ARM::B)) .addMBB(exitMBB);
8932 MI.eraseFromParent(); // The pseudo instruction is gone now.
8936 case ARM::Int_eh_sjlj_setjmp:
8937 case ARM::Int_eh_sjlj_setjmp_nofp:
8938 case ARM::tInt_eh_sjlj_setjmp:
8939 case ARM::t2Int_eh_sjlj_setjmp:
8940 case ARM::t2Int_eh_sjlj_setjmp_nofp:
8943 case ARM::Int_eh_sjlj_setup_dispatch:
8944 EmitSjLjDispatchBlock(MI, BB);
8949 // To insert an ABS instruction, we have to insert the
8950 // diamond control-flow pattern. The incoming instruction knows the
8951 // source vreg to test against 0, the destination vreg to set,
8952 // the condition code register to branch on, the
8953 // true/false values to select between, and a branch opcode to use.
8958 // BCC (branch to SinkBB if V0 >= 0)
8959 // RSBBB: V3 = RSBri V2, 0 (compute ABS if V2 < 0)
8960 // SinkBB: V1 = PHI(V2, V3)
8961 const BasicBlock *LLVM_BB = BB->getBasicBlock();
8962 MachineFunction::iterator BBI = ++BB->getIterator();
8963 MachineFunction *Fn = BB->getParent();
8964 MachineBasicBlock *RSBBB = Fn->CreateMachineBasicBlock(LLVM_BB);
8965 MachineBasicBlock *SinkBB = Fn->CreateMachineBasicBlock(LLVM_BB);
8966 Fn->insert(BBI, RSBBB);
8967 Fn->insert(BBI, SinkBB);
8969 unsigned int ABSSrcReg = MI.getOperand(1).getReg();
8970 unsigned int ABSDstReg = MI.getOperand(0).getReg();
8971 bool ABSSrcKill = MI.getOperand(1).isKill();
8972 bool isThumb2 = Subtarget->isThumb2();
8973 MachineRegisterInfo &MRI = Fn->getRegInfo();
8974 // In Thumb mode, the S bit must not be specified if the source register is SP
8975 // or PC, or if the destination register is SP, so restrict the register class
8976 unsigned NewRsbDstReg =
8977 MRI.createVirtualRegister(isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass);
8979 // Transfer the remainder of BB and its successor edges to sinkMBB.
8980 SinkBB->splice(SinkBB->begin(), BB,
8981 std::next(MachineBasicBlock::iterator(MI)), BB->end());
8982 SinkBB->transferSuccessorsAndUpdatePHIs(BB);
8984 BB->addSuccessor(RSBBB);
8985 BB->addSuccessor(SinkBB);
8987 // fall through to SinkMBB
8988 RSBBB->addSuccessor(SinkBB);
8990 // insert a cmp at the end of BB
8991 AddDefaultPred(BuildMI(BB, dl,
8992 TII->get(isThumb2 ? ARM::t2CMPri : ARM::CMPri))
8993 .addReg(ABSSrcReg).addImm(0));
8995 // insert a bcc with opposite CC to ARMCC::MI at the end of BB
8997 TII->get(isThumb2 ? ARM::t2Bcc : ARM::Bcc)).addMBB(SinkBB)
8998 .addImm(ARMCC::getOppositeCondition(ARMCC::MI)).addReg(ARM::CPSR);
9000 // insert rsbri in RSBBB
9001 // Note: BCC and rsbri will be converted into predicated rsbmi
9002 // by if-conversion pass
9003 BuildMI(*RSBBB, RSBBB->begin(), dl,
9004 TII->get(isThumb2 ? ARM::t2RSBri : ARM::RSBri), NewRsbDstReg)
9005 .addReg(ABSSrcReg, ABSSrcKill ? RegState::Kill : 0)
9006 .addImm(0).addImm((unsigned)ARMCC::AL).addReg(0).addReg(0);
9008 // insert PHI in SinkBB,
9009 // reuse ABSDstReg to not change uses of ABS instruction
9010 BuildMI(*SinkBB, SinkBB->begin(), dl,
9011 TII->get(ARM::PHI), ABSDstReg)
9012 .addReg(NewRsbDstReg).addMBB(RSBBB)
9013 .addReg(ABSSrcReg).addMBB(BB);
9015 // remove ABS instruction
9016 MI.eraseFromParent();
9018 // return last added BB
9021 case ARM::COPY_STRUCT_BYVAL_I32:
9023 return EmitStructByval(MI, BB);
9024 case ARM::WIN__CHKSTK:
9025 return EmitLowered__chkstk(MI, BB);
9026 case ARM::WIN__DBZCHK:
9027 return EmitLowered__dbzchk(MI, BB);
9031 /// \brief Attaches vregs to MEMCPY that it will use as scratch registers
9032 /// when it is expanded into LDM/STM. This is done as a post-isel lowering
9033 /// instead of as a custom inserter because we need the use list from the SDNode.
9034 static void attachMEMCPYScratchRegs(const ARMSubtarget *Subtarget,
9035 MachineInstr &MI, const SDNode *Node) {
9036 bool isThumb1 = Subtarget->isThumb1Only();
9038 DebugLoc DL = MI.getDebugLoc();
9039 MachineFunction *MF = MI.getParent()->getParent();
9040 MachineRegisterInfo &MRI = MF->getRegInfo();
9041 MachineInstrBuilder MIB(*MF, MI);
9043 // If the new dst/src is unused, mark it as dead.
9044 if (!Node->hasAnyUseOfValue(0)) {
9045 MI.getOperand(0).setIsDead(true);
9047 if (!Node->hasAnyUseOfValue(1)) {
9048 MI.getOperand(1).setIsDead(true);
9051 // The MEMCPY both defines and kills the scratch registers.
9052 for (unsigned I = 0; I != MI.getOperand(4).getImm(); ++I) {
9053 unsigned TmpReg = MRI.createVirtualRegister(isThumb1 ? &ARM::tGPRRegClass
9054 : &ARM::GPRRegClass);
9055 MIB.addReg(TmpReg, RegState::Define|RegState::Dead);
9059 void ARMTargetLowering::AdjustInstrPostInstrSelection(MachineInstr &MI,
9060 SDNode *Node) const {
9061 if (MI.getOpcode() == ARM::MEMCPY) {
9062 attachMEMCPYScratchRegs(Subtarget, MI, Node);
9066 const MCInstrDesc *MCID = &MI.getDesc();
9067 // Adjust potentially 's' setting instructions after isel, i.e. ADC, SBC, RSB,
9068 // RSC. Coming out of isel, they have an implicit CPSR def, but the optional
9069 // operand is still set to noreg. If needed, set the optional operand's
9070 // register to CPSR, and remove the redundant implicit def.
9072 // e.g. ADCS (..., CPSR<imp-def>) -> ADC (... opt:CPSR<def>).
9074 // Rename pseudo opcodes.
9075 unsigned NewOpc = convertAddSubFlagsOpcode(MI.getOpcode());
9077 const ARMBaseInstrInfo *TII = Subtarget->getInstrInfo();
9078 MCID = &TII->get(NewOpc);
9080 assert(MCID->getNumOperands() == MI.getDesc().getNumOperands() + 1 &&
9081 "converted opcode should be the same except for cc_out");
9085 // Add the optional cc_out operand
9086 MI.addOperand(MachineOperand::CreateReg(0, /*isDef=*/true));
9088 unsigned ccOutIdx = MCID->getNumOperands() - 1;
9090 // Any ARM instruction that sets the 's' bit should specify an optional
9091 // "cc_out" operand in the last operand position.
9092 if (!MI.hasOptionalDef() || !MCID->OpInfo[ccOutIdx].isOptionalDef()) {
9093 assert(!NewOpc && "Optional cc_out operand required");
9096 // Look for an implicit def of CPSR added by MachineInstr ctor. Remove it
9097 // since we already have an optional CPSR def.
9098 bool definesCPSR = false;
9099 bool deadCPSR = false;
9100 for (unsigned i = MCID->getNumOperands(), e = MI.getNumOperands(); i != e;
9102 const MachineOperand &MO = MI.getOperand(i);
9103 if (MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR) {
9107 MI.RemoveOperand(i);
9112 assert(!NewOpc && "Optional cc_out operand required");
9115 assert(deadCPSR == !Node->hasAnyUseOfValue(1) && "inconsistent dead flag");
9117 assert(!MI.getOperand(ccOutIdx).getReg() &&
9118 "expect uninitialized optional cc_out operand");
9122 // If this instruction was defined with an optional CPSR def and its dag node
9123 // had a live implicit CPSR def, then activate the optional CPSR def.
9124 MachineOperand &MO = MI.getOperand(ccOutIdx);
9125 MO.setReg(ARM::CPSR);
9129 //===----------------------------------------------------------------------===//
9130 // ARM Optimization Hooks
9131 //===----------------------------------------------------------------------===//
9133 // Helper function that checks if N is a null or all ones constant.
9134 static inline bool isZeroOrAllOnes(SDValue N, bool AllOnes) {
9135 return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
9138 // Return true if N is conditionally 0 or all ones.
9139 // Detects these expressions where cc is an i1 value:
9141 // (select cc 0, y) [AllOnes=0]
9142 // (select cc y, 0) [AllOnes=0]
9143 // (zext cc) [AllOnes=0]
9144 // (sext cc) [AllOnes=0/1]
9145 // (select cc -1, y) [AllOnes=1]
9146 // (select cc y, -1) [AllOnes=1]
9148 // Invert is set when N is the null/all-ones constant for the CC == false case.
9149 // OtherOp is set to the alternative value of N.
9150 static bool isConditionalZeroOrAllOnes(SDNode *N, bool AllOnes,
9151 SDValue &CC, bool &Invert,
9153 SelectionDAG &DAG) {
9154 switch (N->getOpcode()) {
9155 default: return false;
9157 CC = N->getOperand(0);
9158 SDValue N1 = N->getOperand(1);
9159 SDValue N2 = N->getOperand(2);
9160 if (isZeroOrAllOnes(N1, AllOnes)) {
9165 if (isZeroOrAllOnes(N2, AllOnes)) {
9172 case ISD::ZERO_EXTEND:
9173 // (zext cc) can never be the all ones value.
9177 case ISD::SIGN_EXTEND: {
9179 EVT VT = N->getValueType(0);
9180 CC = N->getOperand(0);
9181 if (CC.getValueType() != MVT::i1)
9185 // When looking for an AllOnes constant, N is an sext, and the 'other' value is 0.
9187 OtherOp = DAG.getConstant(0, dl, VT);
9188 else if (N->getOpcode() == ISD::ZERO_EXTEND)
9189 // When looking for a 0 constant, N can be zext or sext.
9190 OtherOp = DAG.getConstant(1, dl, VT);
9192 OtherOp = DAG.getConstant(APInt::getAllOnesValue(VT.getSizeInBits()), dl,
9199 // Combine a constant select operand into its use:
9201 // (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
9202 // (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
9203 // (and (select cc, -1, c), x) -> (select cc, x, (and, x, c)) [AllOnes=1]
9204 // (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
9205 // (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
9207 // The transform is rejected if the select doesn't have a constant operand that
9208 // is null, or all ones when AllOnes is set.
9210 // Also recognize sext/zext from i1:
9212 // (add (zext cc), x) -> (select cc (add x, 1), x)
9213 // (add (sext cc), x) -> (select cc (add x, -1), x)
9215 // These transformations eventually create predicated instructions.
9217 // @param N The node to transform.
9218 // @param Slct The N operand that is a select.
9219 // @param OtherOp The other N operand (x above).
9220 // @param DCI Context.
9221 // @param AllOnes Require the select constant to be all ones instead of null.
9222 // @returns The new node, or SDValue() on failure.
9224 SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
9225 TargetLowering::DAGCombinerInfo &DCI,
9226 bool AllOnes = false) {
9227 SelectionDAG &DAG = DCI.DAG;
9228 EVT VT = N->getValueType(0);
9229 SDValue NonConstantVal;
9232 if (!isConditionalZeroOrAllOnes(Slct.getNode(), AllOnes, CCOp, SwapSelectOps,
9233 NonConstantVal, DAG))
9236 // Slct is now known to be the desired identity constant when CC is true.
9237 SDValue TrueVal = OtherOp;
9238 SDValue FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT,
9239 OtherOp, NonConstantVal);
9240 // Unless SwapSelectOps says CC should be false.
9242 std::swap(TrueVal, FalseVal);
9244 return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
9245 CCOp, TrueVal, FalseVal);
9248 // Attempt combineSelectAndUse on each operand of a commutative operator N.
9250 SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
9251 TargetLowering::DAGCombinerInfo &DCI) {
9252 SDValue N0 = N->getOperand(0);
9253 SDValue N1 = N->getOperand(1);
9254 if (N0.getNode()->hasOneUse())
9255 if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
9257 if (N1.getNode()->hasOneUse())
9258 if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
9263 static bool IsVUZPShuffleNode(SDNode *N) {
9264 // VUZP shuffle node.
9265 if (N->getOpcode() == ARMISD::VUZP)
9268 // "VUZP" on i32 is an alias for VTRN.
9269 if (N->getOpcode() == ARMISD::VTRN && N->getValueType(0) == MVT::v2i32)
9275 static SDValue AddCombineToVPADD(SDNode *N, SDValue N0, SDValue N1,
9276 TargetLowering::DAGCombinerInfo &DCI,
9277 const ARMSubtarget *Subtarget) {
9278 // Look for ADD(VUZP.0, VUZP.1).
9279 if (!IsVUZPShuffleNode(N0.getNode()) || N0.getNode() != N1.getNode() ||
9283 // Make sure the ADD is a 64-bit add; there is no 128-bit VPADD.
9284 if (!N->getValueType(0).is64BitVector())
9288 SelectionDAG &DAG = DCI.DAG;
9289 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9291 SDNode *Unzip = N0.getNode();
9292 EVT VT = N->getValueType(0);
9294 SmallVector<SDValue, 8> Ops;
9295 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpadd, dl,
9296 TLI.getPointerTy(DAG.getDataLayout())));
9297 Ops.push_back(Unzip->getOperand(0));
9298 Ops.push_back(Unzip->getOperand(1));
9300 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
9303 static SDValue AddCombineVUZPToVPADDL(SDNode *N, SDValue N0, SDValue N1,
9304 TargetLowering::DAGCombinerInfo &DCI,
9305 const ARMSubtarget *Subtarget) {
9306 // Check for two extended operands.
9307 if (!(N0.getOpcode() == ISD::SIGN_EXTEND &&
9308 N1.getOpcode() == ISD::SIGN_EXTEND) &&
9309 !(N0.getOpcode() == ISD::ZERO_EXTEND &&
9310 N1.getOpcode() == ISD::ZERO_EXTEND))
9313 SDValue N00 = N0.getOperand(0);
9314 SDValue N10 = N1.getOperand(0);
9316 // Look for ADD(SEXT(VUZP.0), SEXT(VUZP.1))
9317 if (!IsVUZPShuffleNode(N00.getNode()) || N00.getNode() != N10.getNode() ||
9321 // We only recognize Q register paddl here; this can't be reached until
9322 // after type legalization.
9323 if (!N00.getValueType().is64BitVector() ||
9324 !N0.getValueType().is128BitVector())
9328 SelectionDAG &DAG = DCI.DAG;
9329 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9331 EVT VT = N->getValueType(0);
9333 SmallVector<SDValue, 8> Ops;
9334 // Form vpaddl.sN or vpaddl.uN depending on the kind of extension.
9336 if (N0.getOpcode() == ISD::SIGN_EXTEND)
9337 Opcode = Intrinsic::arm_neon_vpaddls;
9339 Opcode = Intrinsic::arm_neon_vpaddlu;
9340 Ops.push_back(DAG.getConstant(Opcode, dl,
9341 TLI.getPointerTy(DAG.getDataLayout())));
9342 EVT ElemTy = N00.getValueType().getVectorElementType();
9343 unsigned NumElts = VT.getVectorNumElements();
9344 EVT ConcatVT = EVT::getVectorVT(*DAG.getContext(), ElemTy, NumElts * 2);
9345 SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), ConcatVT,
9346 N00.getOperand(0), N00.getOperand(1));
9347 Ops.push_back(Concat);
9349 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, Ops);
9352 // FIXME: This function shouldn't be necessary; if we lower BUILD_VECTOR in
9353 // an appropriate manner, we end up with ADD(VUZP(ZEXT(N))), which is
9354 // much easier to match.
9356 AddCombineBUILD_VECTORToVPADDL(SDNode *N, SDValue N0, SDValue N1,
9357 TargetLowering::DAGCombinerInfo &DCI,
9358 const ARMSubtarget *Subtarget) {
9359 // Only perform this optimization after legalization, and only if NEON is
9360 // available. We also expect both operands to be BUILD_VECTORs.
9361 if (DCI.isBeforeLegalize() || !Subtarget->hasNEON()
9362 || N0.getOpcode() != ISD::BUILD_VECTOR
9363 || N1.getOpcode() != ISD::BUILD_VECTOR)
9366 // Check output type since VPADDL operand elements can only be 8, 16, or 32.
9367 EVT VT = N->getValueType(0);
9368 if (!VT.isInteger() || VT.getVectorElementType() == MVT::i64)
9371 // Check that the vector operands are of the right form.
9372 // N0 and N1 are BUILD_VECTOR nodes with N number of EXTRACT_VECTOR
9373 // operands, where N is the size of the formed vector.
9374 // Each EXTRACT_VECTOR should have the same input vector and odd or even
9375 // index such that we have a pairwise add pattern.
9377 // Grab the vector that all EXTRACT_VECTOR nodes should be referencing.
9378 if (N0->getOperand(0)->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
9380 SDValue Vec = N0->getOperand(0)->getOperand(0);
9381 SDNode *V = Vec.getNode();
9382 unsigned nextIndex = 0;
9384 // For each operands to the ADD which are BUILD_VECTORs,
9385 // check to see if each of their operands are an EXTRACT_VECTOR with
9386 // the same vector and appropriate index.
9387 for (unsigned i = 0, e = N0->getNumOperands(); i != e; ++i) {
9388 if (N0->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT
9389 && N1->getOperand(i)->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
9391 SDValue ExtVec0 = N0->getOperand(i);
9392 SDValue ExtVec1 = N1->getOperand(i);
9394 // First operand is the vector; verify it's the same.
9395 if (V != ExtVec0->getOperand(0).getNode() ||
9396 V != ExtVec1->getOperand(0).getNode())
9399 // Second is the constant; verify it's correct.
9400 ConstantSDNode *C0 = dyn_cast<ConstantSDNode>(ExtVec0->getOperand(1));
9401 ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(ExtVec1->getOperand(1));
9403 // For the constant, we want to see all the even or all the odd.
9404 if (!C0 || !C1 || C0->getZExtValue() != nextIndex
9405 || C1->getZExtValue() != nextIndex+1)
9414 // Don't generate vpaddl+vmovn; we'll match it to vpadd later.
9415 if (Vec.getValueType().getVectorElementType() == VT.getVectorElementType())
9418 // Create VPADDL node.
9419 SelectionDAG &DAG = DCI.DAG;
9420 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9424 // Build operand list.
9425 SmallVector<SDValue, 8> Ops;
9426 Ops.push_back(DAG.getConstant(Intrinsic::arm_neon_vpaddls, dl,
9427 TLI.getPointerTy(DAG.getDataLayout())));
9429 // Input is the vector.
9432 // Get widened type and narrowed type.
9434 unsigned numElem = VT.getVectorNumElements();
9436 EVT inputLaneType = Vec.getValueType().getVectorElementType();
9437 switch (inputLaneType.getSimpleVT().SimpleTy) {
9438 case MVT::i8: widenType = MVT::getVectorVT(MVT::i16, numElem); break;
9439 case MVT::i16: widenType = MVT::getVectorVT(MVT::i32, numElem); break;
9440 case MVT::i32: widenType = MVT::getVectorVT(MVT::i64, numElem); break;
9442 llvm_unreachable("Invalid vector element type for padd optimization.");
9445 SDValue tmp = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, widenType, Ops);
9446 unsigned ExtOp = VT.bitsGT(tmp.getValueType()) ? ISD::ANY_EXTEND : ISD::TRUNCATE;
9447 return DAG.getNode(ExtOp, dl, VT, tmp);
9450 static SDValue findMUL_LOHI(SDValue V) {
9451 if (V->getOpcode() == ISD::UMUL_LOHI ||
9452 V->getOpcode() == ISD::SMUL_LOHI)
9457 static SDValue AddCombineTo64bitMLAL(SDNode *AddcNode,
9458 TargetLowering::DAGCombinerInfo &DCI,
9459 const ARMSubtarget *Subtarget) {
9461 // Look for multiply add opportunities.
9462 // The pattern is a ISD::UMUL_LOHI followed by two add nodes, where
9463 // each add node consumes a value from ISD::UMUL_LOHI and there is
9464 // a glue link from the first add to the second add.
9465 // If we find this pattern, we can replace the U/SMUL_LOHI, ADDC, and ADDE by
9466 // an S/UMLAL instruction.
9469 //   (ASCII diagram of the UMUL_LOHI / ADDC / ADDE triangle elided)
9475 assert(AddcNode->getOpcode() == ISD::ADDC && "Expect an ADDC");
9476 SDValue AddcOp0 = AddcNode->getOperand(0);
9477 SDValue AddcOp1 = AddcNode->getOperand(1);
9479 // Check if the two operands are from the same mul_lohi node.
9480 if (AddcOp0.getNode() == AddcOp1.getNode())
9483 assert(AddcNode->getNumValues() == 2 &&
9484 AddcNode->getValueType(0) == MVT::i32 &&
9485 "Expect ADDC with two result values. First: i32");
9487 // Check that we have a glued ADDC node.
9488 if (AddcNode->getValueType(1) != MVT::Glue)
9491 // Check that the ADDC adds the low result of the S/UMUL_LOHI.
9492 if (AddcOp0->getOpcode() != ISD::UMUL_LOHI &&
9493 AddcOp0->getOpcode() != ISD::SMUL_LOHI &&
9494 AddcOp1->getOpcode() != ISD::UMUL_LOHI &&
9495 AddcOp1->getOpcode() != ISD::SMUL_LOHI)
9498 // Look for the glued ADDE.
9499 SDNode* AddeNode = AddcNode->getGluedUser();
9503 // Make sure it is really an ADDE.
9504 if (AddeNode->getOpcode() != ISD::ADDE)
9507 assert(AddeNode->getNumOperands() == 3 &&
9508 AddeNode->getOperand(2).getValueType() == MVT::Glue &&
9509 "ADDE node has the wrong inputs");
9511 // Check for the triangle shape.
9512 SDValue AddeOp0 = AddeNode->getOperand(0);
9513 SDValue AddeOp1 = AddeNode->getOperand(1);
9515 // Make sure that the ADDE operands are not coming from the same node.
9516 if (AddeOp0.getNode() == AddeOp1.getNode())
9519 // Find the MUL_LOHI node walking up ADDE's operands.
9520 bool IsLeftOperandMUL = false;
9521 SDValue MULOp = findMUL_LOHI(AddeOp0);
9522 if (MULOp == SDValue())
9523 MULOp = findMUL_LOHI(AddeOp1);
9525 IsLeftOperandMUL = true;
9526 if (MULOp == SDValue())
9529 // Figure out the right opcode.
9530 unsigned Opc = MULOp->getOpcode();
9531 unsigned FinalOpc = (Opc == ISD::SMUL_LOHI) ? ARMISD::SMLAL : ARMISD::UMLAL;
9533 // Figure out the high and low input values to the MLAL node.
9534 SDValue* HiAdd = nullptr;
9535 SDValue* LoMul = nullptr;
9536 SDValue* LowAdd = nullptr;
9538 // Ensure that ADDE is from high result of ISD::SMUL_LOHI.
9539 if ((AddeOp0 != MULOp.getValue(1)) && (AddeOp1 != MULOp.getValue(1)))
9542 if (IsLeftOperandMUL)
9548 // Ensure that LoMul and LowAdd are taken from the correct ISD::SMUL_LOHI node
9549 // whose low result is fed to the ADDC we are checking.
9551 if (AddcOp0 == MULOp.getValue(0)) {
9555 if (AddcOp1 == MULOp.getValue(0)) {
9563 // Create the merged node.
9564 SelectionDAG &DAG = DCI.DAG;
9566 // Build operand list.
9567 SmallVector<SDValue, 8> Ops;
9568 Ops.push_back(LoMul->getOperand(0));
9569 Ops.push_back(LoMul->getOperand(1));
9570 Ops.push_back(*LowAdd);
9571 Ops.push_back(*HiAdd);
9573 SDValue MLALNode = DAG.getNode(FinalOpc, SDLoc(AddcNode),
9574 DAG.getVTList(MVT::i32, MVT::i32), Ops);
9576 // Replace the ADD nodes' uses with the MLAL node's values.
9577 SDValue HiMLALResult(MLALNode.getNode(), 1);
9578 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), HiMLALResult);
9580 SDValue LoMLALResult(MLALNode.getNode(), 0);
9581 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), LoMLALResult);
9583 // Return original node to notify the driver to stop replacing.
9584 SDValue resNode(AddcNode, 0);
9588 static SDValue AddCombineTo64bitUMAAL(SDNode *AddcNode,
9589 TargetLowering::DAGCombinerInfo &DCI,
9590 const ARMSubtarget *Subtarget) {
9591 // UMAAL is similar to UMLAL except that it adds two unsigned values.
9592 // While trying to combine for the other MLAL nodes, first search for the
9593 // chance to use UMAAL: check whether AddcNode uses another ADDC node that
9594 // can first be combined into a UMLAL. The other pattern, where AddcNode is
9595 // combined into a UMLAL and another ADDC is added on top, is handled in ISelDAGToDAG.
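// For reference (a sketch, assuming the usual ARM semantics): UMAAL computes
//   RdHi:RdLo = Rn * Rm + RdLo + RdHi
// i.e. a 32x32->64 multiply plus two independent 32-bit addends, so a UMLAL
// whose high accumulator input is zero, followed by an ADDC/ADDE adding one
// more 32-bit value, can be folded into a single UMAAL.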
9597 if (!Subtarget->hasV6Ops() || !Subtarget->hasDSP() ||
9598 (Subtarget->isThumb() && !Subtarget->hasThumb2()))
9599 return AddCombineTo64bitMLAL(AddcNode, DCI, Subtarget);
9601 SDNode *PrevAddc = nullptr;
9602 if (AddcNode->getOperand(0).getOpcode() == ISD::ADDC)
9603 PrevAddc = AddcNode->getOperand(0).getNode();
9604 else if (AddcNode->getOperand(1).getOpcode() == ISD::ADDC)
9605 PrevAddc = AddcNode->getOperand(1).getNode();
9607 // If there is no ADDC chain, just fall back to searching for any MLAL.
9608 if (PrevAddc == nullptr)
9609 return AddCombineTo64bitMLAL(AddcNode, DCI, Subtarget);
9611 // Try to convert the addc operand to an MLAL and if that fails try to
9612 // combine AddcNode.
9613 SDValue MLAL = AddCombineTo64bitMLAL(PrevAddc, DCI, Subtarget);
9614 if (MLAL != SDValue(PrevAddc, 0))
9615 return AddCombineTo64bitMLAL(AddcNode, DCI, Subtarget);
9617 // Find the converted UMAAL or quit if it doesn't exist.
9618 SDNode *UmlalNode = nullptr;
9620 if (AddcNode->getOperand(0).getOpcode() == ARMISD::UMLAL) {
9621 UmlalNode = AddcNode->getOperand(0).getNode();
9622 AddHi = AddcNode->getOperand(1);
9623 } else if (AddcNode->getOperand(1).getOpcode() == ARMISD::UMLAL) {
9624 UmlalNode = AddcNode->getOperand(1).getNode();
9625 AddHi = AddcNode->getOperand(0);
9630 // The ADDC should be glued to an ADDE node, which uses the same UMLAL as
9631 // the ADDC as well as Zero.
9632 auto *Zero = dyn_cast<ConstantSDNode>(UmlalNode->getOperand(3));
9634 if (!Zero || Zero->getZExtValue() != 0)
9637 // Check that we have a glued ADDC node.
9638 if (AddcNode->getValueType(1) != MVT::Glue)
9641 // Look for the glued ADDE.
9642 SDNode* AddeNode = AddcNode->getGluedUser();
9646 if ((AddeNode->getOperand(0).getNode() == Zero &&
9647 AddeNode->getOperand(1).getNode() == UmlalNode) ||
9648 (AddeNode->getOperand(0).getNode() == UmlalNode &&
9649 AddeNode->getOperand(1).getNode() == Zero)) {
9651 SelectionDAG &DAG = DCI.DAG;
9652 SDValue Ops[] = { UmlalNode->getOperand(0), UmlalNode->getOperand(1),
9653 UmlalNode->getOperand(2), AddHi };
9654 SDValue UMAAL = DAG.getNode(ARMISD::UMAAL, SDLoc(AddcNode),
9655 DAG.getVTList(MVT::i32, MVT::i32), Ops);
9657 // Replace the ADD nodes' uses with the UMAAL node's values.
9658 DAG.ReplaceAllUsesOfValueWith(SDValue(AddeNode, 0), SDValue(UMAAL.getNode(), 1));
9659 DAG.ReplaceAllUsesOfValueWith(SDValue(AddcNode, 0), SDValue(UMAAL.getNode(), 0));
9661 // Return original node to notify the driver to stop replacing.
9662 return SDValue(AddcNode, 0);
9667 /// PerformADDCCombine - Target-specific dag combine transform from
9668 /// ISD::ADDC, ISD::ADDE, and ISD::[SU]MUL_LOHI to ARMISD::[SU]MLAL, or from
9669 /// ISD::ADDC, ISD::ADDE, and ARMISD::UMLAL to ARMISD::UMAAL.
9670 static SDValue PerformADDCCombine(SDNode *N,
9671 TargetLowering::DAGCombinerInfo &DCI,
9672 const ARMSubtarget *Subtarget) {
9674 if (Subtarget->isThumb1Only()) return SDValue();
9676 // Only perform the checks after legalize when the pattern is available.
9677 if (DCI.isBeforeLegalize()) return SDValue();
9679 return AddCombineTo64bitUMAAL(N, DCI, Subtarget);
9682 /// PerformADDCombineWithOperands - Try DAG combinations for an ADD with
9683 /// operands N0 and N1. This is a helper for PerformADDCombine that is
9684 /// called with the default operands, and if that fails, with commuted
9685 /// operands.
9686 static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
9687 TargetLowering::DAGCombinerInfo &DCI,
9688 const ARMSubtarget *Subtarget){
9689 // Attempt to create vpadd for this add.
9690 if (SDValue Result = AddCombineToVPADD(N, N0, N1, DCI, Subtarget))
9693 // Attempt to create vpaddl for this add.
9694 if (SDValue Result = AddCombineVUZPToVPADDL(N, N0, N1, DCI, Subtarget))
9696 if (SDValue Result = AddCombineBUILD_VECTORToVPADDL(N, N0, N1, DCI,
9700 // fold (add (select cc, 0, c), x) -> (select cc, x, (add x, c))
9701 if (N0.getNode()->hasOneUse())
9702 if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
9707 /// PerformADDCombine - Target-specific dag combine xforms for ISD::ADD.
9709 static SDValue PerformADDCombine(SDNode *N,
9710 TargetLowering::DAGCombinerInfo &DCI,
9711 const ARMSubtarget *Subtarget) {
9712 SDValue N0 = N->getOperand(0);
9713 SDValue N1 = N->getOperand(1);
9715 // First try with the default operand order.
9716 if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
9719 // If that didn't work, try again with the operands commuted.
9720 return PerformADDCombineWithOperands(N, N1, N0, DCI, Subtarget);
9723 /// PerformSUBCombine - Target-specific dag combine xforms for ISD::SUB.
9725 static SDValue PerformSUBCombine(SDNode *N,
9726 TargetLowering::DAGCombinerInfo &DCI) {
9727 SDValue N0 = N->getOperand(0);
9728 SDValue N1 = N->getOperand(1);
9730 // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub x, c))
9731 if (N1.getNode()->hasOneUse())
9732 if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
9738 /// PerformVMULCombine
9739 /// Distribute (A + B) * C to (A * C) + (B * C) to take advantage of the
9740 /// special multiplier accumulator forwarding.
9746 // However, for (A + B) * (A + B), it is cheaper to compute the sum once and square it, so that case is not distributed.
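// Illustrative example (assuming VMLx accumulator forwarding): for
// (d0 + d1) * d2,
//   vmul d3, d0, d2
//   vmla d3, d1, d2
// is typically faster than
//   vadd d3, d0, d1
//   vmul d3, d3, d2
// because the multiply-accumulate result forwards directly into the dependent
// VMLx operation.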
9753 static SDValue PerformVMULCombine(SDNode *N,
9754 TargetLowering::DAGCombinerInfo &DCI,
9755 const ARMSubtarget *Subtarget) {
9756 if (!Subtarget->hasVMLxForwarding())
9759 SelectionDAG &DAG = DCI.DAG;
9760 SDValue N0 = N->getOperand(0);
9761 SDValue N1 = N->getOperand(1);
9762 unsigned Opcode = N0.getOpcode();
9763 if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
9764 Opcode != ISD::FADD && Opcode != ISD::FSUB) {
9765 Opcode = N1.getOpcode();
9766 if (Opcode != ISD::ADD && Opcode != ISD::SUB &&
9767 Opcode != ISD::FADD && Opcode != ISD::FSUB)
9775 EVT VT = N->getValueType(0);
9777 SDValue N00 = N0->getOperand(0);
9778 SDValue N01 = N0->getOperand(1);
9779 return DAG.getNode(Opcode, DL, VT,
9780 DAG.getNode(ISD::MUL, DL, VT, N00, N1),
9781 DAG.getNode(ISD::MUL, DL, VT, N01, N1));
9784 static SDValue PerformMULCombine(SDNode *N,
9785 TargetLowering::DAGCombinerInfo &DCI,
9786 const ARMSubtarget *Subtarget) {
9787 SelectionDAG &DAG = DCI.DAG;
9789 if (Subtarget->isThumb1Only())
9792 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
9795 EVT VT = N->getValueType(0);
9796 if (VT.is64BitVector() || VT.is128BitVector())
9797 return PerformVMULCombine(N, DCI, Subtarget);
9801 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
9805 int64_t MulAmt = C->getSExtValue();
9806 unsigned ShiftAmt = countTrailingZeros<uint64_t>(MulAmt);
9808 ShiftAmt = ShiftAmt & (32 - 1);
9809 SDValue V = N->getOperand(0);
9813 MulAmt >>= ShiftAmt;
9816 if (isPowerOf2_32(MulAmt - 1)) {
9817 // (mul x, 2^N + 1) => (add (shl x, N), x)
9818 Res = DAG.getNode(ISD::ADD, DL, VT,
9820 DAG.getNode(ISD::SHL, DL, VT,
9822 DAG.getConstant(Log2_32(MulAmt - 1), DL,
9824 } else if (isPowerOf2_32(MulAmt + 1)) {
9825 // (mul x, 2^N - 1) => (sub (shl x, N), x)
9826 Res = DAG.getNode(ISD::SUB, DL, VT,
9827 DAG.getNode(ISD::SHL, DL, VT,
9829 DAG.getConstant(Log2_32(MulAmt + 1), DL,
9835 uint64_t MulAmtAbs = -MulAmt;
9836 if (isPowerOf2_32(MulAmtAbs + 1)) {
9837 // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
9838 Res = DAG.getNode(ISD::SUB, DL, VT,
9840 DAG.getNode(ISD::SHL, DL, VT,
9842 DAG.getConstant(Log2_32(MulAmtAbs + 1), DL,
9844 } else if (isPowerOf2_32(MulAmtAbs - 1)) {
9845 // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
9846 Res = DAG.getNode(ISD::ADD, DL, VT,
9848 DAG.getNode(ISD::SHL, DL, VT,
9850 DAG.getConstant(Log2_32(MulAmtAbs - 1), DL,
9852 Res = DAG.getNode(ISD::SUB, DL, VT,
9853 DAG.getConstant(0, DL, MVT::i32), Res);
9860 Res = DAG.getNode(ISD::SHL, DL, VT,
9861 Res, DAG.getConstant(ShiftAmt, DL, MVT::i32));
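// Worked example (illustrative): mul x, 40. ShiftAmt is 3 (40 = 5 * 8) and the
// remaining factor 5 = 2^2 + 1, so the combine emits
//   t = (add (shl x, 2), x); Res = (shl t, 3)
// computing x * 40 with two shifts and one add instead of a multiply.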
9863 // Do not add new nodes to DAG combiner worklist.
9864 DCI.CombineTo(N, Res, false);
9868 static SDValue PerformANDCombine(SDNode *N,
9869 TargetLowering::DAGCombinerInfo &DCI,
9870 const ARMSubtarget *Subtarget) {
9872 // Attempt to use immediate-form VBIC
9873 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
9875 EVT VT = N->getValueType(0);
9876 SelectionDAG &DAG = DCI.DAG;
9878 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
9881 APInt SplatBits, SplatUndef;
9882 unsigned SplatBitSize;
9885 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
9886 if (SplatBitSize <= 64) {
9888 SDValue Val = isNEONModifiedImm((~SplatBits).getZExtValue(),
9889 SplatUndef.getZExtValue(), SplatBitSize,
9890 DAG, dl, VbicVT, VT.is128BitVector(),
9892 if (Val.getNode()) {
9894 DAG.getNode(ISD::BITCAST, dl, VbicVT, N->getOperand(0));
9895 SDValue Vbic = DAG.getNode(ARMISD::VBICIMM, dl, VbicVT, Input, Val);
9896 return DAG.getNode(ISD::BITCAST, dl, VT, Vbic);
9901 if (!Subtarget->isThumb1Only()) {
9902 // fold (and (select cc, -1, c), x) -> (select cc, x, (and x, c))
9903 if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
9910 /// PerformORCombine - Target-specific dag combine xforms for ISD::OR
9911 static SDValue PerformORCombine(SDNode *N,
9912 TargetLowering::DAGCombinerInfo &DCI,
9913 const ARMSubtarget *Subtarget) {
9914 // Attempt to use immediate-form VORR
9915 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(1));
9917 EVT VT = N->getValueType(0);
9918 SelectionDAG &DAG = DCI.DAG;
9920 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
9923 APInt SplatBits, SplatUndef;
9924 unsigned SplatBitSize;
9926 if (BVN && Subtarget->hasNEON() &&
9927 BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) {
9928 if (SplatBitSize <= 64) {
9930 SDValue Val = isNEONModifiedImm(SplatBits.getZExtValue(),
9931 SplatUndef.getZExtValue(), SplatBitSize,
9932 DAG, dl, VorrVT, VT.is128BitVector(),
9934 if (Val.getNode()) {
9936 DAG.getNode(ISD::BITCAST, dl, VorrVT, N->getOperand(0));
9937 SDValue Vorr = DAG.getNode(ARMISD::VORRIMM, dl, VorrVT, Input, Val);
9938 return DAG.getNode(ISD::BITCAST, dl, VT, Vorr);
9943 if (!Subtarget->isThumb1Only()) {
9944 // fold (or (select cc, 0, c), x) -> (select cc, x, (or x, c))
9945 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
9949 // The code below optimizes (or (and X, Y), Z).
9950 // The AND operand needs to have a single user to make these optimizations
9951 // profitable.
9952 SDValue N0 = N->getOperand(0);
9953 if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
9955 SDValue N1 = N->getOperand(1);
9957 // (or (and B, A), (and C, ~A)) => (VBSL A, B, C) when A is a constant.
9958 if (Subtarget->hasNEON() && N1.getOpcode() == ISD::AND && VT.isVector() &&
9959 DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
9961 unsigned SplatBitSize;
9964 APInt SplatBits0, SplatBits1;
9965 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(1));
9966 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(1));
9967 // Ensure that the second operands of both ANDs are constants
9968 if (BVN0 && BVN0->isConstantSplat(SplatBits0, SplatUndef, SplatBitSize,
9969 HasAnyUndefs) && !HasAnyUndefs) {
9970 if (BVN1 && BVN1->isConstantSplat(SplatBits1, SplatUndef, SplatBitSize,
9971 HasAnyUndefs) && !HasAnyUndefs) {
9972 // Ensure that the bit widths of the constants are the same and that
9973 // the splat arguments are logical inverses as per the pattern we
9974 // are trying to simplify.
9975 if (SplatBits0.getBitWidth() == SplatBits1.getBitWidth() &&
9976 SplatBits0 == ~SplatBits1) {
9977 // Canonicalize the vector type to make instruction selection simpler.
9979 EVT CanonicalVT = VT.is128BitVector() ? MVT::v4i32 : MVT::v2i32;
9980 SDValue Result = DAG.getNode(ARMISD::VBSL, dl, CanonicalVT,
9984 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
9990 // Try to use the ARM/Thumb2 BFI (bitfield insert) instruction when reasonable.
9993 // BFI is only available on V6T2+
9994 if (Subtarget->isThumb1Only() || !Subtarget->hasV6T2Ops())
9998 // 1) or (and A, mask), val => ARMbfi A, val, mask
9999 // iff (val & mask) == val
10001 // 2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
10002 // 2a) iff isBitFieldInvertedMask(mask) && isBitFieldInvertedMask(~mask2)
10003 // && mask == ~mask2
10004 // 2b) iff isBitFieldInvertedMask(~mask) && isBitFieldInvertedMask(mask2)
10005 // && ~mask == mask2
10006 // (i.e., copy a bitfield value into another bitfield of the same width)
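// Illustrative instance of pattern (1), assuming the usual BFI mask encoding
// (the mask operand has zeros in the bits being written):
//   (or (and A, 0xffff00ff), 0x00002a00)
// keeps every bit of A except bits [15:8] and inserts the constant 0x2a there;
// after shifting the value right by countTrailingZeros(~mask) = 8 it becomes
//   (ARMISD::BFI A, 0x2a, 0xffff00ff).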
10008 if (VT != MVT::i32)
10011 SDValue N00 = N0.getOperand(0);
10013 // The value and the mask need to be constants so we can verify this is
10014 // actually a bitfield set. If the mask is 0xffff, we can do better
10015 // via a movt instruction, so don't use BFI in that case.
10016 SDValue MaskOp = N0.getOperand(1);
10017 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(MaskOp);
10020 unsigned Mask = MaskC->getZExtValue();
10021 if (Mask == 0xffff)
10024 // Case (1): or (and A, mask), val => ARMbfi A, val, mask
10025 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
10027 unsigned Val = N1C->getZExtValue();
10028 if ((Val & ~Mask) != Val)
10031 if (ARM::isBitFieldInvertedMask(Mask)) {
10032 Val >>= countTrailingZeros(~Mask);
10034 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00,
10035 DAG.getConstant(Val, DL, MVT::i32),
10036 DAG.getConstant(Mask, DL, MVT::i32));
10038 // Do not add new nodes to DAG combiner worklist.
10039 DCI.CombineTo(N, Res, false);
10042 } else if (N1.getOpcode() == ISD::AND) {
10043 // case (2) or (and A, mask), (and B, mask2) => ARMbfi A, (lsr B, amt), mask
10044 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
10047 unsigned Mask2 = N11C->getZExtValue();
10049 // Mask and ~Mask2 (or reverse) must be equivalent for the BFI pattern to match.
10051 if (ARM::isBitFieldInvertedMask(Mask) &&
10052 (Mask == ~Mask2)) {
10053 // The pack halfword instruction works better for masks that fit it,
10054 // so use that when it's available.
10055 if (Subtarget->hasT2ExtractPack() &&
10056 (Mask == 0xffff || Mask == 0xffff0000))
10059 unsigned amt = countTrailingZeros(Mask2);
10060 Res = DAG.getNode(ISD::SRL, DL, VT, N1.getOperand(0),
10061 DAG.getConstant(amt, DL, MVT::i32));
10062 Res = DAG.getNode(ARMISD::BFI, DL, VT, N00, Res,
10063 DAG.getConstant(Mask, DL, MVT::i32));
10064 // Do not add new nodes to DAG combiner worklist.
10065 DCI.CombineTo(N, Res, false);
10067 } else if (ARM::isBitFieldInvertedMask(~Mask) &&
10068 (~Mask == Mask2)) {
10069 // The pack halfword instruction works better for masks that fit it,
10070 // so use that when it's available.
10071 if (Subtarget->hasT2ExtractPack() &&
10072 (Mask2 == 0xffff || Mask2 == 0xffff0000))
10075 unsigned lsb = countTrailingZeros(Mask);
10076 Res = DAG.getNode(ISD::SRL, DL, VT, N00,
10077 DAG.getConstant(lsb, DL, MVT::i32));
10078 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1.getOperand(0), Res,
10079 DAG.getConstant(Mask2, DL, MVT::i32));
10080 // Do not add new nodes to DAG combiner worklist.
10081 DCI.CombineTo(N, Res, false);
10086 if (DAG.MaskedValueIsZero(N1, MaskC->getAPIntValue()) &&
10087 N00.getOpcode() == ISD::SHL && isa<ConstantSDNode>(N00.getOperand(1)) &&
10088 ARM::isBitFieldInvertedMask(~Mask)) {
10089 // Case (3): or (and (shl A, #shamt), mask), B => ARMbfi B, A, ~mask
10090 // where lsb(mask) == #shamt and masked bits of B are known zero.
10091 SDValue ShAmt = N00.getOperand(1);
10092 unsigned ShAmtC = cast<ConstantSDNode>(ShAmt)->getZExtValue();
10093 unsigned LSB = countTrailingZeros(Mask);
10097 Res = DAG.getNode(ARMISD::BFI, DL, VT, N1, N00.getOperand(0),
10098 DAG.getConstant(~Mask, DL, MVT::i32));
10100 // Do not add new nodes to DAG combiner worklist.
10101 DCI.CombineTo(N, Res, false);
10107 static SDValue PerformXORCombine(SDNode *N,
10108 TargetLowering::DAGCombinerInfo &DCI,
10109 const ARMSubtarget *Subtarget) {
10110 EVT VT = N->getValueType(0);
10111 SelectionDAG &DAG = DCI.DAG;
10113 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
10116 if (!Subtarget->isThumb1Only()) {
10117 // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor x, c))
10118 if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
10125 // ParseBFI - given a BFI instruction in N, extract the "from" value (Rn) and return it,
10126 // and fill in FromMask and ToMask with (consecutive) bits in "from" to be extracted and
10127 // their position in "to" (Rd).
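// For example (illustrative): for (ARMISD::BFI A, B, 0xffff00ff), i.e. an
// 8-bit field written at bit 8, ToMask is 0x0000ff00 and FromMask is
// 0x000000ff; if B is itself (srl C, 8), the returned "from" value is C and
// FromMask is shifted up to 0x0000ff00.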
10128 static SDValue ParseBFI(SDNode *N, APInt &ToMask, APInt &FromMask) {
10129 assert(N->getOpcode() == ARMISD::BFI);
10131 SDValue From = N->getOperand(1);
10132 ToMask = ~cast<ConstantSDNode>(N->getOperand(2))->getAPIntValue();
10133 FromMask = APInt::getLowBitsSet(ToMask.getBitWidth(), ToMask.countPopulation());
10135 // If the Base came from a SHR #C, we can deduce that it is really testing bit
10136 // #C in the base of the SHR.
10137 if (From->getOpcode() == ISD::SRL &&
10138 isa<ConstantSDNode>(From->getOperand(1))) {
10139 APInt Shift = cast<ConstantSDNode>(From->getOperand(1))->getAPIntValue();
10140 assert(Shift.getLimitedValue() < 32 && "Shift too large!");
10141 FromMask <<= Shift.getLimitedValue(31);
10142 From = From->getOperand(0);
10148 // If A and B contain one contiguous set of bits, does A | B == A . B?
10150 // Neither A nor B may be zero.
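// For example (illustrative): A = 0x00000f00 and B = 0x000000f0 concatenate
// properly (A's lowest set bit sits directly above B's highest set bit),
// whereas A = 0x00000f00 and B = 0x0000000f leave a gap and do not.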
10151 static bool BitsProperlyConcatenate(const APInt &A, const APInt &B) {
10152 unsigned LastActiveBitInA = A.countTrailingZeros();
10153 unsigned FirstActiveBitInB = B.getBitWidth() - B.countLeadingZeros() - 1;
10154 return LastActiveBitInA - 1 == FirstActiveBitInB;
10157 static SDValue FindBFIToCombineWith(SDNode *N) {
10158 // We have a BFI in N. Follow a possible chain of BFIs and find a BFI it can
10159 // combine with, if one exists.
10160 APInt ToMask, FromMask;
10161 SDValue From = ParseBFI(N, ToMask, FromMask);
10162 SDValue To = N->getOperand(0);
10164 // Now check for a compatible BFI to merge with. We can pass through BFIs that
10165 // aren't compatible, but not if they set the same bit in their destination as
10166 // we do (or that of any BFI we're going to combine with).
10168 APInt CombinedToMask = ToMask;
10169 while (V.getOpcode() == ARMISD::BFI) {
10170 APInt NewToMask, NewFromMask;
10171 SDValue NewFrom = ParseBFI(V.getNode(), NewToMask, NewFromMask);
10172 if (NewFrom != From) {
10173 // This BFI has a different base. Keep going.
10174 CombinedToMask |= NewToMask;
10175 V = V.getOperand(0);
10179 // Do the written bits conflict with any we've seen so far?
10180 if ((NewToMask & CombinedToMask).getBoolValue())
10181 // Conflicting bits - bail out because going further is unsafe.
10184 // Are the new bits contiguous when combined with the old bits?
10185 if (BitsProperlyConcatenate(ToMask, NewToMask) &&
10186 BitsProperlyConcatenate(FromMask, NewFromMask))
10188 if (BitsProperlyConcatenate(NewToMask, ToMask) &&
10189 BitsProperlyConcatenate(NewFromMask, FromMask))
10192 // We've seen a write to some bits, so track it.
10193 CombinedToMask |= NewToMask;
10195 V = V.getOperand(0);
10201 static SDValue PerformBFICombine(SDNode *N,
10202 TargetLowering::DAGCombinerInfo &DCI) {
10203 SDValue N1 = N->getOperand(1);
10204 if (N1.getOpcode() == ISD::AND) {
10205 // (bfi A, (and B, Mask1), Mask2) -> (bfi A, B, Mask2) iff
10206 // the bits being cleared by the AND are not demanded by the BFI.
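// For example (illustrative): in (bfi A, (and B, 0xff), 0xffff00ff) the BFI
// only demands bits [7:0] of the inserted value (Width == 8), and the AND
// with 0xff clears nothing the BFI uses, so the AND can be dropped.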
10207 ConstantSDNode *N11C = dyn_cast<ConstantSDNode>(N1.getOperand(1));
10210 unsigned InvMask = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue();
10211 unsigned LSB = countTrailingZeros(~InvMask);
10212 unsigned Width = (32 - countLeadingZeros(~InvMask)) - LSB;
10213 assert(Width <
10214 static_cast<unsigned>(std::numeric_limits<unsigned>::digits) &&
10215 "undefined behavior");
10216 unsigned Mask = (1u << Width) - 1;
10217 unsigned Mask2 = N11C->getZExtValue();
10218 if ((Mask & (~Mask2)) == 0)
10219 return DCI.DAG.getNode(ARMISD::BFI, SDLoc(N), N->getValueType(0),
10220 N->getOperand(0), N1.getOperand(0),
10222 } else if (N->getOperand(0).getOpcode() == ARMISD::BFI) {
10223 // We have a BFI of a BFI. Walk up the BFI chain to see how long it goes.
10224 // Keep track of any consecutive bits set that all come from the same base
10225 // value. We can combine these together into a single BFI.
10226 SDValue CombineBFI = FindBFIToCombineWith(N);
10227 if (CombineBFI == SDValue())
10230 // We've found a BFI.
10231 APInt ToMask1, FromMask1;
10232 SDValue From1 = ParseBFI(N, ToMask1, FromMask1);
10234 APInt ToMask2, FromMask2;
10235 SDValue From2 = ParseBFI(CombineBFI.getNode(), ToMask2, FromMask2);
10236 assert(From1 == From2);
10239 // First, unlink CombineBFI.
10240 DCI.DAG.ReplaceAllUsesWith(CombineBFI, CombineBFI.getOperand(0));
10241 // Then create a new BFI, combining the two together.
10242 APInt NewFromMask = FromMask1 | FromMask2;
10243 APInt NewToMask = ToMask1 | ToMask2;
10245 EVT VT = N->getValueType(0);
10248 if (NewFromMask[0] == 0)
10249 From1 = DCI.DAG.getNode(
10250 ISD::SRL, dl, VT, From1,
10251 DCI.DAG.getConstant(NewFromMask.countTrailingZeros(), dl, VT));
10252 return DCI.DAG.getNode(ARMISD::BFI, dl, VT, N->getOperand(0), From1,
10253 DCI.DAG.getConstant(~NewToMask, dl, VT));
10258 /// PerformVMOVRRDCombine - Target-specific dag combine xforms for
10259 /// ARMISD::VMOVRRD.
10260 static SDValue PerformVMOVRRDCombine(SDNode *N,
10261 TargetLowering::DAGCombinerInfo &DCI,
10262 const ARMSubtarget *Subtarget) {
10263 // vmovrrd(vmovdrr x, y) -> x,y
10264 SDValue InDouble = N->getOperand(0);
10265 if (InDouble.getOpcode() == ARMISD::VMOVDRR && !Subtarget->isFPOnlySP())
10266 return DCI.CombineTo(N, InDouble.getOperand(0), InDouble.getOperand(1));
10268 // vmovrrd(load f64) -> (load i32), (load i32)
10269 SDNode *InNode = InDouble.getNode();
10270 if (ISD::isNormalLoad(InNode) && InNode->hasOneUse() &&
10271 InNode->getValueType(0) == MVT::f64 &&
10272 InNode->getOperand(1).getOpcode() == ISD::FrameIndex &&
10273 !cast<LoadSDNode>(InNode)->isVolatile()) {
10274 // TODO: Should this be done for non-FrameIndex operands?
10275 LoadSDNode *LD = cast<LoadSDNode>(InNode);
10277 SelectionDAG &DAG = DCI.DAG;
10279 SDValue BasePtr = LD->getBasePtr();
10281 DAG.getLoad(MVT::i32, DL, LD->getChain(), BasePtr, LD->getPointerInfo(),
10282 LD->getAlignment(), LD->getMemOperand()->getFlags());
10284 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
10285 DAG.getConstant(4, DL, MVT::i32));
10286 SDValue NewLD2 = DAG.getLoad(
10287 MVT::i32, DL, NewLD1.getValue(1), OffsetPtr, LD->getPointerInfo(),
10288 std::min(4U, LD->getAlignment() / 2), LD->getMemOperand()->getFlags());
10290 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), NewLD2.getValue(1));
10291 if (DCI.DAG.getDataLayout().isBigEndian())
10292 std::swap (NewLD1, NewLD2);
10293 SDValue Result = DCI.CombineTo(N, NewLD1, NewLD2);
10300 /// PerformVMOVDRRCombine - Target-specific dag combine xforms for
10301 /// ARMISD::VMOVDRR. This is also used for BUILD_VECTORs with 2 operands.
10302 static SDValue PerformVMOVDRRCombine(SDNode *N, SelectionDAG &DAG) {
10303 // N=vmovrrd(X); vmovdrr(N:0, N:1) -> bit_convert(X)
10304 SDValue Op0 = N->getOperand(0);
10305 SDValue Op1 = N->getOperand(1);
10306 if (Op0.getOpcode() == ISD::BITCAST)
10307 Op0 = Op0.getOperand(0);
10308 if (Op1.getOpcode() == ISD::BITCAST)
10309 Op1 = Op1.getOperand(0);
10310 if (Op0.getOpcode() == ARMISD::VMOVRRD &&
10311 Op0.getNode() == Op1.getNode() &&
10312 Op0.getResNo() == 0 && Op1.getResNo() == 1)
10313 return DAG.getNode(ISD::BITCAST, SDLoc(N),
10314 N->getValueType(0), Op0.getOperand(0));
10318 /// hasNormalLoadOperand - Check if any of the operands of a BUILD_VECTOR node
10319 /// are normal, non-volatile loads. If so, it is profitable to bitcast an
10320 /// i64 vector to have f64 elements, since the value can then be loaded
10321 /// directly into a VFP register.
10322 static bool hasNormalLoadOperand(SDNode *N) {
10323 unsigned NumElts = N->getValueType(0).getVectorNumElements();
10324 for (unsigned i = 0; i < NumElts; ++i) {
10325 SDNode *Elt = N->getOperand(i).getNode();
10326 if (ISD::isNormalLoad(Elt) && !cast<LoadSDNode>(Elt)->isVolatile())
10332 /// PerformBUILD_VECTORCombine - Target-specific dag combine xforms for
10333 /// ISD::BUILD_VECTOR.
10334 static SDValue PerformBUILD_VECTORCombine(SDNode *N,
10335 TargetLowering::DAGCombinerInfo &DCI,
10336 const ARMSubtarget *Subtarget) {
10337 // build_vector(N=ARMISD::VMOVRRD(X), N:1) -> bit_convert(X):
10338 // VMOVRRD is introduced when legalizing i64 types. It forces the i64 value
10339 // into a pair of GPRs, which is fine when the value is used as a scalar,
10340 // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
10341 SelectionDAG &DAG = DCI.DAG;
10342 if (N->getNumOperands() == 2)
10343 if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
10346 // Load i64 elements as f64 values so that type legalization does not split
10347 // them up into i32 values.
10348 EVT VT = N->getValueType(0);
10349 if (VT.getVectorElementType() != MVT::i64 || !hasNormalLoadOperand(N))
10352 SmallVector<SDValue, 8> Ops;
10353 unsigned NumElts = VT.getVectorNumElements();
10354 for (unsigned i = 0; i < NumElts; ++i) {
10355 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(i));
10357 // Make the DAGCombiner fold the bitcast.
10358 DCI.AddToWorklist(V.getNode());
10360 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, NumElts);
10361 SDValue BV = DAG.getBuildVector(FloatVT, dl, Ops);
10362 return DAG.getNode(ISD::BITCAST, dl, VT, BV);
10365 /// \brief Target-specific dag combine xforms for ARMISD::BUILD_VECTOR.
10367 PerformARMBUILD_VECTORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
10368 // ARMISD::BUILD_VECTOR is introduced when legalizing ISD::BUILD_VECTOR.
10369 // At that time, we may have inserted bitcasts from integer to float.
10370 // If these bitcasts have survived DAGCombine, change the lowering of this
10371 // BUILD_VECTOR into something more vector friendly, i.e., into something
10372 // that does not force the use of floating point types.
10374 // Make sure we can change the type of the vector.
10375 // This is possible iff:
10376 // 1. The vector is only used in a bitcast to an integer type. I.e.,
10377 // 1.1. Vector is used only once.
10378 // 1.2. Use is a bit convert to an integer type.
10379 // 2. The size of its operands is 32 bits (64-bit operands are not legal).
10380 EVT VT = N->getValueType(0);
10381 EVT EltVT = VT.getVectorElementType();
10383 // Check 1.1. and 2.
10384 if (EltVT.getSizeInBits() != 32 || !N->hasOneUse())
10387 // By construction, the input type must be float.
10388 assert(EltVT == MVT::f32 && "Unexpected type!");
10391 SDNode *Use = *N->use_begin();
10392 if (Use->getOpcode() != ISD::BITCAST ||
10393 Use->getValueType(0).isFloatingPoint())
10396 // Check profitability.
10397 // The model is: if more than half of the relevant operands are bitcast from
10398 // i32, turn the build_vector into a sequence of insert_vector_elt.
10399 // Relevant operands are everything that is not statically
10400 // (i.e., at compile time) bitcasted.
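// For example (illustrative): a v4f32 ARMISD::BUILD_VECTOR whose operands are
// all (bitcast i32 to f32) and whose only use is a bitcast back to v4i32 is
// rebuilt below as a chain of i32 INSERT_VECTOR_ELTs into a v4i32, followed by
// a single bitcast to the original type, avoiding the int-to-float copies.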
10401 unsigned NumOfBitCastedElts = 0;
10402 unsigned NumElts = VT.getVectorNumElements();
10403 unsigned NumOfRelevantElts = NumElts;
10404 for (unsigned Idx = 0; Idx < NumElts; ++Idx) {
10405 SDValue Elt = N->getOperand(Idx);
10406 if (Elt->getOpcode() == ISD::BITCAST) {
10407 // Assume only bit cast to i32 will go away.
10408 if (Elt->getOperand(0).getValueType() == MVT::i32)
10409 ++NumOfBitCastedElts;
10410 } else if (Elt.isUndef() || isa<ConstantSDNode>(Elt))
10411 // Constants are statically cast, thus do not count them as
10412 // relevant operands.
10413 --NumOfRelevantElts;
10416 // Check if more than half of the elements require a non-free bitcast.
10417 if (NumOfBitCastedElts <= NumOfRelevantElts / 2)
10420 SelectionDAG &DAG = DCI.DAG;
10421 // Create the new vector type.
10422 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
10423 // Check if the type is legal.
10424 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10425 if (!TLI.isTypeLegal(VecVT))
10429 // ARMISD::BUILD_VECTOR E1, E2, ..., EN.
10430 // => BITCAST INSERT_VECTOR_ELT
10431 // (INSERT_VECTOR_ELT (...), (BITCAST EN-1), N-1),
10432 // (BITCAST EN), N.
10433 SDValue Vec = DAG.getUNDEF(VecVT);
10435 for (unsigned Idx = 0 ; Idx < NumElts; ++Idx) {
10436 SDValue V = N->getOperand(Idx);
10439 if (V.getOpcode() == ISD::BITCAST &&
10440 V->getOperand(0).getValueType() == MVT::i32)
10441 // Fold obvious case.
10442 V = V.getOperand(0);
10444 V = DAG.getNode(ISD::BITCAST, SDLoc(V), MVT::i32, V);
10445 // Make the DAGCombiner fold the bitcasts.
10446 DCI.AddToWorklist(V.getNode());
10448 SDValue LaneIdx = DAG.getConstant(Idx, dl, MVT::i32);
10449 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecVT, Vec, V, LaneIdx);
10451 Vec = DAG.getNode(ISD::BITCAST, dl, VT, Vec);
10452 // Make the DAGCombiner fold the bitcasts.
10453 DCI.AddToWorklist(Vec.getNode());
10457 /// PerformInsertEltCombine - Target-specific dag combine xforms for
10458 /// ISD::INSERT_VECTOR_ELT.
10459 static SDValue PerformInsertEltCombine(SDNode *N,
10460 TargetLowering::DAGCombinerInfo &DCI) {
10461 // Bitcast an i64 load inserted into a vector to f64.
10462 // Otherwise, the i64 value will be legalized to a pair of i32 values.
10463 EVT VT = N->getValueType(0);
10464 SDNode *Elt = N->getOperand(1).getNode();
10465 if (VT.getVectorElementType() != MVT::i64 ||
10466 !ISD::isNormalLoad(Elt) || cast<LoadSDNode>(Elt)->isVolatile())
10469 SelectionDAG &DAG = DCI.DAG;
10471 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
10472 VT.getVectorNumElements());
10473 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, N->getOperand(0));
10474 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::f64, N->getOperand(1));
10475 // Make the DAGCombiner fold the bitcasts.
10476 DCI.AddToWorklist(Vec.getNode());
10477 DCI.AddToWorklist(V.getNode());
10478 SDValue InsElt = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, FloatVT,
10479 Vec, V, N->getOperand(2));
10480 return DAG.getNode(ISD::BITCAST, dl, VT, InsElt);
10483 /// PerformVECTOR_SHUFFLECombine - Target-specific dag combine xforms for
10484 /// ISD::VECTOR_SHUFFLE.
10485 static SDValue PerformVECTOR_SHUFFLECombine(SDNode *N, SelectionDAG &DAG) {
10486 // The LLVM shufflevector instruction does not require the shuffle mask
10487 // length to match the operand vector length, but ISD::VECTOR_SHUFFLE does
10488 // have that requirement. When translating to ISD::VECTOR_SHUFFLE, if the
10489 // operands do not match the mask length, they are extended by concatenating
10490 // them with undef vectors. That is probably the right thing for other
10491 // targets, but for NEON it is better to concatenate two double-register
10492 // size vector operands into a single quad-register size vector. Do that
10493 // transformation here:
10494 // shuffle(concat(v1, undef), concat(v2, undef)) ->
10495 // shuffle(concat(v1, v2), undef)
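// For example (illustrative): with v4i16 operands v1 and v2,
//   shuffle(concat(v1, undef), concat(v2, undef), <0,1,2,3,8,9,10,11>)
// becomes
//   shuffle(concat(v1, v2), undef, <0,1,2,3,4,5,6,7>)
// since lanes taken from the second concat (mask indices >= 8) now live in
// the upper half of the single concatenated operand.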
10496 SDValue Op0 = N->getOperand(0);
10497 SDValue Op1 = N->getOperand(1);
10498 if (Op0.getOpcode() != ISD::CONCAT_VECTORS ||
10499 Op1.getOpcode() != ISD::CONCAT_VECTORS ||
10500 Op0.getNumOperands() != 2 ||
10501 Op1.getNumOperands() != 2)
10503 SDValue Concat0Op1 = Op0.getOperand(1);
10504 SDValue Concat1Op1 = Op1.getOperand(1);
10505 if (!Concat0Op1.isUndef() || !Concat1Op1.isUndef())
10507 // Skip the transformation if any of the types are illegal.
10508 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10509 EVT VT = N->getValueType(0);
10510 if (!TLI.isTypeLegal(VT) ||
10511 !TLI.isTypeLegal(Concat0Op1.getValueType()) ||
10512 !TLI.isTypeLegal(Concat1Op1.getValueType()))
10515 SDValue NewConcat = DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(N), VT,
10516 Op0.getOperand(0), Op1.getOperand(0));
10517 // Translate the shuffle mask.
10518 SmallVector<int, 16> NewMask;
10519 unsigned NumElts = VT.getVectorNumElements();
10520 unsigned HalfElts = NumElts/2;
10521 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N);
10522 for (unsigned n = 0; n < NumElts; ++n) {
10523 int MaskElt = SVN->getMaskElt(n);
10525 if (MaskElt < (int)HalfElts)
10527 else if (MaskElt >= (int)NumElts && MaskElt < (int)(NumElts + HalfElts))
10528 NewElt = HalfElts + MaskElt - NumElts;
10529 NewMask.push_back(NewElt);
10531 return DAG.getVectorShuffle(VT, SDLoc(N), NewConcat,
10532 DAG.getUNDEF(VT), NewMask);
10535 /// CombineBaseUpdate - Target-specific DAG combine function for VLDDUP,
10536 /// NEON load/store intrinsics, and generic vector load/stores, to merge
10537 /// base address updates.
10538 /// For generic load/stores, the memory type is assumed to be a vector.
10539 /// The caller is assumed to have checked legality.
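/// For example (illustrative): a "vld1.32 {d16}, [r0]" whose address register
/// is separately advanced by 8 (the size of the access) can be rewritten as
/// the post-incrementing form "vld1.32 {d16}, [r0]!", folding the ADD into
/// the load.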
10540 static SDValue CombineBaseUpdate(SDNode *N,
10541 TargetLowering::DAGCombinerInfo &DCI) {
10542 SelectionDAG &DAG = DCI.DAG;
10543 const bool isIntrinsic = (N->getOpcode() == ISD::INTRINSIC_VOID ||
10544 N->getOpcode() == ISD::INTRINSIC_W_CHAIN);
10545 const bool isStore = N->getOpcode() == ISD::STORE;
10546 const unsigned AddrOpIdx = ((isIntrinsic || isStore) ? 2 : 1);
10547 SDValue Addr = N->getOperand(AddrOpIdx);
10548 MemSDNode *MemN = cast<MemSDNode>(N);
10551 // Search for a use of the address operand that is an increment.
10552 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(),
10553 UE = Addr.getNode()->use_end(); UI != UE; ++UI) {
10554 SDNode *User = *UI;
10555 if (User->getOpcode() != ISD::ADD ||
10556 UI.getUse().getResNo() != Addr.getResNo())
10559 // Check that the add is independent of the load/store. Otherwise, folding
10560 // it would create a cycle.
10561 if (User->isPredecessorOf(N) || N->isPredecessorOf(User))
10564 // Find the new opcode for the updating load/store.
10565 bool isLoadOp = true;
10566 bool isLaneOp = false;
10567 unsigned NewOpc = 0;
10568 unsigned NumVecs = 0;
10570 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
10572 default: llvm_unreachable("unexpected intrinsic for Neon base update");
10573 case Intrinsic::arm_neon_vld1: NewOpc = ARMISD::VLD1_UPD;
10574 NumVecs = 1; break;
10575 case Intrinsic::arm_neon_vld2: NewOpc = ARMISD::VLD2_UPD;
10576 NumVecs = 2; break;
10577 case Intrinsic::arm_neon_vld3: NewOpc = ARMISD::VLD3_UPD;
10578 NumVecs = 3; break;
10579 case Intrinsic::arm_neon_vld4: NewOpc = ARMISD::VLD4_UPD;
10580 NumVecs = 4; break;
10581 case Intrinsic::arm_neon_vld2lane: NewOpc = ARMISD::VLD2LN_UPD;
10582 NumVecs = 2; isLaneOp = true; break;
10583 case Intrinsic::arm_neon_vld3lane: NewOpc = ARMISD::VLD3LN_UPD;
10584 NumVecs = 3; isLaneOp = true; break;
10585 case Intrinsic::arm_neon_vld4lane: NewOpc = ARMISD::VLD4LN_UPD;
10586 NumVecs = 4; isLaneOp = true; break;
10587 case Intrinsic::arm_neon_vst1: NewOpc = ARMISD::VST1_UPD;
10588 NumVecs = 1; isLoadOp = false; break;
10589 case Intrinsic::arm_neon_vst2: NewOpc = ARMISD::VST2_UPD;
10590 NumVecs = 2; isLoadOp = false; break;
10591 case Intrinsic::arm_neon_vst3: NewOpc = ARMISD::VST3_UPD;
10592 NumVecs = 3; isLoadOp = false; break;
10593 case Intrinsic::arm_neon_vst4: NewOpc = ARMISD::VST4_UPD;
10594 NumVecs = 4; isLoadOp = false; break;
10595 case Intrinsic::arm_neon_vst2lane: NewOpc = ARMISD::VST2LN_UPD;
10596 NumVecs = 2; isLoadOp = false; isLaneOp = true; break;
10597 case Intrinsic::arm_neon_vst3lane: NewOpc = ARMISD::VST3LN_UPD;
10598 NumVecs = 3; isLoadOp = false; isLaneOp = true; break;
10599 case Intrinsic::arm_neon_vst4lane: NewOpc = ARMISD::VST4LN_UPD;
10600 NumVecs = 4; isLoadOp = false; isLaneOp = true; break;
10604 switch (N->getOpcode()) {
10605 default: llvm_unreachable("unexpected opcode for Neon base update");
10606 case ARMISD::VLD1DUP: NewOpc = ARMISD::VLD1DUP_UPD; NumVecs = 1; break;
10607 case ARMISD::VLD2DUP: NewOpc = ARMISD::VLD2DUP_UPD; NumVecs = 2; break;
10608 case ARMISD::VLD3DUP: NewOpc = ARMISD::VLD3DUP_UPD; NumVecs = 3; break;
10609 case ARMISD::VLD4DUP: NewOpc = ARMISD::VLD4DUP_UPD; NumVecs = 4; break;
10610 case ISD::LOAD: NewOpc = ARMISD::VLD1_UPD;
10611 NumVecs = 1; isLaneOp = false; break;
10612 case ISD::STORE: NewOpc = ARMISD::VST1_UPD;
10613 NumVecs = 1; isLaneOp = false; isLoadOp = false; break;
10617 // Find the size of memory referenced by the load/store.
10620 VecTy = N->getValueType(0);
10621 } else if (isIntrinsic) {
10622 VecTy = N->getOperand(AddrOpIdx+1).getValueType();
10624 assert(isStore && "Node has to be a load, a store, or an intrinsic!");
10625 VecTy = N->getOperand(1).getValueType();
10628 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8;
10630 NumBytes /= VecTy.getVectorNumElements();
10632 // If the increment is a constant, it must match the memory ref size.
10633 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 1 : 0);
10634 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) {
10635 uint64_t IncVal = CInc->getZExtValue();
10636 if (IncVal != NumBytes)
10638 } else if (NumBytes >= 3 * 16) {
10639 // VLD3/4 and VST3/4 for 128-bit vectors are implemented with two
10640 // separate instructions that make it harder to use a non-constant update.
10644 // OK, we found an ADD we can fold into the base update.
10645 // Now, create a _UPD node, taking care of not breaking alignment.
10647 EVT AlignedVecTy = VecTy;
10648 unsigned Alignment = MemN->getAlignment();
10650 // If this is a less-than-standard-aligned load/store, change the type to
10651 // match the standard alignment.
10652 // The alignment is overlooked when selecting _UPD variants; and it's
10653 // easier to introduce bitcasts here than fix that.
10654 // There are 3 ways to get to this base-update combine:
10655 // - intrinsics: they are assumed to be properly aligned (to the standard
10656 // alignment of the memory type), so we don't need to do anything.
10657 // - ARMISD::VLDx nodes: they are only generated from the aforementioned
10658 // intrinsics, so, likewise, there's nothing to do.
10659 // - generic load/store instructions: the alignment is specified as an
10660 // explicit operand, rather than implicitly as the standard alignment
10661 // of the memory type (like the intrinsics). We need to change the
10662 // memory type to match the explicit alignment. That way, we don't
10663 // generate non-standard-aligned ARMISD::VLDx nodes.
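// For example (illustrative): a generic v8i16 store with alignment 1 is
// rebuilt here as a v16i8 VST1_UPD (with a bitcast of the stored value), so
// the resulting node uses the standard alignment of its memory type.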
10664 if (isa<LSBaseSDNode>(N)) {
10665 if (Alignment == 0)
10667 if (Alignment < VecTy.getScalarSizeInBits() / 8) {
10668 MVT EltTy = MVT::getIntegerVT(Alignment * 8);
10669 assert(NumVecs == 1 && "Unexpected multi-element generic load/store.");
10670 assert(!isLaneOp && "Unexpected generic load/store lane.");
10671 unsigned NumElts = NumBytes / (EltTy.getSizeInBits() / 8);
10672 AlignedVecTy = MVT::getVectorVT(EltTy, NumElts);
10674 // Don't set an explicit alignment on regular load/stores that we want
10675 // to transform to VLD/VST 1_UPD nodes.
10676 // This matches the behavior of regular load/stores, which only get an
10677 // explicit alignment if the MMO alignment is larger than the standard
10678 // alignment of the memory type.
10679 // Intrinsics, however, always get an explicit alignment, set to the
10680 // alignment of the MMO.
10684 // Create the new updating load/store node.
10685 // First, create an SDVTList for the new updating node's results.
10687 unsigned NumResultVecs = (isLoadOp ? NumVecs : 0);
10689 for (n = 0; n < NumResultVecs; ++n)
10690 Tys[n] = AlignedVecTy;
10691 Tys[n++] = MVT::i32;
10692 Tys[n] = MVT::Other;
10693 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs+2));
10695 // Then, gather the new node's operands.
10696 SmallVector<SDValue, 8> Ops;
10697 Ops.push_back(N->getOperand(0)); // incoming chain
10698 Ops.push_back(N->getOperand(AddrOpIdx));
10699 Ops.push_back(Inc);
10701 if (StoreSDNode *StN = dyn_cast<StoreSDNode>(N)) {
10702 // Try to match the intrinsic's signature
10703 Ops.push_back(StN->getValue());
10705 // Loads (and of course intrinsics) match the intrinsics' signature,
10706 // so just add all but the alignment operand.
10707 for (unsigned i = AddrOpIdx + 1; i < N->getNumOperands() - 1; ++i)
10708 Ops.push_back(N->getOperand(i));
10711 // For all node types, the alignment operand is always the last one.
10712 Ops.push_back(DAG.getConstant(Alignment, dl, MVT::i32));
10714 // If this is a non-standard-aligned STORE, the penultimate operand is the
10715 // stored value. Bitcast it to the aligned type.
10716 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::STORE) {
10717 SDValue &StVal = Ops[Ops.size()-2];
10718 StVal = DAG.getNode(ISD::BITCAST, dl, AlignedVecTy, StVal);
10721 EVT LoadVT = isLaneOp ? VecTy.getVectorElementType() : AlignedVecTy;
10722 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, dl, SDTys, Ops, LoadVT,
10723 MemN->getMemOperand());
10725 // Update the uses.
10726 SmallVector<SDValue, 5> NewResults;
10727 for (unsigned i = 0; i < NumResultVecs; ++i)
10728 NewResults.push_back(SDValue(UpdN.getNode(), i));
10730 // If this is a non-standard-aligned LOAD, the first result is the loaded
10731 // value. Bitcast it to the expected result type.
10732 if (AlignedVecTy != VecTy && N->getOpcode() == ISD::LOAD) {
10733 SDValue &LdVal = NewResults[0];
10734 LdVal = DAG.getNode(ISD::BITCAST, dl, VecTy, LdVal);
10737 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs+1)); // chain
10738 DCI.CombineTo(N, NewResults);
10739 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs));
10746 static SDValue PerformVLDCombine(SDNode *N,
10747 TargetLowering::DAGCombinerInfo &DCI) {
10748 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
10751 return CombineBaseUpdate(N, DCI);
10754 /// CombineVLDDUP - For a VDUPLANE node N, check if its source operand is a
10755 /// vldN-lane (N > 1) intrinsic, and if all the other uses of that intrinsic
10756 /// are also VDUPLANEs. If so, combine them to a vldN-dup operation and
10757 /// return true.
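/// For example (illustrative): if both results of a vld2lane intrinsic are
/// used only by VDUPLANE nodes selecting the loaded lane, the whole group can
/// be rewritten as a single VLD2DUP, which loads one element per register and
/// replicates it across all lanes.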
10758 static bool CombineVLDDUP(SDNode *N, TargetLowering::DAGCombinerInfo &DCI) {
10759 SelectionDAG &DAG = DCI.DAG;
10760 EVT VT = N->getValueType(0);
10761 // vldN-dup instructions only support 64-bit vectors for N > 1.
10762 if (!VT.is64BitVector())
10765 // Check if the VDUPLANE operand is a vldN-dup intrinsic.
10766 SDNode *VLD = N->getOperand(0).getNode();
10767 if (VLD->getOpcode() != ISD::INTRINSIC_W_CHAIN)
10769 unsigned NumVecs = 0;
10770 unsigned NewOpc = 0;
10771 unsigned IntNo = cast<ConstantSDNode>(VLD->getOperand(1))->getZExtValue();
10772 if (IntNo == Intrinsic::arm_neon_vld2lane) {
10774 NewOpc = ARMISD::VLD2DUP;
10775 } else if (IntNo == Intrinsic::arm_neon_vld3lane) {
10777 NewOpc = ARMISD::VLD3DUP;
10778 } else if (IntNo == Intrinsic::arm_neon_vld4lane) {
10780 NewOpc = ARMISD::VLD4DUP;
10785 // First check that all the vldN-lane uses are VDUPLANEs and that the lane
10786 // numbers match the load.
10787 unsigned VLDLaneNo =
10788 cast<ConstantSDNode>(VLD->getOperand(NumVecs+3))->getZExtValue();
10789 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
10791 // Ignore uses of the chain result.
10792 if (UI.getUse().getResNo() == NumVecs)
10794 SDNode *User = *UI;
10795 if (User->getOpcode() != ARMISD::VDUPLANE ||
10796 VLDLaneNo != cast<ConstantSDNode>(User->getOperand(1))->getZExtValue())
10800 // Create the vldN-dup node.
10803 for (n = 0; n < NumVecs; ++n)
10805 Tys[n] = MVT::Other;
10806 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumVecs+1));
10807 SDValue Ops[] = { VLD->getOperand(0), VLD->getOperand(2) };
10808 MemIntrinsicSDNode *VLDMemInt = cast<MemIntrinsicSDNode>(VLD);
10809 SDValue VLDDup = DAG.getMemIntrinsicNode(NewOpc, SDLoc(VLD), SDTys,
10810 Ops, VLDMemInt->getMemoryVT(),
10811 VLDMemInt->getMemOperand());
10813 // Update the uses.
10814 for (SDNode::use_iterator UI = VLD->use_begin(), UE = VLD->use_end();
10816 unsigned ResNo = UI.getUse().getResNo();
10817 // Ignore uses of the chain result.
10818 if (ResNo == NumVecs)
10820 SDNode *User = *UI;
10821 DCI.CombineTo(User, SDValue(VLDDup.getNode(), ResNo));
10824 // Now the vldN-lane intrinsic is dead except for its chain result.
10825 // Update uses of the chain.
10826 std::vector<SDValue> VLDDupResults;
10827 for (unsigned n = 0; n < NumVecs; ++n)
10828 VLDDupResults.push_back(SDValue(VLDDup.getNode(), n));
10829 VLDDupResults.push_back(SDValue(VLDDup.getNode(), NumVecs));
10830 DCI.CombineTo(VLD, VLDDupResults);
10835 /// PerformVDUPLANECombine - Target-specific dag combine xforms for
10836 /// ARMISD::VDUPLANE.
10837 static SDValue PerformVDUPLANECombine(SDNode *N,
10838 TargetLowering::DAGCombinerInfo &DCI) {
10839 SDValue Op = N->getOperand(0);
10841 // If the source is a vldN-lane (N > 1) intrinsic, and all the other uses
10842 // of that intrinsic are also VDUPLANEs, combine them to a vldN-dup operation.
10843 if (CombineVLDDUP(N, DCI))
10844 return SDValue(N, 0);
10846 // If the source is already a VMOVIMM or VMVNIMM splat, the VDUPLANE is
10847 // redundant. Ignore bit_converts for now; element sizes are checked below.
10848 while (Op.getOpcode() == ISD::BITCAST)
10849 Op = Op.getOperand(0);
10850 if (Op.getOpcode() != ARMISD::VMOVIMM && Op.getOpcode() != ARMISD::VMVNIMM)
10853 // Make sure the VMOV element size is not bigger than the VDUPLANE elements.
10854 unsigned EltSize = Op.getScalarValueSizeInBits();
10855 // The canonical VMOV for a zero vector uses a 32-bit element size.
10856 unsigned Imm = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
10857 unsigned EltBits;
10858 if (ARM_AM::decodeNEONModImm(Imm, EltBits) == 0)
10859 EltSize = 8;
10860 EVT VT = N->getValueType(0);
10861 if (EltSize > VT.getScalarSizeInBits())
10864 return DCI.DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
10867 /// PerformVDUPCombine - Target-specific dag combine xforms for ARMISD::VDUP.
10868 static SDValue PerformVDUPCombine(SDNode *N,
10869 TargetLowering::DAGCombinerInfo &DCI) {
10870 SelectionDAG &DAG = DCI.DAG;
10871 SDValue Op = N->getOperand(0);
10873 // Match VDUP(LOAD) -> VLD1DUP.
10874 // We match this pattern here rather than waiting for isel because the
10875 // transform is only legal for unindexed loads.
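// For example (illustrative): (ARMISD::VDUP (load i32, [r0])) becomes a
// VLD1DUP memory node, which later selects to something like
// "vld1.32 {d16[]}, [r0]", loading the element once and replicating it into
// every lane of the destination vector.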
10876 LoadSDNode *LD = dyn_cast<LoadSDNode>(Op.getNode());
10877 if (LD && Op.hasOneUse() && LD->isUnindexed() &&
10878 LD->getMemoryVT() == N->getValueType(0).getVectorElementType()) {
10879 SDValue Ops[] = { LD->getOperand(0), LD->getOperand(1),
10880 DAG.getConstant(LD->getAlignment(), SDLoc(N), MVT::i32) };
10881 SDVTList SDTys = DAG.getVTList(N->getValueType(0), MVT::Other);
10882 SDValue VLDDup = DAG.getMemIntrinsicNode(ARMISD::VLD1DUP, SDLoc(N), SDTys,
10883 Ops, LD->getMemoryVT(),
10884 LD->getMemOperand());
10885 DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), VLDDup.getValue(1));
10892 static SDValue PerformLOADCombine(SDNode *N,
10893 TargetLowering::DAGCombinerInfo &DCI) {
10894 EVT VT = N->getValueType(0);
10896 // If this is a legal vector load, try to combine it into a VLD1_UPD.
10897 if (ISD::isNormalLoad(N) && VT.isVector() &&
10898 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
10899 return CombineBaseUpdate(N, DCI);
10904 /// PerformSTORECombine - Target-specific dag combine xforms for
10906 static SDValue PerformSTORECombine(SDNode *N,
10907 TargetLowering::DAGCombinerInfo &DCI) {
10908 StoreSDNode *St = cast<StoreSDNode>(N);
10909 if (St->isVolatile())
10912 // Optimize trunc store (of multiple scalars) to shuffle and store. First,
10913 // pack all of the elements in one place. Next, store to memory in fewer
10914 // chunks.
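// Worked example (illustrative): a truncating store of v4i32 to v4i8 is
// bitcast to v16i8, shuffled so that the low byte of each i32 lane lands in
// bytes 0..3, and then written with a single i32 store instead of four
// separate byte stores.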
10915 SDValue StVal = St->getValue();
10916 EVT VT = StVal.getValueType();
10917 if (St->isTruncatingStore() && VT.isVector()) {
10918 SelectionDAG &DAG = DCI.DAG;
10919 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
10920 EVT StVT = St->getMemoryVT();
10921 unsigned NumElems = VT.getVectorNumElements();
10922 assert(StVT != VT && "Cannot truncate to the same type");
10923 unsigned FromEltSz = VT.getScalarSizeInBits();
10924 unsigned ToEltSz = StVT.getScalarSizeInBits();
10926 // The From and To element sizes and the element count must be powers of two.
10927 if (!isPowerOf2_32(NumElems * FromEltSz * ToEltSz)) return SDValue();
10929 // We are going to use the original vector elt for storing.
10930 // Accumulated smaller vector elements must be a multiple of the store size.
10931 if (0 != (NumElems * FromEltSz) % ToEltSz) return SDValue();
10933 unsigned SizeRatio = FromEltSz / ToEltSz;
10934 assert(SizeRatio * NumElems * ToEltSz == VT.getSizeInBits());
10936 // Create a type on which we perform the shuffle.
10937 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(), StVT.getScalarType(),
10938 NumElems*SizeRatio);
10939 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
10942 SDValue WideVec = DAG.getNode(ISD::BITCAST, DL, WideVecVT, StVal);
10943 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
10944 for (unsigned i = 0; i < NumElems; ++i)
10945 ShuffleVec[i] = DAG.getDataLayout().isBigEndian()
10946 ? (i + 1) * SizeRatio - 1
10949 // Can't shuffle using an illegal type.
10950 if (!TLI.isTypeLegal(WideVecVT)) return SDValue();
10952 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, DL, WideVec,
10953 DAG.getUNDEF(WideVec.getValueType()),
10955 // At this point all of the data is stored at the bottom of the
10956 // register. We now need to store it to memory.
10958 // Find the largest store unit
10959 MVT StoreType = MVT::i8;
10960 for (MVT Tp : MVT::integer_valuetypes()) {
10961 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToEltSz)
10962 StoreType = Tp;
10963 }
10964 // Didn't find a legal store type.
10965 if (!TLI.isTypeLegal(StoreType))
10968 // Bitcast the original vector into a vector of store-size units
10969 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
10970 StoreType, VT.getSizeInBits()/EVT(StoreType).getSizeInBits());
10971 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
10972 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, DL, StoreVecVT, Shuff);
10973 SmallVector<SDValue, 8> Chains;
10974 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits() / 8, DL,
10975 TLI.getPointerTy(DAG.getDataLayout()));
10976 SDValue BasePtr = St->getBasePtr();
10978 // Perform one or more big stores into memory.
10979 unsigned E = (ToEltSz*NumElems)/StoreType.getSizeInBits();
10980 for (unsigned I = 0; I < E; I++) {
10981 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
10982 StoreType, ShuffWide,
10983 DAG.getIntPtrConstant(I, DL));
10984 SDValue Ch = DAG.getStore(St->getChain(), DL, SubVec, BasePtr,
10985 St->getPointerInfo(), St->getAlignment(),
10986 St->getMemOperand()->getFlags());
10987 BasePtr = DAG.getNode(ISD::ADD, DL, BasePtr.getValueType(), BasePtr,
10989 Chains.push_back(Ch);
10991 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
10994 if (!ISD::isNormalStore(St))
10997 // Split a store of a VMOVDRR into two integer stores to avoid mixing NEON and
10998 // ARM stores of arguments in the same cache line.
10999 if (StVal.getNode()->getOpcode() == ARMISD::VMOVDRR &&
11000 StVal.getNode()->hasOneUse()) {
11001 SelectionDAG &DAG = DCI.DAG;
11002 bool isBigEndian = DAG.getDataLayout().isBigEndian();
11004 SDValue BasePtr = St->getBasePtr();
11005 SDValue NewST1 = DAG.getStore(
11006 St->getChain(), DL, StVal.getNode()->getOperand(isBigEndian ? 1 : 0),
11007 BasePtr, St->getPointerInfo(), St->getAlignment(),
11008 St->getMemOperand()->getFlags());
11010 SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
11011 DAG.getConstant(4, DL, MVT::i32));
11012 return DAG.getStore(NewST1.getValue(0), DL,
11013 StVal.getNode()->getOperand(isBigEndian ? 0 : 1),
11014 OffsetPtr, St->getPointerInfo(),
11015 std::min(4U, St->getAlignment() / 2),
11016 St->getMemOperand()->getFlags());
11019 if (StVal.getValueType() == MVT::i64 &&
11020 StVal.getNode()->getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
11022 // Bitcast an i64 store extracted from a vector to f64.
11023 // Otherwise, the i64 value will be legalized to a pair of i32 values.
11024 SelectionDAG &DAG = DCI.DAG;
11026 SDValue IntVec = StVal.getOperand(0);
11027 EVT FloatVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64,
11028 IntVec.getValueType().getVectorNumElements());
11029 SDValue Vec = DAG.getNode(ISD::BITCAST, dl, FloatVT, IntVec);
11030 SDValue ExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
11031 Vec, StVal.getOperand(1));
11033 SDValue V = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ExtElt);
11034 // Make the DAGCombiner fold the bitcasts.
11035 DCI.AddToWorklist(Vec.getNode());
11036 DCI.AddToWorklist(ExtElt.getNode());
11037 DCI.AddToWorklist(V.getNode());
11038 return DAG.getStore(St->getChain(), dl, V, St->getBasePtr(),
11039 St->getPointerInfo(), St->getAlignment(),
11040 St->getMemOperand()->getFlags(), St->getAAInfo());
11043 // If this is a legal vector store, try to combine it into a VST1_UPD.
11044 if (ISD::isNormalStore(N) && VT.isVector() &&
11045 DCI.DAG.getTargetLoweringInfo().isTypeLegal(VT))
11046 return CombineBaseUpdate(N, DCI);
11051 /// PerformVCVTCombine - VCVT (floating-point to fixed-point, Advanced SIMD)
11052 /// can replace combinations of VMUL and VCVT (floating-point to integer)
11053 /// when the VMUL has a constant operand that is a power of 2.
11055 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
11056 /// vmul.f32 d16, d17, d16
11057 /// vcvt.s32.f32 d16, d16
11058 /// becomes:
11059 /// vcvt.s32.f32 d16, d16, #3
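/// For reference: the splatted multiplier 8.0 == 2^3 is recovered as C == 3
/// (via getConstantFPSplatPow2ToLog2Int below) and becomes the "#3"
/// fractional-bits operand of the fixed-point conversion.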
11060 static SDValue PerformVCVTCombine(SDNode *N, SelectionDAG &DAG,
11061 const ARMSubtarget *Subtarget) {
11062 if (!Subtarget->hasNEON())
11065 SDValue Op = N->getOperand(0);
11066 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() ||
11067 Op.getOpcode() != ISD::FMUL)
11070 SDValue ConstVec = Op->getOperand(1);
11071 if (!isa<BuildVectorSDNode>(ConstVec))
11074 MVT FloatTy = Op.getSimpleValueType().getVectorElementType();
11075 uint32_t FloatBits = FloatTy.getSizeInBits();
11076 MVT IntTy = N->getSimpleValueType(0).getVectorElementType();
11077 uint32_t IntBits = IntTy.getSizeInBits();
11078 unsigned NumLanes = Op.getValueType().getVectorNumElements();
11079 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
11080 // These instructions only exist converting from f32 to i32. We can handle
11081 // smaller integers by generating an extra truncate, but larger ones would
11082 // be lossy. We also can't handle more than 4 lanes, since these instructions
11083 // only support v2i32/v4i32 types.
11087 BitVector UndefElements;
11088 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
11089 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
11090 if (C == -1 || C == 0 || C > 32)
11094 bool isSigned = N->getOpcode() == ISD::FP_TO_SINT;
11095 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfp2fxs :
11096 Intrinsic::arm_neon_vcvtfp2fxu;
11097 SDValue FixConv = DAG.getNode(
11098 ISD::INTRINSIC_WO_CHAIN, dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
11099 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32), Op->getOperand(0),
11100 DAG.getConstant(C, dl, MVT::i32));
11102 if (IntBits < FloatBits)
11103 FixConv = DAG.getNode(ISD::TRUNCATE, dl, N->getValueType(0), FixConv);
11108 /// PerformVDIVCombine - VCVT (fixed-point to floating-point, Advanced SIMD)
11109 /// can replace combinations of VCVT (integer to floating-point) and VDIV
11110 /// when the VDIV has a constant operand that is a power of 2.
11112 /// Example (assume d17 = <float 8.000000e+00, float 8.000000e+00>):
11113 /// vcvt.f32.s32 d16, d16
11114 /// vdiv.f32 d16, d17, d16
11115 /// becomes:
11116 /// vcvt.f32.s32 d16, d16, #3
11117 static SDValue PerformVDIVCombine(SDNode *N, SelectionDAG &DAG,
11118 const ARMSubtarget *Subtarget) {
11119 if (!Subtarget->hasNEON())
11122 SDValue Op = N->getOperand(0);
11123 unsigned OpOpcode = Op.getNode()->getOpcode();
11124 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple() ||
11125 (OpOpcode != ISD::SINT_TO_FP && OpOpcode != ISD::UINT_TO_FP))
11128 SDValue ConstVec = N->getOperand(1);
11129 if (!isa<BuildVectorSDNode>(ConstVec))
11132 MVT FloatTy = N->getSimpleValueType(0).getVectorElementType();
11133 uint32_t FloatBits = FloatTy.getSizeInBits();
11134 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType();
11135 uint32_t IntBits = IntTy.getSizeInBits();
11136 unsigned NumLanes = Op.getValueType().getVectorNumElements();
11137 if (FloatBits != 32 || IntBits > 32 || NumLanes > 4) {
11138 // These instructions only exist converting from i32 to f32. We can handle
11139 // smaller integers by generating an extra extend, but larger ones would
11140 // be lossy. We also can't handle more than 4 lanes, since these instructions
11141 // only support v2i32/v4i32 types.
11145 BitVector UndefElements;
11146 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec);
11147 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, 33);
11148 if (C == -1 || C == 0 || C > 32)
11152 bool isSigned = OpOpcode == ISD::SINT_TO_FP;
11153 SDValue ConvInput = Op.getOperand(0);
11154 if (IntBits < FloatBits)
11155 ConvInput = DAG.getNode(isSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
11156 dl, NumLanes == 2 ? MVT::v2i32 : MVT::v4i32,
11159 unsigned IntrinsicOpcode = isSigned ? Intrinsic::arm_neon_vcvtfxs2fp :
11160 Intrinsic::arm_neon_vcvtfxu2fp;
11161 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl,
11163 DAG.getConstant(IntrinsicOpcode, dl, MVT::i32),
11164 ConvInput, DAG.getConstant(C, dl, MVT::i32));
11167 /// getVShiftImm - Check if this is a valid build_vector for the immediate
11168 /// operand of a vector shift operation, where all the elements of the
11169 /// build_vector must have the same constant integer value.
11170 static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
11171 // Ignore bit_converts.
11172 while (Op.getOpcode() == ISD::BITCAST)
11173 Op = Op.getOperand(0);
11174 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
11175 APInt SplatBits, SplatUndef;
11176 unsigned SplatBitSize;
11178 if (! BVN || ! BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
11179 HasAnyUndefs, ElementBits) ||
11180 SplatBitSize > ElementBits)
11182 Cnt = SplatBits.getSExtValue();
11186 /// isVShiftLImm - Check if this is a valid build_vector for the immediate
11187 /// operand of a vector shift left operation. That value must be in the range:
11188 /// 0 <= Value < ElementBits for a left shift; or
11189 /// 0 <= Value <= ElementBits for a long left shift.
11190 static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
11191 assert(VT.isVector() && "vector shift count is not a vector type");
11192 int64_t ElementBits = VT.getScalarSizeInBits();
11193 if (! getVShiftImm(Op, ElementBits, Cnt))
11195 return (Cnt >= 0 && (isLong ? Cnt-1 : Cnt) < ElementBits);
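// Illustrative example (not taken from the source): for VT = v4i32, a
// splatted build_vector of 7 yields Cnt = 7, which satisfies 0 <= 7 < 32,
// so the shift can be lowered to an immediate VSHL by #7.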
11198 /// isVShiftRImm - Check if this is a valid build_vector for the immediate
11199 /// operand of a vector shift right operation. For a shift opcode, the value
11200 /// is positive, but for an intrinsic the count must be negative. The
11201 /// absolute value must be in the range:
11202 /// 1 <= |Value| <= ElementBits for a right shift; or
11203 /// 1 <= |Value| <= ElementBits/2 for a narrow right shift.
11204 static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, bool isIntrinsic,
11206 assert(VT.isVector() && "vector shift count is not a vector type");
11207 int64_t ElementBits = VT.getScalarSizeInBits();
11208 if (! getVShiftImm(Op, ElementBits, Cnt))
11211 return (Cnt >= 1 && Cnt <= (isNarrow ? ElementBits/2 : ElementBits));
11212 if (Cnt >= -(isNarrow ? ElementBits/2 : ElementBits) && Cnt <= -1) {
11219 /// PerformIntrinsicCombine - ARM-specific DAG combining for intrinsics.
11220 static SDValue PerformIntrinsicCombine(SDNode *N, SelectionDAG &DAG) {
11221 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
11224 // Don't do anything for most intrinsics.
11227 // Vector shifts: check for immediate versions and lower them.
11228 // Note: This is done during DAG combining instead of DAG legalizing because
11229 // the build_vectors for 64-bit vector element shift counts are generally
11230 // not legal, and it is hard to see their values after they get legalized to
11231 // loads from a constant pool.
11232 case Intrinsic::arm_neon_vshifts:
11233 case Intrinsic::arm_neon_vshiftu:
11234 case Intrinsic::arm_neon_vrshifts:
11235 case Intrinsic::arm_neon_vrshiftu:
11236 case Intrinsic::arm_neon_vrshiftn:
11237 case Intrinsic::arm_neon_vqshifts:
11238 case Intrinsic::arm_neon_vqshiftu:
11239 case Intrinsic::arm_neon_vqshiftsu:
11240 case Intrinsic::arm_neon_vqshiftns:
11241 case Intrinsic::arm_neon_vqshiftnu:
11242 case Intrinsic::arm_neon_vqshiftnsu:
11243 case Intrinsic::arm_neon_vqrshiftns:
11244 case Intrinsic::arm_neon_vqrshiftnu:
11245 case Intrinsic::arm_neon_vqrshiftnsu: {
11246 EVT VT = N->getOperand(1).getValueType();
11248 unsigned VShiftOpc = 0;
11251 case Intrinsic::arm_neon_vshifts:
11252 case Intrinsic::arm_neon_vshiftu:
11253 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt)) {
11254 VShiftOpc = ARMISD::VSHL;
11257 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt)) {
11258 VShiftOpc = (IntNo == Intrinsic::arm_neon_vshifts ?
11259 ARMISD::VSHRs : ARMISD::VSHRu);
11264 case Intrinsic::arm_neon_vrshifts:
11265 case Intrinsic::arm_neon_vrshiftu:
11266 if (isVShiftRImm(N->getOperand(2), VT, false, true, Cnt))
11270 case Intrinsic::arm_neon_vqshifts:
11271 case Intrinsic::arm_neon_vqshiftu:
11272 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
11276 case Intrinsic::arm_neon_vqshiftsu:
11277 if (isVShiftLImm(N->getOperand(2), VT, false, Cnt))
11279 llvm_unreachable("invalid shift count for vqshlu intrinsic");
11281 case Intrinsic::arm_neon_vrshiftn:
11282 case Intrinsic::arm_neon_vqshiftns:
11283 case Intrinsic::arm_neon_vqshiftnu:
11284 case Intrinsic::arm_neon_vqshiftnsu:
11285 case Intrinsic::arm_neon_vqrshiftns:
11286 case Intrinsic::arm_neon_vqrshiftnu:
11287 case Intrinsic::arm_neon_vqrshiftnsu:
11288 // Narrowing shifts require an immediate right shift.
11289 if (isVShiftRImm(N->getOperand(2), VT, true, true, Cnt))
11291 llvm_unreachable("invalid shift count for narrowing vector shift "
11295 llvm_unreachable("unhandled vector shift");
11299 case Intrinsic::arm_neon_vshifts:
11300 case Intrinsic::arm_neon_vshiftu:
11301 // Opcode already set above.
11303 case Intrinsic::arm_neon_vrshifts:
11304 VShiftOpc = ARMISD::VRSHRs; break;
11305 case Intrinsic::arm_neon_vrshiftu:
11306 VShiftOpc = ARMISD::VRSHRu; break;
11307 case Intrinsic::arm_neon_vrshiftn:
11308 VShiftOpc = ARMISD::VRSHRN; break;
11309 case Intrinsic::arm_neon_vqshifts:
11310 VShiftOpc = ARMISD::VQSHLs; break;
11311 case Intrinsic::arm_neon_vqshiftu:
11312 VShiftOpc = ARMISD::VQSHLu; break;
11313 case Intrinsic::arm_neon_vqshiftsu:
11314 VShiftOpc = ARMISD::VQSHLsu; break;
11315 case Intrinsic::arm_neon_vqshiftns:
11316 VShiftOpc = ARMISD::VQSHRNs; break;
11317 case Intrinsic::arm_neon_vqshiftnu:
11318 VShiftOpc = ARMISD::VQSHRNu; break;
11319 case Intrinsic::arm_neon_vqshiftnsu:
11320 VShiftOpc = ARMISD::VQSHRNsu; break;
11321 case Intrinsic::arm_neon_vqrshiftns:
11322 VShiftOpc = ARMISD::VQRSHRNs; break;
11323 case Intrinsic::arm_neon_vqrshiftnu:
11324 VShiftOpc = ARMISD::VQRSHRNu; break;
11325 case Intrinsic::arm_neon_vqrshiftnsu:
11326 VShiftOpc = ARMISD::VQRSHRNsu; break;
11330 return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
11331 N->getOperand(1), DAG.getConstant(Cnt, dl, MVT::i32));
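// Hedged example for the cases above: llvm.arm.neon.vshifts(x, splat(3)) on
// v4i32 becomes (ARMISD::VSHL x, 3), while a splat of -3 would have matched
// the isVShiftRImm path instead and become (ARMISD::VSHRs x, 3).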
11334 case Intrinsic::arm_neon_vshiftins: {
11335 EVT VT = N->getOperand(1).getValueType();
11337 unsigned VShiftOpc = 0;
11339 if (isVShiftLImm(N->getOperand(3), VT, false, Cnt))
11340 VShiftOpc = ARMISD::VSLI;
11341 else if (isVShiftRImm(N->getOperand(3), VT, false, true, Cnt))
11342 VShiftOpc = ARMISD::VSRI;
11344 llvm_unreachable("invalid shift count for vsli/vsri intrinsic");
11348 return DAG.getNode(VShiftOpc, dl, N->getValueType(0),
11349 N->getOperand(1), N->getOperand(2),
11350 DAG.getConstant(Cnt, dl, MVT::i32));
11353 case Intrinsic::arm_neon_vqrshifts:
11354 case Intrinsic::arm_neon_vqrshiftu:
11355 // No immediate versions of these to check for.
11362 /// PerformShiftCombine - Checks for immediate versions of vector shifts and
11363 /// lowers them. As with the vector shift intrinsics, this is done during DAG
11364 /// combining instead of DAG legalizing because the build_vectors for 64-bit
11365 /// vector element shift counts are generally not legal, and it is hard to see
11366 /// their values after they get legalized to loads from a constant pool.
11367 static SDValue PerformShiftCombine(SDNode *N, SelectionDAG &DAG,
11368 const ARMSubtarget *ST) {
11369 EVT VT = N->getValueType(0);
11370 if (N->getOpcode() == ISD::SRL && VT == MVT::i32 && ST->hasV6Ops()) {
11371 // Canonicalize (srl (bswap x), 16) to (rotr (bswap x), 16) if the high
11372 // 16-bits of x is zero. This optimizes rev + lsr 16 to rev16.
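// Illustrative values: for x = 0x0000AABB, both (srl (bswap x), 16) and
// (rotr (bswap x), 16) produce 0x0000BBAA, and the rotr form selects to a
// single rev16 instead of rev followed by lsr #16.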
11373 SDValue N1 = N->getOperand(1);
11374 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) {
11375 SDValue N0 = N->getOperand(0);
11376 if (C->getZExtValue() == 16 && N0.getOpcode() == ISD::BSWAP &&
11377 DAG.MaskedValueIsZero(N0.getOperand(0),
11378 APInt::getHighBitsSet(32, 16)))
11379 return DAG.getNode(ISD::ROTR, SDLoc(N), VT, N0, N1);
11383 // Nothing to be done for scalar shifts.
11384 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11385 if (!VT.isVector() || !TLI.isTypeLegal(VT))
11388 assert(ST->hasNEON() && "unexpected vector shift");
11391 switch (N->getOpcode()) {
11392 default: llvm_unreachable("unexpected shift opcode");
11395 if (isVShiftLImm(N->getOperand(1), VT, false, Cnt)) {
11397 return DAG.getNode(ARMISD::VSHL, dl, VT, N->getOperand(0),
11398 DAG.getConstant(Cnt, dl, MVT::i32));
11404 if (isVShiftRImm(N->getOperand(1), VT, false, false, Cnt)) {
11405 unsigned VShiftOpc = (N->getOpcode() == ISD::SRA ?
11406 ARMISD::VSHRs : ARMISD::VSHRu);
11408 return DAG.getNode(VShiftOpc, dl, VT, N->getOperand(0),
11409 DAG.getConstant(Cnt, dl, MVT::i32));
11415 /// PerformExtendCombine - Target-specific DAG combining for ISD::SIGN_EXTEND,
11416 /// ISD::ZERO_EXTEND, and ISD::ANY_EXTEND.
11417 static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
11418 const ARMSubtarget *ST) {
11419 SDValue N0 = N->getOperand(0);
11421 // Check for sign- and zero-extensions of vector extract operations of 8-
11422 // and 16-bit vector elements. NEON supports these directly. They are
11423 // handled during DAG combining because type legalization will promote them
11424 // to 32-bit types and it is messy to recognize the operations after that.
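// Hedged sketch: (sext (extract_vector_elt v8i16:v, 3)) to i32 becomes
// ARMISD::VGETLANEs, which can select to something like "vmov.s16 r0, d0[3]"
// (the register assignments here are illustrative only).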
11425 if (ST->hasNEON() && N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
11426 SDValue Vec = N0.getOperand(0);
11427 SDValue Lane = N0.getOperand(1);
11428 EVT VT = N->getValueType(0);
11429 EVT EltVT = N0.getValueType();
11430 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11432 if (VT == MVT::i32 &&
11433 (EltVT == MVT::i8 || EltVT == MVT::i16) &&
11434 TLI.isTypeLegal(Vec.getValueType()) &&
11435 isa<ConstantSDNode>(Lane)) {
11438 switch (N->getOpcode()) {
11439 default: llvm_unreachable("unexpected opcode");
11440 case ISD::SIGN_EXTEND:
11441 Opc = ARMISD::VGETLANEs;
11443 case ISD::ZERO_EXTEND:
11444 case ISD::ANY_EXTEND:
11445 Opc = ARMISD::VGETLANEu;
11448 return DAG.getNode(Opc, SDLoc(N), VT, Vec, Lane);
11455 static void computeKnownBits(SelectionDAG &DAG, SDValue Op, APInt &KnownZero,
11457 if (Op.getOpcode() == ARMISD::BFI) {
11458 // Conservatively, we can recurse down the first operand
11459 // and just mask out all affected bits.
11460 computeKnownBits(DAG, Op.getOperand(0), KnownZero, KnownOne);
11462 // The operand to BFI is already a mask suitable for removing the bits it sets.
11464 ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
11465 const APInt &Mask = CI->getAPIntValue();
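// Sketch of the intent: this inverted-mask operand has 0s exactly where the
// BFI writes new bits, so masking the recursively computed known bits with it
// conservatively forgets everything the insertion may have changed.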
11470 if (Op.getOpcode() == ARMISD::CMOV) {
11471 APInt KZ2(KnownZero.getBitWidth(), 0);
11472 APInt KO2(KnownOne.getBitWidth(), 0);
11473 computeKnownBits(DAG, Op.getOperand(1), KnownZero, KnownOne);
11474 computeKnownBits(DAG, Op.getOperand(2), KZ2, KO2);
11480 return DAG.computeKnownBits(Op, KnownZero, KnownOne);
11483 SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const {
11484 // If we have a CMOV, OR and AND combination such as:
11489 // * CN is a single bit;
11490 // * All bits covered by CM are known zero in y
11492 // Then we can convert this into a sequence of BFI instructions. This will
11493 // always be a win if CM is a single bit, will always be no worse than the
11494 // TST&OR sequence if CM is two bits, and for thumb will be no worse if CM is
11495 // three bits (due to the extra IT instruction).
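// Hedged example of the rewrite (register names are illustrative): for
//   r = (x & 4) ? (y | 0x6) : y     with bits 1-2 known zero in y
// we emit roughly
//   lsr t, x, #2        ; move the tested bit down to bit 0
//   bfi y, t, #1, #1    ; copy it into bit 1 of y
//   bfi y, t, #2, #1    ; ... and into bit 2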
11497 SDValue Op0 = CMOV->getOperand(0);
11498 SDValue Op1 = CMOV->getOperand(1);
11499 auto CCNode = cast<ConstantSDNode>(CMOV->getOperand(2));
11500 auto CC = CCNode->getAPIntValue().getLimitedValue();
11501 SDValue CmpZ = CMOV->getOperand(4);
11503 // The compare must be against zero.
11504 if (!isNullConstant(CmpZ->getOperand(1)))
11507 assert(CmpZ->getOpcode() == ARMISD::CMPZ);
11508 SDValue And = CmpZ->getOperand(0);
11509 if (And->getOpcode() != ISD::AND)
11511 ConstantSDNode *AndC = dyn_cast<ConstantSDNode>(And->getOperand(1));
11512 if (!AndC || !AndC->getAPIntValue().isPowerOf2())
11514 SDValue X = And->getOperand(0);
11516 if (CC == ARMCC::EQ) {
11517 // We're performing an "equal to zero" compare. Swap the operands so we
11518 // canonicalize on a "not equal to zero" compare.
11519 std::swap(Op0, Op1);
11521 assert(CC == ARMCC::NE && "How can a CMPZ node not be EQ or NE?");
11524 if (Op1->getOpcode() != ISD::OR)
11527 ConstantSDNode *OrC = dyn_cast<ConstantSDNode>(Op1->getOperand(1));
11530 SDValue Y = Op1->getOperand(0);
11535 // Now, is it profitable to continue?
11536 APInt OrCI = OrC->getAPIntValue();
11537 unsigned Heuristic = Subtarget->isThumb() ? 3 : 2;
11538 if (OrCI.countPopulation() > Heuristic)
11541 // Lastly, can we determine that the bits defined by OrCI are zero in Y?
11543 APInt KnownZero, KnownOne;
11544 computeKnownBits(DAG, Y, KnownZero, KnownOne);
11545 if ((OrCI & KnownZero) != OrCI)
11548 // OK, we can do the combine.
11551 EVT VT = X.getValueType();
11552 unsigned BitInX = AndC->getAPIntValue().logBase2();
11555 // We must shift X first.
11556 X = DAG.getNode(ISD::SRL, dl, VT, X,
11557 DAG.getConstant(BitInX, dl, VT));
11560 for (unsigned BitInY = 0, NumActiveBits = OrCI.getActiveBits();
11561 BitInY < NumActiveBits; ++BitInY) {
11562 if (OrCI[BitInY] == 0)
11564 APInt Mask(VT.getSizeInBits(), 0);
11565 Mask.setBit(BitInY);
11566 V = DAG.getNode(ARMISD::BFI, dl, VT, V, X,
11567 // Confusingly, the operand is an *inverted* mask.
11568 DAG.getConstant(~Mask, dl, VT));
11574 /// PerformBRCONDCombine - Target-specific DAG combining for ARMISD::BRCOND.
11576 ARMTargetLowering::PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const {
11577 SDValue Cmp = N->getOperand(4);
11578 if (Cmp.getOpcode() != ARMISD::CMPZ)
11579 // Only looking at NE cases.
11582 EVT VT = N->getValueType(0);
11584 SDValue LHS = Cmp.getOperand(0);
11585 SDValue RHS = Cmp.getOperand(1);
11586 SDValue Chain = N->getOperand(0);
11587 SDValue BB = N->getOperand(1);
11588 SDValue ARMcc = N->getOperand(2);
11589 ARMCC::CondCodes CC =
11590 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
11592 // (brcond Chain BB ne CPSR (cmpz (and (cmov 0 1 CC CPSR Cmp) 1) 0))
11593 // -> (brcond Chain BB CC CPSR Cmp)
11594 if (CC == ARMCC::NE && LHS.getOpcode() == ISD::AND && LHS->hasOneUse() &&
11595 LHS->getOperand(0)->getOpcode() == ARMISD::CMOV &&
11596 LHS->getOperand(0)->hasOneUse()) {
11597 auto *LHS00C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(0));
11598 auto *LHS01C = dyn_cast<ConstantSDNode>(LHS->getOperand(0)->getOperand(1));
11599 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
11600 auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
11601 if ((LHS00C && LHS00C->getZExtValue() == 0) &&
11602 (LHS01C && LHS01C->getZExtValue() == 1) &&
11603 (LHS1C && LHS1C->getZExtValue() == 1) &&
11604 (RHSC && RHSC->getZExtValue() == 0)) {
11605 return DAG.getNode(
11606 ARMISD::BRCOND, dl, VT, Chain, BB, LHS->getOperand(0)->getOperand(2),
11607 LHS->getOperand(0)->getOperand(3), LHS->getOperand(0)->getOperand(4));
11614 /// PerformCMOVCombine - Target-specific DAG combining for ARMISD::CMOV.
11616 ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
11617 SDValue Cmp = N->getOperand(4);
11618 if (Cmp.getOpcode() != ARMISD::CMPZ)
11619 // Only looking at EQ and NE cases.
11622 EVT VT = N->getValueType(0);
11624 SDValue LHS = Cmp.getOperand(0);
11625 SDValue RHS = Cmp.getOperand(1);
11626 SDValue FalseVal = N->getOperand(0);
11627 SDValue TrueVal = N->getOperand(1);
11628 SDValue ARMcc = N->getOperand(2);
11629 ARMCC::CondCodes CC =
11630 (ARMCC::CondCodes)cast<ConstantSDNode>(ARMcc)->getZExtValue();
11632 // BFI is only available on V6T2+.
11633 if (!Subtarget->isThumb1Only() && Subtarget->hasV6T2Ops()) {
11634 SDValue R = PerformCMOVToBFICombine(N, DAG);
11655 /// FIXME: Turn this into a target neutral optimization?
11657 if (CC == ARMCC::NE && FalseVal == RHS && FalseVal != LHS) {
11658 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, TrueVal, ARMcc,
11659 N->getOperand(3), Cmp);
11660 } else if (CC == ARMCC::EQ && TrueVal == RHS) {
11662 SDValue NewCmp = getARMCmp(LHS, RHS, ISD::SETNE, ARMcc, DAG, dl);
11663 Res = DAG.getNode(ARMISD::CMOV, dl, VT, LHS, FalseVal, ARMcc,
11664 N->getOperand(3), NewCmp);
11667 // (cmov F T ne CPSR (cmpz (cmov 0 1 CC CPSR Cmp) 0))
11668 // -> (cmov F T CC CPSR Cmp)
11669 if (CC == ARMCC::NE && LHS.getOpcode() == ARMISD::CMOV && LHS->hasOneUse()) {
11670 auto *LHS0C = dyn_cast<ConstantSDNode>(LHS->getOperand(0));
11671 auto *LHS1C = dyn_cast<ConstantSDNode>(LHS->getOperand(1));
11672 auto *RHSC = dyn_cast<ConstantSDNode>(RHS);
11673 if ((LHS0C && LHS0C->getZExtValue() == 0) &&
11674 (LHS1C && LHS1C->getZExtValue() == 1) &&
11675 (RHSC && RHSC->getZExtValue() == 0)) {
11676 return DAG.getNode(ARMISD::CMOV, dl, VT, FalseVal, TrueVal,
11677 LHS->getOperand(2), LHS->getOperand(3),
11678 LHS->getOperand(4));
11682 if (Res.getNode()) {
11683 APInt KnownZero, KnownOne;
11684 DAG.computeKnownBits(SDValue(N,0), KnownZero, KnownOne);
11685 // Capture demanded bits information that would otherwise be lost.
11686 if (KnownZero == 0xfffffffe)
11687 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
11688 DAG.getValueType(MVT::i1));
11689 else if (KnownZero == 0xffffff00)
11690 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
11691 DAG.getValueType(MVT::i8));
11692 else if (KnownZero == 0xffff0000)
11693 Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
11694 DAG.getValueType(MVT::i16));
11700 SDValue ARMTargetLowering::PerformDAGCombine(SDNode *N,
11701 DAGCombinerInfo &DCI) const {
11702 switch (N->getOpcode()) {
11704 case ISD::ADDC: return PerformADDCCombine(N, DCI, Subtarget);
11705 case ISD::ADD: return PerformADDCombine(N, DCI, Subtarget);
11706 case ISD::SUB: return PerformSUBCombine(N, DCI);
11707 case ISD::MUL: return PerformMULCombine(N, DCI, Subtarget);
11708 case ISD::OR: return PerformORCombine(N, DCI, Subtarget);
11709 case ISD::XOR: return PerformXORCombine(N, DCI, Subtarget);
11710 case ISD::AND: return PerformANDCombine(N, DCI, Subtarget);
11711 case ARMISD::BFI: return PerformBFICombine(N, DCI);
11712 case ARMISD::VMOVRRD: return PerformVMOVRRDCombine(N, DCI, Subtarget);
11713 case ARMISD::VMOVDRR: return PerformVMOVDRRCombine(N, DCI.DAG);
11714 case ISD::STORE: return PerformSTORECombine(N, DCI);
11715 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DCI, Subtarget);
11716 case ISD::INSERT_VECTOR_ELT: return PerformInsertEltCombine(N, DCI);
11717 case ISD::VECTOR_SHUFFLE: return PerformVECTOR_SHUFFLECombine(N, DCI.DAG);
11718 case ARMISD::VDUPLANE: return PerformVDUPLANECombine(N, DCI);
11719 case ARMISD::VDUP: return PerformVDUPCombine(N, DCI);
11720 case ISD::FP_TO_SINT:
11721 case ISD::FP_TO_UINT:
11722 return PerformVCVTCombine(N, DCI.DAG, Subtarget);
11724 return PerformVDIVCombine(N, DCI.DAG, Subtarget);
11725 case ISD::INTRINSIC_WO_CHAIN: return PerformIntrinsicCombine(N, DCI.DAG);
11728 case ISD::SRL: return PerformShiftCombine(N, DCI.DAG, Subtarget);
11729 case ISD::SIGN_EXTEND:
11730 case ISD::ZERO_EXTEND:
11731 case ISD::ANY_EXTEND: return PerformExtendCombine(N, DCI.DAG, Subtarget);
11732 case ARMISD::CMOV: return PerformCMOVCombine(N, DCI.DAG);
11733 case ARMISD::BRCOND: return PerformBRCONDCombine(N, DCI.DAG);
11734 case ISD::LOAD: return PerformLOADCombine(N, DCI);
11735 case ARMISD::VLD1DUP:
11736 case ARMISD::VLD2DUP:
11737 case ARMISD::VLD3DUP:
11738 case ARMISD::VLD4DUP:
11739 return PerformVLDCombine(N, DCI);
11740 case ARMISD::BUILD_VECTOR:
11741 return PerformARMBUILD_VECTORCombine(N, DCI);
11742 case ISD::INTRINSIC_VOID:
11743 case ISD::INTRINSIC_W_CHAIN:
11744 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) {
11745 case Intrinsic::arm_neon_vld1:
11746 case Intrinsic::arm_neon_vld2:
11747 case Intrinsic::arm_neon_vld3:
11748 case Intrinsic::arm_neon_vld4:
11749 case Intrinsic::arm_neon_vld2lane:
11750 case Intrinsic::arm_neon_vld3lane:
11751 case Intrinsic::arm_neon_vld4lane:
11752 case Intrinsic::arm_neon_vst1:
11753 case Intrinsic::arm_neon_vst2:
11754 case Intrinsic::arm_neon_vst3:
11755 case Intrinsic::arm_neon_vst4:
11756 case Intrinsic::arm_neon_vst2lane:
11757 case Intrinsic::arm_neon_vst3lane:
11758 case Intrinsic::arm_neon_vst4lane:
11759 return PerformVLDCombine(N, DCI);
11767 bool ARMTargetLowering::isDesirableToTransformToIntegerOp(unsigned Opc,
11769 return (VT == MVT::f32) && (Opc == ISD::LOAD || Opc == ISD::STORE);
11772 bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
11775 bool *Fast) const {
11776 // The AllowsUnaligned flag models the SCTLR.A setting in ARM CPUs.
11777 bool AllowsUnaligned = Subtarget->allowsUnalignedMem();
11779 switch (VT.getSimpleVT().SimpleTy) {
11786 // Unaligned access can use (for example) LDRB, LDRH, LDR.
11786 if (AllowsUnaligned) {
11788 *Fast = Subtarget->hasV7Ops();
11795 // For any little-endian targets with NEON, we can support unaligned ld/st
11796 // of D and Q (e.g. {D0,D1}) registers by using vld1.i8/vst1.i8.
11797 // A big-endian target may also explicitly support unaligned accesses.
11798 if (Subtarget->hasNEON() && (AllowsUnaligned || Subtarget->isLittle())) {
11808 static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
11809 unsigned AlignCheck) {
11810 return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
11811 (DstAlign == 0 || DstAlign % AlignCheck == 0));
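// Illustrative: memOpAlign(0, 16, 16) is true because a zero alignment here
// means "unknown/don't care", while memOpAlign(8, 16, 16) is false.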
11814 EVT ARMTargetLowering::getOptimalMemOpType(uint64_t Size,
11815 unsigned DstAlign, unsigned SrcAlign,
11816 bool IsMemset, bool ZeroMemset,
11818 MachineFunction &MF) const {
11819 const Function *F = MF.getFunction();
11821 // See if we can use NEON instructions for this...
11822 if ((!IsMemset || ZeroMemset) && Subtarget->hasNEON() &&
11823 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
11826 (memOpAlign(SrcAlign, DstAlign, 16) ||
11827 (allowsMisalignedMemoryAccesses(MVT::v2f64, 0, 1, &Fast) && Fast))) {
11829 } else if (Size >= 8 &&
11830 (memOpAlign(SrcAlign, DstAlign, 8) ||
11831 (allowsMisalignedMemoryAccesses(MVT::f64, 0, 1, &Fast) &&
11837 // Lowering to i32/i16 if the size permits.
11840 else if (Size >= 2)
11843 // Let the target-independent logic figure it out.
11847 bool ARMTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
11848 if (Val.getOpcode() != ISD::LOAD)
11851 EVT VT1 = Val.getValueType();
11852 if (!VT1.isSimple() || !VT1.isInteger() ||
11853 !VT2.isSimple() || !VT2.isInteger())
11856 switch (VT1.getSimpleVT().SimpleTy) {
11861 // 8-bit and 16-bit loads implicitly zero-extend to 32-bits.
11868 bool ARMTargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
11869 EVT VT = ExtVal.getValueType();
11871 if (!isTypeLegal(VT))
11874 // Don't create a loadext if we can fold the extension into a wide/long instruction.
11876 // If there's more than one user instruction, the loadext is desirable no
11877 // matter what. There can be two uses by the same instruction.
11878 if (ExtVal->use_empty() ||
11879 !ExtVal->use_begin()->isOnlyUserOf(ExtVal.getNode()))
11882 SDNode *U = *ExtVal->use_begin();
11883 if ((U->getOpcode() == ISD::ADD || U->getOpcode() == ISD::SUB ||
11884 U->getOpcode() == ISD::SHL || U->getOpcode() == ARMISD::VSHL))
11890 bool ARMTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
11891 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
11894 if (!isTypeLegal(EVT::getEVT(Ty1)))
11897 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
11899 // Assuming the caller doesn't have a zeroext or signext return parameter,
11900 // truncation all the way down to i1 is valid.
11904 int ARMTargetLowering::getScalingFactorCost(const DataLayout &DL,
11905 const AddrMode &AM, Type *Ty,
11906 unsigned AS) const {
11907 if (isLegalAddressingMode(DL, AM, Ty, AS)) {
11908 if (Subtarget->hasFPAO())
11909 return AM.Scale < 0 ? 1 : 0; // positive offsets execute faster
11916 static bool isLegalT1AddressImmediate(int64_t V, EVT VT) {
11920 unsigned Scale = 1;
11921 switch (VT.getSimpleVT().SimpleTy) {
11922 default: return false;
11937 if ((V & (Scale - 1)) != 0)
11940 return V == (V & ((1LL << 5) - 1));
11943 static bool isLegalT2AddressImmediate(int64_t V, EVT VT,
11944 const ARMSubtarget *Subtarget) {
11945 bool isNeg = false;
11951 switch (VT.getSimpleVT().SimpleTy) {
11952 default: return false;
11957 // + imm12 or - imm8
11959 return V == (V & ((1LL << 8) - 1));
11960 return V == (V & ((1LL << 12) - 1));
11963 // Same as ARM mode. FIXME: NEON?
11964 if (!Subtarget->hasVFP2())
11969 return V == (V & ((1LL << 8) - 1));
11973 /// isLegalAddressImmediate - Return true if the integer value can be used
11974 /// as the offset of the target addressing mode for load / store of the
11976 static bool isLegalAddressImmediate(int64_t V, EVT VT,
11977 const ARMSubtarget *Subtarget) {
11981 if (!VT.isSimple())
11984 if (Subtarget->isThumb1Only())
11985 return isLegalT1AddressImmediate(V, VT);
11986 else if (Subtarget->isThumb2())
11987 return isLegalT2AddressImmediate(V, VT, Subtarget);
11992 switch (VT.getSimpleVT().SimpleTy) {
11993 default: return false;
11998 return V == (V & ((1LL << 12) - 1));
12001 return V == (V & ((1LL << 8) - 1));
12004 if (!Subtarget->hasVFP2()) // FIXME: NEON?
12009 return V == (V & ((1LL << 8) - 1));
12013 bool ARMTargetLowering::isLegalT2ScaledAddressingMode(const AddrMode &AM,
12015 int Scale = AM.Scale;
12019 switch (VT.getSimpleVT().SimpleTy) {
12020 default: return false;
12028 Scale = Scale & ~1;
12029 return Scale == 2 || Scale == 4 || Scale == 8;
12032 if (((unsigned)AM.HasBaseReg + Scale) <= 2)
12036 // Note, we allow "void" uses (basically, uses that aren't loads or
12037 // stores), because ARM allows folding a scale into many arithmetic
12038 // operations. This should be made more precise and revisited later.
12040 // Allow r << imm, but the imm has to be a multiple of two.
12041 if (Scale & 1) return false;
12042 return isPowerOf2_32(Scale);
12046 /// isLegalAddressingMode - Return true if the addressing mode represented
12047 /// by AM is legal for this target, for a load/store of the specified type.
12048 bool ARMTargetLowering::isLegalAddressingMode(const DataLayout &DL,
12049 const AddrMode &AM, Type *Ty,
12050 unsigned AS) const {
12051 EVT VT = getValueType(DL, Ty, true);
12052 if (!isLegalAddressImmediate(AM.BaseOffs, VT, Subtarget))
12055 // Can never fold addr of global into load/store.
12059 switch (AM.Scale) {
12060 case 0: // no scale reg, must be "r+i" or "r", or "i".
12063 if (Subtarget->isThumb1Only())
12067 // ARM doesn't support any R+R*scale+imm addr modes.
12071 if (!VT.isSimple())
12074 if (Subtarget->isThumb2())
12075 return isLegalT2ScaledAddressingMode(AM, VT);
12077 int Scale = AM.Scale;
12078 switch (VT.getSimpleVT().SimpleTy) {
12079 default: return false;
12083 if (Scale < 0) Scale = -Scale;
12087 return isPowerOf2_32(Scale & ~1);
12091 if (((unsigned)AM.HasBaseReg + Scale) <= 2)
12096 // Note, we allow "void" uses (basically, uses that aren't loads or
12097 // stores), because ARM allows folding a scale into many arithmetic
12098 // operations. This should be made more precise and revisited later.
12100 // Allow r << imm, but the imm has to be a multiple of two.
12101 if (Scale & 1) return false;
12102 return isPowerOf2_32(Scale);
12108 /// isLegalICmpImmediate - Return true if the specified immediate is legal
12109 /// icmp immediate, that is the target has icmp instructions which can compare
12110 /// a register against the immediate without having to materialize the
12111 /// immediate into a register.
12112 bool ARMTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
12113 // Thumb2 and ARM modes can use cmn for negative immediates.
12114 if (!Subtarget->isThumb())
12115 return ARM_AM::getSOImmVal(std::abs(Imm)) != -1;
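// Illustrative: Imm == -10 is legal here because std::abs turns it into the
// encodable constant 10 and the compare can be emitted as cmn rN, #10.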
12116 if (Subtarget->isThumb2())
12117 return ARM_AM::getT2SOImmVal(std::abs(Imm)) != -1;
12118 // Thumb1 doesn't have cmn, and has only 8-bit immediates.
12119 return Imm >= 0 && Imm <= 255;
12122 /// isLegalAddImmediate - Return true if the specified immediate is a legal add
12123 /// *or sub* immediate, that is the target has add or sub instructions which can
12124 /// add a register with the immediate without having to materialize the
12125 /// immediate into a register.
12126 bool ARMTargetLowering::isLegalAddImmediate(int64_t Imm) const {
12127 // Same encoding for add/sub, just flip the sign.
12128 int64_t AbsImm = std::abs(Imm);
12129 if (!Subtarget->isThumb())
12130 return ARM_AM::getSOImmVal(AbsImm) != -1;
12131 if (Subtarget->isThumb2())
12132 return ARM_AM::getT2SOImmVal(AbsImm) != -1;
12133 // Thumb1 only has 8-bit unsigned immediate.
12134 return AbsImm >= 0 && AbsImm <= 255;
12137 static bool getARMIndexedAddressParts(SDNode *Ptr, EVT VT,
12138 bool isSEXTLoad, SDValue &Base,
12139 SDValue &Offset, bool &isInc,
12140 SelectionDAG &DAG) {
12141 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
12144 if (VT == MVT::i16 || ((VT == MVT::i8 || VT == MVT::i1) && isSEXTLoad)) {
12145 // AddressingMode 3
12146 Base = Ptr->getOperand(0);
12147 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
12148 int RHSC = (int)RHS->getZExtValue();
12149 if (RHSC < 0 && RHSC > -256) {
12150 assert(Ptr->getOpcode() == ISD::ADD);
12152 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
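// Hedged sketch: (add ptr, -8) feeding an i16 load takes this path, producing
// an offset constant of 8 for the decrementing form, which can later select
// to a pre-indexed "ldrh rD, [rB, #-8]!" (registers illustrative).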
12156 isInc = (Ptr->getOpcode() == ISD::ADD);
12157 Offset = Ptr->getOperand(1);
12159 } else if (VT == MVT::i32 || VT == MVT::i8 || VT == MVT::i1) {
12160 // AddressingMode 2
12161 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
12162 int RHSC = (int)RHS->getZExtValue();
12163 if (RHSC < 0 && RHSC > -0x1000) {
12164 assert(Ptr->getOpcode() == ISD::ADD);
12166 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
12167 Base = Ptr->getOperand(0);
12172 if (Ptr->getOpcode() == ISD::ADD) {
12174 ARM_AM::ShiftOpc ShOpcVal =
12175 ARM_AM::getShiftOpcForNode(Ptr->getOperand(0).getOpcode());
12176 if (ShOpcVal != ARM_AM::no_shift) {
12177 Base = Ptr->getOperand(1);
12178 Offset = Ptr->getOperand(0);
12180 Base = Ptr->getOperand(0);
12181 Offset = Ptr->getOperand(1);
12186 isInc = (Ptr->getOpcode() == ISD::ADD);
12187 Base = Ptr->getOperand(0);
12188 Offset = Ptr->getOperand(1);
12192 // FIXME: Use VLDM / VSTM to emulate indexed FP load / store.
12196 static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
12197 bool isSEXTLoad, SDValue &Base,
12198 SDValue &Offset, bool &isInc,
12199 SelectionDAG &DAG) {
12200 if (Ptr->getOpcode() != ISD::ADD && Ptr->getOpcode() != ISD::SUB)
12203 Base = Ptr->getOperand(0);
12204 if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Ptr->getOperand(1))) {
12205 int RHSC = (int)RHS->getZExtValue();
12206 if (RHSC < 0 && RHSC > -0x100) { // 8 bits.
12207 assert(Ptr->getOpcode() == ISD::ADD);
12209 Offset = DAG.getConstant(-RHSC, SDLoc(Ptr), RHS->getValueType(0));
12211 } else if (RHSC > 0 && RHSC < 0x100) { // 8 bit, no zero.
12212 isInc = Ptr->getOpcode() == ISD::ADD;
12213 Offset = DAG.getConstant(RHSC, SDLoc(Ptr), RHS->getValueType(0));
12221 /// getPreIndexedAddressParts - returns true by value, base pointer and
12222 /// offset pointer and addressing mode by reference if the node's address
12223 /// can be legally represented as pre-indexed load / store address.
12225 ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
12227 ISD::MemIndexedMode &AM,
12228 SelectionDAG &DAG) const {
12229 if (Subtarget->isThumb1Only())
12234 bool isSEXTLoad = false;
12235 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
12236 Ptr = LD->getBasePtr();
12237 VT = LD->getMemoryVT();
12238 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
12239 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
12240 Ptr = ST->getBasePtr();
12241 VT = ST->getMemoryVT();
12246 bool isLegal = false;
12247 if (Subtarget->isThumb2())
12248 isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
12249 Offset, isInc, DAG);
12251 isLegal = getARMIndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
12252 Offset, isInc, DAG);
12256 AM = isInc ? ISD::PRE_INC : ISD::PRE_DEC;
12260 /// getPostIndexedAddressParts - returns true by value, base pointer and
12261 /// offset pointer and addressing mode by reference if this node can be
12262 /// combined with a load / store to form a post-indexed load / store.
12263 bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
12266 ISD::MemIndexedMode &AM,
12267 SelectionDAG &DAG) const {
12270 bool isSEXTLoad = false, isNonExt;
12271 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
12272 VT = LD->getMemoryVT();
12273 Ptr = LD->getBasePtr();
12274 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
12275 isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
12276 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
12277 VT = ST->getMemoryVT();
12278 Ptr = ST->getBasePtr();
12279 isNonExt = !ST->isTruncatingStore();
12283 if (Subtarget->isThumb1Only()) {
12284 // Thumb-1 can do a limited post-inc load or store as an updating LDM. It
12285 // must be non-extending/truncating, i32, with an offset of 4.
12286 assert(Op->getValueType(0) == MVT::i32 && "Non-i32 post-inc op?!");
12287 if (Op->getOpcode() != ISD::ADD || !isNonExt)
12289 auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
12290 if (!RHS || RHS->getZExtValue() != 4)
12293 Offset = Op->getOperand(1);
12294 Base = Op->getOperand(0);
12295 AM = ISD::POST_INC;
12300 bool isLegal = false;
12301 if (Subtarget->isThumb2())
12302 isLegal = getT2IndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
12305 isLegal = getARMIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
12311 // Swap base ptr and offset to catch more post-index load / store when
12312 // it's legal. In Thumb2 mode, offset must be an immediate.
12313 if (Ptr == Offset && Op->getOpcode() == ISD::ADD &&
12314 !Subtarget->isThumb2())
12315 std::swap(Base, Offset);
12317 // Post-indexed load / store update the base pointer.
12322 AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
12326 void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
12329 const SelectionDAG &DAG,
12330 unsigned Depth) const {
12331 unsigned BitWidth = KnownOne.getBitWidth();
12332 KnownZero = KnownOne = APInt(BitWidth, 0);
12333 switch (Op.getOpcode()) {
12339 // These nodes' second result is a boolean
12340 if (Op.getResNo() == 0)
12342 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
12344 case ARMISD::CMOV: {
12345 // Bits are known zero/one if known on the LHS and RHS.
12346 DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
12347 if (KnownZero == 0 && KnownOne == 0) return;
12349 APInt KnownZeroRHS, KnownOneRHS;
12350 DAG.computeKnownBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1);
12351 KnownZero &= KnownZeroRHS;
12352 KnownOne &= KnownOneRHS;
12355 case ISD::INTRINSIC_W_CHAIN: {
12356 ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
12357 Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
12360 case Intrinsic::arm_ldaex:
12361 case Intrinsic::arm_ldrex: {
12362 EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
12363 unsigned MemBits = VT.getScalarSizeInBits();
12364 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
12372 //===----------------------------------------------------------------------===//
12373 // ARM Inline Assembly Support
12374 //===----------------------------------------------------------------------===//
12376 bool ARMTargetLowering::ExpandInlineAsm(CallInst *CI) const {
12377 // Looking for "rev" which is V6+.
12378 if (!Subtarget->hasV6Ops())
12381 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
12382 std::string AsmStr = IA->getAsmString();
12383 SmallVector<StringRef, 4> AsmPieces;
12384 SplitString(AsmStr, AsmPieces, ";\n");
12386 switch (AsmPieces.size()) {
12387 default: return false;
12389 AsmStr = AsmPieces[0];
12391 SplitString(AsmStr, AsmPieces, " \t,");
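// Sketch of the pattern being matched: inline asm of the form
//   __asm__("rev %0, %1" : "=l"(out) : "l"(in));
// on a 32-bit integer is rewritten below into the llvm.bswap.i32 intrinsic.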
12394 if (AsmPieces.size() == 3 &&
12395 AsmPieces[0] == "rev" && AsmPieces[1] == "$0" && AsmPieces[2] == "$1" &&
12396 IA->getConstraintString().compare(0, 4, "=l,l") == 0) {
12397 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
12398 if (Ty && Ty->getBitWidth() == 32)
12399 return IntrinsicLowering::LowerToByteSwap(CI);
12407 const char *ARMTargetLowering::LowerXConstraint(EVT ConstraintVT) const {
12408 // At this point, we have to lower this constraint to something else, so we
12409 // lower it to an "r" or "w". However, by doing this we will force the result
12410 // to be in a register, while the X constraint is much more permissive.
12412 // Although we are correct (we are free to emit anything, without
12413 // constraints), we might break use cases that would expect us to be more
12414 // efficient and emit something else.
12415 if (!Subtarget->hasVFP2())
12417 if (ConstraintVT.isFloatingPoint())
12419 if (ConstraintVT.isVector() && Subtarget->hasNEON() &&
12420 (ConstraintVT.getSizeInBits() == 64 ||
12421 ConstraintVT.getSizeInBits() == 128))
12427 /// getConstraintType - Given a constraint letter, return the type of
12428 /// constraint it is for this target.
12429 ARMTargetLowering::ConstraintType
12430 ARMTargetLowering::getConstraintType(StringRef Constraint) const {
12431 if (Constraint.size() == 1) {
12432 switch (Constraint[0]) {
12434 case 'l': return C_RegisterClass;
12435 case 'w': return C_RegisterClass;
12436 case 'h': return C_RegisterClass;
12437 case 'x': return C_RegisterClass;
12438 case 't': return C_RegisterClass;
12439 case 'j': return C_Other; // Constant for movw.
12440 // An address with a single base register. Due to the way we
12441 // currently handle addresses it is the same as an 'r' memory constraint.
12442 case 'Q': return C_Memory;
12444 } else if (Constraint.size() == 2) {
12445 switch (Constraint[0]) {
12447 // All 'U+' constraints are addresses.
12448 case 'U': return C_Memory;
12451 return TargetLowering::getConstraintType(Constraint);
12454 /// Examine constraint type and operand type and determine a weight value.
12455 /// This object must already have been set up with the operand type
12456 /// and the current alternative constraint selected.
12457 TargetLowering::ConstraintWeight
12458 ARMTargetLowering::getSingleConstraintMatchWeight(
12459 AsmOperandInfo &info, const char *constraint) const {
12460 ConstraintWeight weight = CW_Invalid;
12461 Value *CallOperandVal = info.CallOperandVal;
12462 // If we don't have a value, we can't do a match,
12463 // but allow it at the lowest weight.
12464 if (!CallOperandVal)
12466 Type *type = CallOperandVal->getType();
12467 // Look at the constraint type.
12468 switch (*constraint) {
12470 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
12473 if (type->isIntegerTy()) {
12474 if (Subtarget->isThumb())
12475 weight = CW_SpecificReg;
12477 weight = CW_Register;
12481 if (type->isFloatingPointTy())
12482 weight = CW_Register;
12488 typedef std::pair<unsigned, const TargetRegisterClass*> RCPair;
12489 RCPair ARMTargetLowering::getRegForInlineAsmConstraint(
12490 const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
12491 if (Constraint.size() == 1) {
12492 // GCC ARM Constraint Letters
12493 switch (Constraint[0]) {
12494 case 'l': // Low regs or general regs.
12495 if (Subtarget->isThumb())
12496 return RCPair(0U, &ARM::tGPRRegClass);
12497 return RCPair(0U, &ARM::GPRRegClass);
12498 case 'h': // High regs or no regs.
12499 if (Subtarget->isThumb())
12500 return RCPair(0U, &ARM::hGPRRegClass);
12503 if (Subtarget->isThumb1Only())
12504 return RCPair(0U, &ARM::tGPRRegClass);
12505 return RCPair(0U, &ARM::GPRRegClass);
12507 if (VT == MVT::Other)
12509 if (VT == MVT::f32)
12510 return RCPair(0U, &ARM::SPRRegClass);
12511 if (VT.getSizeInBits() == 64)
12512 return RCPair(0U, &ARM::DPRRegClass);
12513 if (VT.getSizeInBits() == 128)
12514 return RCPair(0U, &ARM::QPRRegClass);
12517 if (VT == MVT::Other)
12519 if (VT == MVT::f32)
12520 return RCPair(0U, &ARM::SPR_8RegClass);
12521 if (VT.getSizeInBits() == 64)
12522 return RCPair(0U, &ARM::DPR_8RegClass);
12523 if (VT.getSizeInBits() == 128)
12524 return RCPair(0U, &ARM::QPR_8RegClass);
12527 if (VT == MVT::f32)
12528 return RCPair(0U, &ARM::SPRRegClass);
12532 if (StringRef("{cc}").equals_lower(Constraint))
12533 return std::make_pair(unsigned(ARM::CPSR), &ARM::CCRRegClass);
12535 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
12538 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
12539 /// vector. If it is invalid, don't add anything to Ops.
12540 void ARMTargetLowering::LowerAsmOperandForConstraint(SDValue Op,
12541 std::string &Constraint,
12542 std::vector<SDValue>&Ops,
12543 SelectionDAG &DAG) const {
12546 // Currently only support length 1 constraints.
12547 if (Constraint.length() != 1) return;
12549 char ConstraintLetter = Constraint[0];
12550 switch (ConstraintLetter) {
12553 case 'I': case 'J': case 'K': case 'L':
12554 case 'M': case 'N': case 'O':
12555 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
12559 int64_t CVal64 = C->getSExtValue();
12560 int CVal = (int) CVal64;
12561 // None of these constraints allow values larger than 32 bits. Check
12562 // that the value fits in an int.
12563 if (CVal != CVal64)
12566 switch (ConstraintLetter) {
12568 // Constant suitable for movw, must be between 0 and 65535.
12570 if (Subtarget->hasV6T2Ops())
12571 if (CVal >= 0 && CVal <= 65535)
12575 if (Subtarget->isThumb1Only()) {
12576 // This must be a constant between 0 and 255, for ADD immediates.
12578 if (CVal >= 0 && CVal <= 255)
12580 } else if (Subtarget->isThumb2()) {
12581 // A constant that can be used as an immediate value in a
12582 // data-processing instruction.
12583 if (ARM_AM::getT2SOImmVal(CVal) != -1)
12586 // A constant that can be used as an immediate value in a
12587 // data-processing instruction.
12588 if (ARM_AM::getSOImmVal(CVal) != -1)
12594 if (Subtarget->isThumb1Only()) {
12595 // This must be a constant between -255 and -1, for negated ADD
12596 // immediates. This can be used in GCC with an "n" modifier that
12597 // prints the negated value, for use with SUB instructions. It is
12598 // not useful otherwise but is implemented for compatibility.
12599 if (CVal >= -255 && CVal <= -1)
12602 // This must be a constant between -4095 and 4095. It is not clear
12603 // what this constraint is intended for. Implemented for
12604 // compatibility with GCC.
12605 if (CVal >= -4095 && CVal <= 4095)
12611 if (Subtarget->isThumb1Only()) {
12612 // A 32-bit value where only one byte has a nonzero value. Exclude
12613 // zero to match GCC. This constraint is used by GCC internally for
12614 // constants that can be loaded with a move/shift combination.
12615 // It is not useful otherwise but is implemented for compatibility.
12616 if (CVal != 0 && ARM_AM::isThumbImmShiftedVal(CVal))
12618 } else if (Subtarget->isThumb2()) {
12619 // A constant whose bitwise inverse can be used as an immediate
12620 // value in a data-processing instruction. This can be used in GCC
12621 // with a "B" modifier that prints the inverted value, for use with
12622 // BIC and MVN instructions. It is not useful otherwise but is
12623 // implemented for compatibility.
12624 if (ARM_AM::getT2SOImmVal(~CVal) != -1)
12627 // A constant whose bitwise inverse can be used as an immediate
12628 // value in a data-processing instruction. This can be used in GCC
12629 // with a "B" modifier that prints the inverted value, for use with
12630 // BIC and MVN instructions. It is not useful otherwise but is
12631 // implemented for compatibility.
12632 if (ARM_AM::getSOImmVal(~CVal) != -1)
12638 if (Subtarget->isThumb1Only()) {
12639 // This must be a constant between -7 and 7,
12640 // for 3-operand ADD/SUB immediate instructions.
12641 if (CVal >= -7 && CVal < 7)
12643 } else if (Subtarget->isThumb2()) {
12644 // A constant whose negation can be used as an immediate value in a
12645 // data-processing instruction. This can be used in GCC with an "n"
12646 // modifier that prints the negated value, for use with SUB
12647 // instructions. It is not useful otherwise but is implemented for
12649 if (ARM_AM::getT2SOImmVal(-CVal) != -1)
12652 // A constant whose negation can be used as an immediate value in a
12653 // data-processing instruction. This can be used in GCC with an "n"
12654 // modifier that prints the negated value, for use with SUB
12655 // instructions. It is not useful otherwise but is implemented for
12657 if (ARM_AM::getSOImmVal(-CVal) != -1)
12663 if (Subtarget->isThumb1Only()) {
12664 // This must be a multiple of 4 between 0 and 1020, for
12665 // ADD sp + immediate.
12666 if ((CVal >= 0 && CVal <= 1020) && ((CVal & 3) == 0))
12669 // A power of two or a constant between 0 and 32. This is used in
12670 // GCC for the shift amount on shifted register operands, but it is
12671 // useful in general for any shift amounts.
12672 if ((CVal >= 0 && CVal <= 32) || ((CVal & (CVal - 1)) == 0))
12678 if (Subtarget->isThumb()) { // FIXME thumb2
12679 // This must be a constant between 0 and 31, for shift amounts.
12680 if (CVal >= 0 && CVal <= 31)
12686 if (Subtarget->isThumb()) { // FIXME thumb2
12687 // This must be a multiple of 4 between -508 and 508, for
12688 // ADD/SUB sp = sp + immediate.
12689 if ((CVal >= -508 && CVal <= 508) && ((CVal & 3) == 0))
12694 Result = DAG.getTargetConstant(CVal, SDLoc(Op), Op.getValueType());
12698 if (Result.getNode()) {
12699 Ops.push_back(Result);
12702 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
12705 static RTLIB::Libcall getDivRemLibcall(
12706 const SDNode *N, MVT::SimpleValueType SVT) {
12707 assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
12708 N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) &&
12709 "Unhandled Opcode in getDivRemLibcall");
12710 bool isSigned = N->getOpcode() == ISD::SDIVREM ||
12711 N->getOpcode() == ISD::SREM;
12714 default: llvm_unreachable("Unexpected request for libcall!");
12715 case MVT::i8: LC = isSigned ? RTLIB::SDIVREM_I8 : RTLIB::UDIVREM_I8; break;
12716 case MVT::i16: LC = isSigned ? RTLIB::SDIVREM_I16 : RTLIB::UDIVREM_I16; break;
12717 case MVT::i32: LC = isSigned ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32; break;
12718 case MVT::i64: LC = isSigned ? RTLIB::SDIVREM_I64 : RTLIB::UDIVREM_I64; break;
12723 static TargetLowering::ArgListTy getDivRemArgList(
12724 const SDNode *N, LLVMContext *Context, const ARMSubtarget *Subtarget) {
12725 assert((N->getOpcode() == ISD::SDIVREM || N->getOpcode() == ISD::UDIVREM ||
12726 N->getOpcode() == ISD::SREM || N->getOpcode() == ISD::UREM) &&
12727 "Unhandled Opcode in getDivRemArgList");
12728 bool isSigned = N->getOpcode() == ISD::SDIVREM ||
12729 N->getOpcode() == ISD::SREM;
12730 TargetLowering::ArgListTy Args;
12731 TargetLowering::ArgListEntry Entry;
12732 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
12733 EVT ArgVT = N->getOperand(i).getValueType();
12734 Type *ArgTy = ArgVT.getTypeForEVT(*Context);
12735 Entry.Node = N->getOperand(i);
12737 Entry.isSExt = isSigned;
12738 Entry.isZExt = !isSigned;
12739 Args.push_back(Entry);
12741 if (Subtarget->isTargetWindows() && Args.size() >= 2)
12742 std::swap(Args[0], Args[1]);
12746 SDValue ARMTargetLowering::LowerDivRem(SDValue Op, SelectionDAG &DAG) const {
12747 assert((Subtarget->isTargetAEABI() || Subtarget->isTargetAndroid() ||
12748 Subtarget->isTargetGNUAEABI() || Subtarget->isTargetMuslAEABI() ||
12749 Subtarget->isTargetWindows()) &&
12750 "Register-based DivRem lowering only");
12751 unsigned Opcode = Op->getOpcode();
12752 assert((Opcode == ISD::SDIVREM || Opcode == ISD::UDIVREM) &&
12753 "Invalid opcode for Div/Rem lowering");
12754 bool isSigned = (Opcode == ISD::SDIVREM);
12755 EVT VT = Op->getValueType(0);
12756 Type *Ty = VT.getTypeForEVT(*DAG.getContext());
12759 // If the target has hardware divide, use divide + multiply + subtract:
12761 // rem = a - b * div
12762 // return {div, rem}
12763 // This should be lowered into UDIV/SDIV + MLS later on.
12764 if (Subtarget->hasDivide() && Op->getValueType(0).isSimple() &&
12765 Op->getSimpleValueType(0) == MVT::i32) {
12766 unsigned DivOpcode = isSigned ? ISD::SDIV : ISD::UDIV;
12767 const SDValue Dividend = Op->getOperand(0);
12768 const SDValue Divisor = Op->getOperand(1);
12769 SDValue Div = DAG.getNode(DivOpcode, dl, VT, Dividend, Divisor);
12770 SDValue Mul = DAG.getNode(ISD::MUL, dl, VT, Div, Divisor);
12771 SDValue Rem = DAG.getNode(ISD::SUB, dl, VT, Dividend, Mul);
12773 SDValue Values[2] = {Div, Rem};
12774 return DAG.getNode(ISD::MERGE_VALUES, dl, DAG.getVTList(VT, VT), Values);
12777 RTLIB::Libcall LC = getDivRemLibcall(Op.getNode(),
12778 VT.getSimpleVT().SimpleTy);
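// Hedged note: on AEABI targets an i32 SDIVREM resolves to __aeabi_idivmod,
// which returns the quotient in r0 and the remainder in r1 per the RTABI;
// other targets may map RTLIB::SDIVREM_I32 to a different helper.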
12779 SDValue InChain = DAG.getEntryNode();
12781 TargetLowering::ArgListTy Args = getDivRemArgList(Op.getNode(),
12785 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
12786 getPointerTy(DAG.getDataLayout()));
12788 Type *RetTy = (Type*)StructType::get(Ty, Ty, nullptr);
12790 if (Subtarget->isTargetWindows())
12791 InChain = WinDBZCheckDenominator(DAG, Op.getNode(), InChain);
12793 TargetLowering::CallLoweringInfo CLI(DAG);
12794 CLI.setDebugLoc(dl).setChain(InChain)
12795 .setCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
12796 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
12798 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
12799 return CallInfo.first;
12802 // Lowers REM using divmod helpers
12803 // see RTABI section 4.2/4.3
12804 SDValue ARMTargetLowering::LowerREM(SDNode *N, SelectionDAG &DAG) const {
12805 // Build return types (div and rem)
12806 std::vector<Type*> RetTyParams;
12807 Type *RetTyElement;
12809 switch (N->getValueType(0).getSimpleVT().SimpleTy) {
12810 default: llvm_unreachable("Unexpected request for libcall!");
12811 case MVT::i8: RetTyElement = Type::getInt8Ty(*DAG.getContext()); break;
12812 case MVT::i16: RetTyElement = Type::getInt16Ty(*DAG.getContext()); break;
12813 case MVT::i32: RetTyElement = Type::getInt32Ty(*DAG.getContext()); break;
12814 case MVT::i64: RetTyElement = Type::getInt64Ty(*DAG.getContext()); break;
12817 RetTyParams.push_back(RetTyElement);
12818 RetTyParams.push_back(RetTyElement);
12819 ArrayRef<Type*> ret = ArrayRef<Type*>(RetTyParams);
12820 Type *RetTy = StructType::get(*DAG.getContext(), ret);
12822 RTLIB::Libcall LC = getDivRemLibcall(N, N->getValueType(0).getSimpleVT().
12824 SDValue InChain = DAG.getEntryNode();
12825 TargetLowering::ArgListTy Args = getDivRemArgList(N, DAG.getContext(),
12827 bool isSigned = N->getOpcode() == ISD::SREM;
12828 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
12829 getPointerTy(DAG.getDataLayout()));
12831 if (Subtarget->isTargetWindows())
12832 InChain = WinDBZCheckDenominator(DAG, N, InChain);
12835 CallLoweringInfo CLI(DAG);
12836 CLI.setChain(InChain)
12837 .setCallee(CallingConv::ARM_AAPCS, RetTy, Callee, std::move(Args))
12838 .setSExtResult(isSigned).setZExtResult(!isSigned).setDebugLoc(SDLoc(N));
12839 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);
12841 // Return second (rem) result operand (first contains div)
12842 SDNode *ResNode = CallResult.first.getNode();
12843 assert(ResNode->getNumOperands() == 2 && "divmod should return two operands");
12844 return ResNode->getOperand(1);
12848 ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const {
12849 assert(Subtarget->isTargetWindows() && "unsupported target platform");
12853 SDValue Chain = Op.getOperand(0);
12854 SDValue Size = Op.getOperand(1);
12856 SDValue Words = DAG.getNode(ISD::SRL, DL, MVT::i32, Size,
12857 DAG.getConstant(2, DL, MVT::i32));
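// Hedged note: the Windows/ARM stack probe helper expects the allocation
// size as a count of 4-byte words in r4, hence the shift by 2; e.g. a
// 4096-byte alloca passes 1024.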
12860 Chain = DAG.getCopyToReg(Chain, DL, ARM::R4, Words, Flag);
12861 Flag = Chain.getValue(1);
12863 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
12864 Chain = DAG.getNode(ARMISD::WIN__CHKSTK, DL, NodeTys, Chain, Flag);
12866 SDValue NewSP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
12867 Chain = NewSP.getValue(1);
12869 SDValue Ops[2] = { NewSP, Chain };
12870 return DAG.getMergeValues(Ops, DL);
12873 SDValue ARMTargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
12874 assert(Op.getValueType() == MVT::f64 && Subtarget->isFPOnlySP() &&
12875 "Unexpected type for custom-lowering FP_EXTEND");
12878 LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType());
12880 SDValue SrcVal = Op.getOperand(0);
12881 return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false,
12885 SDValue ARMTargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
12886 assert(Op.getOperand(0).getValueType() == MVT::f64 &&
12887 Subtarget->isFPOnlySP() &&
12888 "Unexpected type for custom-lowering FP_ROUND");
12891 LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType());
12893 SDValue SrcVal = Op.getOperand(0);
12894 return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false,
12899 ARMTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
12900 // The ARM target isn't yet aware of offsets.
12904 bool ARM::isBitFieldInvertedMask(unsigned v) {
12905 if (v == 0xffffffff)
12908 // There can be 1's on either or both "outsides"; all the "inside"
12909 // bits must be 0's.
12910 return isShiftedMask_32(~v);
12913 /// isFPImmLegal - Returns true if the target can instruction select the
12914 /// specified FP immediate natively. If false, the legalizer will
12915 /// materialize the FP immediate as a load from a constant pool.
12916 bool ARMTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
12917 if (!Subtarget->hasVFP3())
12919 if (VT == MVT::f32)
12920 return ARM_AM::getFP32Imm(Imm) != -1;
12921 if (VT == MVT::f64 && !Subtarget->isFPOnlySP())
12922 return ARM_AM::getFP64Imm(Imm) != -1;
12926 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
12927 /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
12928 /// specified in the intrinsic calls.
12929 bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
12931 unsigned Intrinsic) const {
12932 switch (Intrinsic) {
12933 case Intrinsic::arm_neon_vld1:
12934 case Intrinsic::arm_neon_vld2:
12935 case Intrinsic::arm_neon_vld3:
12936 case Intrinsic::arm_neon_vld4:
12937 case Intrinsic::arm_neon_vld2lane:
12938 case Intrinsic::arm_neon_vld3lane:
12939 case Intrinsic::arm_neon_vld4lane: {
12940 Info.opc = ISD::INTRINSIC_W_CHAIN;
12941 // Conservatively set memVT to the entire set of vectors loaded.
12942 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
12943 uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64;
12944 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
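// Illustrative: a vld3.8 returning { <8 x i8>, <8 x i8>, <8 x i8> } is 192
// bits, so NumElts == 3 and memVT becomes v3i64.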
12945 Info.ptrVal = I.getArgOperand(0);
12947 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
12948 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
12949 Info.vol = false; // volatile loads with NEON intrinsics not supported
12950 Info.readMem = true;
12951 Info.writeMem = false;
12954 case Intrinsic::arm_neon_vst1:
12955 case Intrinsic::arm_neon_vst2:
12956 case Intrinsic::arm_neon_vst3:
12957 case Intrinsic::arm_neon_vst4:
12958 case Intrinsic::arm_neon_vst2lane:
12959 case Intrinsic::arm_neon_vst3lane:
12960 case Intrinsic::arm_neon_vst4lane: {
12961 Info.opc = ISD::INTRINSIC_VOID;
12962 // Conservatively set memVT to the entire set of vectors stored.
12963 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
12964 unsigned NumElts = 0;
12965 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
12966 Type *ArgTy = I.getArgOperand(ArgI)->getType();
12967 if (!ArgTy->isVectorTy())
12969 NumElts += DL.getTypeSizeInBits(ArgTy) / 64;
12971 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
12972 Info.ptrVal = I.getArgOperand(0);
12974 Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
12975 Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
12976 Info.vol = false; // volatile stores with NEON intrinsics not supported
12977 Info.readMem = false;
12978 Info.writeMem = true;
12981 case Intrinsic::arm_ldaex:
12982 case Intrinsic::arm_ldrex: {
12983 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
12984 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
12985 Info.opc = ISD::INTRINSIC_W_CHAIN;
12986 Info.memVT = MVT::getVT(PtrTy->getElementType());
12987 Info.ptrVal = I.getArgOperand(0);
12989 Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
12991 Info.readMem = true;
12992 Info.writeMem = false;
12995 case Intrinsic::arm_stlex:
12996 case Intrinsic::arm_strex: {
12997 auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
12998 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
12999 Info.opc = ISD::INTRINSIC_W_CHAIN;
13000 Info.memVT = MVT::getVT(PtrTy->getElementType());
13001 Info.ptrVal = I.getArgOperand(1);
13003 Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
13005 Info.readMem = false;
13006 Info.writeMem = true;
13009 case Intrinsic::arm_stlexd:
13010 case Intrinsic::arm_strexd: {
13011 Info.opc = ISD::INTRINSIC_W_CHAIN;
13012 Info.memVT = MVT::i64;
13013 Info.ptrVal = I.getArgOperand(2);
13017 Info.readMem = false;
13018 Info.writeMem = true;
13021 case Intrinsic::arm_ldaexd:
13022 case Intrinsic::arm_ldrexd: {
13023 Info.opc = ISD::INTRINSIC_W_CHAIN;
13024 Info.memVT = MVT::i64;
13025 Info.ptrVal = I.getArgOperand(0);
13029 Info.readMem = true;
13030 Info.writeMem = false;
/// \brief Returns true if it is beneficial to convert a load of a constant
/// to just the constant itself.
bool ARMTargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                                          Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned Bits = Ty->getPrimitiveSizeInBits();
  if (Bits == 0 || Bits > 32)
    return false;
  return true;
}

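// Extracting a subvector is only considered cheap when it starts at element 0
// or exactly one result-width into the source, i.e. when it corresponds to
// reading the low or high half of a wider register.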
bool ARMTargetLowering::isExtractSubvectorCheap(EVT ResVT,
                                                unsigned Index) const {
  if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
    return false;

  return (Index == 0 || Index == ResVT.getVectorNumElements());
}

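/// Emit the best available data memory barrier for the requested domain: a
/// DMB intrinsic where the subtarget has one, otherwise the legacy CP15
/// (mcr p15) barrier fallback on ARMv6.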
Instruction *ARMTargetLowering::makeDMB(IRBuilder<> &Builder,
                                        ARM_MB::MemBOpt Domain) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();

  // First, if the target has no DMB, see what fallback we can use.
  if (!Subtarget->hasDataBarrier()) {
    // Some ARMv6 cpus can support data barriers with an mcr instruction.
    // Thumb1 and pre-v6 ARM mode use a libcall instead and should never get
    // here.
    if (Subtarget->hasV6Ops() && !Subtarget->isThumb()) {
      Function *MCR = llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_mcr);
      Value *args[6] = {Builder.getInt32(15), Builder.getInt32(0),
                        Builder.getInt32(0),  Builder.getInt32(7),
                        Builder.getInt32(10), Builder.getInt32(5)};
      return Builder.CreateCall(MCR, args);
    } else {
      // Instead of using barriers, atomic accesses on these subtargets use
      // libcalls.
      llvm_unreachable("makeDMB on a target so old that it has no barriers");
    }
  } else {
    Function *DMB = llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_dmb);
    // Only a full system barrier exists in the M-class architectures.
    Domain = Subtarget->isMClass() ? ARM_MB::SY : Domain;
    Constant *CDomain = Builder.getInt32(Domain);
    return Builder.CreateCall(DMB, CDomain);
  }
}

// Based on http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
Instruction *ARMTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                 AtomicOrdering Ord,
                                                 bool IsStore,
                                                 bool IsLoad) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/non-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Acquire:
    return nullptr; // Nothing to do
  case AtomicOrdering::SequentiallyConsistent:
    if (!IsStore)
      return nullptr; // Nothing to do
    /*FALLTHROUGH*/
  case AtomicOrdering::Release:
  case AtomicOrdering::AcquireRelease:
    if (Subtarget->preferISHSTBarriers())
      return makeDMB(Builder, ARM_MB::ISHST);
    // FIXME: add a comment with a link to documentation justifying this.
    else
      return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitLeadingFence");
}

Instruction *ARMTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                  AtomicOrdering Ord,
                                                  bool IsStore,
                                                  bool IsLoad) const {
  switch (Ord) {
  case AtomicOrdering::NotAtomic:
  case AtomicOrdering::Unordered:
    llvm_unreachable("Invalid fence: unordered/non-atomic");
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::Release:
    return nullptr; // Nothing to do
  case AtomicOrdering::Acquire:
  case AtomicOrdering::AcquireRelease:
  case AtomicOrdering::SequentiallyConsistent:
    return makeDMB(Builder, ARM_MB::ISH);
  }
  llvm_unreachable("Unknown fence ordering in emitTrailingFence");
}

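// Taken together, the two hooks above give the usual AArch32 mapping: an
// acquire operation is the plain access followed by a trailing "dmb ish", a
// release operation is a leading barrier followed by the access, and a
// seq_cst store gets both barriers (a seq_cst load only needs the trailing
// one).
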
// Loads and stores less than 64 bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex-M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
bool ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
  return (Size == 64) && !Subtarget->isMClass();
}

// Loads and stores less than 64 bits are already atomic; ones above that
// are doomed anyway, so defer to the default libcall and blame the OS when
// things go wrong. Cortex-M doesn't have ldrexd/strexd though, so don't emit
// anything for those.
// FIXME: ldrd and strd are atomic if the CPU has LPAE (e.g. A15 has that
// guarantee, see DDI0406C ARM architecture reference manual,
// sections A8.8.72-74 LDRD).
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  unsigned Size = LI->getType()->getPrimitiveSizeInBits();
  return ((Size == 64) && !Subtarget->isMClass()) ? AtomicExpansionKind::LLOnly
                                                  : AtomicExpansionKind::None;
}

// For the real atomic operations, we have ldrex/strex up to 32 bits,
// and up to 64 bits on the non-M profiles.
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
  return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
             ? AtomicExpansionKind::LLSC
             : AtomicExpansionKind::None;
}

bool ARMTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *AI) const {
  // At -O0, fast-regalloc cannot cope with the live vregs necessary to
  // implement cmpxchg without spilling. If the address being exchanged is also
  // on the stack and close enough to the spill slot, this can lead to a
  // situation where the monitor always gets cleared and the atomic operation
  // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
  bool hasAtomicCmpXchg =
      !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
  return getTargetMachine().getOptLevel() != 0 && hasAtomicCmpXchg;
}

bool ARMTargetLowering::shouldInsertFencesForAtomic(
    const Instruction *I) const {
  return InsertFencesForAtomic;
}

// This has so far only been implemented for MachO.
bool ARMTargetLowering::useLoadStackGuardNode() const {
  return Subtarget->isTargetMachO();
}

bool ARMTargetLowering::canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                                  unsigned &Cost) const {
  // If we do not have NEON, vector types are not natively supported.
  if (!Subtarget->hasNEON())
    return false;

  // Floating point values and vector values map to the same register file.
  // Therefore, although we could do a store-extract of a vector type, it is
  // better to leave it at float, as we have more freedom in the addressing
  // mode for those.
  if (VectorTy->isFPOrFPVectorTy())
    return false;

  // If the index is unknown at compile time, this is very expensive to lower
  // and it is not possible to combine the store with the extract.
  if (!isa<ConstantInt>(Idx))
    return false;

  assert(VectorTy->isVectorTy() && "VectorTy is not a vector type");
  unsigned BitWidth = cast<VectorType>(VectorTy)->getBitWidth();
  // We can do a store + vector extract on any vector that fits perfectly in
  // a D or Q register.
  if (BitWidth == 64 || BitWidth == 128) {
    Cost = 0;
    return true;
  }
  return false;
}

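// From v6T2 onwards ctlz lowers to a single CLZ and cttz to a short RBIT+CLZ
// sequence, so both are cheap enough to speculate.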
bool ARMTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget->hasV6T2Ops();
}

bool ARMTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget->hasV6T2Ops();
}

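/// Emit a load-linked (ldrex/ldaex) of Addr. The AtomicExpand pass builds its
/// LL/SC retry loops from this hook and emitStoreConditional below. For a
/// 64-bit value the generated IR is roughly:
///   %lohi = call { i32, i32 } @llvm.arm.ldrexd(i8* %addr)
///   %lo = extractvalue { i32, i32 } %lohi, 0
///   %hi = extractvalue { i32, i32 } %lohi, 1
///   ... followed by zext/shl/or to rebuild the i64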
Value *ARMTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                         AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
  bool IsAcquire = isAcquireOrStronger(Ord);

  // Since i64 isn't legal and intrinsics don't get type-lowered, the ldrexd
  // intrinsic must return {i32, i32} and we have to recombine them into a
  // single i64 here.
  if (ValTy->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsAcquire ? Intrinsic::arm_ldaexd : Intrinsic::arm_ldrexd;
    Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int);

    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    Value *LoHi = Builder.CreateCall(Ldrex, Addr, "lohi");

    Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
    Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
    if (!Subtarget->isLittle())
      std::swap (Lo, Hi);
    Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
    Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
    return Builder.CreateOr(
        Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 32)), "val64");
  }

  Type *Tys[] = { Addr->getType() };
  Intrinsic::ID Int = IsAcquire ? Intrinsic::arm_ldaex : Intrinsic::arm_ldrex;
  Function *Ldrex = llvm::Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateTruncOrBitCast(
      Builder.CreateCall(Ldrex, Addr),
      cast<PointerType>(Addr->getType())->getElementType());
}

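/// Balance out a load-exclusive that will not be followed by its matching
/// store-exclusive (e.g. on the failure path of a cmpxchg) by emitting a
/// clrex on v7 and later, so the exclusive monitor is not left open.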
void ARMTargetLowering::emitAtomicCmpXchgNoStoreLLBalance(
    IRBuilder<> &Builder) const {
  if (!Subtarget->hasV7Ops())
    return;
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Builder.CreateCall(llvm::Intrinsic::getDeclaration(M, Intrinsic::arm_clrex));
}

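/// Emit a store-conditional (strex/stlex) of Val to Addr. As in emitLoadLinked,
/// a 64-bit value is first split into two i32 halves, so the generated call is
/// roughly:
///   %res = call i32 @llvm.arm.strexd(i32 %lo, i32 %hi, i8* %addr)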
Value *ARMTargetLowering::emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                               Value *Addr,
                                               AtomicOrdering Ord) const {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  bool IsRelease = isReleaseOrStronger(Ord);

  // Since the intrinsics must have legal type, the i64 intrinsics take two
  // parameters: "i32, i32". We must marshal Val into the appropriate form
  // before the call.
  if (Val->getType()->getPrimitiveSizeInBits() == 64) {
    Intrinsic::ID Int =
        IsRelease ? Intrinsic::arm_stlexd : Intrinsic::arm_strexd;
    Function *Strex = Intrinsic::getDeclaration(M, Int);
    Type *Int32Ty = Type::getInt32Ty(M->getContext());

    Value *Lo = Builder.CreateTrunc(Val, Int32Ty, "lo");
    Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 32), Int32Ty, "hi");
    if (!Subtarget->isLittle())
      std::swap (Lo, Hi);
    Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
    return Builder.CreateCall(Strex, {Lo, Hi, Addr});
  }

  Intrinsic::ID Int = IsRelease ? Intrinsic::arm_stlex : Intrinsic::arm_strex;
  Type *Tys[] = { Addr->getType() };
  Function *Strex = Intrinsic::getDeclaration(M, Int, Tys);

  return Builder.CreateCall(
      Strex, {Builder.CreateZExtOrBitCast(
                  Val, Strex->getFunctionType()->getParamType(0)),
              Addr});
}

/// \brief Lower an interleaved load into a vldN intrinsic.
///
/// E.g. Lower an interleaved load (Factor = 2):
///        %wide.vec = load <8 x i32>, <8 x i32>* %ptr, align 4
///        %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6>  ; Extract even elements
///        %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7>  ; Extract odd elements
///
///      Into:
///        %vld2 = { <4 x i32>, <4 x i32> } call llvm.arm.neon.vld2(%ptr, 4)
///        %vec0 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 0
///        %vec1 = extractelement { <4 x i32>, <4 x i32> } %vld2, i32 1
bool ARMTargetLowering::lowerInterleavedLoad(
    LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles,
    ArrayRef<unsigned> Indices, unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");
  assert(!Shuffles.empty() && "Empty shufflevector input");
  assert(Shuffles.size() == Indices.size() &&
         "Unmatched number of shufflevectors and indices");

  VectorType *VecTy = Shuffles[0]->getType();
  Type *EltTy = VecTy->getVectorElementType();

  const DataLayout &DL = LI->getModule()->getDataLayout();
  unsigned VecSize = DL.getTypeSizeInBits(VecTy);
  bool EltIs64Bits = DL.getTypeSizeInBits(EltTy) == 64;

  // Skip if we do not have NEON and skip illegal vector types and vector types
  // with i64/f64 elements (vldN doesn't support i64/f64 elements).
  if (!Subtarget->hasNEON() || (VecSize != 64 && VecSize != 128) || EltIs64Bits)
    return false;

  // A pointer vector cannot be the return type of the ldN intrinsics. Need to
  // load integer vectors first and then convert to pointer vectors.
  if (EltTy->isPointerTy())
    VecTy = VectorType::get(DL.getIntPtrType(EltTy),
                            VecTy->getVectorNumElements());

  static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
                                            Intrinsic::arm_neon_vld3,
                                            Intrinsic::arm_neon_vld4};

  IRBuilder<> Builder(LI);
  SmallVector<Value *, 2> Ops;

  Type *Int8Ptr = Builder.getInt8PtrTy(LI->getPointerAddressSpace());
  Ops.push_back(Builder.CreateBitCast(LI->getPointerOperand(), Int8Ptr));
  Ops.push_back(Builder.getInt32(LI->getAlignment()));

  Type *Tys[] = { VecTy, Int8Ptr };
  Function *VldnFunc =
      Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys);
  CallInst *VldN = Builder.CreateCall(VldnFunc, Ops, "vldN");

  // Replace uses of each shufflevector with the corresponding vector loaded
  // by ldN.
  for (unsigned i = 0; i < Shuffles.size(); i++) {
    ShuffleVectorInst *SV = Shuffles[i];
    unsigned Index = Indices[i];

    Value *SubVec = Builder.CreateExtractValue(VldN, Index);

    // Convert the integer vector to pointer vector if the element is pointer.
    if (EltTy->isPointerTy())
      SubVec = Builder.CreateIntToPtr(SubVec, SV->getType());

    SV->replaceAllUsesWith(SubVec);
  }

  return true;
}

/// \brief Get a mask consisting of sequential integers starting from \p Start.
///
/// I.e. <Start, Start + 1, ..., Start + NumElts - 1>
static Constant *getSequentialMask(IRBuilder<> &Builder, unsigned Start,
                                   unsigned NumElts) {
  SmallVector<Constant *, 16> Mask;
  for (unsigned i = 0; i < NumElts; i++)
    Mask.push_back(Builder.getInt32(Start + i));

  return ConstantVector::get(Mask);
}

/// \brief Lower an interleaved store into a vstN intrinsic.
///
/// E.g. Lower an interleaved store (Factor = 3):
///        %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1,
///                          <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11>
///        store <12 x i32> %i.vec, <12 x i32>* %ptr, align 4
///
///      Into:
///        %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3>
///        %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7>
///        %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11>
///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
///
/// Note that the new shufflevectors will be removed and we'll only generate one
/// vst3 instruction in CodeGen.
///
/// Example for a more general valid mask (Factor 3). Lower:
///        %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1,
///                 <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19>
///        store <12 x i32> %i.vec, <12 x i32>* %ptr
///
///      Into:
///        %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7>
///        %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35>
///        %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19>
///        call void llvm.arm.neon.vst3(%ptr, %sub.v0, %sub.v1, %sub.v2, 4)
bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
                                              ShuffleVectorInst *SVI,
                                              unsigned Factor) const {
  assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() &&
         "Invalid interleave factor");

  VectorType *VecTy = SVI->getType();
  assert(VecTy->getVectorNumElements() % Factor == 0 &&
         "Invalid interleaved store");

  unsigned LaneLen = VecTy->getVectorNumElements() / Factor;
  Type *EltTy = VecTy->getVectorElementType();
  VectorType *SubVecTy = VectorType::get(EltTy, LaneLen);

  const DataLayout &DL = SI->getModule()->getDataLayout();
  unsigned SubVecSize = DL.getTypeSizeInBits(SubVecTy);
  bool EltIs64Bits = DL.getTypeSizeInBits(EltTy) == 64;

  // Skip if we do not have NEON and skip illegal vector types and vector types
  // with i64/f64 elements (vstN doesn't support i64/f64 elements).
  if (!Subtarget->hasNEON() || (SubVecSize != 64 && SubVecSize != 128) ||
      EltIs64Bits)
    return false;

  Value *Op0 = SVI->getOperand(0);
  Value *Op1 = SVI->getOperand(1);
  IRBuilder<> Builder(SI);

  // StN intrinsics don't support pointer vectors as arguments. Convert pointer
  // vectors to integer vectors.
  if (EltTy->isPointerTy()) {
    Type *IntTy = DL.getIntPtrType(EltTy);

    // Convert to the corresponding integer vector.
    Type *IntVecTy =
        VectorType::get(IntTy, Op0->getType()->getVectorNumElements());
    Op0 = Builder.CreatePtrToInt(Op0, IntVecTy);
    Op1 = Builder.CreatePtrToInt(Op1, IntVecTy);

    SubVecTy = VectorType::get(IntTy, LaneLen);
  }

  static const Intrinsic::ID StoreInts[3] = {Intrinsic::arm_neon_vst2,
                                             Intrinsic::arm_neon_vst3,
                                             Intrinsic::arm_neon_vst4};
  SmallVector<Value *, 6> Ops;

  Type *Int8Ptr = Builder.getInt8PtrTy(SI->getPointerAddressSpace());
  Ops.push_back(Builder.CreateBitCast(SI->getPointerOperand(), Int8Ptr));

  Type *Tys[] = { Int8Ptr, SubVecTy };
  Function *VstNFunc =
      Intrinsic::getDeclaration(SI->getModule(), StoreInts[Factor - 2], Tys);

  // Split the shufflevector operands into sub vectors for the new vstN call.
  auto Mask = SVI->getShuffleMask();
  for (unsigned i = 0; i < Factor; i++) {
    if (Mask[i] >= 0) {
      Ops.push_back(Builder.CreateShuffleVector(
          Op0, Op1, getSequentialMask(Builder, Mask[i], LaneLen)));
    } else {
      unsigned StartMask = 0;
      for (unsigned j = 1; j < LaneLen; j++) {
        if (Mask[j * Factor + i] >= 0) {
          StartMask = Mask[j * Factor + i] - j;
          break;
        }
      }
      // Note: If all elements in a chunk are undefs, StartMask = 0!
      // Note: Filling undef gaps with random elements is fine, since those
      // elements were already being written anyway (with undefs).
      // In the case of all undefs we default to using elements starting at 0.
      // Note: StartMask cannot be negative; it is checked in
      // isReInterleaveMask.
      Ops.push_back(Builder.CreateShuffleVector(
          Op0, Op1, getSequentialMask(Builder, StartMask, LaneLen)));
    }
  }

  Ops.push_back(Builder.getInt32(SI->getAlignment()));
  Builder.CreateCall(VstNFunc, Ops);
  return true;
}

enum HABaseType {
  HA_UNKNOWN = 0,
  HA_FLOAT,
  HA_DOUBLE,
  HA_VECT64,
  HA_VECT128
};

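// isHomogeneousAggregate - Check whether Ty is a homogeneous aggregate in the
// AAPCS-VFP sense: a (possibly nested) struct or array whose leaf members all
// share one of the base types above (float, double, 64-bit vector or 128-bit
// vector), with at most four members in total. E.g. "struct { float x, y, z, w; }"
// is an HA of four floats, while "struct { float f; double d; }" is not.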
static bool isHomogeneousAggregate(Type *Ty, HABaseType &Base,
                                   uint64_t &Members) {
  if (auto *ST = dyn_cast<StructType>(Ty)) {
    for (unsigned i = 0; i < ST->getNumElements(); ++i) {
      uint64_t SubMembers = 0;
      if (!isHomogeneousAggregate(ST->getElementType(i), Base, SubMembers))
        return false;
      Members += SubMembers;
    }
  } else if (auto *AT = dyn_cast<ArrayType>(Ty)) {
    uint64_t SubMembers = 0;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, SubMembers))
      return false;
    Members += SubMembers * AT->getNumElements();
  } else if (Ty->isFloatTy()) {
    if (Base != HA_UNKNOWN && Base != HA_FLOAT)
      return false;
    Members = 1;
    Base = HA_FLOAT;
  } else if (Ty->isDoubleTy()) {
    if (Base != HA_UNKNOWN && Base != HA_DOUBLE)
      return false;
    Members = 1;
    Base = HA_DOUBLE;
  } else if (auto *VT = dyn_cast<VectorType>(Ty)) {
    Members = 1;
    switch (Base) {
    case HA_FLOAT:
    case HA_DOUBLE:
      return false;
    case HA_VECT64:
      return VT->getBitWidth() == 64;
    case HA_VECT128:
      return VT->getBitWidth() == 128;
    case HA_UNKNOWN:
      switch (VT->getBitWidth()) {
      case 64:  Base = HA_VECT64;  return true;
      case 128: Base = HA_VECT128; return true;
      default:  return false;
      }
    }
  }

  return (Members > 0 && Members <= 4);
}

/// \brief Return true if a type is an AAPCS-VFP homogeneous aggregate or one of
/// [N x i32] or [N x i64]. This allows front-ends to skip emitting padding when
/// passing according to AAPCS rules.
bool ARMTargetLowering::functionArgumentNeedsConsecutiveRegisters(
    Type *Ty, CallingConv::ID CallConv, bool isVarArg) const {
  if (getEffectiveCallingConv(CallConv, isVarArg) !=
      CallingConv::ARM_AAPCS_VFP)
    return false;

  HABaseType Base = HA_UNKNOWN;
  uint64_t Members = 0;
  bool IsHA = isHomogeneousAggregate(Ty, Base, Members);
  DEBUG(dbgs() << "isHA: " << IsHA << " "; Ty->dump());

  bool IsIntArray = Ty->isArrayTy() && Ty->getArrayElementType()->isIntegerTy();
  return IsHA || IsIntArray;
}

unsigned ARMTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R0;
}

unsigned ARMTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Platforms which do not use SjLj EH may return values in these registers
  // via the personality function.
  return Subtarget->useSjLjEH() ? ARM::NoRegister : ARM::R1;
}

void ARMTargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  // Update IsSplitCSR in ARMFunctionInfo.
  ARMFunctionInfo *AFI = Entry->getParent()->getInfo<ARMFunctionInfo>();
  AFI->setIsSplitCSR(true);
}

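// For functions with a split CSR (currently only CXX_FAST_TLS), copy each
// callee-saved register that is preserved via a copy into a fresh virtual
// register at the function entry, and copy it back into the physical register
// just before every return.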
void ARMTargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const ARMBaseRegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (ARM::GPRRegClass.contains(*I))
      RC = &ARM::GPRRegClass;
    else if (ARM::DPRRegClass.contains(*I))
      RC = &ARM::DPRRegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    unsigned NewVR = MRI->createVirtualRegister(RC);
    // Create copy from CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(Entry->getParent()->getFunction()->hasFnAttribute(
               Attribute::NoUnwind) &&
           "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)