//===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/MachineValueType.h"
#include <utility>

namespace llvm {

class ARMSubtarget;
class DataLayout;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class InstrItineraryData;
class Instruction;
class MachineBasicBlock;
class MachineInstr;
class SelectionDAG;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterInfo;
class VectorType;

namespace ARMISD {

  // ARM Specific DAG Nodes
  enum NodeType : unsigned {
    // Start the numbering where the builtin ops and target ops leave off.
    FIRST_NUMBER = ISD::BUILTIN_OP_END,

    Wrapper,      // Wrapper - A wrapper node for TargetConstantPool,
                  // TargetExternalSymbol, and TargetGlobalAddress.
    WrapperPIC,   // WrapperPIC - A wrapper node for TargetGlobalAddress in
                  // PIC mode.
    WrapperJT,    // WrapperJT - A wrapper node for TargetJumpTable

    // Add pseudo op to model memcpy for struct byval.
    COPY_STRUCT_BYVAL,

    CALL,         // Function call.
    CALL_PRED,    // Function call that's predicable.
    CALL_NOLINK,  // Function call with branch not branch-and-link.
    BRCOND,       // Conditional branch.
    BR_JT,        // Jumptable branch.
    BR2_JT,       // Jumptable branch (2 level - jumptable entry is a jump).
    RET_FLAG,     // Return with a flag operand.
    INTRET_FLAG,  // Interrupt return with an LR-offset and a flag operand.

    PIC_ADD,      // Add with a PC operand and a PIC label.

    ASRL,         // MVE long arithmetic shift right.
    LSRL,         // MVE long shift right.
    LSLL,         // MVE long shift left.

    CMP,          // ARM compare instructions.
    CMN,          // ARM CMN instructions.
    CMPZ,         // ARM compare that sets only Z flag.
    CMPFP,        // ARM VFP compare instruction, sets FPSCR.
    CMPFPw0,      // ARM VFP compare against zero instruction, sets FPSCR.
    FMSTAT,       // ARM fmstat instruction.

    CMOV,         // ARM conditional move instructions.
    SUBS,         // Flag-setting subtraction.

    SSAT,         // Signed saturation
    USAT,         // Unsigned saturation

    SRL_FLAG,     // V,Flag = srl_flag X -> srl X, 1 + save carry out.
    SRA_FLAG,     // V,Flag = sra_flag X -> sra X, 1 + save carry out.
    RRX,          // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

    ADDC,         // Add with carry
    ADDE,         // Add using carry
    SUBC,         // Sub with carry
    SUBE,         // Sub using carry

    VMOVRRD,      // double to two gprs.
    VMOVDRR,      // Two gprs to double.
    VMOVSR,       // move gpr to single, used for f32 literal constructed in a gpr

    EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
    EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
    EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.

    TC_RETURN,    // Tail call return pseudo.

    DYN_ALLOC,    // Dynamic allocation on the stack.

    MEMBARRIER_MCR, // Memory barrier (MCR)

    WIN__CHKSTK,  // Windows' __chkstk call to do stack probing.
    WIN__DBZCHK,  // Windows' divide by zero check

    WLS,          // Low-overhead loops, While Loop Start

    VCEQ,         // Vector compare equal.
    VCEQZ,        // Vector compare equal to zero.
    VCGE,         // Vector compare greater than or equal.
    VCGEZ,        // Vector compare greater than or equal to zero.
    VCLEZ,        // Vector compare less than or equal to zero.
    VCGEU,        // Vector compare unsigned greater than or equal.
    VCGT,         // Vector compare greater than.
    VCGTZ,        // Vector compare greater than zero.
    VCLTZ,        // Vector compare less than zero.
    VCGTU,        // Vector compare unsigned greater than.
    VTST,         // Vector test bits.

    // Vector shift by vector
    VSHLs,        // ...left/right by signed
    VSHLu,        // ...left/right by unsigned

    // Vector shift by immediate:
    VSHLIMM,      // ...left
    VSHRsIMM,     // ...right (signed)
    VSHRuIMM,     // ...right (unsigned)

    // Vector rounding shift by immediate:
    VRSHRsIMM,    // ...right (signed)
    VRSHRuIMM,    // ...right (unsigned)
    VRSHRNIMM,    // ...right narrow

    // Vector saturating shift by immediate:
    VQSHLsIMM,    // ...left (signed)
    VQSHLuIMM,    // ...left (unsigned)
    VQSHLsuIMM,   // ...left (signed to unsigned)
    VQSHRNsIMM,   // ...right narrow (signed)
    VQSHRNuIMM,   // ...right narrow (unsigned)
    VQSHRNsuIMM,  // ...right narrow (signed to unsigned)

    // Vector saturating rounding shift by immediate:
    VQRSHRNsIMM,  // ...right narrow (signed)
    VQRSHRNuIMM,  // ...right narrow (unsigned)
    VQRSHRNsuIMM, // ...right narrow (signed to unsigned)

    // Vector shift and insert:
    VSLIIMM,      // ...left
    VSRIIMM,      // ...right

    // Vector get lane (VMOV scalar to ARM core register)
    // (These are used for 8- and 16-bit element types only.)
    VGETLANEu,    // zero-extend vector extract element
    VGETLANEs,    // sign-extend vector extract element

    // Vector move immediate and move negated immediate:
    VMOVIMM,
    VMVNIMM,

    // Vector move f32 immediate:
    VMOVFPIMM,

    // Move H <-> R, clearing top 16 bits
    VMOVrh,
    VMOVhr,

    VREV64,       // reverse elements within 64-bit doublewords
    VREV32,       // reverse elements within 32-bit words
    VREV16,       // reverse elements within 16-bit halfwords
    VZIP,         // zip (interleave)
    VUZP,         // unzip (deinterleave)

    VTBL1,        // 1-register shuffle with mask
    VTBL2,        // 2-register shuffle with mask

    // Vector multiply long:
    VMULLs,       // ...signed
    VMULLu,       // ...unsigned

    SMULWB,       // Signed multiply word by half word, bottom
    SMULWT,       // Signed multiply word by half word, top
    UMLAL,        // 64-bit Unsigned Accumulate Multiply
    SMLAL,        // 64-bit Signed Accumulate Multiply
    UMAAL,        // 64-bit Unsigned Accumulate Accumulate Multiply
    SMLALBB,      // 64-bit signed accumulate multiply bottom, bottom 16
    SMLALBT,      // 64-bit signed accumulate multiply bottom, top 16
    SMLALTB,      // 64-bit signed accumulate multiply top, bottom 16
    SMLALTT,      // 64-bit signed accumulate multiply top, top 16
    SMLALD,       // Signed multiply accumulate long dual
    SMLALDX,      // Signed multiply accumulate long dual exchange
    SMLSLD,       // Signed multiply subtract long dual
    SMLSLDX,      // Signed multiply subtract long dual exchange
    SMMLAR,       // Signed multiply long, round and add
    SMMLSR,       // Signed multiply long, subtract and round

    // Operands of the standard BUILD_VECTOR node are not legalized, which
    // is fine if BUILD_VECTORs are always lowered to shuffles or other
    // operations, but for ARM some BUILD_VECTORs are legal as-is and their
    // operands need to be legalized. Define an ARM-specific version of
    // BUILD_VECTOR for this purpose.
    BUILD_VECTOR,

    // Vector OR with immediate
    VORRIMM,
    // Vector AND with NOT of immediate
    VBICIMM,

    // Vector bitwise select
    VBSL,

    // Pseudo-instruction representing a memory copy using ldm/stm
    // instructions.
    MEMCPY,

    // Vector load N-element structure to all lanes:
    VLD1DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
    VLD2DUP,
    VLD3DUP,
    VLD4DUP,

    // NEON loads with post-increment base updates:
    VLD1_UPD,
    VLD2_UPD,
    VLD3_UPD,
    VLD4_UPD,
    VLD2LN_UPD,
    VLD3LN_UPD,
    VLD4LN_UPD,
    VLD1DUP_UPD,
    VLD2DUP_UPD,
    VLD3DUP_UPD,
    VLD4DUP_UPD,

    // NEON stores with post-increment base updates:
    VST1_UPD,
    VST2_UPD,
    VST3_UPD,
    VST4_UPD,
    VST2LN_UPD,
    VST3LN_UPD,
    VST4LN_UPD
  };

} // end namespace ARMISD

/// Define some predicates that are used for node matching.
namespace ARM {

  bool isBitFieldInvertedMask(unsigned v);
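
  // For illustration: 0xFF00FFFF satisfies this predicate, since its
  // complement 0x00FF0000 is a single contiguous run of ones, which is the
  // shape that BFI/BFC bit-field formation looks for.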

} // end namespace ARM

//===--------------------------------------------------------------------===//
//  ARMTargetLowering - ARM Implementation of the TargetLowering interface
//===--------------------------------------------------------------------===//

class ARMTargetLowering : public TargetLowering {
public:
  explicit ARMTargetLowering(const TargetMachine &TM,
                             const ARMSubtarget &STI);

  unsigned getJumpTableEncoding() const override;
  bool useSoftFloat() const override;

  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  /// ReplaceNodeResults - Replace the results of node with an illegal result
  /// type with new values built out of custom code.
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  bool isSelectSupported(SelectSupportKind Kind) const override {
    // ARM does not support scalar condition selects on vectors.
    return (Kind != ScalarCondVectorVal);
  }

  bool isReadOnly(const GlobalValue *GV) const;

  /// getSetCCResultType - Return the value type to use for ISD::SETCC.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                     SDNode *Node) const override;

  SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const;
  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;

  /// allowsMisalignedMemoryAccesses - Returns true if the target allows
  /// unaligned memory accesses of the specified type. Returns whether it
  /// is "fast" by reference in the last argument.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                      unsigned Align,
                                      MachineMemOperand::Flags Flags,
                                      bool *Fast) const override;

  EVT getOptimalMemOpType(uint64_t Size,
                          unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset,
                          bool MemcpyStrSrc,
                          const AttributeList &FuncAttributes) const override;

  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;

  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool shouldSinkOperands(Instruction *I,
                          SmallVectorImpl<Use *> &Ops) const override;

  bool isFNegFree(EVT VT) const override;

  bool isVectorLoadExtDesirable(SDValue ExtVal) const override;

  bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                             Type *Ty, unsigned AS,
                             Instruction *I = nullptr) const override;
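
  // For instance, an ARM-mode i32 load can use a register offset scaled by
  // the element size ("ldr r0, [r1, r2, lsl #2]"), so an AddrMode with a
  // base register and Scale == 4 is typically accepted here, while Thumb1
  // rejects most scaled forms.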

  /// getScalingFactorCost - Return the cost of the scaling used in
  /// addressing mode represented by AM.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, the return value must be negative.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

  /// Returns true if the addressing mode represented by AM is legal
  /// for the Thumb1 target, for a load/store of the specified type.
  bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

  /// isLegalICmpImmediate - Return true if the specified immediate is a
  /// legal icmp immediate, that is, the target has icmp instructions which
  /// can compare a register against the immediate without having to
  /// materialize the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  /// isLegalAddImmediate - Return true if the specified immediate is a
  /// legal add immediate, that is, the target has add instructions which
  /// can add a register and the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const override;
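
  // e.g. in ARM mode "cmp r0, #255" and "add r0, r1, #0xFF000000" are legal
  // (both immediates are an 8-bit value rotated by an even amount), whereas
  // 257 (0x101) is not encodable and would first need a MOV/MOVW.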

  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;

  /// getPostIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if this node can be
  /// combined with a load / store to form a post-indexed load / store.
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;
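
  // These two hooks let the DAG combiner form writeback addressing, e.g.
  // "ldr r0, [r1, #4]!" (pre-indexed) and "ldr r0, [r1], #4" (post-indexed).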

  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                    TargetLoweringOpt &TLO) const override;

  bool ExpandInlineAsm(CallInst *CI) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
  /// true it means one of the asm constraints of the inline asm instruction
  /// being processed is 'm'.
  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    else if (ConstraintCode == "o")
      return InlineAsm::Constraint_o;
    else if (ConstraintCode.size() == 2) {
      if (ConstraintCode[0] == 'U') {
        switch (ConstraintCode[1]) {
        default:
          break;
        case 'm':
          return InlineAsm::Constraint_Um;
        case 'n':
          return InlineAsm::Constraint_Un;
        case 'q':
          return InlineAsm::Constraint_Uq;
        case 's':
          return InlineAsm::Constraint_Us;
        case 't':
          return InlineAsm::Constraint_Ut;
        case 'v':
          return InlineAsm::Constraint_Uv;
        case 'y':
          return InlineAsm::Constraint_Uy;
        }
      }
    }
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  const ARMSubtarget *getSubtarget() const {
    return Subtarget;
  }

  /// getRegClassFor - Return the register class that should be used for the
  /// specified value type.
  const TargetRegisterClass *
  getRegClassFor(MVT VT, bool isDivergent = false) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                              unsigned &PrefAlign) const override;

  /// createFastISel - This method returns a target specific FastISel object,
  /// or null if the target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  Sched::Preference getSchedulingPreference(SDNode *N) const override;

  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  /// isFPImmLegal - Returns true if the target can instruction select the
  /// specified FP immediate natively. If false, the legalizer will
  /// materialize the FP immediate as a load from a constant pool.
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize = false) const override;
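
  // e.g. with VFP3, 1.0f fits the VMOV.F32 immediate encoding and is legal,
  // while 0.1f is not representable and gets loaded from the constant pool.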

  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                          const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;
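
  // On ARM this is usually profitable: MOVW/MOVT (or a short MOV/ORR
  // sequence) can rematerialize any i32, e.g. 0x12345678, in at most two
  // instructions, which tends to beat a load from the constant pool.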

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  /// Returns true if an argument of type Ty needs to be passed in a
  /// contiguous block of registers in calling convention CallConv.
  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  Instruction *makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const;
  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;
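
  // e.g. on ARMv7, where explicit fences are inserted around atomics, a
  // seq_cst store is typically bracketed as "dmb ish; str; dmb ish" rather
  // than using acquire/release instructions.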

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;
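
  // e.g. a wide vector load feeding two shufflevectors that pick the even
  // and odd elements (factor 2) can be rewritten as a single NEON vld2.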

  bool shouldInsertFencesForAtomic(const Instruction *I) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Function *getSSPStackGuardCheck(const Module &M) const override;

  bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                 unsigned &Cost) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge to larger than i32.
    return (MemVT.getSizeInBits() <= 32);
  }

  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }

  bool supportSwiftError() const override {
    return true;
  }

  bool hasStandaloneRem(EVT VT) const override {
    return HasStandaloneRem;
  }

  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;

  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  bool alignLoopsWithOptSize() const override;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;
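
  // e.g. a 256-bit <8 x i32> access is counted as two 128-bit vldN/vstN
  // operations on NEON.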

  void finalizeLowering(MachineFunction &MF) const override;

  /// Return the correct alignment for the current calling convention.
  unsigned getABIAlignmentForCallingConv(Type *ArgTy,
                                         DataLayout DL) const override;

  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                         CombineLevel Level) const override;

  bool preferIncOfAddToSubOfNot(EVT VT) const override;

protected:
  std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI,
                          MVT VT) const override;

private:
  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;

  const TargetRegisterInfo *RegInfo;

  const InstrItineraryData *Itins;

  /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
  unsigned ARMPCLabelIndex;

  // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
  // verification.
  bool InsertFencesForAtomic;

  bool HasStandaloneRem = true;

  void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                            SDValue &ARMcc) const;

  using RegsToPassVector = SmallVector<std::pair<unsigned, SDValue>, 8>;

  void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
                        SDValue &Arg, RegsToPassVector &RegsToPass,
                        CCValAssign &VA, CCValAssign &NextVA,
                        SDValue &StackPtr,
                        SmallVectorImpl<SDValue> &MemOpChains,
                        ISD::ArgFlagsTy Flags) const;
  SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                               SDValue &Root, SelectionDAG &DAG,
                               const SDLoc &dl) const;

  CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                          bool isVarArg) const;
  CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                bool isVarArg) const;
  SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                           const SDLoc &dl, SelectionDAG &DAG,
                           const CCValAssign &VA,
                           ISD::ArgFlagsTy Flags) const;
  SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                  const ARMSubtarget *Subtarget) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                        SelectionDAG &DAG) const;
  SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                               SelectionDAG &DAG,
                               TLSModel::Model model) const;
  SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerUnsignedALUO(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                          const ARMSubtarget *ST) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
  void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
                         SmallVectorImpl<SDValue> &Results) const;
  SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
                                 SDValue &Chain) const;
  SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  void lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
                SelectionDAG &DAG) const;

  unsigned getRegisterByName(const char *RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
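
  // e.g. "sdiv i32 %x, 8" can be emitted as a branch-free sequence that
  // conditionally adds 7 to negative values of %x and then shifts right
  // arithmetically by 3.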

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true, otherwise fmuladd is
  /// expanded to fmul + fadd.
  ///
  /// ARM supports both fused and unfused multiply-add operations; we already
  /// lower a pair of fmul and fadd to the latter so it's not clear that there
  /// would be a gain or that the gain would be worthwhile enough to risk
  /// correctness bugs.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override { return false; }

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &dl, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }

  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       const SDLoc &dl, SelectionDAG &DAG,
                       SmallVectorImpl<SDValue> &InVals) const override;

  int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, const Value *OrigArg,
                     unsigned InRegsParamRecordIdx, int ArgOffset,
                     unsigned ArgSize) const;

  void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                            const SDLoc &dl, SDValue &Chain,
                            unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
                            bool ForceMutable = false) const;

  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  /// HandleByVal - Target-specific cleanup for ByVal support.
  void HandleByVal(CCState *, unsigned &, unsigned) const override;

  /// IsEligibleForTailCallOptimization - Check whether the call is eligible
  /// for tail call optimization. Targets which want to do tail call
  /// optimization should implement this function.
  bool IsEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      bool isCalleeStructRet, bool isCallerStructRet,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
      const bool isIndirect) const;

  bool CanLowerReturn(CallingConv::ID CallConv,
                      MachineFunction &MF, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals,
                      const SDLoc &dl, SelectionDAG &DAG) const override;

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool isUnsupportedFloatingType(EVT VT) const;

  SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                  SDValue ARMcc, SDValue CCR, SDValue Cmp,
                  SelectionDAG &DAG) const;
  SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                    SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
  SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                    const SDLoc &dl, bool InvalidOnQNaN) const;
  SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

  SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

  void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
                              MachineBasicBlock *DispatchBB, int FI) const;

  void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;

  bool RemapAddSubWithFlags(MachineInstr &MI, MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitStructByval(MachineInstr &MI,
                                     MachineBasicBlock *MBB) const;

  MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;
  MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

  void addMVEVectorTypes(bool HasMVEFP);
  void addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action);
  void setAllExpand(MVT VT);
};

enum NEONModImmType {
  VMOVModImm,
  VMVNModImm,
  OtherModImm
};

namespace ARM {

  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo);

} // end namespace ARM

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H