//===- ARMISelLowering.h - ARM DAG Lowering Interface -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that ARM uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H
#define LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H

#include "MCTargetDesc/ARMBaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/MachineValueType.h"
#include <utility>

namespace llvm {

class ARMSubtarget;
class DataLayout;
class FastISel;
class FunctionLoweringInfo;
class GlobalValue;
class InstrItineraryData;
class Instruction;
class MachineBasicBlock;
class MachineInstr;
class SelectionDAG;
class TargetLibraryInfo;
class TargetMachine;
class TargetRegisterInfo;
class VectorType;

  namespace ARMISD {

    // ARM Specific DAG Nodes
    enum NodeType : unsigned {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      Wrapper,    // Wrapper - A wrapper node for TargetConstantPool,
                  // TargetExternalSymbol, and TargetGlobalAddress.
      WrapperPIC, // WrapperPIC - A wrapper node for TargetGlobalAddress in
                  // PIC mode.
      WrapperJT,  // WrapperJT - A wrapper node for TargetJumpTable

      // Add pseudo op to model memcpy for struct byval.
      COPY_STRUCT_BYVAL,

      CALL,        // Function call.
      CALL_PRED,   // Function call that's predicable.
      CALL_NOLINK, // Function call with branch not branch-and-link.
      BRCOND,      // Conditional branch.
      BR_JT,       // Jumptable branch.
      BR2_JT,      // Jumptable branch (2 level - jumptable entry is a jump).
      RET_FLAG,    // Return with a flag operand.
      INTRET_FLAG, // Interrupt return with an LR-offset and a flag operand.

      PIC_ADD, // Add with a PC operand and a PIC label.

      ASRL, // MVE long arithmetic shift right.
      LSRL, // MVE long shift right.
      LSLL, // MVE long shift left.

      CMP,     // ARM compare instructions.
      CMN,     // ARM CMN instructions.
      CMPZ,    // ARM compare that sets only Z flag.
      CMPFP,   // ARM VFP compare instruction, sets FPSCR.
      CMPFPw0, // ARM VFP compare against zero instruction, sets FPSCR.
      FMSTAT,  // ARM fmstat instruction.

      CMOV, // ARM conditional move instructions.
      SUBS, // Flag-setting subtraction.

      SSAT, // Signed saturation
      USAT, // Unsigned saturation

      SRL_FLAG, // V,Flag = srl_flag X -> srl X, 1 + save carry out.
      SRA_FLAG, // V,Flag = sra_flag X -> sra X, 1 + save carry out.
      RRX,      // V = RRX X, Flag -> srl X, 1 + shift in carry flag.

      ADDC, // Add with carry
      ADDE, // Add using carry
      SUBC, // Sub with carry
      SUBE, // Sub using carry
      LSLS, // Shift left producing carry

      VMOVRRD, // double to two gprs.
      VMOVDRR, // Two gprs to double.
      VMOVSR,  // move gpr to single, used for f32 literal constructed in a gpr

      EH_SJLJ_SETJMP,         // SjLj exception handling setjmp.
      EH_SJLJ_LONGJMP,        // SjLj exception handling longjmp.
      EH_SJLJ_SETUP_DISPATCH, // SjLj exception handling setup_dispatch.

      TC_RETURN, // Tail call return pseudo.

      DYN_ALLOC, // Dynamic allocation on the stack.

      MEMBARRIER_MCR, // Memory barrier (MCR)

      WIN__CHKSTK, // Windows' __chkstk call to do stack probing.
      WIN__DBZCHK, // Windows' divide by zero check

      WLS,      // Low-overhead loops, While Loop Start
      LOOP_DEC, // Really a part of LE, performs the sub
      LE,       // Low-overhead loops, Loop End

      PREDICATE_CAST, // Predicate cast for MVE i1 types

      VCMP,  // Vector compare.
      VCMPZ, // Vector compare to zero.
      VTST,  // Vector test bits.

      // Vector shift by vector
      VSHLs, // ...left/right by signed
      VSHLu, // ...left/right by unsigned

      // Vector shift by immediate:
      VSHLIMM,  // ...left
      VSHRsIMM, // ...right (signed)
      VSHRuIMM, // ...right (unsigned)

      // Vector rounding shift by immediate:
      VRSHRsIMM, // ...right (signed)
      VRSHRuIMM, // ...right (unsigned)
      VRSHRNIMM, // ...right narrow

      // Vector saturating shift by immediate:
      VQSHLsIMM,   // ...left (signed)
      VQSHLuIMM,   // ...left (unsigned)
      VQSHLsuIMM,  // ...left (signed to unsigned)
      VQSHRNsIMM,  // ...right narrow (signed)
      VQSHRNuIMM,  // ...right narrow (unsigned)
      VQSHRNsuIMM, // ...right narrow (signed to unsigned)

      // Vector saturating rounding shift by immediate:
      VQRSHRNsIMM,  // ...right narrow (signed)
      VQRSHRNuIMM,  // ...right narrow (unsigned)
      VQRSHRNsuIMM, // ...right narrow (signed to unsigned)

      // Vector shift and insert:
      VSLIIMM, // ...left
      VSRIIMM, // ...right

      // Vector get lane (VMOV scalar to ARM core register)
      // (These are used for 8- and 16-bit element types only.)
      VGETLANEu, // zero-extend vector extract element
      VGETLANEs, // sign-extend vector extract element

      // Vector move immediate and move negated immediate:
      VMOVIMM,
      VMVNIMM,

      // Vector move f32 immediate:
      VMOVFPIMM,

      // Move H <-> R, clearing top 16 bits
      VMOVhr,
      VMOVrh,

      // Vector duplicate:
      VDUP,
      VDUPLANE,

      VEXT,   // extract
      VREV64, // reverse elements within 64-bit doublewords
      VREV32, // reverse elements within 32-bit words
      VREV16, // reverse elements within 16-bit halfwords
      VZIP,   // zip (interleave)
      VUZP,   // unzip (deinterleave)
      VTRN,   // transpose
      VTBL1,  // 1-register shuffle with mask
      VTBL2,  // 2-register shuffle with mask

      // Vector multiply long:
      VMULLs, // ...signed
      VMULLu, // ...unsigned

      SMULWB,  // Signed multiply word by half word, bottom
      SMULWT,  // Signed multiply word by half word, top
      UMLAL,   // 64-bit Unsigned Accumulate Multiply
      SMLAL,   // 64-bit Signed Accumulate Multiply
      UMAAL,   // 64-bit Unsigned Accumulate Accumulate Multiply
      SMLALBB, // 64-bit signed accumulate multiply bottom, bottom 16
      SMLALBT, // 64-bit signed accumulate multiply bottom, top 16
      SMLALTB, // 64-bit signed accumulate multiply top, bottom 16
      SMLALTT, // 64-bit signed accumulate multiply top, top 16
      SMLALD,  // Signed multiply accumulate long dual
      SMLALDX, // Signed multiply accumulate long dual exchange
      SMLSLD,  // Signed multiply subtract long dual
      SMLSLDX, // Signed multiply subtract long dual exchange
      SMMLAR,  // Signed multiply long, round and add
      SMMLSR,  // Signed multiply long, subtract and round

      // Single Lane QADD8 and QADD16. Only the bottom lane. That's what the b
      // stands for.
      QADD8b,
      QSUB8b,
      QADD16b,
      QSUB16b,

      // Operands of the standard BUILD_VECTOR node are not legalized, which
      // is fine if BUILD_VECTORs are always lowered to shuffles or other
      // operations, but for ARM some BUILD_VECTORs are legal as-is and their
      // operands need to be legalized. Define an ARM-specific version of
      // BUILD_VECTOR for this purpose.
      BUILD_VECTOR,

      // Bit-field insert
      BFI,

      // Vector OR with immediate
      VORRIMM,
      // Vector AND with NOT of immediate
      VBICIMM,

      // Vector bitwise select
      VBSL,

      // Pseudo-instruction representing a memory copy using ldm/stm
      // instructions.
      MEMCPY,

      // V8.1-M Mainline condition select
      CSINV, // Conditional select invert.
      CSNEG, // Conditional select negate.
      CSINC, // Conditional select increment.

      // Vector load N-element structure to all lanes:
      VLD1DUP = ISD::FIRST_TARGET_MEMORY_OPCODE,
      VLD2DUP,
      VLD3DUP,
      VLD4DUP,

      // NEON loads with post-increment base updates:
      VLD1_UPD,
      VLD2_UPD,
      VLD3_UPD,
      VLD4_UPD,
      VLD2LN_UPD,
      VLD3LN_UPD,
      VLD4LN_UPD,
      VLD1DUP_UPD,
      VLD2DUP_UPD,
      VLD3DUP_UPD,
      VLD4DUP_UPD,

      // NEON stores with post-increment base updates:
      VST1_UPD,
      VST2_UPD,
      VST3_UPD,
      VST4_UPD,
      VST2LN_UPD,
      VST3LN_UPD,
      VST4LN_UPD,

      // Load/Store of dual registers
      LDRD,
      STRD
    };

  } // end namespace ARMISD

  /// Define some predicates that are used for node matching.
  namespace ARM {

    bool isBitFieldInvertedMask(unsigned v);
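    // A bit-field inverted mask is all ones except for one contiguous run of
    // zeros, e.g. 0xffff00ff; such masks describe the fields that the BFC/BFI
    // bit-field instructions can clear or insert.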

  } // end namespace ARM

  //===--------------------------------------------------------------------===//
  //  ARMTargetLowering - ARM Implementation of the TargetLowering interface

  class ARMTargetLowering : public TargetLowering {
  public:
    explicit ARMTargetLowering(const TargetMachine &TM,
                               const ARMSubtarget &STI);

    unsigned getJumpTableEncoding() const override;
    bool useSoftFloat() const override;

    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// ReplaceNodeResults - Replace the results of node with an illegal result
    /// type with new values built out of custom code.
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    const char *getTargetNodeName(unsigned Opcode) const override;

    bool isSelectSupported(SelectSupportKind Kind) const override {
      // ARM does not support scalar condition selects on vectors.
      return (Kind != ScalarCondVectorVal);
    }

    bool isReadOnly(const GlobalValue *GV) const;

    /// getSetCCResultType - Return the value type to use for ISD::SETCC.
    EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                           EVT VT) const override;

    MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr &MI,
                                MachineBasicBlock *MBB) const override;

    void AdjustInstrPostInstrSelection(MachineInstr &MI,
                                       SDNode *Node) const override;
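
    // Target-specific DAG combining: PerformDAGCombine is the override called
    // by the combiner and delegates to the Perform*Combine helpers for the
    // corresponding node kinds.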
    SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const;
    SDValue PerformBRCONDCombine(SDNode *N, SelectionDAG &DAG) const;
    SDValue PerformCMOVToBFICombine(SDNode *N, SelectionDAG &DAG) const;
    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    bool isDesirableToTransformToIntegerOp(unsigned Opc, EVT VT) const override;

    /// allowsMisalignedMemoryAccesses - Returns true if the target allows
    /// unaligned memory accesses of the specified type. Returns whether it
    /// is "fast" by reference in the last argument.
    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
                                        unsigned Align,
                                        MachineMemOperand::Flags Flags,
                                        bool *Fast) const override;

    EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                            bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                            const AttributeList &FuncAttributes) const override;

    bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
    bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
    bool isZExtFree(SDValue Val, EVT VT2) const override;
    bool shouldSinkOperands(Instruction *I,
                            SmallVectorImpl<Use *> &Ops) const override;

    bool isFNegFree(EVT VT) const override;

    bool isVectorLoadExtDesirable(SDValue ExtVal) const override;

    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS,
                               Instruction *I = nullptr) const override;

    /// getScalingFactorCost - Return the cost of the scaling used in
    /// addressing mode represented by AM.
    /// If the AM is supported, the return value must be >= 0.
    /// If the AM is not supported, the return value must be negative.
    int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS) const override;
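
    /// Returns true if the addressing mode represented by AM is legal
    /// for the Thumb2 target, for a load/store of the specified type.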
    bool isLegalT2ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// Returns true if the addressing mode represented by AM is legal
    /// for the Thumb1 target, for a load/store of the specified type.
    bool isLegalT1ScaledAddressingMode(const AddrMode &AM, EVT VT) const;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is the target has add instructions which can
    /// add a register and the immediate without having to materialize the
    /// immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as pre-indexed load / store address.
    bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                   ISD::MemIndexedMode &AM,
                                   SelectionDAG &DAG) const override;

    /// getPostIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if this node can be
    /// combined with a load / store to form a post-indexed load / store.
    bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                    SDValue &Offset, ISD::MemIndexedMode &AM,
                                    SelectionDAG &DAG) const override;

    void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                       const APInt &DemandedElts,
                                       const SelectionDAG &DAG,
                                       unsigned Depth) const override;

    bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                      TargetLoweringOpt &TLO) const override;

    bool ExpandInlineAsm(CallInst *CI) const override;

    ConstraintType getConstraintType(StringRef Constraint) const override;

    /// Examine constraint string and operand type and determine a weight value.
    /// The operand object must already have been set up with the operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
        AsmOperandInfo &info, const char *constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    const char *LowerXConstraint(EVT ConstraintVT) const override;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
    /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
    /// true it means one of the asm constraints of the inline asm instruction
    /// being processed is 'm'.
    void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

    unsigned
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "Q")
        return InlineAsm::Constraint_Q;
      else if (ConstraintCode == "o")
        return InlineAsm::Constraint_o;
      else if (ConstraintCode.size() == 2) {
        if (ConstraintCode[0] == 'U') {
          switch(ConstraintCode[1]) {
          default:
            break;
          case 'm':
            return InlineAsm::Constraint_Um;
          case 'n':
            return InlineAsm::Constraint_Un;
          case 'q':
            return InlineAsm::Constraint_Uq;
          case 's':
            return InlineAsm::Constraint_Us;
          case 't':
            return InlineAsm::Constraint_Ut;
          case 'v':
            return InlineAsm::Constraint_Uv;
          case 'y':
            return InlineAsm::Constraint_Uy;
          }
        }
      }
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }

    const ARMSubtarget* getSubtarget() const {
      return Subtarget;
    }

    /// getRegClassFor - Return the register class that should be used for the
    /// specified value type.
    const TargetRegisterClass *
    getRegClassFor(MVT VT, bool isDivergent = false) const override;

    /// Returns true if a cast between SrcAS and DestAS is a noop.
    bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
      // Addrspacecasts are always noops.
      return true;
    }

    bool shouldAlignPointerArgs(CallInst *CI, unsigned &MinSize,
                                unsigned &PrefAlign) const override;

    /// createFastISel - This method returns a target specific FastISel object,
    /// or null if the target does not support "fast" ISel.
    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo) const override;

    Sched::Preference getSchedulingPreference(SDNode *N) const override;

    bool
    isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    /// isFPImmLegal - Returns true if the target can instruction select the
    /// specified FP immediate natively. If false, the legalizer will
    /// materialize the FP immediate as a load from a constant pool.
    bool isFPImmLegal(const APFloat &Imm, EVT VT,
                      bool ForCodeSize = false) const override;

    bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                            const CallInst &I,
                            MachineFunction &MF,
                            unsigned Intrinsic) const override;

    /// Returns true if it is beneficial to convert a load of a constant
    /// to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
    /// with this index.
    bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                                 unsigned Index) const override;

    /// Returns true if an argument of type Ty needs to be passed in a
    /// contiguous block of registers in calling convention CallConv.
    bool functionArgumentNeedsConsecutiveRegisters(
        Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override;

    /// If a physical register, this returns the register that receives the
    /// exception address on entry to an EH pad.
    unsigned
    getExceptionPointerRegister(const Constant *PersonalityFn) const override;

    /// If a physical register, this returns the register that receives the
    /// exception typeid on entry to a landing pad.
    unsigned
    getExceptionSelectorRegister(const Constant *PersonalityFn) const override;
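
    /// Emit a DMB (data memory barrier) for the given barrier domain, or an
    /// equivalent barrier sequence on subtargets without the DMB instruction.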
    Instruction *makeDMB(IRBuilder<> &Builder, ARM_MB::MemBOpt Domain) const;
    Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                          AtomicOrdering Ord) const override;
    Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                Value *Addr, AtomicOrdering Ord) const override;

    void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

    Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                  AtomicOrdering Ord) const override;
    Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
                                   AtomicOrdering Ord) const override;

    unsigned getMaxSupportedInterleaveFactor() const override;

    bool lowerInterleavedLoad(LoadInst *LI,
                              ArrayRef<ShuffleVectorInst *> Shuffles,
                              ArrayRef<unsigned> Indices,
                              unsigned Factor) const override;
    bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                               unsigned Factor) const override;

    bool shouldInsertFencesForAtomic(const Instruction *I) const override;
    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
    TargetLoweringBase::AtomicExpansionKind
    shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

    bool useLoadStackGuardNode() const override;

    void insertSSPDeclarations(Module &M) const override;
    Value *getSDagStackGuard(const Module &M) const override;
    Function *getSSPStackGuardCheck(const Module &M) const override;

    bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                   unsigned &Cost) const override;

    bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                          const SelectionDAG &DAG) const override {
      // Do not merge to larger than i32.
      return (MemVT.getSizeInBits() <= 32);
    }

    bool isCheapToSpeculateCttz() const override;
    bool isCheapToSpeculateCtlz() const override;

    bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
      return VT.isScalarInteger();
    }

    bool supportSwiftError() const override {
      return true;
    }

    bool hasStandaloneRem(EVT VT) const override {
      return HasStandaloneRem;
    }

    bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override;
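
    /// Choose the CCAssignFn to use for the given IR calling convention,
    /// either for call arguments or for return values.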
    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool isVarArg) const;
    CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool isVarArg) const;

    /// Returns true if \p VecTy is a legal interleaved access type. This
    /// function checks the vector element type and the overall width of the
    /// vector.
    bool isLegalInterleavedAccessType(unsigned Factor, VectorType *VecTy,
                                      const DataLayout &DL) const;

    bool alignLoopsWithOptSize() const override;

    /// Returns the number of interleaved accesses that will be generated when
    /// lowering accesses of the given type.
    unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                       const DataLayout &DL) const;

    void finalizeLowering(MachineFunction &MF) const override;

    /// Return the correct alignment for the current calling convention.
    Align getABIAlignmentForCallingConv(Type *ArgTy,
                                        DataLayout DL) const override;

    bool isDesirableToCommuteWithShift(const SDNode *N,
                                       CombineLevel Level) const override;

    bool shouldFoldConstantShiftPairToMask(const SDNode *N,
                                           CombineLevel Level) const override;

    bool preferIncOfAddToSubOfNot(EVT VT) const override;

  protected:
    std::pair<const TargetRegisterClass *, uint8_t>
    findRepresentativeClass(const TargetRegisterInfo *TRI,
                            MVT VT) const override;

  private:
    /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
    /// make the right decision when generating code for different targets.
    const ARMSubtarget *Subtarget;

    const TargetRegisterInfo *RegInfo;

    const InstrItineraryData *Itins;

    /// ARMPCLabelIndex - Keep track of the number of ARM PC labels created.
    unsigned ARMPCLabelIndex;

    // TODO: remove this, and have shouldInsertFencesForAtomic do the proper
    // check
    bool InsertFencesForAtomic;
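
    // Backing storage for hasStandaloneRem() above: whether plain SREM/UREM
    // nodes can be lowered directly, rather than being expanded through a
    // combined divide-and-remainder lowering.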
    bool HasStandaloneRem = true;
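
    // Helpers used in the constructor to register the NEON D- and Q-register
    // vector types and set up the legality of their operations.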
    void addTypeForNEON(MVT VT, MVT PromotedLdStVT, MVT PromotedBitwiseVT);
    void addDRTypeForNEON(MVT VT);
    void addQRTypeForNEON(MVT VT);
    std::pair<SDValue, SDValue> getARMXALUOOp(SDValue Op, SelectionDAG &DAG,
                                              SDValue &ARMcc) const;

    using RegsToPassVector = SmallVector<std::pair<unsigned, SDValue>, 8>;

    void PassF64ArgInRegs(const SDLoc &dl, SelectionDAG &DAG, SDValue Chain,
                          SDValue &Arg, RegsToPassVector &RegsToPass,
                          CCValAssign &VA, CCValAssign &NextVA,
                          SDValue &StackPtr,
                          SmallVectorImpl<SDValue> &MemOpChains,
                          ISD::ArgFlagsTy Flags) const;
    SDValue GetF64FormalArgument(CCValAssign &VA, CCValAssign &NextVA,
                                 SDValue &Root, SelectionDAG &DAG,
                                 const SDLoc &dl) const;

    CallingConv::ID getEffectiveCallingConv(CallingConv::ID CC,
                                            bool isVarArg) const;
    CCAssignFn *CCAssignFnForNode(CallingConv::ID CC, bool Return,
                                  bool isVarArg) const;
    SDValue LowerMemOpCallTo(SDValue Chain, SDValue StackPtr, SDValue Arg,
                             const SDLoc &dl, SelectionDAG &DAG,
                             const CCValAssign &VA,
                             ISD::ArgFlagsTy Flags) const;
    SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_SJLJ_SETUP_DISPATCH(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG,
                                const ARMSubtarget *Subtarget) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG,
                                    const ARMSubtarget *Subtarget) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressELF(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddressWindows(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
                                          SelectionDAG &DAG) const;
    SDValue LowerToTLSExecModels(GlobalAddressSDNode *GA,
                                 SelectionDAG &DAG,
                                 TLSModel::Model model) const;
    SDValue LowerGlobalTLSAddressDarwin(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddressWindows(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSignedALUO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUnsignedALUO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBRCOND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantFP(SDValue Op, SelectionDAG &DAG,
                            const ARMSubtarget *ST) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                              const ARMSubtarget *ST) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDivRem(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed) const;
    void ExpandDIV_Windows(SDValue Op, SelectionDAG &DAG, bool Signed,
                           SmallVectorImpl<SDValue> &Results) const;
    SDValue LowerWindowsDIVLibCall(SDValue Op, SelectionDAG &DAG, bool Signed,
                                   SDValue &Chain) const;
    SDValue LowerREM(SDNode *N, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    void lowerABS(SDNode *N, SmallVectorImpl<SDValue> &Results,
                  SelectionDAG &DAG) const;
    void LowerLOAD(SDNode *N, SmallVectorImpl<SDValue> &Results,
                   SelectionDAG &DAG) const;

    Register getRegisterByName(const char* RegName, LLT VT,
                               const MachineFunction &MF) const override;

    SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                          SmallVectorImpl<SDNode *> &Created) const override;

    bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                    EVT VT) const override;

    SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                            SDValue ThisVal) const;

    bool supportSplitCSR(MachineFunction *MF) const override {
      return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
             MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
    }

    void initializeSplitCSR(MachineBasicBlock *Entry) const override;
    void insertCopiesSplitCSR(
        MachineBasicBlock *Entry,
        const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

    SDValue
    LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         const SDLoc &dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const override;

    int StoreByValRegs(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &dl,
                       SDValue &Chain, const Value *OrigArg,
                       unsigned InRegsParamRecordIdx, int ArgOffset,
                       unsigned ArgSize) const;

    void VarArgStyleRegisters(CCState &CCInfo, SelectionDAG &DAG,
                              const SDLoc &dl, SDValue &Chain,
                              unsigned ArgOffset, unsigned TotalArgRegsSaveSize,
                              bool ForceMutable = false) const;

    SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                      SmallVectorImpl<SDValue> &InVals) const override;

    /// HandleByVal - Target-specific cleanup for ByVal support.
    void HandleByVal(CCState *, unsigned &, unsigned) const override;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(
        SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
        bool isCalleeStructRet, bool isCallerStructRet,
        const SmallVectorImpl<ISD::OutputArg> &Outs,
        const SmallVectorImpl<SDValue> &OutVals,
        const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG,
        const bool isIndirect) const;

    bool CanLowerReturn(CallingConv::ID CallConv,
                        MachineFunction &MF, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context) const override;

    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SDLoc &dl, SelectionDAG &DAG) const override;

    bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;

    bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

    bool shouldConsiderGEPOffsetSplit() const override { return true; }
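
    // True if VT is a floating-point type that the current subtarget cannot
    // operate on natively (for example f64 with a single-precision-only FPU),
    // so values of that type must be lowered via integer or libcall paths.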
    bool isUnsupportedFloatingType(EVT VT) const;

    SDValue getCMOV(const SDLoc &dl, EVT VT, SDValue FalseVal, SDValue TrueVal,
                    SDValue ARMcc, SDValue CCR, SDValue Cmp,
                    SelectionDAG &DAG) const;
    SDValue getARMCmp(SDValue LHS, SDValue RHS, ISD::CondCode CC,
                      SDValue &ARMcc, SelectionDAG &DAG, const SDLoc &dl) const;
    SDValue getVFPCmp(SDValue LHS, SDValue RHS, SelectionDAG &DAG,
                      const SDLoc &dl) const;
    SDValue duplicateCmp(SDValue Cmp, SelectionDAG &DAG) const;

    SDValue OptimizeVFPBrcond(SDValue Op, SelectionDAG &DAG) const;

    void SetupEntryBlockForSjLj(MachineInstr &MI, MachineBasicBlock *MBB,
                                MachineBasicBlock *DispatchBB, int FI) const;

    void EmitSjLjDispatchBlock(MachineInstr &MI, MachineBasicBlock *MBB) const;

    bool RemapAddSubWithFlags(MachineInstr &MI, MachineBasicBlock *BB) const;

    MachineBasicBlock *EmitStructByval(MachineInstr &MI,
                                       MachineBasicBlock *MBB) const;

    MachineBasicBlock *EmitLowered__chkstk(MachineInstr &MI,
                                           MachineBasicBlock *MBB) const;
    MachineBasicBlock *EmitLowered__dbzchk(MachineInstr &MI,
                                           MachineBasicBlock *MBB) const;
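
    // Constructor helpers for MVE and type legalization: addMVEVectorTypes
    // registers the MVE vector types (optionally with MVE floating point),
    // addAllExtLoads applies the given action to every extending-load
    // combination of the two types, and setAllExpand marks all operations on
    // VT as Expand.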
    void addMVEVectorTypes(bool HasMVEFP);
    void addAllExtLoads(const MVT From, const MVT To, LegalizeAction Action);
    void setAllExpand(MVT VT);
  };
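
  // The kinds of modified-immediate encoding used to materialize vector
  // constants: a plain VMOV encoding, an inverted VMVN encoding, the MVE
  // VMVN variant, or some other form.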
  enum VMOVModImmType {
    VMOVModImm,
    VMVNModImm,
    MVEVMVNModImm,
    OtherModImm
  };

  namespace ARM {

    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                             const TargetLibraryInfo *libInfo);

  } // end namespace ARM

} // end namespace llvm

#endif // LLVM_LIB_TARGET_ARM_ARMISELLOWERING_H