//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
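  // For example, the large code model materialises a symbol's full 64-bit
  // address as:
  //   movz x0, #:abs_g0_nc:sym
  //   movk x0, #:abs_g1_nc:sym
  //   movk x0, #:abs_g2_nc:sym
  //   movk x0, #:abs_g3:sym
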
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
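  // Together, ADRP and ADDlow form the small code model's usual
  // two-instruction address sequence:
  //   adrp x0, var
  //   add  x0, x0, :lo12:var
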
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS,
  SUBS,
  ADCS,
  SBCS,
  ANDS,

  // Conditional compares. Operands: left,right,falsecc,cc,flags
  CCMP,
  CCMN,
  FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar-to-vector duplication
  DUP,
  DUPLANE8,
  DUPLANE16,
  DUPLANE32,
  DUPLANE64,

  // Vector immediate moves
  MOVI,
  MOVIshift,
  MOVIedit,
  MOVImsl,
  FMOV,
  MVNIshift,
  MVNImsl,

  // Vector immediate ops
  BICi,
  ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,
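  // Each result bit is selected bitwise rather than per element: from the
  // first value operand where the corresponding mask bit is set, and from the
  // second where it is clear, i.e. roughly (Mask & LHS) | (~Mask & RHS).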

  // Vector arithmetic negation
  NEG,

  // Vector shuffles
  ZIP1,
  ZIP2,
  UZP1,
  UZP2,
  TRN1,
  TRN2,
  REV16,
  REV32,
  REV64,
  EXT,

  // Vector shift by scalar
  VSHL,
  VLSHR,
  VASHR,

  // Vector saturating/rounding shift by immediate
  SQSHL_I,
  UQSHL_I,
  SQSHLU_I,
  SRSHR_I,
  URSHR_I,

  // Vector comparisons
  CMEQ,
  CMGE,
  CMGT,
  CMHI,
  CMHS,
  FCMEQ,
  FCMGE,
  FCMGT,

  // Vector zero comparisons
  CMEQz,
  CMGEz,
  CMGTz,
  CMLEz,
  CMLTz,
  FCMEQz,
  FCMGEz,
  FCMGTz,
  FCMLEz,
  FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV,
  UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV,
  UMINV,
  SMAXV,
  UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ,
  CBNZ,
  TBZ,
  TBNZ,

  // Tail calls
  TC_RETURN,

  // Custom prefetch handling
  Prefetch,

  // {s|u}int to FP within a FP register.
  SITOF,
  UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors; it causes additional REV instructions to be
  /// generated to compensate for the byte swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,
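  // For example, (v8i16 (NVCAST (v4i32 V))) re-tags the register contents of
  // V as v8i16 in place, avoiding the REV32 a true big-endian bitcast needs.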

  SMULL,
  UMULL,

  // Reciprocal estimates and steps.
  FRECPE, FRECPS,
  FRSQRTE, FRSQRTS,
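  // FRECPS computes the Newton-Raphson step (2.0 - a*b); one refinement of an
  // FRECPE estimate x0 of 1/d is x1 = x0 * FRECPS(d, x0). FRSQRTE/FRSQRTS
  // play the same roles for 1/sqrt(d).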

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post,
  LD4post,
  ST2post,
  ST3post,
  ST4post,
  LD1x2post,
  LD1x3post,
  LD1x4post,
  ST1x2post,
  ST1x3post,
  ST1x4post,
  LD1DUPpost,
  LD2DUPpost,
  LD3DUPpost,
  LD4DUPpost,
  LD1LANEpost,
  LD2LANEpost,
  LD3LANEpost,
  LD4LANEpost,
  ST2LANEpost,
  ST3LANEpost,
  ST4LANEpost
};

} // end namespace AArch64ISD

namespace {

// Any instruction that defines a 32-bit result zeros out the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits.
// FIXME: X86 also checks for CMOV here. Do we need something similar?
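// For example, "add w8, w9, w10" also zeroes bits [63:32] of x8, so a later
// zero-extend of its result to i64 needs only a SUBREG_TO_REG, not a ubfx.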
static inline bool isDef32(const SDNode &N) {
  unsigned Opc = N.getOpcode();
  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
         Opc != ISD::CopyFromReg;
}

} // end anonymous namespace

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the Known bitset.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchRet(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *EmitLoweredCatchPad(MachineInstr &MI,
                                         MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(EVT LoadedType,
                     unsigned &RequiredAlignment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this
  /// method returns true, otherwise fmuladd is expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(MVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  void insertSSPDeclarations(Module &M) const override;
  Value *getSDagStackGuard(const Module &M) const override;
  Value *getSSPStackGuardCheck(const Module &M) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge to float value size (128 bits) if no implicit float
    // attribute is set.
    bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
        Attribute::NoImplicitFloat);

    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
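    // For example, (and x, (xor y, -1)) compared against zero selects to
    // "bics wzr, wx, wy", leaving the result in the flags.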
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    return VT.getSizeInBits() >= 64; // vector 'bic'
  }

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
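    // Example: "does i32 %x fit in i8?". IR canonicalizes the check to
    //   (icmp ult (add %x, 128), 256)
    // but the sign-extend-in-register form
    //   (icmp eq (sext_inreg %x, i8), %x)
    // is preferable here: it selects to "cmp w0, w0, sxtb".
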
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
    // XVT will be larger than KeptBitsVT.
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }

  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
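  /// (AAPCS64 uses a five-field, 32-byte struct; Darwin and Windows use a
  /// single pointer, 8 bytes.)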
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
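  /// For instance, with 128-bit NEON registers a <16 x i32> access (512 bits)
  /// requires ceil(512 / 128) = 4 accesses.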
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;

  /// Used for exception handling on Win64.
  bool needsFixedCatchObjects() const override;

private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrTiny(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_JT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSPONENTRY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char *RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
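    // 'Q' constrains a memory operand to a single base register with no
    // offset (the addressing form ldxr/stxr require).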
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif