//==-- AArch64ISelLowering.h - AArch64 DAG Lowering Interface ----*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that AArch64 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H
#define LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H

#include "AArch64.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

namespace AArch64ISD {

enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  WrapperLarge, // 4-instruction MOVZ/MOVK sequence for 64-bit addresses.
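                // e.g. (large code model; destination register illustrative):
                //   movz x0, #:abs_g3:sym
                //   movk x0, #:abs_g2_nc:sym
                //   movk x0, #:abs_g1_nc:sym
                //   movk x0, #:abs_g0_nc:sym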
  CALL,         // Function call.

  // Produces the full sequence of instructions for getting the thread pointer
  // offset of a variable into X0, using the TLSDesc model.
  TLSDESC_CALLSEQ,
  ADRP,     // Page address of a TargetGlobalAddress operand.
  ADDlow,   // Add the low 12 bits of a TargetGlobalAddress operand.
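            // ADRP and ADDlow together materialise an address in the small
            // code model, e.g.:
            //   adrp x0, sym
            //   add  x0, x0, :lo12:sym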
  LOADgot,  // Load from automatically generated descriptor (e.g. Global
            // Offset Table, TLS record).
  RET_FLAG, // Return with a flag operand. Operand 0 is the chain operand.
  BRCOND,   // Conditional branch instruction; "b.cond".
  CSEL,
  FCSEL, // Conditional move instruction.
  CSINV, // Conditional select invert.
  CSNEG, // Conditional select negate.
  CSINC, // Conditional select increment.

  // Pointer to the thread's local storage area. Materialised from TPIDR_EL0 on
  // ELF.
  THREAD_POINTER,
  ADC,
  SBC, // adc, sbc instructions

  // Arithmetic instructions which write flags.
  ADDS, SUBS, ADCS, SBCS, ANDS,

  // Conditional compares. Operands: left, right, falsecc, cc, flags.
  CCMP, CCMN, FCCMP,

  // Floating point comparison
  FCMP,

  // Scalar-to-vector duplication
  DUP, DUPLANE8, DUPLANE16, DUPLANE32, DUPLANE64,

  // Vector immediate moves
  MOVI, MOVIshift, MOVIedit, MOVImsl,
  FMOV, MVNIshift, MVNImsl,

  // Vector immediate ops
  BICi, ORRi,

  // Vector bit select: similar to ISD::VSELECT but not all bits within an
  // element must be identical.
  BSL,
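  // (Illustrative per-bit semantics, with operands (mask, lhs, rhs):
  //   result = (mask & lhs) | (~mask & rhs).)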

  // Vector arithmetic negation
  NEG,

  // Vector shift by scalar
  VSHL, VLSHR, VASHR,

  // Vector shift by scalar (again)
  SQSHL_I, UQSHL_I, SQSHLU_I, SRSHR_I, URSHR_I,

  // Vector comparisons
  CMEQ, CMGE, CMGT, CMHI, CMHS, FCMEQ, FCMGE, FCMGT,

  // Vector zero comparisons
  CMEQz, CMGEz, CMGTz, CMLEz, CMLTz,
  FCMEQz, FCMGEz, FCMGTz, FCMLEz, FCMLTz,

  // Vector across-lanes addition
  // Only the lower result lane is defined.
  SADDV, UADDV,

  // Vector across-lanes min/max
  // Only the lower result lane is defined.
  SMINV, UMINV, SMAXV, UMAXV,

  // Vector bitwise negation
  NOT,

  // Vector bitwise selection
  BIT,

  // Compare-and-branch
  CBZ, CBNZ, TBZ, TBNZ,

  // Custom prefetch handling
  Prefetch,

  // {s|u}int to FP within a FP register.
  SITOF, UITOF,

  /// Natural vector cast. ISD::BITCAST is not natural in the big-endian
  /// world w.r.t. vectors, which causes additional REV instructions to be
  /// generated to compensate for the byte-swapping. But sometimes we do
  /// need to re-interpret the data in SIMD vector registers in big-endian
  /// mode without emitting such REV instructions.
  NVCAST,

  // Reciprocal estimates and steps.
  FRECPE, FRECPS,
  FRSQRTE, FRSQRTS,

  // NEON Load/Store with post-increment base updates
  LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
  LD3post, LD4post,
  ST2post, ST3post, ST4post,
  LD1x2post, LD1x3post, LD1x4post,
  ST1x2post, ST1x3post, ST1x4post,
  LD1DUPpost, LD2DUPpost, LD3DUPpost, LD4DUPpost,
  LD1LANEpost, LD2LANEpost, LD3LANEpost, LD4LANEpost,
  ST2LANEpost, ST3LANEpost, ST4LANEpost
};

} // end namespace AArch64ISD

namespace {

// Any instruction that defines a 32-bit result zeros out the high half of the
// register. Truncate can be lowered to EXTRACT_SUBREG. CopyFromReg may
// be copying from a truncate. But any other 32-bit operation will zero-extend
// up to 64 bits.
// FIXME: X86 also checks for CMOV here. Do we need something similar?
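// For example, "add w8, w9, w10" implicitly zeroes bits [63:32] of x8, so the
// result is already zero-extended to 64 bits.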
static inline bool isDef32(const SDNode &N) {
  unsigned Opc = N.getOpcode();
  return Opc != ISD::TRUNCATE && Opc != TargetOpcode::EXTRACT_SUBREG &&
         Opc != ISD::CopyFromReg;
}

} // end anonymous namespace

class AArch64Subtarget;
class AArch64TargetMachine;

class AArch64TargetLowering : public TargetLowering {
public:
  explicit AArch64TargetLowering(const TargetMachine &TM,
                                 const AArch64Subtarget &STI);

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;

  /// Selects the correct CCAssignFn for a given CallingConvention value.
  CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  void computeKnownBitsForTargetNode(const SDValue Op, KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth = 0) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &Demanded,
                                    TargetLoweringOpt &TLO) const override;

  MVT getScalarShiftAmountTy(const DataLayout &DL, EVT) const override;

  /// Returns true if the target allows unaligned memory accesses of the
  /// specified type.
  bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0,
                                      unsigned Align = 1,
                                      bool *Fast = nullptr) const override;

  /// Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  const char *getTargetNodeName(unsigned Opcode) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override {
    // Addrspacecasts are always noops.
    return true;
  }

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                           const TargetLibraryInfo *libInfo) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  /// Return true if the given shuffle mask can be codegen'd directly, or if it
  /// should be stack expanded.
  bool isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const override;

  /// Return the ISD::SETCC ValueType.
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  SDValue ReconstructShuffle(SDValue Op, SelectionDAG &DAG) const;

  MachineBasicBlock *EmitF128CSEL(MachineInstr &MI,
                                  MachineBasicBlock *BB) const;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *MBB) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;

  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                             EVT NewVT) const override;

  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isProfitableToHoist(Instruction *I) const override;

  bool isZExtFree(Type *Ty1, Type *Ty2) const override;
  bool isZExtFree(EVT VT1, EVT VT2) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool hasPairedLoad(EVT LoadedType, unsigned &RequiredAlignment) const override;

  unsigned getMaxSupportedInterleaveFactor() const override { return 4; }

  bool lowerInterleavedLoad(LoadInst *LI,
                            ArrayRef<ShuffleVectorInst *> Shuffles,
                            ArrayRef<unsigned> Indices,
                            unsigned Factor) const override;
  bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
                             unsigned Factor) const override;

  bool isLegalAddImmediate(int64_t) const override;
  bool isLegalICmpImmediate(int64_t) const override;

  bool shouldConsiderGEPOffsetSplit() const override;

  EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                          bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                          MachineFunction &MF) const override;

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;

  /// Return the cost of the scaling factor used in the addressing
  /// mode represented by AM for this target, for a load/store
  /// of the specified type.
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
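  /// For example, a scaled-register mode such as "ldr x0, [x1, x2, lsl #3]"
  /// is legal on AArch64, so it yields a non-negative cost; an unsupported
  /// combination yields a negative value.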
  int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                           unsigned AS) const override;

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
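  /// e.g. on AArch64 "fmadd d0, d1, d2, d3" computes (d1 * d2) + d3 in a
  /// single instruction.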
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  /// Returns false if N is a bit extraction pattern of (X >> C) & Mask.
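  /// Such patterns typically select to a single UBFX/SBFX bitfield extract,
  /// which commuting the shift would obscure.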
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override;

  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
  /// with this index.
  bool isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
                               unsigned Index) const override;

  Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                        AtomicOrdering Ord) const override;
  Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                              Value *Addr, AtomicOrdering Ord) const override;

  void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const override;

  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
  bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
  TargetLoweringBase::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;

  bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

  bool useLoadStackGuardNode() const override;
  TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const override;

  /// If the target has a standard location for the stack protector cookie,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getIRStackGuard(IRBuilder<> &IRB) const override;

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
    // FIXME: This is a guess. Has this been defined yet?
    return AArch64::X1;
  }

  bool isIntDivCheap(EVT VT, AttributeList Attr) const override;

  bool canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
                        const SelectionDAG &DAG) const override {
    // Do not merge to float value size (128 bits) if no implicit
    // float attribute is set.

    bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
        Attribute::NoImplicitFloat);

    if (NoFloat)
      return (MemVT.getSizeInBits() <= 64);
    return true;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isMaskAndCmp0FoldingBeneficial(const Instruction &AndI) const override;

  bool hasAndNotCompare(SDValue V) const override {
    // We can use bics for any scalar.
    return V.getValueType().isScalarInteger();
  }

  bool hasAndNot(SDValue Y) const override {
    EVT VT = Y.getValueType();

    if (!VT.isVector())
      return hasAndNotCompare(Y);

    return VT.getSizeInBits() >= 64; // vector 'bic'
  }

  bool shouldTransformSignedTruncationCheck(EVT XVT,
                                            unsigned KeptBits) const override {
    // For vectors, we don't have a preference.
    if (XVT.isVector())
      return false;

    auto VTIsOk = [](EVT VT) -> bool {
      return VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 ||
             VT == MVT::i64;
    };

    // We are ok with KeptBitsVT being byte/word/dword, what SXT supports.
    // XVT will be larger than KeptBitsVT.
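    // (e.g. KeptBits == 8 on an i32 value lowers to "sxtb" plus "cmp".)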
    MVT KeptBitsVT = MVT::getIntegerVT(KeptBits);
    return VTIsOk(XVT) && VTIsOk(KeptBitsVT);
  }

  bool hasBitPreservingFPLogic(EVT VT) const override {
    // FIXME: Is this always true? It should be true for vectors at least.
    return VT == MVT::f32 || VT == MVT::f64;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return MF->getFunction().getCallingConv() == CallingConv::CXX_FAST_TLS &&
           MF->getFunction().hasFnAttribute(Attribute::NoUnwind);
  }
  void initializeSplitCSR(MachineBasicBlock *Entry) const override;
  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  bool supportSwiftError() const override {
    return true;
  }

  /// Enable aggressive FMA fusion on targets that want it.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// Returns the size of the platform's va_list object.
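  /// (A 32-byte struct under AAPCS; a single pointer, i.e. 64 bits, on
  /// Darwin and Windows.)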
  unsigned getVaListSizeInBits(const DataLayout &DL) const override;

  /// Returns true if \p VecTy is a legal interleaved access type. This
  /// function checks the vector element type and the overall width of the
  /// vector.
  bool isLegalInterleavedAccessType(VectorType *VecTy,
                                    const DataLayout &DL) const;

  /// Returns the number of interleaved accesses that will be generated when
  /// lowering accesses of the given type.
  unsigned getNumInterleavedAccesses(VectorType *VecTy,
                                     const DataLayout &DL) const;

  MachineMemOperand::Flags getMMOFlags(const Instruction &I) const override;

  bool functionArgumentNeedsConsecutiveRegisters(Type *Ty,
                                                 CallingConv::ID CallConv,
                                                 bool isVarArg) const override;
private:
  /// Keep a pointer to the AArch64Subtarget around so that we can
  /// make the right decision when generating code for different targets.
  const AArch64Subtarget *Subtarget;

  bool isExtFreeImpl(const Instruction *Ext) const override;

  void addTypeForNEON(MVT VT, MVT PromotedBitwiseVT);
  void addDRTypeForNEON(MVT VT);
  void addQRTypeForNEON(MVT VT);

  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool isVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCall(CallLoweringInfo & /*CLI*/,
                    SmallVectorImpl<SDValue> &InVals) const override;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                          CallingConv::ID CallConv, bool isVarArg,
                          const SmallVectorImpl<ISD::InputArg> &Ins,
                          const SDLoc &DL, SelectionDAG &DAG,
                          SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
                          SDValue ThisVal) const;

  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const;

  /// Finds the incoming stack arguments which overlap the given fixed stack
  /// object and incorporates their load into the current chain. This prevents
  /// an upcoming store from clobbering the stack argument before it's used.
  SDValue addTokenForArgument(SDValue Chain, SelectionDAG &DAG,
                              MachineFrameInfo &MFI, int ClobberedFI) const;

  bool DoesCalleeRestoreStack(CallingConv::ID CallCC, bool TailCallOpt) const;

  void saveVarArgRegisters(CCState &CCInfo, SelectionDAG &DAG, const SDLoc &DL,
                           SDValue &Chain) const;

  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;

  SDValue getTargetNode(GlobalAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(JumpTableSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(ConstantPoolSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  SDValue getTargetNode(BlockAddressSDNode *N, EVT Ty, SelectionDAG &DAG,
                        unsigned Flag) const;
  template <class NodeTy>
  SDValue getGOT(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddrLarge(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, unsigned Flags = 0) const;
  SDValue LowerADDROFRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwinGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerELFTLSDescCallSeq(SDValue SymAddr, const SDLoc &DL,
                                 SelectionDAG &DAG) const;
  SDValue LowerWindowsGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(ISD::CondCode CC, SDValue LHS, SDValue RHS,
                         SDValue TVal, SDValue FVal, const SDLoc &dl,
                         SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerAAPCS_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDarwin_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWin64_VASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerF128Call(SDValue Op, SelectionDAG &DAG,
                        RTLIB::Libcall Call) const;
  SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorAND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFSINCOS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECREDUCE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_SUB(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerATOMIC_LOAD_AND(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerWindowsDYNAMIC_STACKALLOC(SDValue Op, SDValue Chain,
                                         SDValue &Size,
                                         SelectionDAG &DAG) const;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                        SmallVectorImpl<SDNode *> &Created) const override;
  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                          int &ExtraSteps, bool &UseOneConst,
                          bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
                           int &ExtraSteps) const override;
  unsigned combineRepeatedFPDivisors() const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;
  unsigned getRegisterByName(const char *RegName, EVT VT,
                             SelectionDAG &DAG) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight
  getSingleConstraintMatchWeight(AsmOperandInfo &info,
                                 const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  const char *LowerXConstraint(EVT ConstraintVT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    // FIXME: clang has code for 'Ump', 'Utf', 'Usa', and 'Ush' but these are
    // followed by llvm_unreachable so we'll leave them unimplemented in
    // the backend for now.
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool getIndexedAddressParts(SDNode *Op, SDValue &Base, SDValue &Offset,
                              ISD::MemIndexedMode &AM, bool &IsInc,
                              SelectionDAG &DAG) const;
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base, SDValue &Offset,
                                 ISD::MemIndexedMode &AM,
                                 SelectionDAG &DAG) const override;
  bool getPostIndexedAddressParts(SDNode *N, SDNode *Op, SDValue &Base,
                                  SDValue &Offset, ISD::MemIndexedMode &AM,
                                  SelectionDAG &DAG) const override;

  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  bool shouldNormalizeToSelectSequence(LLVMContext &, EVT) const override;

  void finalizeLowering(MachineFunction &MF) const override;
};

namespace AArch64 {
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo);
} // end namespace AArch64

} // end namespace llvm

#endif // LLVM_LIB_TARGET_AARCH64_AARCH64ISELLOWERING_H