//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
#define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H

#include "PPCInstrInfo.h"
#include "PPCRegisterInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"

enum NodeType : unsigned {
  // Start the numbering where the builtin ops and target ops leave off.
  FIRST_NUMBER = ISD::BUILTIN_OP_END,

  /// FSEL - Traditional three-operand fsel node.

  /// FCFID - The FCFID instruction, taking an f64 operand and producing
  /// an f64 value containing the FP representation of the integer that
  /// was temporarily in the f64 operand.

  /// Newer FCFID[US] integer-to-floating-point conversion instructions for
  /// unsigned integers and single-precision outputs.
  FCFIDU, FCFIDS, FCFIDUS,

  /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or f64
  /// operand, producing an f64 value containing the integer representation
  /// of that FP value.

  /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions for
  /// unsigned integers with round toward zero.

  /// VEXTS, ByteWidth - takes an input in VSFRC and produces an output in
  /// VSFRC that is sign-extended from ByteWidth to a 64-bit integer.

  /// Reciprocal estimate instructions (unary FP ops).

  // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
  // three v4f32 operands and producing a v4f32 result.

  /// VPERM - The PPC VPERM Instruction.

  /// XXSPLT - The PPC VSX splat instructions

  /// XXINSERT - The PPC VSX insert instruction

  /// VECSHL - The PPC VSX shift left instruction

  /// The CMPB instruction (takes two operands of i32 or i64).

  /// Hi/Lo - These represent the high and low 16-bit parts of a global
  /// address respectively. These nodes have two operands, the first of
  /// which must be a TargetGlobalAddress, and the second of which must be a
  /// Constant. Selected naively, these turn into 'lis G+C' and 'li G+C',
  /// though these are usually folded into other nodes.
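  /// A minimal illustration (assumed register choice, not necessarily the
  /// exact pattern the selector emits): one common way the two halves of a
  /// global G end up in assembly is
  ///     lis   r3, G\@ha
  ///     addi  r3, r3, G\@l
  /// with the Hi node supplying the high half and the Lo node the low half.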

  /// The following two target-specific nodes are used for calls through
  /// function pointers in the 64-bit SVR4 ABI.

  /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
  /// compute an allocation on the stack.

  /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex to
  /// compute an offset from native SP to the address of the most recent
  /// dynamic alloca.

  /// GlobalBaseReg - On Darwin, this node represents the result of the mflr
  /// at function entry, used for PIC code.

  /// These nodes represent the 32-bit PPC shifts that operate on 6-bit
  /// shift amounts. These nodes are generated by the multi-precision shift
  /// code.

  /// The combination of sra[wd]i and addze used to implement signed
  /// integer division by a power of 2. The first operand is the dividend,
  /// and the second is the constant shift amount (representing the
  /// power).
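  /// As an illustrative example (register numbers are arbitrary), a signed
  /// 32-bit division by 4 can be emitted as
  ///     srawi r3, r3, 2      # shift right algebraic; records a carry for
  ///                          # negative dividends
  ///     addze r3, r3         # add the carry so the quotient rounds toward zero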

  /// CALL - A direct function call.
  /// CALL_NOP is a call with the special NOP which follows 64-bit
  /// SVR4 calls.

  /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
  /// MTCTR instruction.

  /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
  /// BCTRL instruction.

  /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
  /// instruction and the TOC reload required on SVR4 PPC64.

  /// Return with a flag operand, matched by 'blr'

  /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
  /// This copies the bits corresponding to the specified CRREG into the
  /// resultant GPR. Bits corresponding to other CR regs are undefined.

  /// Direct move from a VSX register to a GPR

  /// Direct move from a GPR to a VSX register (algebraic)

  /// Direct move from a GPR to a VSX register (zero)

  /// Extract a subvector from signed integer vector and convert to FP.
  /// It is primarily used to convert a (widened) illegal integer vector
  /// type to a legal floating point vector type.
  /// For example v2i32 -> widened to v4i32 -> v2f64

  /// Extract a subvector from unsigned integer vector and convert to FP.
  /// As with SINT_VEC_TO_FP, used for converting illegal types.

  // FIXME: Remove these once the ANDI glue bug is fixed:
  /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of the
  /// eq or gt bit of CR0 after executing andi. x, 1. This is used to
  /// implement truncation of i32 or i64 to i1.
  ANDIo_1_EQ_BIT, ANDIo_1_GT_BIT,
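  /// An illustrative sketch of how these are used (register choice is
  /// arbitrary): truncating i32 %x to i1 executes "andi. r3, r3, 1"; since
  /// the masked result is either 0 or 1, CR0's GT bit equals the low bit of
  /// %x and CR0's EQ bit equals its complement, so ANDIo_1_GT_BIT yields the
  /// truncated value directly.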

  // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
  // target (returns (Lo, Hi)). It takes a chain operand.

  // EH_SJLJ_SETJMP - SjLj exception handling setjmp.

  // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.

  /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
  /// instructions. For lack of better number, we use the opcode number
  /// encoding for the OPC field to identify the compare. For example, 838
  /// is VCMPGTSH.

  /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
  /// altivec VCMP*o instructions. For lack of better number, we use the
  /// opcode number encoding for the OPC field to identify the compare. For
  /// example, 838 is VCMPGTSH.

  /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
  /// corresponds to the COND_BRANCH pseudo instruction. CRRC is the
  /// condition register to branch on, OPC is the branch opcode to use (e.g.
  /// PPC::BLE), DESTBB is the destination block to branch to, and INFLAG is
  /// an optional input flag argument.

  /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
  /// loops.

  /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
  /// towards zero. Used only as part of the long double-to-int
  /// conversion sequence.

  /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.

  /// TC_RETURN - A tail call return.
  ///   operand #1 callee (register or absolute)
  ///   operand #2 stack adjustment
  ///   operand #3 optional in flag

  /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg calls

  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by initial-exec TLS
  /// on PPC32.

  /// GPRC = address of _GLOBAL_OFFSET_TABLE_. Used by general dynamic and
  /// local dynamic TLS on PPC32.

  /// G8RC = ADDIS_GOT_TPREL_HA %X2, Symbol - Used by the initial-exec
  /// TLS model, produces an ADDIS8 instruction that adds the GOT
  /// base to sym\@got\@tprel\@ha.

  /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
  /// TLS model, produces a LD instruction with base register G8RReg
  /// and offset sym\@got\@tprel\@l. This completes the addition that
  /// finds the offset of "sym" relative to the thread pointer.

  /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
  /// model, produces an ADD instruction that adds the contents of
  /// G8RReg to the thread pointer. Symbol contains a relocation
  /// sym\@tls which is to be replaced by the thread pointer and
  /// identifies to the linker that the instruction is part of a
  /// TLS sequence.
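  /// Taken together, an initial-exec access is conventionally emitted as a
  /// sequence along these lines (illustrative register choices):
  ///     addis r4, r2, sym\@got\@tprel\@ha
  ///     ld    r4, sym\@got\@tprel\@l(r4)
  ///     add   r3, r4, sym\@tls
  /// where the final add reads the thread pointer (r13 on 64-bit).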

  /// G8RC = ADDIS_TLSGD_HA %X2, Symbol - For the general-dynamic TLS
  /// model, produces an ADDIS8 instruction that adds the GOT base
  /// register to sym\@got\@tlsgd\@ha.

  /// %X3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
  /// model, produces an ADDI8 instruction that adds G8RReg to
  /// sym\@got\@tlsgd\@l and stores the result in X3. Hidden by
  /// ADDIS_TLSGD_L_ADDR until after register assignment.

  /// %X3 = GET_TLS_ADDR %X3, Symbol - For the general-dynamic TLS
  /// model, produces a call to __tls_get_addr(sym\@tlsgd). Hidden by
  /// ADDIS_TLSGD_L_ADDR until after register assignment.

  /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
  /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
  /// register assignment.

  /// G8RC = ADDIS_TLSLD_HA %X2, Symbol - For the local-dynamic TLS
  /// model, produces an ADDIS8 instruction that adds the GOT base
  /// register to sym\@got\@tlsld\@ha.

  /// %X3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
  /// model, produces an ADDI8 instruction that adds G8RReg to
  /// sym\@got\@tlsld\@l and stores the result in X3. Hidden by
  /// ADDIS_TLSLD_L_ADDR until after register assignment.

  /// %X3 = GET_TLSLD_ADDR %X3, Symbol - For the local-dynamic TLS
  /// model, produces a call to __tls_get_addr(sym\@tlsld). Hidden by
  /// ADDIS_TLSLD_L_ADDR until after register assignment.

  /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
  /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
  /// following register assignment.

  /// G8RC = ADDIS_DTPREL_HA %X3, Symbol - For the local-dynamic TLS
  /// model, produces an ADDIS8 instruction that adds X3 to
  /// sym\@dtprel\@ha.

  /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
  /// model, produces an ADDI8 instruction that adds G8RReg to
  /// sym\@got\@dtprel\@l.

  /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
  /// during instruction selection to optimize a BUILD_VECTOR into
  /// operations on splats. This is necessary to avoid losing these
  /// optimizations due to constant folding.

  /// CHAIN = SC CHAIN, Imm128 - System call. The 7-bit unsigned
  /// operand identifies the operating system entry point.

  /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.

  /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
  /// history rolling buffer entry.

  /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.

  /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
  /// endian. Maps to an xxswapd instruction that corrects an lxvd2x
  /// or stxvd2x instruction. The chain is necessary because the
  /// sequence replaces a load and needs to provide the same number
  /// of outputs.

  /// An SDNode for swaps that are not associated with any loads/stores
  /// and thereby have no chain.

  /// QVFPERM = This corresponds to the QPX qvfperm instruction.

  /// QVGPCI = This corresponds to the QPX qvgpci instruction.

  /// QVALIGNI = This corresponds to the QPX qvaligni instruction.

  /// QVESPLATI = This corresponds to the QPX qvesplati instruction.

  /// QBFLT = Access the underlying QPX floating-point boolean
  /// representation.

  /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
  /// byte-swapping store instruction. It byte-swaps the low "Type" bits of
  /// the GPRC input, then stores it through Ptr. Type can be either i16 or
  /// i32.
  STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE,

  /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
  /// byte-swapping load instruction. It loads "Type" bits, byte swaps it,
  /// then puts it in the bottom bits of the GPRC. TYPE can be either i16
  /// or i32.

  /// STFIWX - The STFIWX instruction. The first operand is an input token
  /// chain, then an f64 value to store, then an address to store it to.

  /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
  /// load which sign-extends from a 32-bit integer value into the
  /// destination 64-bit register.

  /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
  /// load which zero-extends from a 32-bit integer value into the
  /// destination 64-bit register.

  /// GPRC, CHAIN = LXSIZX, CHAIN, Ptr, ByteWidth - This is a load of an
  /// integer smaller than 64 bits into a VSR. The integer is zero-extended.
  /// This can be used for converting loaded integers to floating point.

  /// STXSIX - The STXSI[bh]X instruction. The first operand is an input
  /// chain, then an f64 value to store, then an address to store it to,
  /// followed by a byte-width for the store.

  /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
  /// Maps directly to an lxvd2x instruction that will be followed by
  /// an xxswapd.

  /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
  /// Maps directly to an stxvd2x instruction that will be preceded by
  /// an xxswapd.

  /// QBRC, CHAIN = QVLFSb CHAIN, Ptr
  /// The 4xf32 load used for v4i1 constants.

  /// GPRC = TOC_ENTRY GA, TOC
  /// Loads the entry for GA from the TOC, where the TOC base is given by
  /// the last operand.
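  /// As a rough illustration (medium code model, arbitrary registers), a
  /// TOC_ENTRY for global ga is eventually materialized along the lines of
  ///     addis r3, r2, ga\@toc\@ha
  ///     ld    r3, ga\@toc\@l(r3)
  /// with r2 holding the TOC base.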
};

/// Define some predicates that are used for node matching.

/// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUHUM instruction.
bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
    SelectionDAG &DAG);

/// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUWUM instruction.
bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
    SelectionDAG &DAG);

/// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
/// VPKUDUM instruction.
bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
    SelectionDAG &DAG);

/// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGL* instruction with the specified unit size (1, 2 or 4 bytes).
bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
    unsigned ShuffleKind, SelectionDAG &DAG);

/// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGH* instruction with the specified unit size (1, 2 or 4 bytes).
bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
    unsigned ShuffleKind, SelectionDAG &DAG);

/// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable for
/// a VMRGEW or VMRGOW instruction.
bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
    unsigned ShuffleKind, SelectionDAG &DAG);

/// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
/// shift amount, otherwise return -1.
int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
    SelectionDAG &DAG);

/// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE operand
/// specifies a splat of a single element that is suitable for input to
/// VSPLTB/VSPLTH/VSPLTW.
bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);

/// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled by
/// the XXINSERTW instruction introduced in ISA 3.0. This is essentially any
/// shuffle of v4f32/v4i32 vectors that just inserts one element from one
/// vector into the other. This function will also set a couple of
/// output parameters for how much the source vector needs to be shifted and
/// what byte number needs to be specified for the instruction to put the
/// element in the desired location of the target vector.
bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
    unsigned &InsertAtByte, bool &Swap, bool IsLE);

/// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat the
/// specified isSplatShuffleMask VECTOR_SHUFFLE mask.
unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);

/// get_VSPLTI_elt - If this is a build_vector of constants which can be
/// formed by using a vspltis[bhw] instruction of the specified element
/// size, return the constant being splatted. The ByteSize field indicates
/// the number of bytes of each element [124] -> [bhw].
SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
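/// A small illustration (assumed operand values, not taken from the
/// selector itself): a v16i8 BUILD_VECTOR of sixteen copies of 5 matches
/// with ByteSize 1 and can be selected to "vspltisb v2, 5", while a v8i16
/// splat of -3 matches ByteSize 2 and becomes "vspltish v2, -3"; the
/// splatted constant must fit the instructions' signed 5-bit range (-16..15).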

/// If this is a qvaligni shuffle mask, return the shift
/// amount, otherwise return -1.
int isQVALIGNIShuffleMask(SDNode *N);

class PPCTargetLowering : public TargetLowering {
  const PPCSubtarget &Subtarget;

public:
  explicit PPCTargetLowering(const PPCTargetMachine &TM,
      const PPCSubtarget &STI);

  /// getTargetNodeName() - This method returns the name of a target specific
  /// DAG node.
  const char *getTargetNodeName(unsigned Opcode) const override;

  /// getPreferredVectorAction - The code we generate when vector types are
  /// legalized by promoting the integer element type is often much worse
  /// than code we generate if we widen the type for applicable vector types.
  /// The issue with promoting is that the vector is scalarized, individual
  /// elements promoted and then the vector is rebuilt. So say we load a pair
  /// of v4i8's and shuffle them. This will turn into a mess of 8 extending
  /// loads, moves back into VSR's (or memory ops if we don't have moves) and
  /// then the VPERM for the shuffle. All in all a very slow sequence.
  TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT)
      const override {
    if (VT.getScalarSizeInBits() % 8 == 0)
      return TypeWidenVector;
    return TargetLoweringBase::getPreferredVectorAction(VT);
  }

  bool useSoftFloat() const override;

  MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
    return MVT::i32;
  }

  bool isCheapToSpeculateCttz() const override {
    return true;
  }

  bool isCheapToSpeculateCtlz() const override {
    return true;
  }

  bool isCtlzFast() const override {
    return true;
  }

  bool hasAndNotCompare(SDValue) const override {
    return true;
  }

  bool supportSplitCSR(MachineFunction *MF) const override {
    return
        MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
        MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
  }

  void initializeSplitCSR(MachineBasicBlock *Entry) const override;

  void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

  /// getSetCCResultType - Return the ISD::SETCC ValueType
  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
      EVT VT) const override;

  /// Return true if the target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  bool enableAggressiveFMAFusion(EVT VT) const override;

  /// getPreIndexedAddressParts - returns true by value, base pointer and
  /// offset pointer and addressing mode by reference if the node's address
  /// can be legally represented as pre-indexed load / store address.
  bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
      SDValue &Offset,
      ISD::MemIndexedMode &AM,
      SelectionDAG &DAG) const override;

  /// SelectAddressRegReg - Given the specified address, check to see if it
  /// can be represented as an indexed [r+r] operation. Returns false if it
  /// can be more efficiently represented with [r+imm].
  bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
      SelectionDAG &DAG) const;

  /// SelectAddressRegImm - Returns true if the address N can be represented
  /// by a base register plus a signed 16-bit displacement [r+imm], and if it
  /// is not better represented as reg+reg. If Aligned is true, only accept
  /// displacements suitable for STD and friends, i.e. multiples of 4.
  bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
      SelectionDAG &DAG, bool Aligned) const;
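  /// For a rough sense of the distinction (illustrative operands only):
  /// an access like "lwz r3, 20(r4)" is the [r+imm] D-form this predicate
  /// targets, whereas the Aligned restriction exists because DS-form
  /// instructions such as ld/std encode the displacement with its two low
  /// bits implied as zero, so only multiples of 4 are representable.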

  /// SelectAddressRegRegOnly - Given the specified address, force it to be
  /// represented as an indexed [r+r] operation.
  bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
      SelectionDAG &DAG) const;

  Sched::Preference getSchedulingPreference(SDNode *N) const override;

  /// LowerOperation - Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

  /// ReplaceNodeResults - Replace the results of a node with an illegal
  /// result type with new values built out of custom code.
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
      SelectionDAG &DAG) const override;

  SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
      std::vector<SDNode *> *Created) const override;

  unsigned getRegisterByName(const char* RegName, EVT VT,
      SelectionDAG &DAG) const override;

  void computeKnownBitsForTargetNode(const SDValue Op,
      const SelectionDAG &DAG,
      unsigned Depth = 0) const override;

  unsigned getPrefLoopAlignment(MachineLoop *ML) const override;

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return true;
  }

  Instruction* emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
      bool IsStore, bool IsLoad) const override;
  Instruction* emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
      bool IsStore, bool IsLoad) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
      MachineBasicBlock *MBB) const override;
  MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
      MachineBasicBlock *MBB,
      unsigned AtomicSize,
      unsigned BinOpcode,
      unsigned CmpOpcode = 0,
      unsigned CmpPred = 0) const;
  MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
      MachineBasicBlock *MBB,
      bool is8bit,
      unsigned BinOpcode,
      unsigned CmpOpcode = 0,
      unsigned CmpPred = 0) const;
  MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
      MachineBasicBlock *MBB) const;
  MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
      MachineBasicBlock *MBB) const;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
      StringRef Constraint, MVT VT) const override;

  /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
  /// function arguments in the caller parameter area. This is the actual
  /// alignment, not its logarithm.
  unsigned getByValTypeAlignment(Type *Ty,
      const DataLayout &DL) const override;

  /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
  /// vector. If it is invalid, don't add anything to Ops.
  void LowerAsmOperandForConstraint(SDValue Op,
      std::string &Constraint,
      std::vector<SDValue> &Ops,
      SelectionDAG &DAG) const override;

  unsigned
  getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
    if (ConstraintCode == "es")
      return InlineAsm::Constraint_es;
    else if (ConstraintCode == "o")
      return InlineAsm::Constraint_o;
    else if (ConstraintCode == "Q")
      return InlineAsm::Constraint_Q;
    else if (ConstraintCode == "Z")
      return InlineAsm::Constraint_Z;
    else if (ConstraintCode == "Zy")
      return InlineAsm::Constraint_Zy;
    return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
  }

  /// isLegalAddressingMode - Return true if the addressing mode represented
  /// by AM is legal for this target, for a load/store of the specified type.
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
      Type *Ty, unsigned AS) const override;

  /// isLegalICmpImmediate - Return true if the specified immediate is a
  /// legal icmp immediate, that is the target has icmp instructions which
  /// can compare a register against the immediate without having to
  /// materialize the immediate into a register.
  bool isLegalICmpImmediate(int64_t Imm) const override;

  /// isLegalAddImmediate - Return true if the specified immediate is a
  /// legal add immediate, that is the target has add instructions which can
  /// add a register and the immediate without having to materialize
  /// the immediate into a register.
  bool isLegalAddImmediate(int64_t Imm) const override;

  /// isTruncateFree - Return true if it's free to truncate a value of
  /// type Ty1 to type Ty2. e.g. On PPC it's free to truncate an i64 value in
  /// register X1 to i32 by referencing its sub-register R1.
  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTruncateFree(EVT VT1, EVT VT2) const override;

  bool isZExtFree(SDValue Val, EVT VT2) const override;

  bool isFPExtFree(EVT VT) const override;

  /// \brief Returns true if it is beneficial to convert a load of a constant
  /// to just the constant itself.
  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
      Type *Ty) const override;

  bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

  bool getTgtMemIntrinsic(IntrinsicInfo &Info,
      const CallInst &I,
      unsigned Intrinsic) const override;

  /// getOptimalMemOpType - Returns the target specific optimal type for load
  /// and store operations as a result of memset, memcpy, and memmove
  /// lowering. If DstAlign is zero, that means the destination's alignment
  /// can satisfy any constraint. Similarly, if SrcAlign is zero it means
  /// there isn't a need to check it against the alignment requirement,
  /// probably because the source does not need to be loaded. If 'IsMemset'
  /// is true, that means it's expanding a memset. If 'ZeroMemset' is true,
  /// that means it's a memset of zero. 'MemcpyStrSrc' indicates whether the
  /// memcpy source is constant so it does not need to be loaded.
  /// It returns EVT::Other if the type should be determined using generic
  /// target-independent logic.
  EVT
  getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
      bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
      MachineFunction &MF) const override;

  /// Is unaligned memory access allowed for the given type, and is it fast
  /// relative to software emulation.
  bool allowsMisalignedMemoryAccesses(EVT VT,
      unsigned AddrSpace,
      unsigned Align = 1,
      bool *Fast = nullptr) const override;

  /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
  /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
  /// expanded to FMAs when this method returns true, otherwise fmuladd is
  /// expanded to fmul + fadd.
  bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

  const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

  // Should we expand the build vector with shuffles?
  bool
  shouldExpandBuildVectorWithShuffles(EVT VT,
      unsigned DefinedValues) const override;

  /// createFastISel - This method returns a target-specific FastISel object,
  /// or null if the target does not support "fast" instruction selection.
  FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
      const TargetLibraryInfo *LibInfo) const override;

  /// \brief Returns true if an argument of type Ty needs to be passed in a
  /// contiguous block of registers in calling convention CallConv.
  bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
    // We support any array type as "consecutive" block in the parameter
    // save area. The element type defines the alignment requirement and
    // whether the argument should go in GPRs, FPRs, or VRs if available.
    //
    // Note that clang uses this capability both to implement the ELFv2
    // homogeneous float/vector aggregate ABI, and to avoid having to use
    // "byval" when passing aggregates that might fully fit in registers.
    return Ty->isArrayTy();
  }
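  /// For instance (an illustrative sketch, not a statement about any
  /// particular front end's lowering): a parameter of LLVM type [4 x float]
  /// is treated as one consecutive block, so under the ELFv2 homogeneous
  /// aggregate rules it can travel in four consecutive FPRs instead of being
  /// passed as a byval memory copy.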

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  /// Override to support customized stack guard loading.
  bool useLoadStackGuardNode() const override;
  void insertSSPDeclarations(Module &M) const override;

  bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;

  unsigned getJumpTableEncoding() const override;
  bool isJumpTableRelative() const override;
  SDValue getPICJumpTableRelocBase(SDValue Table,
      SelectionDAG &DAG) const override;
  const MCExpr *getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
      unsigned JTI,
      MCContext &Ctx) const override;

private:
  struct ReuseLoadInfo {
    MachinePointerInfo MPI;
    bool IsDereferenceable;
    bool IsInvariant;
    unsigned Alignment;
    const MDNode *Ranges;

    ReuseLoadInfo()
        : IsDereferenceable(false), IsInvariant(false), Alignment(0),
          Ranges(nullptr) {}

    MachineMemOperand::Flags MMOFlags() const {
      MachineMemOperand::Flags F = MachineMemOperand::MONone;
      if (IsDereferenceable)
        F |= MachineMemOperand::MODereferenceable;
      if (IsInvariant)
        F |= MachineMemOperand::MOInvariant;
      return F;
    }
  };

  bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
      SelectionDAG &DAG,
      ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
  void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
      SelectionDAG &DAG) const;

  void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
      SelectionDAG &DAG, const SDLoc &dl) const;
  SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
      const SDLoc &dl) const;

  bool directMoveIsProfitable(const SDValue &Op) const;
  SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
      const SDLoc &dl) const;

  SDValue getFramePointerFrameIndex(SelectionDAG &DAG) const;
  SDValue getReturnAddrFrameIndex(SelectionDAG &DAG) const;

  bool
  IsEligibleForTailCallOptimization(SDValue Callee,
      CallingConv::ID CalleeCC,
      bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins,
      SelectionDAG &DAG) const;

  bool
  IsEligibleForTailCallOptimization_64SVR4(
      SDValue Callee,
      CallingConv::ID CalleeCC,
      ImmutableCallSite *CS,
      bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<ISD::InputArg> &Ins,
      SelectionDAG &DAG) const;

  SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
      SDValue Chain, SDValue &LROpOut,
      SDValue &FPOpOut,
      const SDLoc &dl) const;

  SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
      const SDLoc &dl) const;
  SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;

  SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
      CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins,
      const SDLoc &dl, SelectionDAG &DAG,
      SmallVectorImpl<SDValue> &InVals) const;
  SDValue FinishCall(CallingConv::ID CallConv, const SDLoc &dl,
      bool isTailCall, bool isVarArg, bool isPatchPoint,
      bool hasNest, SelectionDAG &DAG,
      SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
      SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
      SDValue &Callee, int SPDiff, unsigned NumBytes,
      const SmallVectorImpl<ISD::InputArg> &Ins,
      SmallVectorImpl<SDValue> &InVals,
      ImmutableCallSite *CS) const;

  SDValue
  LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins,
      const SDLoc &dl, SelectionDAG &DAG,
      SmallVectorImpl<SDValue> &InVals) const override;

  SDValue
  LowerCall(TargetLowering::CallLoweringInfo &CLI,
      SmallVectorImpl<SDValue> &InVals) const override;

  bool
  CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
      bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      LLVMContext &Context) const override;

  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SDLoc &dl, SelectionDAG &DAG) const override;

  SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
      SelectionDAG &DAG, SDValue ArgVal,
      const SDLoc &dl) const;

  SDValue LowerFormalArguments_Darwin(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
  SDValue LowerFormalArguments_64SVR4(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
  SDValue LowerFormalArguments_32SVR4(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;

  SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
      SDValue CallSeqStart,
      ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
      const SDLoc &dl) const;

  SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee,
      CallingConv::ID CallConv, bool isVarArg,
      bool isTailCall, bool isPatchPoint,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins,
      const SDLoc &dl, SelectionDAG &DAG,
      SmallVectorImpl<SDValue> &InVals,
      ImmutableCallSite *CS) const;
  SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee,
      CallingConv::ID CallConv, bool isVarArg,
      bool isTailCall, bool isPatchPoint,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins,
      const SDLoc &dl, SelectionDAG &DAG,
      SmallVectorImpl<SDValue> &InVals,
      ImmutableCallSite *CS) const;
  SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee,
      CallingConv::ID CallConv, bool isVarArg,
      bool isTailCall, bool isPatchPoint,
      const SmallVectorImpl<ISD::OutputArg> &Outs,
      const SmallVectorImpl<SDValue> &OutVals,
      const SmallVectorImpl<ISD::InputArg> &Ins,
      const SDLoc &dl, SelectionDAG &DAG,
      SmallVectorImpl<SDValue> &InVals,
      ImmutableCallSite *CS) const;

  SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;

  SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
  SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;

  /// ConvertSETCCToSubtract - looks at SETCC that compares ints. It replaces
  /// SETCC with integer subtraction when (1) there is a legal way of doing it
  /// and (2) keeping the result of the comparison in a GPR has a performance
  /// benefit.
  SDValue ConvertSETCCToSubtract(SDNode *N, DAGCombinerInfo &DCI) const;

  SDValue getSqrtEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
      int &RefinementSteps, bool &UseOneConstNR,
      bool Reciprocal) const override;
  SDValue getRecipEstimate(SDValue Operand, SelectionDAG &DAG, int Enabled,
      int &RefinementSteps) const override;
  unsigned combineRepeatedFPDivisors() const override;

  CCAssignFn *useFastISelCCs(unsigned Flag) const;

  SDValue
  combineElementTruncationToVectorTruncation(SDNode *N,
      DAGCombinerInfo &DCI) const;
};

FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
    const TargetLibraryInfo *LibInfo);

bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
    CCValAssign::LocInfo &LocInfo,
    ISD::ArgFlagsTy &ArgFlags,
    CCState &State);

bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
    MVT &LocVT,
    CCValAssign::LocInfo &LocInfo,
    ISD::ArgFlagsTy &ArgFlags,
    CCState &State);

bool
CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128(unsigned &ValNo, MVT &ValVT,
    MVT &LocVT,
    CCValAssign::LocInfo &LocInfo,
    ISD::ArgFlagsTy &ArgFlags,
    CCState &State);

bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
    MVT &LocVT,
    CCValAssign::LocInfo &LocInfo,
    ISD::ArgFlagsTy &ArgFlags,
    CCState &State);

#endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H