//===-- PPCISelLowering.h - PPC32 DAG Lowering Interface --------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that PPC uses to lower LLVM code into a
// selection DAG output.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H
#define LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H

#include "PPCInstrInfo.h"
#include "PPCRegisterInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"

namespace llvm {

  namespace PPCISD {

    enum NodeType : unsigned {
      // Start the numbering where the builtin ops and target ops leave off.
      FIRST_NUMBER = ISD::BUILTIN_OP_END,

      /// FSEL - Traditional three-operand fsel node.
      ///
      FSEL,

      /// FCFID - The FCFID instruction, taking an f64 operand and producing
      /// an f64 value containing the FP representation of the integer that
      /// was temporarily in the f64 operand.
      FCFID,

      /// Newer FCFID[US] integer-to-floating-point conversion instructions
      /// for unsigned integers and single-precision outputs.
      FCFIDU, FCFIDS, FCFIDUS,

      /// FCTI[D,W]Z - The FCTIDZ and FCTIWZ instructions, taking an f32 or
      /// f64 operand, producing an f64 value containing the integer
      /// representation of that FP value.
      FCTIDZ, FCTIWZ,

      /// Newer FCTI[D,W]UZ floating-point-to-integer conversion instructions
      /// for unsigned integers.
      FCTIDUZ, FCTIWUZ,

      /// Reciprocal estimate instructions (unary FP ops).
      FRE, FRSQRTE,

      // VMADDFP, VNMSUBFP - The VMADDFP and VNMSUBFP instructions, taking
      // three v4f32 operands and producing a v4f32 result.
      VMADDFP, VNMSUBFP,

      /// VPERM - The PPC VPERM instruction.
      ///
      VPERM,

      /// XXSPLT - The PPC VSX splat instructions.
      XXSPLT,

      /// XXINSERT - The PPC VSX insert instruction.
      XXINSERT,

      /// VECSHL - The PPC VSX shift-left instruction.
      VECSHL,

      /// The CMPB instruction (takes two operands of i32 or i64).
      CMPB,

      /// Hi/Lo - These represent the high and low 16-bit parts of a global
      /// address respectively.  These nodes have two operands, the first of
      /// which must be a TargetGlobalAddress, and the second of which must be
      /// a Constant.  Selected naively, these turn into 'lis G+C' and
      /// 'li G+C', though these are usually folded into other nodes.
      Hi, Lo,
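      // (Illustrative only: when folded with an add, a Hi/Lo pair typically
      // becomes something like "lis r3, sym@ha" followed by
      // "addi r3, r3, sym@l", where "sym" is a hypothetical global.)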

      /// The following two target-specific nodes are used for calls through
      /// function pointers in the 64-bit SVR4 ABI.

      /// OPRC, CHAIN = DYNALLOC(CHAIN, NEGSIZE, FRAME_INDEX)
      /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex
      /// to compute an allocation on the stack.
      DYNALLOC,

      /// This instruction is lowered in PPCRegisterInfo::eliminateFrameIndex
      /// to compute an offset from native SP to the address of the most
      /// recent dynamic alloca.
      DYNAREAOFFSET,

      /// GlobalBaseReg - On Darwin, this node represents the result of the
      /// mflr at function entry, used for PIC code.
      GlobalBaseReg,

      /// These nodes represent the 32-bit PPC shifts that operate on 6-bit
      /// shift amounts.  These nodes are generated by the multi-precision
      /// shift code.
      SRL, SRA, SHL,

      /// The combination of sra[wd]i and addze used to implement signed
      /// integer division by a power of 2.  The first operand is the
      /// dividend, and the second is the constant shift amount (representing
      /// the log2 of the divisor).
      SRA_ADDZE,
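      // (Illustrative only: for a 32-bit signed divide of r3 by 4, this pair
      // typically selects to "srawi r3, r3, 2" followed by "addze r3, r3",
      // which rounds the shifted result toward zero.)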

      /// CALL - A direct function call.
      /// CALL_NOP is a call with the special NOP which follows 64-bit
      /// SVR4 calls.
      CALL, CALL_NOP,

      /// CHAIN,FLAG = MTCTR(VAL, CHAIN[, INFLAG]) - Directly corresponds to a
      /// MTCTR instruction.
      MTCTR,

      /// CHAIN,FLAG = BCTRL(CHAIN, INFLAG) - Directly corresponds to a
      /// BCTRL instruction.
      BCTRL,

      /// CHAIN,FLAG = BCTRL(CHAIN, ADDR, INFLAG) - The combination of a bctrl
      /// instruction and the TOC reload required on SVR4 PPC64.
      BCTRL_LOAD_TOC,

      /// Return with a flag operand, matched by 'blr'.
      RET_FLAG,

      /// R32 = MFOCRF(CRREG, INFLAG) - Represents the MFOCRF instruction.
      /// This copies the bits corresponding to the specified CRREG into the
      /// resultant GPR.  Bits corresponding to other CR regs are undefined.
      MFOCRF,

      /// Direct move from a VSX register to a GPR.
      MFVSR,

      /// Direct move from a GPR to a VSX register (algebraic).
      MTVSRA,

      /// Direct move from a GPR to a VSX register (zero).
      MTVSRZ,

      /// Extract a subvector from a signed integer vector and convert to FP.
      /// It is primarily used to convert a (widened) illegal integer vector
      /// type to a legal floating-point vector type.
      /// For example: v2i32 -> widened to v4i32 -> v2f64
      SINT_VEC_TO_FP,

      /// Extract a subvector from an unsigned integer vector and convert to
      /// FP.  As with SINT_VEC_TO_FP, used for converting illegal types.
      UINT_VEC_TO_FP,

      // FIXME: Remove these once the ANDI glue bug is fixed:
      /// i1 = ANDIo_1_[EQ|GT]_BIT(i32 or i64 x) - Represents the result of
      /// the eq or gt bit of CR0 after executing andi. x, 1.  This is used to
      /// implement truncation of i32 or i64 to i1.
      ANDIo_1_EQ_BIT, ANDIo_1_GT_BIT,

      // READ_TIME_BASE - A read of the 64-bit time-base register on a 32-bit
      // target (returns (Lo, Hi)).  It takes a chain operand.
      READ_TIME_BASE,

      // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
      EH_SJLJ_SETJMP,

      // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
      EH_SJLJ_LONGJMP,

      /// RESVEC = VCMP(LHS, RHS, OPC) - Represents one of the altivec VCMP*
      /// instructions.  For lack of a better number, we use the opcode number
      /// encoding for the OPC field to identify the compare.  For example,
      /// 838 is VCMPGTSH.
      VCMP,

      /// RESVEC, OUTFLAG = VCMPo(LHS, RHS, OPC) - Represents one of the
      /// altivec VCMP*o instructions.  For lack of a better number, we use
      /// the opcode number encoding for the OPC field to identify the
      /// compare.  For example, 838 is VCMPGTSH.
      VCMPo,

      /// CHAIN = COND_BRANCH CHAIN, CRRC, OPC, DESTBB [, INFLAG] - This
      /// corresponds to the COND_BRANCH pseudo instruction.  CRRC is the
      /// condition register to branch on, OPC is the branch opcode to use
      /// (e.g. PPC::BLE), DESTBB is the destination block to branch to, and
      /// INFLAG is an optional input flag argument.
      COND_BRANCH,

      /// CHAIN = BDNZ CHAIN, DESTBB - These are used to create counter-based
      /// loops.
      BDNZ, BDZ,

      /// F8RC = FADDRTZ F8RC, F8RC - This is an FADD done with rounding
      /// towards zero.  Used only as part of the long double-to-int
      /// conversion sequence.
      FADDRTZ,

      /// F8RC = MFFS - This moves the FPSCR (not modeled) into the register.
      MFFS,

      /// TC_RETURN - A tail call return.
      ///   operand #0 chain
      ///   operand #1 callee (register or absolute)
      ///   operand #2 stack adjustment
      ///   operand #3 optional in flag
      TC_RETURN,

      /// ch, gl = CR6[UN]SET ch, inglue - Toggle CR bit 6 for SVR4 vararg
      /// calls.
      CR6SET,
      CR6UNSET,

      /// GPRC = address of _GLOBAL_OFFSET_TABLE_.  Used by initial-exec TLS
      /// on PPC32.
      PPC32_GOT,

      /// GPRC = address of _GLOBAL_OFFSET_TABLE_.  Used by general-dynamic
      /// and local-dynamic TLS on PPC32.
      PPC32_PICGOT,

      /// G8RC = ADDIS_GOT_TPREL_HA %X2, Symbol - Used by the initial-exec
      /// TLS model, produces an ADDIS8 instruction that adds the GOT
      /// base to sym\@got\@tprel\@ha.
      ADDIS_GOT_TPREL_HA,

      /// G8RC = LD_GOT_TPREL_L Symbol, G8RReg - Used by the initial-exec
      /// TLS model, produces a LD instruction with base register G8RReg
      /// and offset sym\@got\@tprel\@l.  This completes the addition that
      /// finds the offset of "sym" relative to the thread pointer.
      LD_GOT_TPREL_L,

      /// G8RC = ADD_TLS G8RReg, Symbol - Used by the initial-exec TLS
      /// model, produces an ADD instruction that adds the contents of
      /// G8RReg to the thread pointer.  Symbol contains a relocation
      /// sym\@tls which is to be replaced by the thread pointer and
      /// identifies to the linker that the instruction is part of a
      /// TLS sequence.
      ADD_TLS,
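      // (Illustrative only: together, the three initial-exec nodes above
      // typically select to a sequence such as
      //   addis r4, r2, sym@got@tprel@ha
      //   ld    r4, sym@got@tprel@l(r4)
      //   add   r3, r4, sym@tls
      // on 64-bit ELF targets; the register choices here are hypothetical.)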

      /// G8RC = ADDIS_TLSGD_HA %X2, Symbol - For the general-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds the GOT base
      /// register to sym\@got\@tlsgd\@ha.
      ADDIS_TLSGD_HA,

      /// %X3 = ADDI_TLSGD_L G8RReg, Symbol - For the general-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym\@got\@tlsgd\@l and stores the result in X3.  Hidden by
      /// ADDIS_TLSGD_L_ADDR until after register assignment.
      ADDI_TLSGD_L,

      /// %X3 = GET_TLS_ADDR %X3, Symbol - For the general-dynamic TLS
      /// model, produces a call to __tls_get_addr(sym\@tlsgd).  Hidden by
      /// ADDIS_TLSGD_L_ADDR until after register assignment.
      GET_TLS_ADDR,

      /// G8RC = ADDI_TLSGD_L_ADDR G8RReg, Symbol, Symbol - Op that
      /// combines ADDI_TLSGD_L and GET_TLS_ADDR until expansion following
      /// register assignment.
      ADDI_TLSGD_L_ADDR,
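      // (Illustrative only: the general-dynamic nodes above correspond
      // roughly to
      //   addis r3, r2, sym@got@tlsgd@ha
      //   addi  r3, r3, sym@got@tlsgd@l
      //   bl    __tls_get_addr(sym@tlsgd)
      // with the resulting address returned in X3.)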

      /// G8RC = ADDIS_TLSLD_HA %X2, Symbol - For the local-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds the GOT base
      /// register to sym\@got\@tlsld\@ha.
      ADDIS_TLSLD_HA,

      /// %X3 = ADDI_TLSLD_L G8RReg, Symbol - For the local-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym\@got\@tlsld\@l and stores the result in X3.  Hidden by
      /// ADDIS_TLSLD_L_ADDR until after register assignment.
      ADDI_TLSLD_L,

      /// %X3 = GET_TLSLD_ADDR %X3, Symbol - For the local-dynamic TLS
      /// model, produces a call to __tls_get_addr(sym\@tlsld).  Hidden by
      /// ADDIS_TLSLD_L_ADDR until after register assignment.
      GET_TLSLD_ADDR,

      /// G8RC = ADDI_TLSLD_L_ADDR G8RReg, Symbol, Symbol - Op that
      /// combines ADDI_TLSLD_L and GET_TLSLD_ADDR until expansion
      /// following register assignment.
      ADDI_TLSLD_L_ADDR,

      /// G8RC = ADDIS_DTPREL_HA %X3, Symbol - For the local-dynamic TLS
      /// model, produces an ADDIS8 instruction that adds X3 to
      /// sym\@dtprel\@ha.
      ADDIS_DTPREL_HA,

      /// G8RC = ADDI_DTPREL_L G8RReg, Symbol - For the local-dynamic TLS
      /// model, produces an ADDI8 instruction that adds G8RReg to
      /// sym\@got\@dtprel\@l.
      ADDI_DTPREL_L,

      /// VRRC = VADD_SPLAT Elt, EltSize - Temporary node to be expanded
      /// during instruction selection to optimize a BUILD_VECTOR into
      /// operations on splats.  This is necessary to avoid losing these
      /// optimizations due to constant folding.
      VADD_SPLAT,

      /// CHAIN = SC CHAIN, Imm128 - System call.  The 7-bit unsigned
      /// operand identifies the operating system entry point.
      SC,

      /// CHAIN = CLRBHRB CHAIN - Clear branch history rolling buffer.
      CLRBHRB,

      /// GPRC, CHAIN = MFBHRBE CHAIN, Entry, Dummy - Move from branch
      /// history rolling buffer entry.
      MFBHRBE,

      /// CHAIN = RFEBB CHAIN, State - Return from event-based branch.
      RFEBB,

      /// VSRC, CHAIN = XXSWAPD CHAIN, VSRC - Occurs only for little
      /// endian.  Maps to an xxswapd instruction that corrects an lxvd2x
      /// or stxvd2x instruction.  The chain is necessary because the
      /// sequence replaces a load and needs to provide the same number
      /// of outputs.
      XXSWAPD,

      /// An SDNode for swaps that are not associated with any loads/stores
      /// and thereby have no chain.
      SWAP_NO_CHAIN,

      /// QVFPERM = This corresponds to the QPX qvfperm instruction.
      QVFPERM,

      /// QVGPCI = This corresponds to the QPX qvgpci instruction.
      QVGPCI,

      /// QVALIGNI = This corresponds to the QPX qvaligni instruction.
      QVALIGNI,

      /// QVESPLATI = This corresponds to the QPX qvesplati instruction.
      QVESPLATI,

      /// QBFLT = Access the underlying QPX floating-point boolean
      /// representation.
      QBFLT,

      /// CHAIN = STBRX CHAIN, GPRC, Ptr, Type - This is a
      /// byte-swapping store instruction.  It byte-swaps the low "Type" bits
      /// of the GPRC input, then stores it through Ptr.  Type can be either
      /// i16 or i32.
      STBRX = ISD::FIRST_TARGET_MEMORY_OPCODE,

      /// GPRC, CHAIN = LBRX CHAIN, Ptr, Type - This is a
      /// byte-swapping load instruction.  It loads "Type" bits, byte-swaps
      /// them, then puts them in the bottom bits of the GPRC.  Type can be
      /// either i16 or i32.
      LBRX,
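      // (Illustrative only: an i32 LBRX typically selects to
      // "lwbrx r3, 0, r4", and an i16 STBRX to "sthbrx r3, 0, r4"; the
      // registers here are hypothetical.)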

      /// STFIWX - The STFIWX instruction.  The first operand is an input
      /// token chain, then an f64 value to store, then an address to store
      /// it to.
      STFIWX,

      /// GPRC, CHAIN = LFIWAX CHAIN, Ptr - This is a floating-point
      /// load which sign-extends from a 32-bit integer value into the
      /// destination 64-bit register.
      LFIWAX,

      /// GPRC, CHAIN = LFIWZX CHAIN, Ptr - This is a floating-point
      /// load which zero-extends from a 32-bit integer value into the
      /// destination 64-bit register.
      LFIWZX,

      /// VSRC, CHAIN = LXVD2X_LE CHAIN, Ptr - Occurs only for little endian.
      /// Maps directly to an lxvd2x instruction that will be followed by
      /// an xxswapd.
      LXVD2X,

      /// CHAIN = STXVD2X CHAIN, VSRC, Ptr - Occurs only for little endian.
      /// Maps directly to an stxvd2x instruction that will be preceded by
      /// an xxswapd.
      STXVD2X,
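      // (Illustrative only: on little-endian targets a v2f64 load is emitted
      // roughly as "lxvd2x vs0, 0, r3" followed by "xxswapd vs0, vs0"; the
      // store is the mirror image, with the xxswapd coming first.)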

      /// QBRC, CHAIN = QVLFSb CHAIN, Ptr
      /// The 4xf32 load used for v4i1 constants.
      QVLFSb,

      /// GPRC = TOC_ENTRY GA, TOC
      /// Loads the entry for GA from the TOC, where the TOC base is given by
      /// the last operand.
      TOC_ENTRY
    };

  } // end namespace PPCISD

  /// Define some predicates that are used for node matching.
  namespace PPC {

    /// isVPKUHUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUHUM instruction.
    bool isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVPKUWUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUWUM instruction.
    bool isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVPKUDUMShuffleMask - Return true if this is the shuffle mask for a
    /// VPKUDUM instruction.
    bool isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                              SelectionDAG &DAG);

    /// isVMRGLShuffleMask - Return true if this is a shuffle mask suitable
    /// for a VMRGL* instruction with the specified unit size (1, 2 or 4
    /// bytes).
    bool isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVMRGHShuffleMask - Return true if this is a shuffle mask suitable
    /// for a VMRGH* instruction with the specified unit size (1, 2 or 4
    /// bytes).
    bool isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                            unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVMRGEOShuffleMask - Return true if this is a shuffle mask suitable
    /// for a VMRGEW or VMRGOW instruction.
    bool isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                             unsigned ShuffleKind, SelectionDAG &DAG);

    /// isVSLDOIShuffleMask - If this is a vsldoi shuffle mask, return the
    /// shift amount, otherwise return -1.
    int isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
                            SelectionDAG &DAG);

    /// isSplatShuffleMask - Return true if the specified VECTOR_SHUFFLE
    /// operand specifies a splat of a single element that is suitable for
    /// input to VSPLTB/VSPLTH/VSPLTW.
    bool isSplatShuffleMask(ShuffleVectorSDNode *N, unsigned EltSize);

    /// isXXINSERTWMask - Return true if this VECTOR_SHUFFLE can be handled
    /// by the XXINSERTW instruction introduced in ISA 3.0.  This is
    /// essentially any shuffle of v4f32/v4i32 vectors that just inserts one
    /// element from one vector into the other.  This function also sets two
    /// output parameters: how much the source vector needs to be shifted,
    /// and which byte number must be specified for the instruction to put
    /// the element in the desired location of the target vector.
    bool isXXINSERTWMask(ShuffleVectorSDNode *N, unsigned &ShiftElts,
                         unsigned &InsertAtByte, bool &Swap, bool IsLE);
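    // (Illustrative only: a v4i32 shuffle mask such as <0, 1, 5, 3>, which
    // takes exactly one element from the second source vector, is the kind
    // of pattern this predicate is intended to match.)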

    /// getVSPLTImmediate - Return the appropriate VSPLT* immediate to splat
    /// the specified isSplatShuffleMask VECTOR_SHUFFLE mask.
    unsigned getVSPLTImmediate(SDNode *N, unsigned EltSize, SelectionDAG &DAG);

    /// get_VSPLTI_elt - If this is a build_vector of constants which can be
    /// formed by using a vspltis[bhw] instruction of the specified element
    /// size, return the constant being splatted.  The ByteSize field
    /// indicates the number of bytes of each element [124] -> [bhw].
    SDValue get_VSPLTI_elt(SDNode *N, unsigned ByteSize, SelectionDAG &DAG);
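    // (Illustrative only: a build_vector of sixteen i8 constants all equal
    // to 4 can be formed with "vspltisb 4", so with ByteSize == 1 this
    // would return the splatted constant 4.)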

    /// If this is a qvaligni shuffle mask, return the shift
    /// amount, otherwise return -1.
    int isQVALIGNIShuffleMask(SDNode *N);

  } // end namespace PPC

  class PPCTargetLowering : public TargetLowering {
    const PPCSubtarget &Subtarget;

  public:
    explicit PPCTargetLowering(const PPCTargetMachine &TM,
                               const PPCSubtarget &STI);

    /// getTargetNodeName() - This method returns the name of a target
    /// specific DAG node.
    const char *getTargetNodeName(unsigned Opcode) const override;

    /// getPreferredVectorAction - The code we generate when vector types are
    /// legalized by promoting the integer element type is often much worse
    /// than the code we generate if we widen the type for applicable vector
    /// types.  The issue with promoting is that the vector is scalarized,
    /// the individual elements are promoted, and then the vector is rebuilt.
    /// So say we load a pair of v4i8's and shuffle them.  This turns into a
    /// mess of 8 extending loads, moves back into VSRs (or memory ops if we
    /// don't have moves), and then the VPERM for the shuffle.  All in all, a
    /// very slow sequence.
    TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(EVT VT)
      const override {
      if (VT.getVectorElementType().getSizeInBits() % 8 == 0)
        return TypeWidenVector;
      return TargetLoweringBase::getPreferredVectorAction(VT);
    }

    bool useSoftFloat() const override;

    MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
      return MVT::i32;
    }

    bool isCheapToSpeculateCttz() const override {
      return true;
    }

    bool isCheapToSpeculateCtlz() const override {
      return true;
    }

    bool supportSplitCSR(MachineFunction *MF) const override {
      return
        MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
        MF->getFunction()->hasFnAttribute(Attribute::NoUnwind);
    }

    void initializeSplitCSR(MachineBasicBlock *Entry) const override;

    void insertCopiesSplitCSR(
      MachineBasicBlock *Entry,
      const SmallVectorImpl<MachineBasicBlock *> &Exits) const override;

    /// getSetCCResultType - Return the ISD::SETCC ValueType.
    EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                           EVT VT) const override;

    /// Return true if the target always benefits from combining into FMA for
    /// a given value type.  This must typically return false on targets
    /// where FMA takes more cycles to execute than FADD.
    bool enableAggressiveFMAFusion(EVT VT) const override;

    /// getPreIndexedAddressParts - returns true by value, base pointer and
    /// offset pointer and addressing mode by reference if the node's address
    /// can be legally represented as a pre-indexed load / store address.
    bool getPreIndexedAddressParts(SDNode *N, SDValue &Base,
                                   SDValue &Offset,
                                   ISD::MemIndexedMode &AM,
                                   SelectionDAG &DAG) const override;

    /// SelectAddressRegReg - Given the specified address, check to see if it
    /// can be represented as an indexed [r+r] operation.  Returns false if
    /// it can be more efficiently represented with [r+imm].
    bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                             SelectionDAG &DAG) const;

    /// SelectAddressRegImm - Returns true if the address N can be
    /// represented by a base register plus a signed 16-bit displacement
    /// [r+imm], and if it is not better represented as reg+reg.  If Aligned
    /// is true, only accept displacements suitable for STD and friends,
    /// i.e. multiples of 4.
    bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                             SelectionDAG &DAG, bool Aligned) const;
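    // (Illustrative only: with Aligned set, an access at offset 6 from a
    // base register cannot use the DS-form displacement of STD/LD, which
    // must be a multiple of 4, and is selected as an [r+r] access instead.)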

    /// SelectAddressRegRegOnly - Given the specified address, force it to be
    /// represented as an indexed [r+r] operation.
    bool SelectAddressRegRegOnly(SDValue N, SDValue &Base, SDValue &Index,
                                 SelectionDAG &DAG) const;

    Sched::Preference getSchedulingPreference(SDNode *N) const override;

    /// LowerOperation - Provide custom lowering hooks for some operations.
    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;

    /// ReplaceNodeResults - Replace the results of a node with an illegal
    /// result type with new values built out of custom code.
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    SDValue expandVSXLoadForLE(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue expandVSXStoreForLE(SDNode *N, DAGCombinerInfo &DCI) const;

    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

    SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
                          std::vector<SDNode *> *Created) const override;

    unsigned getRegisterByName(const char *RegName, EVT VT,
                               SelectionDAG &DAG) const override;

    void computeKnownBitsForTargetNode(const SDValue Op,
                                       APInt &KnownZero,
                                       APInt &KnownOne,
                                       const SelectionDAG &DAG,
                                       unsigned Depth = 0) const override;

    unsigned getPrefLoopAlignment(MachineLoop *ML) const override;

    bool shouldInsertFencesForAtomic(const Instruction *I) const override {
      return true;
    }

    Instruction *emitLeadingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                  bool IsStore, bool IsLoad) const override;
    Instruction *emitTrailingFence(IRBuilder<> &Builder, AtomicOrdering Ord,
                                   bool IsStore, bool IsLoad) const override;

    MachineBasicBlock *
    EmitInstrWithCustomInserter(MachineInstr &MI,
                                MachineBasicBlock *MBB) const override;
    MachineBasicBlock *EmitAtomicBinary(MachineInstr &MI,
                                        MachineBasicBlock *MBB,
                                        unsigned AtomicSize,
                                        unsigned BinOpcode) const;
    MachineBasicBlock *EmitPartwordAtomicBinary(MachineInstr &MI,
                                                MachineBasicBlock *MBB,
                                                bool is8bit,
                                                unsigned Opcode) const;

    MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
                                        MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
                                         MachineBasicBlock *MBB) const;

    ConstraintType getConstraintType(StringRef Constraint) const override;

    /// Examine constraint string and operand type and determine a weight
    /// value.  The operand object must already have been set up with the
    /// operand type.
    ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    /// getByValTypeAlignment - Return the desired alignment for ByVal
    /// aggregate function arguments in the caller parameter area.  This is
    /// the actual alignment, not its logarithm.
    unsigned getByValTypeAlignment(Type *Ty,
                                   const DataLayout &DL) const override;

    /// LowerAsmOperandForConstraint - Lower the specified operand into the
    /// Ops vector.  If it is invalid, don't add anything to Ops.
    void LowerAsmOperandForConstraint(SDValue Op,
                                      std::string &Constraint,
                                      std::vector<SDValue> &Ops,
                                      SelectionDAG &DAG) const override;

    unsigned
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "es")
        return InlineAsm::Constraint_es;
      else if (ConstraintCode == "o")
        return InlineAsm::Constraint_o;
      else if (ConstraintCode == "Q")
        return InlineAsm::Constraint_Q;
      else if (ConstraintCode == "Z")
        return InlineAsm::Constraint_Z;
      else if (ConstraintCode == "Zy")
        return InlineAsm::Constraint_Zy;
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }

    /// isLegalAddressingMode - Return true if the addressing mode
    /// represented by AM is legal for this target, for a load/store of the
    /// specified type.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS) const override;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    /// isLegalAddImmediate - Return true if the specified immediate is a
    /// legal add immediate, that is, the target has add instructions which
    /// can add a register and the immediate without having to materialize
    /// the immediate into a register.
    bool isLegalAddImmediate(int64_t Imm) const override;

    /// isTruncateFree - Return true if it's free to truncate a value of
    /// type Ty1 to type Ty2.  E.g. on PPC it's free to truncate an i64 value
    /// in register X1 to i32 by referencing its sub-register R1.
    bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
    bool isTruncateFree(EVT VT1, EVT VT2) const override;

    bool isZExtFree(SDValue Val, EVT VT2) const override;

    bool isFPExtFree(EVT VT) const override;

    /// \brief Returns true if it is beneficial to convert a load of a
    /// constant to just the constant itself.
    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                           Type *Ty) const override;

    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    bool getTgtMemIntrinsic(IntrinsicInfo &Info,
                            const CallInst &I,
                            unsigned Intrinsic) const override;

    /// getOptimalMemOpType - Returns the target-specific optimal type for
    /// load and store operations as a result of memset, memcpy, and memmove
    /// lowering.  If DstAlign is zero, the destination alignment can satisfy
    /// any constraint.  Similarly, if SrcAlign is zero, there is no need to
    /// check it against the alignment requirement, probably because the
    /// source does not need to be loaded.  If 'IsMemset' is true, this is
    /// expanding a memset.  If 'ZeroMemset' is true, it is a memset of zero.
    /// 'MemcpyStrSrc' indicates whether the memcpy source is constant, so it
    /// does not need to be loaded.
    /// It returns EVT::Other if the type should be determined using generic
    /// target-independent logic.
    EVT
    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
                        bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
                        MachineFunction &MF) const override;

    /// Is unaligned memory access allowed for the given type, and is it fast
    /// relative to software emulation?
    bool allowsMisalignedMemoryAccesses(EVT VT,
                                        unsigned AddrSpace,
                                        unsigned Align = 1,
                                        bool *Fast = nullptr) const override;

    /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is
    /// faster than a pair of fmul and fadd instructions.  fmuladd intrinsics
    /// will be expanded to FMAs when this method returns true; otherwise
    /// fmuladd is expanded to fmul + fadd.
    bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;

    const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;

    // Should we expand the build vector with shuffles?
    bool
    shouldExpandBuildVectorWithShuffles(EVT VT,
                                        unsigned DefinedValues) const override;

    /// createFastISel - This method returns a target-specific FastISel
    /// object, or null if the target does not support "fast" instruction
    /// selection.
    FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                             const TargetLibraryInfo *LibInfo) const override;

    /// \brief Returns true if an argument of type Ty needs to be passed in a
    /// contiguous block of registers in calling convention CallConv.
    bool functionArgumentNeedsConsecutiveRegisters(
      Type *Ty, CallingConv::ID CallConv, bool isVarArg) const override {
      // We support any array type as a "consecutive" block in the parameter
      // save area.  The element type defines the alignment requirement and
      // whether the argument should go in GPRs, FPRs, or VRs if available.
      //
      // Note that clang uses this capability both to implement the ELFv2
      // homogeneous float/vector aggregate ABI, and to avoid having to use
      // "byval" when passing aggregates that might fully fit in registers.
      return Ty->isArrayTy();
    }

    /// If a physical register, this returns the register that receives the
    /// exception address on entry to an EH pad.
    unsigned
    getExceptionPointerRegister(const Constant *PersonalityFn) const override;

    /// If a physical register, this returns the register that receives the
    /// exception typeid on entry to a landing pad.
    unsigned
    getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

    /// Override to support customized stack guard loading.
    bool useLoadStackGuardNode() const override;
    void insertSSPDeclarations(Module &M) const override;

  private:
    struct ReuseLoadInfo {
      SDValue Ptr;
      SDValue Chain;
      SDValue ResChain;
      MachinePointerInfo MPI;
      bool IsInvariant;
      unsigned Alignment;
      AAMDNodes AAInfo;
      const MDNode *Ranges;

      ReuseLoadInfo() : IsInvariant(false), Alignment(0), Ranges(nullptr) {}
    };

    bool canReuseLoadAddress(SDValue Op, EVT MemVT, ReuseLoadInfo &RLI,
                             SelectionDAG &DAG,
                             ISD::LoadExtType ET = ISD::NON_EXTLOAD) const;
    void spliceIntoChain(SDValue ResChain, SDValue NewResChain,
                         SelectionDAG &DAG) const;

    void LowerFP_TO_INTForReuse(SDValue Op, ReuseLoadInfo &RLI,
                                SelectionDAG &DAG, const SDLoc &dl) const;
    SDValue LowerFP_TO_INTDirectMove(SDValue Op, SelectionDAG &DAG,
                                     const SDLoc &dl) const;
    SDValue LowerINT_TO_FPDirectMove(SDValue Op, SelectionDAG &DAG,
                                     const SDLoc &dl) const;

    SDValue getFramePointerFrameIndex(SelectionDAG &DAG) const;
    SDValue getReturnAddrFrameIndex(SelectionDAG &DAG) const;

    bool
    IsEligibleForTailCallOptimization(SDValue Callee,
                                      CallingConv::ID CalleeCC,
                                      bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SelectionDAG &DAG) const;

    bool
    IsEligibleForTailCallOptimization_64SVR4(
                                    SDValue Callee,
                                    CallingConv::ID CalleeCC,
                                    ImmutableCallSite *CS,
                                    bool isVarArg,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SelectionDAG &DAG) const;

    SDValue EmitTailCallLoadFPAndRetAddr(SelectionDAG &DAG, int SPDiff,
                                         SDValue Chain, SDValue &LROpOut,
                                         SDValue &FPOpOut,
                                         const SDLoc &dl) const;

    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVACOPY(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSTACKRESTORE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGET_DYNAMIC_AREA_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_DWARF_CFA(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG,
                           const SDLoc &dl) const;
    SDValue LowerINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSHL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRL_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSRA_PARTS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerVectorLoad(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVectorStore(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals) const;
    SDValue FinishCall(CallingConv::ID CallConv, const SDLoc &dl,
                       bool isTailCall, bool isVarArg, bool isPatchPoint,
                       bool hasNest, SelectionDAG &DAG,
                       SmallVector<std::pair<unsigned, SDValue>, 8> &RegsToPass,
                       SDValue InFlag, SDValue Chain, SDValue CallSeqStart,
                       SDValue &Callee, int SPDiff, unsigned NumBytes,
                       const SmallVectorImpl<ISD::InputArg> &Ins,
                       SmallVectorImpl<SDValue> &InVals,
                       ImmutableCallSite *CS) const;

    SDValue
    LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                         bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         const SDLoc &dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const override;

    SDValue
    LowerCall(TargetLowering::CallLoweringInfo &CLI,
              SmallVectorImpl<SDValue> &InVals) const override;

    bool
    CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                   bool isVarArg,
                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                   LLVMContext &Context) const override;

    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                        bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SDLoc &dl, SelectionDAG &DAG) const override;

    SDValue extendArgForPPC64(ISD::ArgFlagsTy Flags, EVT ObjectVT,
                              SelectionDAG &DAG, SDValue ArgVal,
                              const SDLoc &dl) const;

    SDValue LowerFormalArguments_Darwin(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerFormalArguments_64SVR4(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;
    SDValue LowerFormalArguments_32SVR4(
      SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
      const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
      SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const;

    SDValue createMemcpyOutsideCallSeq(SDValue Arg, SDValue PtrOff,
                                       SDValue CallSeqStart,
                                       ISD::ArgFlagsTy Flags,
                                       SelectionDAG &DAG,
                                       const SDLoc &dl) const;

    SDValue LowerCall_Darwin(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool isTailCall, bool isPatchPoint,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             ImmutableCallSite *CS) const;
    SDValue LowerCall_64SVR4(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool isTailCall, bool isPatchPoint,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             ImmutableCallSite *CS) const;
    SDValue LowerCall_32SVR4(SDValue Chain, SDValue Callee,
                             CallingConv::ID CallConv, bool isVarArg,
                             bool isTailCall, bool isPatchPoint,
                             const SmallVectorImpl<ISD::OutputArg> &Outs,
                             const SmallVectorImpl<SDValue> &OutVals,
                             const SmallVectorImpl<ISD::InputArg> &Ins,
                             const SDLoc &dl, SelectionDAG &DAG,
                             SmallVectorImpl<SDValue> &InVals,
                             ImmutableCallSite *CS) const;

    SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;

    SDValue DAGCombineExtBoolTrunc(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue DAGCombineBuildVector(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue DAGCombineTruncBoolExt(SDNode *N, DAGCombinerInfo &DCI) const;
    SDValue combineFPToIntToFP(SDNode *N, DAGCombinerInfo &DCI) const;

    SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI,
                             unsigned &RefinementSteps,
                             bool &UseOneConstNR) const override;
    SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
                             unsigned &RefinementSteps) const override;
    unsigned combineRepeatedFPDivisors() const override;

    CCAssignFn *useFastISelCCs(unsigned Flag) const;
  };

  namespace PPC {

    FastISel *createFastISel(FunctionLoweringInfo &FuncInfo,
                             const TargetLibraryInfo *LibInfo);

  } // end namespace PPC

  bool CC_PPC32_SVR4_Custom_Dummy(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
                                  CCValAssign::LocInfo &LocInfo,
                                  ISD::ArgFlagsTy &ArgFlags,
                                  CCState &State);

  bool CC_PPC32_SVR4_Custom_AlignArgRegs(unsigned &ValNo, MVT &ValVT,
                                         MVT &LocVT,
                                         CCValAssign::LocInfo &LocInfo,
                                         ISD::ArgFlagsTy &ArgFlags,
                                         CCState &State);

  bool CC_PPC32_SVR4_Custom_AlignFPArgRegs(unsigned &ValNo, MVT &ValVT,
                                           MVT &LocVT,
                                           CCValAssign::LocInfo &LocInfo,
                                           ISD::ArgFlagsTy &ArgFlags,
                                           CCState &State);

} // end namespace llvm

#endif // LLVM_LIB_TARGET_POWERPC_PPCISELLOWERING_H